Validate slabs on oops

I am not sure if this is a good thing to have, but if there is slab
corruption leading to a failure, then this patch will detect corrupted
objects after the oops occurred.

[This should not go into mm. Just for discussion]

Signed-off-by: Christoph Lameter

---
 mm/slub.c |   87 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 86 insertions(+), 1 deletion(-)

Index: linux-2.6.21-rc5-mm4/mm/slub.c
===================================================================
--- linux-2.6.21-rc5-mm4.orig/mm/slub.c	2007-04-08 18:12:23.000000000 -0700
+++ linux-2.6.21-rc5-mm4/mm/slub.c	2007-04-08 18:44:39.000000000 -0700
@@ -2310,6 +2310,75 @@ void *__kmalloc_node_track_caller(size_t
 
 #ifdef CONFIG_SYSFS
 
+static int validate_slab(struct kmem_cache *s, struct page *page)
+{
+	void *p;
+	void *addr = page_address(page);
+	unsigned long map[BITS_TO_LONGS(s->objects)];
+
+	if (!check_slab(s, page) ||
+			!on_freelist(s, page, NULL))
+		return 0;
+
+	/* Now we know that a valid freelist exists */
+	bitmap_zero(map, s->objects);
+
+	for (p = page->freelist; p; p = get_freepointer(s, p)) {
+		set_bit((p - addr) / s->size, map);
+		if (!check_object(s, page, p, 0))
+			return 0;
+	}
+
+	for (p = addr; p < addr + s->objects * s->size; p += s->size)
+		if (!test_bit((p - addr) / s->size, map))
+			if (!check_object(s, page, p, 1))
+				return 0;
+	return 1;
+}
+
+static void validate_slab_slab(struct kmem_cache *s, struct page *page)
+{
+	if (slab_trylock(page)) {
+		validate_slab(s, page);
+		slab_unlock(page);
+	} else
+		printk(KERN_INFO "Skipped busy slab %p\n", page);
+}
+
+static int validate_slab_node(struct kmem_cache *s, struct kmem_cache_node *n)
+{
+	int count = 0;
+	struct page *page;
+	unsigned long flags;
+
+	spin_lock_irqsave(&n->list_lock, flags);
+	list_for_each_entry(page, &n->partial, lru) {
+		validate_slab_slab(s, page);
+		count++;
+	}
+	list_for_each_entry(page, &n->full, lru) {
+		validate_slab_slab(s, page);
+		count++;
+	}
+	spin_unlock_irqrestore(&n->list_lock, flags);
+	return count;
+}
+
+static void validate_slab_cache(struct kmem_cache *s)
+{
+	int node;
+	int count = 0;
+
+	flush_all(s);
+	for_each_online_node(node) {
+		struct kmem_cache_node *n = get_node(s, node);
+
+		count += validate_slab_node(s, n);
+	}
+	printk(KERN_INFO "SLUB: Validated %d slabs in '%s'\n",
+		count, s->name);
+}
+
 static unsigned long count_partial(struct kmem_cache_node *n)
 {
 	unsigned long flags;
@@ -2432,7 +2501,6 @@ struct slab_attribute {
 	static struct slab_attribute _name##_attr =  \
 	__ATTR(_name, 0644, _name##_show, _name##_store)
 
-
 static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
 {
 	return sprintf(buf, "%d\n", s->size);
@@ -2639,6 +2707,22 @@ static ssize_t store_user_store(struct k
 }
 SLAB_ATTR(store_user);
 
+static ssize_t validate_show(struct kmem_cache *s, char *buf)
+{
+	return 0;
+}
+
+static ssize_t validate_store(struct kmem_cache *s,
+				const char *buf, size_t length)
+{
+	if (buf[0] == '1')
+		validate_slab_cache(s);
+	else
+		return -EINVAL;
+	return length;
+}
+SLAB_ATTR(validate);
+
 #ifdef CONFIG_NUMA
 static ssize_t defrag_ratio_show(struct kmem_cache *s, char *buf)
 {
@@ -2678,6 +2762,7 @@ static struct attribute * slab_attrs[] =
 	&red_zone_attr.attr,
 	&poison_attr.attr,
 	&store_user_attr.attr,
+	&validate_attr.attr,
 #ifdef CONFIG_ZONE_DMA
 	&cache_dma_attr.attr,
 #endif
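
A usage note for anyone who wants to try this: validation is triggered by
writing '1' to the new per-cache sysfs attribute. A rough sketch (the
/sys/slab mount point and the kmalloc-192 cache name are assumptions for
illustration; substitute whatever cache you suspect is corrupted):

	# Run a validation pass over all partial and full slabs of the
	# cache, then read the result from the kernel log.
	echo 1 > /sys/slab/kmalloc-192/validate
	dmesg | tail
	# Expected on success:
	#   SLUB: Validated <n> slabs in 'kmalloc-192'
	# Slabs that cannot be trylocked are skipped and reported:
	#   Skipped busy slab <addr>

Any write other than '1' gets -EINVAL back, and validate_show() is a stub
returning 0, so the attribute is effectively write-only.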