Validate slabs on oops I am not sure if this is a good thing to have but if there is slab corruption leading to a failure then this patch will detect corrupted objects after the oops has occurred. [This should not go into mm. Just for discussion] Signed-off-by: Christoph Lameter Index: linux-2.6.21-rc5-mm3/mm/slub.c =================================================================== --- linux-2.6.21-rc5-mm3.orig/mm/slub.c 2007-03-30 21:51:52.000000000 -0700 +++ linux-2.6.21-rc5-mm3/mm/slub.c 2007-03-30 21:52:37.000000000 -0700 @@ -20,6 +20,7 @@ #include #include #include +#include /* * Lock order: @@ -128,10 +129,12 @@ LIST_HEAD(slab_caches); static int sysfs_slab_add(struct kmem_cache *); static int sysfs_slab_alias(struct kmem_cache *, const char *); static void sysfs_slab_remove(struct kmem_cache *); +static void setup_slab_die(void); #else static int sysfs_slab_add(struct kmem_cache *s) { return 0; } static int sysfs_slab_alias(struct kmem_cache *s, const char *p) { return 0; } static void sysfs_slab_remove(struct kmem_cache *s) {} +static void setup_slab_die(void) {}; #endif /******************************************************************** @@ -2008,6 +2011,8 @@ void __init kmem_cache_init(void) "Processors=%d, Nodes=%d\n", KMALLOC_SHIFT_HIGH, L1_CACHE_BYTES, nr_cpu_ids, nr_node_ids); + + setup_slab_die(); } /* @@ -2402,6 +2407,67 @@ struct slab_attribute { static struct slab_attribute _name##_attr = \ __ATTR(_name, 0644, _name##_show, _name##_store) +/* + * Validation of slabs that are on the lists. The caller must + * provide synchronization if necessary. 
+ */ +static void validate_slab(struct kmem_cache *s, struct page *page) +{ + void *p; + void *addr = page_address(page); + unsigned long map[BITS_TO_LONGS(s->objects)]; + + if (!check_slab(s, page) || + !on_freelist(s, page, NULL)) + return; + + printk(KERN_ERR "Detail check on slab %p\n", page); + /* Now we know that a valid freelist exists */ + bitmap_zero(map, s->objects); + + for(p = page->freelist; p; p = get_freepointer(s, p)) { + set_bit((p - addr) / s->size, map); + check_object(s, page, p, 0); + } + + for(p = addr; p < addr + s->objects * s->size; p += s->size) + if (!test_bit((p - addr) / s->size, map)) + check_object(s, page, p, 1); +} + +static void validate_slab_cache(struct kmem_cache *s) +{ + int node; + + for_each_online_node(node) { + struct kmem_cache_node *n = get_node(s, node); + struct page *page; + + printk(KERN_ERR "Die: validating partial pages for slab %s\n", s->name); + list_for_each_entry(page, &n->partial, lru) + validate_slab(s, page); + } +} + +static int slab_die_call(struct notifier_block *self, + unsigned long val, void *data) +{ + struct kmem_cache *s; + + list_for_each_entry(s, &slab_caches, list) + validate_slab_cache(s); + + return NOTIFY_OK; +} + +struct notifier_block slab_die_notifier = { + .notifier_call = slab_die_call, +}; + +static void __init setup_slab_die(void) +{ + register_die_notifier(&slab_die_notifier); +} static ssize_t slab_size_show(struct kmem_cache *s, char *buf) {