Index: linux-2.6.21-rc4/mm/slub.c
===================================================================
--- linux-2.6.21-rc4.orig/mm/slub.c	2007-03-21 21:59:54.000000000 -0700
+++ linux-2.6.21-rc4/mm/slub.c	2007-03-21 22:39:12.000000000 -0700
@@ -20,6 +20,7 @@
 #include
 #include
 #include
+#include
 
 /*
  * Lock order:
@@ -109,10 +110,12 @@
 static void sysfs_slab_add(struct kmem_cache *);
 static void sysfs_slab_alias(struct kmem_cache *, const char *);
 static void sysfs_slab_remove(struct kmem_cache *);
+static void setup_slab_die(void);
 #else
 static void sysfs_slab_add(struct kmem_cache *s) {}
 static void sysfs_slab_alias(struct kmem_cache *s, const char *p) {}
 static void sysfs_slab_remove(struct kmem_cache *s) {}
+static void setup_slab_die(void) {}
 #endif
 
 /********************************************************************
@@ -314,7 +317,7 @@
 
 	if (s->flags & __OBJECT_POISON) {
 		memset(p, POISON_FREE, s->objsize - 1);
-		p[s->objsize -1] = POISON_END;
+		p[s->objsize - 1] = POISON_END;
 	}
 
 	if (s->flags & SLAB_RED_ZONE)
@@ -2052,6 +2055,8 @@
 	kmem_size = offsetof(struct kmem_cache, cpu_slab) +
 			nr_cpu_ids * sizeof(struct page *);
 
+	setup_slab_die();
+
 	printk(KERN_INFO "SLUB V6: General Slabs=%ld, HW alignment=%d, "
 		"Processors=%d, Nodes=%d\n",
 		(unsigned long)KMALLOC_SHIFT_HIGH + KMALLOC_EXTRAS + 1
@@ -2319,6 +2324,67 @@
 
 #ifdef CONFIG_SYSFS
 
+/*
+ * Validation of slabs that are on the lists. The caller must
+ * provide synchronization if necessary.
+ */
+static void validate_slab(struct kmem_cache *s, struct page *page)
+{
+	void *p;
+	void *addr = page_address(page);
+	unsigned long map[BITS_TO_LONGS(s->objects)];
+
+	if (!check_slab(s, page) ||
+			!on_freelist(s, page, NULL))
+		return;
+
+	printk(KERN_ERR "Detail check on slab %p\n", page);
+	/* Now we know that a valid freelist exists */
+	bitmap_zero(map, s->objects);
+
+	for (p = page->freelist; p; p = get_freepointer(s, p)) {
+		set_bit((p - addr) / s->size, map);
+		check_object(s, page, p, 0);
+	}
+
+	for (p = addr; p < addr + s->objects * s->size; p += s->size)
+		if (!test_bit((p - addr) / s->size, map))
+			check_object(s, page, p, 1);
+}
+
+static void validate_slab_cache(struct kmem_cache *s)
+{
+	int node;
+
+	for_each_online_node(node) {
+		struct kmem_cache_node *n = get_node(s, node);
+		struct page *page;
+
+		printk(KERN_ERR "Die: validating partial pages for slab %s\n", s->name);
+		list_for_each_entry(page, &n->partial, lru)
+			validate_slab(s, page);
+	}
+}
+
+static int slab_die_call(struct notifier_block *self,
+				unsigned long val, void *data)
+{
+	struct kmem_cache *s;
+
+	list_for_each_entry(s, &slab_caches, list)
+		validate_slab_cache(s);
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block slab_die_notifier = {
+	.notifier_call = slab_die_call,
+};
+
+static void __init setup_slab_die(void)
+{
+	register_die_notifier(&slab_die_notifier);
+}
 
 /* From mm */
 int sprint_symbol(char *buffer, unsigned long address)
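
For readers who want to see the mechanism the new setup_slab_die() hook plugs into in isolation: the die notifier chain invokes every registered notifier_block on the kernel's die()/oops path, which is what lets the slab validation above run at crash time. Below is a minimal sketch of that pattern as a standalone module; it is not part of the patch, the example_* names are invented for illustration, and it assumes a 2.6.21-era tree where register_die_notifier() is declared in <asm/kdebug.h> (later kernels moved the declaration to <linux/kdebug.h>).

#include <linux/module.h>
#include <linux/notifier.h>
#include <asm/kdebug.h>

/* Called on the die()/oops path: atomic context, so no sleeping. */
static int example_die_call(struct notifier_block *self,
				unsigned long val, void *data)
{
	printk(KERN_ERR "example: die notifier fired, event=%lu\n", val);
	return NOTIFY_OK;	/* let the remaining notifiers run */
}

static struct notifier_block example_die_notifier = {
	.notifier_call = example_die_call,
};

static int __init example_init(void)
{
	register_die_notifier(&example_die_notifier);
	return 0;
}

static void __exit example_exit(void)
{
	unregister_die_notifier(&example_die_notifier);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

The same constraint noted in the comment applies to slab_die_call() in the patch: since the machine is already dying, the callback can only walk slab_caches and printk; it must not sleep or re-enter the allocator it is validating.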