Index: linux-2.6.21-rc5/mm/slub.c
===================================================================
--- linux-2.6.21-rc5.orig/mm/slub.c	2007-03-26 19:53:44.000000000 -0700
+++ linux-2.6.21-rc5/mm/slub.c	2007-03-26 23:14:30.000000000 -0700
@@ -59,6 +59,25 @@
  */
 
 /*
+ * Issues still to be resolved:
+ *
+ * - Strange failures where the process context is corrupted on i386.
+ *
+ * - The per cpu array is updated for each new slab and is a remote
+ *   cacheline for most nodes. This could become a bouncing cacheline given
+ *   enough frequent updates. There are 16 pointers in a cacheline, so at
+ *   most 16 cpus could compete. Likely okay.
+ *
+ * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
+ *
+ * - Support DEBUG_SLAB_LEAK. Trouble is we do not know where the full
+ *   slabs are in SLUB.
+ *
+ * - SLAB_DEBUG_INITIAL is not supported but I have never seen a use of
+ *   it.
+ */
+
+/*
  * Flags from the regular SLAB that SLUB does not support:
  */
 #define SLUB_UNIMPLEMENTED	(SLAB_DEBUG_INITIAL)
@@ -1869,12 +1888,14 @@ size_t ksize(const void *object)
 	BUG_ON(!page);
 	s = page->slab;
 	BUG_ON(!s);
+
 	/*
 	 * Debugging requires use of the padding between object
 	 * and whatever may come after it.
 	 */
 	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
 		return s->objsize;
+
 	/*
 	 * If we have the need to store the freelist pointer
 	 * back there or track user information then we can
@@ -1882,6 +1903,7 @@ size_t ksize(const void *object)
 	 */
 	if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
 		return s->inuse;
+
 	/*
 	 * Else we can use all the padding etc for the allocation
 	 */
@@ -2819,9 +2841,6 @@ static int sysfs_slab_add(struct kmem_ca
 
 static void sysfs_slab_remove(struct kmem_cache *s)
 {
 	kobject_uevent(&s->kobj, KOBJ_REMOVE);
-
-	/* Individual field destruct here */
-
 	kobject_del(&s->kobj);
 }
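
For reference, here is a standalone sketch of the decision chain that the ksize()
comment hunks above annotate. This is illustrative only, not the kernel code:
struct kmem_cache_sketch is a hypothetical stand-in trimmed to just the fields
the logic touches, and the SLAB_* values below are placeholder bits rather than
the kernel's definitions.

#include <stddef.h>

/* Placeholder flag bits -- illustrative, not the kernel's values. */
#define SLAB_RED_ZONE		0x01UL
#define SLAB_POISON		0x02UL
#define SLAB_DESTROY_BY_RCU	0x04UL
#define SLAB_STORE_USER		0x08UL

/* Hypothetical trimmed stand-in for struct kmem_cache. */
struct kmem_cache_sketch {
	unsigned long flags;	/* SLAB_* flags set at cache creation */
	size_t objsize;		/* size the caller originally requested */
	size_t inuse;		/* offset where behind-object metadata begins */
	size_t size;		/* full slot size, padding included */
};

static size_t ksize_sketch(const struct kmem_cache_sketch *s)
{
	/*
	 * Red zoning and poisoning write into the padding behind the
	 * object, so only the originally requested size is usable.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->objsize;

	/*
	 * RCU-freed caches keep the freelist pointer behind the object,
	 * and user tracking stores ownership data there; usable space
	 * ends where that metadata begins.
	 */
	if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
		return s->inuse;

	/* Otherwise all padding up to the slot size may be used. */
	return s->size;
}

The ordering matters: red zoning and poisoning claim the padding first, metadata
stored behind the object comes next, and only when neither applies can the full
slot size be reported as usable.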