Index: linux-2.6.21-rc3-mm2/lib/Kconfig.debug
===================================================================
--- linux-2.6.21-rc3-mm2.orig/lib/Kconfig.debug	2007-03-16 02:15:02.000000000 -0700
+++ linux-2.6.21-rc3-mm2/lib/Kconfig.debug	2007-03-16 02:40:11.000000000 -0700
@@ -157,7 +157,8 @@ config TIMER_STATS
 
 config DEBUG_SLAB
 	bool "Debug slab memory allocations"
-	depends on DEBUG_KERNEL && SLAB
+	default y if SLUB
+	depends on (DEBUG_KERNEL && SLAB) || SLUB
 	help
 	  Say Y here to have the kernel do limited verification on memory
 	  allocation as well as poisoning memory on free to catch use of freed
Index: linux-2.6.21-rc3-mm2/mm/slub.c
===================================================================
--- linux-2.6.21-rc3-mm2.orig/mm/slub.c	2007-03-16 02:36:23.000000000 -0700
+++ linux-2.6.21-rc3-mm2/mm/slub.c	2007-03-19 12:33:19.000000000 -0700
@@ -2304,3 +2304,29 @@ static int __init cpucache_init(void)
 __initcall(cpucache_init);
 #endif
 
+/*
+ * These are not as efficient as kmalloc for the non debug case.
+ * We do not have the page struct available so we have to touch one
+ * cacheline in struct kmem_cache to check slab flags.
+ */
+void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
+{
+	struct kmem_cache *s = get_slab(size, gfpflags);
+	void *object = kmem_cache_alloc(s, gfpflags);
+
+	if (object && (s->flags & SLAB_STORE_USER))
+		set_track(s, object, 0, caller);
+	return object;
+}
+
+void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
+					int node, void *caller)
+{
+	struct kmem_cache *s = get_slab(size, gfpflags);
+	void *object = kmem_cache_alloc_node(s, gfpflags, node);
+
+	if (object && (s->flags & SLAB_STORE_USER))
+		set_track(s, object, 0, caller);
+	return object;
+}
+
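
For reference, these entry points back the kmalloc_track_caller() wrapper
used by helpers such as kstrdup() and krealloc(), so that with
SLAB_STORE_USER enabled the recorded allocation track points at the
helper's caller rather than at the helper itself. Below is a minimal
sketch of such a caller, assuming the usual mapping of
kmalloc_track_caller() onto __kmalloc_track_caller() via
__builtin_return_address(0); my_kstrdup() is a hypothetical example and
not part of this patch:

	#include <linux/slab.h>
	#include <linux/string.h>

	/*
	 * Sketch only: kmalloc_track_caller() expands (when caller tracking
	 * is available) to __kmalloc_track_caller(size, flags,
	 * __builtin_return_address(0)), so the track stored by
	 * SLAB_STORE_USER names whoever called my_kstrdup(), not
	 * my_kstrdup() itself.
	 */
	static char *my_kstrdup(const char *s, gfp_t gfp)
	{
		size_t len = strlen(s) + 1;
		char *buf = kmalloc_track_caller(len, gfp);

		if (buf)
			memcpy(buf, s, len);
		return buf;
	}

With SLUB and store-user debugging enabled (for example via the
slub_debug boot parameter), the alloc track of such objects should then
show the real call site.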