Index: linux-2.6.21-rc1/mm/slub.c =================================================================== --- linux-2.6.21-rc1.orig/mm/slub.c 2007-02-26 05:46:09.000000000 -0800 +++ linux-2.6.21-rc1/mm/slub.c 2007-02-26 06:57:44.000000000 -0800 @@ -42,36 +42,20 @@ * to push back per cpu slabs if they are unused * for a longer time period. * - * PageError Only a single object exists per slab. Objects are not - * cached instead we use the page allocator for - * object allocation and freeing. + * PageError Slab requires special handling due to debug + * options set or a single object slab */ /* - * Flags from the regular SLAB that we have not implemented: + * Flags from the regular SLAB that SLUB does not support: */ -#define SLUB_UNIMPLEMENTED (SLAB_DEBUG_FREE | SLAB_DEBUG_INITIAL | \ +#define SLUB_UNIMPLEMENTED (SLAB_DEBUG_INITIAL | \ SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER) /* - * Enabling SLUB_DEBUG results in internal consistency checks. - */ -#define SLUB_DEBUG - -/* - * SLUB_DEBUG_KFREE enables checking for double frees. In order to do this - * we have to look through the free lists of object in a slab on kfree which - * may slightly reduce performance. 
- */ -#ifdef SLUB_DEBUG -#define SLUB_DEBUG_KFREE -#endif - -/* * Set of flags that will prevent slab merging */ -#define SLUB_NEVER_MERGE (SLAB_DEBUG_FREE | SLAB_DEBUG_INITIAL | \ - SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \ +#define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \ SLAB_DESTROY_BY_RCU | SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA) #ifndef ARCH_KMALLOC_MINALIGN @@ -358,7 +342,6 @@ return get_any_partial(s, flags); } -#if defined(SLUB_DEBUG) || defined(SLUB_DEBUG_KFREE) static int check_valid_pointer(struct kmem_cache *s, struct page *page, void *object, void *origin) { @@ -382,9 +365,6 @@ return 1; } -/* - * Debugging checks - */ static void check_slab(struct page *page) { if (!PageSlab(page)) { @@ -406,7 +386,7 @@ void **object = page->freelist; void *origin = &page->lru; - if (PageError(page)) + if (s->objects == 1) return 0; check_slab(page); @@ -437,13 +417,9 @@ static void check_free_chain(struct kmem_cache *s, struct page *page) { - on_freelist(s, page, NULL); -} -#else -static void check_free_chain(struct kmem_cache *s, struct page *page) -{ + if (s->flags & SLAB_DEBUG_FREE) + on_freelist(s, page, NULL); } -#endif static void discard_slab(struct kmem_cache *s, struct page *page) { @@ -479,6 +455,9 @@ page->offset = s->offset; page->slab = s; page->flags |= 1 << PG_slab; + if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON | + SLAB_STORE_USER) || s->objects == 1) + page->flags |= 1 << PG_error; if (s->objects > 1) { void *start = page_address(page); @@ -495,8 +474,7 @@ page->freelist = start; page->inuse = 0; check_free_chain(s, page); - } else - page->flags |= 1 << PG_error; + } out: if (flags & __GFP_WAIT) @@ -634,7 +612,8 @@ goto new_slab; slab_lock(page); - check_free_chain(s, page); + if (unlikely(PageError(page))) + check_free_chain(s, page); if (unlikely(!page->freelist)) goto another_slab; @@ -684,10 +663,9 @@ s->cpu_slab[cpu] = page; SetPageActive(page); - check_free_chain(s, page); #ifdef CONFIG_SMP - 
if (keventd_up() && !atomic_read(&s->cpu_slabs)) { + if (!atomic_read(&s->cpu_slabs) && keventd_up()) { atomic_inc(&s->cpu_slabs); schedule_delayed_work(&s->flush, 30 * HZ); } @@ -727,22 +705,23 @@ if (!s) s = page->slab; -#ifdef SLUB_DEBUG - if (unlikely(s != page->slab)) - goto slab_mismatch; - if (unlikely(!check_valid_pointer(s, page, object, NULL))) - goto dumpret; -#endif - local_irq_save(flags); - if (unlikely(PageError(page))) - goto single_object_slab; + + if (unlikely(PageError(page))) { + if (unlikely(s != page->slab)) + goto slab_mismatch; + if (s->objects == 1) + goto single_object_slab; + if (unlikely(!check_valid_pointer(s, page, object, NULL))) + goto dumpret; + } + slab_lock(page); -#ifdef SLUB_DEBUG_KFREE - if (on_freelist(s, page, object)) - goto double_free; -#endif + if (unlikely(PageError(page))) { + if (on_freelist(s, page, object)) + goto double_free; + } prior = object[page->offset] = page->freelist; page->freelist = object; @@ -771,18 +750,16 @@ slab_unlock(page); single_object_slab: discard_slab(s, page); +out: local_irq_restore(flags); return; -#ifdef SLUB_DEBUG_KFREE double_free: printk(KERN_CRIT "slab_free %s: object %p already free.\n", s->name, object); dump_stack(); goto out_unlock; -#endif -#ifdef SLUB_DEBUG slab_mismatch: if (!PageSlab(page)) { printk(KERN_CRIT "slab_free %s size %d: attempt to free " @@ -807,8 +784,7 @@ dump_stack(); printk(KERN_CRIT "***** Trying to continue by not " "freeing object.\n"); - return; -#endif + goto out; } EXPORT_SYMBOL(kmem_cache_free); @@ -861,6 +837,12 @@ */ static int slub_nomerge = 0; +/* + * Debug settings + */ +static int slub_debug = 0; +static char *slub_debug_slabs = NULL; + static int calculate_order(int size) { int order; @@ -988,6 +970,13 @@ { BUG_ON(flags & SLUB_UNIMPLEMENTED); memset(s, 0, KMEM_CACHE_SIZE); + + /* Enable debugging if selected on the kernel commandline */ + if (slub_debug && (!slub_debug_slabs || + strncmp(slub_debug_slabs, name, + strlen(slub_debug_slabs)) == 0)) + 
flags |= slub_debug; + s->name = name; s->ctor = ctor; s->dtor = dtor; @@ -1232,6 +1221,31 @@ __setup("slub_nomerge", setup_slub_nomerge); +static int __init setup_slub_debug(char *str) +{ + printk(KERN_CRIT "setup_slub_debug=%s\n", str); + + if (!str || *str == ',') + slub_debug = SLAB_DEBUG_FREE | SLAB_RED_ZONE | + SLAB_STORE_USER | SLAB_POISON; + else + for( ;str && *str != ','; str++) + switch (*str) { + case 'f' : case 'F' : slub_debug |= SLAB_DEBUG_FREE;break; + case 'z' : case 'Z' : slub_debug |= SLAB_RED_ZONE;break; + case 'p' : case 'P' : slub_debug |= SLAB_POISON;break; + case 'u' : case 'U' : slub_debug |= SLAB_STORE_USER;break; + default: + printk(KERN_CRIT "slub_debug option '%c' unknown. skipped\n",*str); + } + + if (str && *str == ',') + slub_debug_slabs = str + 1; + return 1; +} + +__setup("slub_debug", setup_slub_debug); + static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s, const char *name, int size, gfp_t flags) { @@ -1582,7 +1596,7 @@ unsigned long cpu_slabs; unsigned long partial_slabs; unsigned long objects; - unsigned char options[13]; + unsigned char options[15]; char *d = options; char *x; unsigned long nodes[nr_node_ids]; @@ -1602,7 +1616,7 @@ if (s->flags & SLAB_RECLAIM_ACCOUNT) *d++ = 'r'; if (s->flags & SLAB_PANIC) - *d++ = 'P'; + *d++ = 'p'; if (s->flags & SLAB_HWCACHE_ALIGN) *d++ = 'a'; if (s->flags & SLAB_MUST_HWCACHE_ALIGN) @@ -1613,6 +1627,10 @@ *d++ = 'I'; if (s->flags & SLAB_STORE_USER) *d++ = 'U'; + if (s->flags & SLAB_RED_ZONE) + *d++ = 'Z'; + if (s->flags & SLAB_POISON) + *d++ = 'P'; *d = 0;