Index: linux-2.6.21-rc1/mm/slub.c
===================================================================
--- linux-2.6.21-rc1.orig/mm/slub.c	2007-02-27 23:27:05.000000000 -0800
+++ linux-2.6.21-rc1/mm/slub.c	2007-02-28 00:34:58.000000000 -0800
@@ -206,16 +206,18 @@ static void init_tracking(struct kmem_ca
 static void print_trailer(struct kmem_cache *s, u8 *p)
 {
-	unsigned int off = s->inuse;
+	unsigned int off;
 
-	if (off == s->offset)
-		off += sizeof(void *);
+	if (s->offset)
+		off = s->offset + sizeof(void *);
+	else
+		off = s->inuse;
 
 	if (s->flags & SLAB_RED_ZONE)
 		print_section("Redzone", p + s->objsize,
 			s->inuse - s->objsize);
 
-	printk(KERN_ERR "FreePointer %p: %p\n", p + s->inuse,
+	printk(KERN_ERR "FreePointer %p: %p\n", p + s->offset,
 		get_freepointer(s, p));
 
 	if (s->flags & SLAB_STORE_USER) {
@@ -238,12 +240,10 @@ static void object_err(struct kmem_cache
 		reason, s->name, object, page);
 	printk(KERN_ERR "    offset=%ld flags=%04lx inuse=%d freelist=%p\n",
 		object - addr, page->flags, page->inuse, page->freelist);
+	if (object > addr + 16)
+		print_section("Bytes b4", object - 16, 16);
 	print_section("Object", object, s->objsize);
 	print_trailer(s, object);
-	if (object > addr) {
-		printk(KERN_ERR "Prior object trailer:\n");
-		print_trailer(s, object - s->size);
-	}
 	dump_stack();
 }
 
@@ -452,7 +452,7 @@ static void check_free_chain(struct kmem
 		on_freelist(s, page, NULL);
 }
 
-static void alloc_object_checks(struct kmem_cache *s, struct page *page,
+static int alloc_object_checks(struct kmem_cache *s, struct page *page,
 							void *object)
 {
 	if (!check_slab(s, page))
@@ -470,7 +470,7 @@ static void alloc_object_checks(struct k
 	}
 
 	if (!object)
-		return;
+		return 1;
 
 	if (!check_object(s, page, object, 0))
 		goto bad;
@@ -483,13 +483,14 @@ static void alloc_object_checks(struct k
 			page->freelist);
 		dump_stack();
 	}
-	return;
+	return 1;
 dump:
 	dump_stack();
 bad:
 	/* Mark slab full */
 	page->inuse = s->objects;
 	page->freelist = NULL;
+	return 0;
 }
 
 static int free_object_checks(struct kmem_cache *s, struct page *page, void *object)
@@ -963,7 +964,7 @@ static void flush_all(struct kmem_cache
 }
 #endif
 
-static __always_inline void *__slab_alloc(struct kmem_cache *s,
+static __always_inline void *slab_alloc(struct kmem_cache *s,
 		gfp_t gfpflags, int node)
 {
 	struct page *page;
@@ -980,12 +981,13 @@ static __always_inline void *__slab_allo
 	slab_lock(page);
 	if (unlikely(node != -1 && page_to_nid(page) != node))
 		goto another_slab;
+	if (unlikely(!page->freelist))
+		goto another_slab;
 redo:
 	object = page->freelist;
-	if (unlikely(!object))
-		goto another_slab;
 	if (unlikely(PageError(page))) {
-		alloc_object_checks(s, page, object);
+		if (!alloc_object_checks(s, page, object))
+			goto another_slab;
 		if (s->flags & SLAB_STORE_USER)
 			set_tracking(s, object, 0);
 	}
@@ -1018,15 +1020,15 @@ new_slab:
 	slab_lock(page);
 
 gotpage:
-	if (s->cpu_slab[cpu]) {
+	if (unlikely(s->cpu_slab[cpu])) {
 		slab_unlock(page);
 		discard_slab(s, page);
 		page = s->cpu_slab[cpu];
 		slab_lock(page);
+		goto redo;
+	}
 
-	} else
-		s->cpu_slab[cpu] = page;
-
+	s->cpu_slab[cpu] = page;
 	SetPageActive(page);
 
 #ifdef CONFIG_SMP
@@ -1040,14 +1042,14 @@ gotpage:
 
 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
-	return __slab_alloc(s, gfpflags, -1);
+	return slab_alloc(s, gfpflags, -1);
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
-	return __slab_alloc(s, gfpflags, node);
+	return slab_alloc(s, gfpflags, node);
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 #endif
@@ -1325,7 +1327,7 @@ int kmem_cache_open(struct kmem_cache *s
 		flags &= ~SLAB_POISON;
 	}
 
-	s->name = kstrdup(name, gfpflags);
+	s->name = name;
 	s->ctor = ctor;
 	s->dtor = dtor;
 	s->objsize = size;
@@ -1773,7 +1775,7 @@ static struct kmem_cache *kmem_cache_dup
 	char *x;
 
 	atomic_inc(&s->refcount);
-
+#if 0
 	down_write(&slabstat_sem);
 	if (!s->aliases)
 		s->aliases = kstrdup(name, flags);
@@ -1787,6 +1789,7 @@ static struct kmem_cache *kmem_cache_dup
 		s->aliases = x;
 	}
 	up_write(&slabstat_sem);
+#endif
 	return s;
 }
 
@@ -1859,8 +1862,8 @@ struct kmem_cache *kmem_cache_create(con
 		(NR_CPUS - nr_cpu_ids) * sizeof(struct page *);
 
 	s = kmalloc(kmem_size, GFP_KERNEL);
-	if (s && kmem_cache_open(s, GFP_KERNEL, name, size, align,
-		flags, ctor, dtor))
+	if (s && kmem_cache_open(s, GFP_KERNEL, kstrdup(name, GFP_KERNEL),
+		size, align, flags, ctor, dtor))
 		return s;
 	kfree(s);
 	return NULL;
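
For readers skimming the diff: the key behavioral change is that
alloc_object_checks() now tells its caller whether the object passed
its debug checks, and slab_alloc() reacts to a failure by retrying
with another slab instead of proceeding. Below is a minimal userspace
sketch of that contract; the types and function bodies are simplified
stand-ins for illustration only, not the kernel code.

	#include <stdio.h>

	/* Simplified stand-in for struct page; not the kernel type. */
	struct fake_slab {
		void *freelist;
		int corrupted;
	};

	/*
	 * Stand-in for alloc_object_checks(): returns 1 if the object
	 * may be handed out, 0 after marking the slab full so the
	 * allocator never touches it again (the "bad:" path above).
	 */
	static int alloc_object_checks(struct fake_slab *page, void *object)
	{
		if (page->corrupted) {
			page->freelist = NULL;	/* mark slab full */
			return 0;
		}
		return 1;
	}

	/* Stand-in for the slab_alloc() fast path after the patch. */
	static void *slab_alloc(struct fake_slab *page)
	{
		void *object = page->freelist;

		if (!object)
			return NULL;	/* "goto another_slab" in the real code */
		if (!alloc_object_checks(page, object))
			return NULL;	/* checks failed: also try another slab */
		return object;
	}

	int main(void)
	{
		int obj = 42;
		struct fake_slab good = { &obj, 0 };
		struct fake_slab bad  = { &obj, 1 };

		printf("good slab -> %p\n", slab_alloc(&good));
		printf("bad slab  -> %p\n", slab_alloc(&bad));
		return 0;
	}

Because a failed check marks the slab full (freelist = NULL, inuse =
s->objects), the retried allocation naturally skips the corrupted slab,
which is what makes the new "goto another_slab" on failure safe.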