Index: linux-2.6.21-rc2/mm/slub.c
===================================================================
--- linux-2.6.21-rc2.orig/mm/slub.c	2007-03-01 12:36:45.000000000 -0800
+++ linux-2.6.21-rc2/mm/slub.c	2007-03-01 13:33:26.000000000 -0800
@@ -674,7 +674,7 @@ static void __free_slab(struct kmem_cach
 {
 	int pages = 1 << s->order;
 
-	if (unlikely(PageError(page))) {
+	if (unlikely(PageError(page) || s->dtor)) {
 		void *start = page_address(page);
 		void *end = start + (pages << PAGE_SHIFT);
 		void *p;
@@ -1300,6 +1300,7 @@ int init_kmem_cache_nodes(struct kmem_ca
 	unsigned long flags;
 	struct page *page;
 
+	BUG_ON(s->size < sizeof(struct kmem_cache_node));
 	local_irq_save(flags);
 	page = new_slab(s, gfpflags, node);
 
@@ -1806,9 +1807,13 @@ void __init kmem_cache_init(void)
 #ifdef CONFIG_SMP
 	register_cpu_notifier(&slab_notifier);
 #endif
-	printk("SLUB V3.1: General slabs=%d, HW alignment=%d\n",
+	if (nr_cpu_ids) /* Remove when nr_cpu_ids was fixed ! */
+		kmem_size = offsetof(struct kmem_cache, cpu_slab) +
+				nr_cpu_ids * sizeof(struct page *);
+
+	printk("SLUB V3.2: General Slabs=%d, HW alignment=%d, Processors=%d, Nodes=%d\n",
 		KMALLOC_SHIFT_HIGH + KMALLOC_EXTRAS + 1 - KMALLOC_SHIFT_LOW,
-		L1_CACHE_BYTES);
+		L1_CACHE_BYTES, nr_cpu_ids, nr_node_ids);
 }
 
 static struct kmem_cache *kmem_cache_dup(struct kmem_cache *s,
@@ -1895,12 +1900,6 @@ struct kmem_cache *kmem_cache_create(con
 			name, (int)size, s->name, s->size);
 		return kmem_cache_dup(s, GFP_KERNEL, name);
 	}
-
-	/* This needs to go elsewhere at some point */
-	if (nr_cpu_ids)
-		kmem_size = sizeof(struct kmem_cache)
-			- (NR_CPUS - nr_cpu_ids) * sizeof(struct page *);
-
 	s = kmalloc(kmem_size, GFP_KERNEL);
 	if (s && kmem_cache_open(s, GFP_KERNEL, kstrdup(name, GFP_KERNEL),
 			size, align, flags, ctor, dtor))