Index: linux-2.6.21-rc1/mm/slub.c
===================================================================
--- linux-2.6.21-rc1.orig/mm/slub.c	2007-02-26 15:59:02.000000000 -0800
+++ linux-2.6.21-rc1/mm/slub.c	2007-02-26 21:10:02.000000000 -0800
@@ -69,8 +69,8 @@
 #define ARCH_SLAB_MINALIGN sizeof(void *)
 #endif
 
-#define KMEM_CACHE_SIZE (sizeof(struct kmem_cache) - \
-		(MAX_NUMNODES - nr_cpu_ids) * sizeof(struct page *))
+static int kmem_size = sizeof(struct kmem_cache);
+
 /*
  * Forward declarations
  */
@@ -279,10 +279,11 @@ static int check_object(struct kmem_cach
 	u8 *p = object;
 
 	if (s->flags & SLAB_RED_ZONE)
-		if (!check_bytes(p + s->objsize, active ? RED_ACTIVE : RED_INACTIVE,
+		if (!check_bytes(p + s->objsize,
+				active ? RED_ACTIVE : RED_INACTIVE,
 				s->inuse - s->objsize)) {
 			object_err(s, page, object,
-			active ? "Redzone Active" : "Redzone Inactive");
+				active ? "Redzone Active" : "Redzone Inactive");
 			return 0;
 		}
 	if ((s->flags & SLAB_POISON) && !active)
@@ -294,7 +295,6 @@ static int check_object(struct kmem_cach
 	return 1;
 }
 
-
 /*
  * Locking for each individual slab using the pagelock
  */
@@ -1043,7 +1043,12 @@ int init_kmem_cache_nodes(struct kmem_ca
 {
 #ifdef CONFIG_NUMA
 	int node;
-	int local_node = page_to_nid(virt_to_page(s));
+	int local_node;
+
+	if (slab_state == UP)
+		local_node = page_to_nid(virt_to_page(s));
+	else
+		local_node = 0;
 
 	for_each_online_node(node) {
 		struct kmem_cache_node *n;
@@ -1097,7 +1102,8 @@ int kmem_cache_open(struct kmem_cache *s
 		void (*dtor)(void *, struct kmem_cache *, unsigned long))
 {
 	BUG_ON(flags & SLUB_UNIMPLEMENTED);
-	memset(s, 0, KMEM_CACHE_SIZE);
+
+	memset(s, 0, kmem_size);
 
 	/* Enable debugging if selected on the kernel commandline */
 	if (slub_debug && (!slub_debug_slabs ||
@@ -1105,6 +1111,15 @@ int kmem_cache_open(struct kmem_cache *s
 		strlen(slub_debug_slabs)) == 0))
 			flags |= slub_debug;
 
+	if ((flags & SLAB_POISON) && ((flags & SLAB_DESTROY_BY_RCU) ||
+			s->ctor || s->dtor)) {
+//		if (slub_debug & SLAB_POISON)
+			printk(KERN_WARNING "SLUB %s: Reset SLAB_POISON "
+				"because de/constructor exists.\n",
+				s->name);
+		flags &= ~SLAB_POISON;
+	}
+
 	s->name = name;
 	s->ctor = ctor;
 	s->dtor = dtor;
@@ -1418,7 +1433,7 @@ static struct kmem_cache *get_slab(size_
 		flags &= ~__GFP_DMA;
 
 		/* Dynamically create dma cache */
-		x = kmalloc(KMEM_CACHE_SIZE, flags);
+		x = kmalloc(kmem_size, flags);
 		if (!x)
 			panic("Unable to allocate memory for dma cache\n");
 
@@ -1438,7 +1453,6 @@ static struct kmem_cache *get_slab(size_
 
 	text = kasprintf(flags, "kmalloc_dma-%d", (unsigned int)realsize);
 	s = create_kmalloc_cache(x, text, realsize, flags);
-	kfree(text);
 	kmalloc_caches_dma[index] = s;
 	return s;
 }
@@ -1537,11 +1551,11 @@ void __init kmem_cache_init(void)
 	slab_state = UP;
 
 	/* Provide the correct kmalloc names now that the caches are up */
-	for (i = 0; i <= KMALLOC_SHIFT_HIGH - KMALLOC_SHIFT_LOW; i++) {
-		char *name = kasprintf(GFP_KERNEL, "kmalloc-%d",
-				kmalloc_caches[i].size);
+	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
+		char *name = kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
+
 		BUG_ON(!name);
-		kmalloc_caches[i].name = name;
+		kmalloc_caches[i - KMALLOC_SHIFT_LOW].name = name;
 	};
 
 #ifdef CONFIG_SMP
@@ -1616,7 +1630,11 @@ struct kmem_cache *kmem_cache_create(con
 			return kmem_cache_dup(s, GFP_KERNEL, name);
 	}
 
-	s = kmalloc(KMEM_CACHE_SIZE, GFP_KERNEL);
+	if (nr_cpu_ids)
+		kmem_size = sizeof(struct kmem_cache) -
+			(NR_CPUS - nr_cpu_ids) * sizeof(struct page *);
+
+	s = kmalloc(kmem_size, GFP_KERNEL);
 	if (s && kmem_cache_open(s, GFP_KERNEL, name, size, align, flags,
 			ctor, dtor))
 		return s;
@@ -1641,21 +1659,6 @@ EXPORT_SYMBOL(kmem_cache_zalloc);
  *	Slab proc interface
  *******************************************************************/
 
-static void for_all_slabs(void (*func)(struct kmem_cache *, int), int cpu)
-{
-	struct list_head *h;
-
-	down_read(&slabstat_sem);
-	list_for_each(h, &slab_caches) {
-		struct kmem_cache *s =
-			container_of(h, struct kmem_cache, list);
-
-		func(s, cpu);
-	}
-	up_read(&slabstat_sem);
-}
-
-
 static void register_slab(struct kmem_cache *s)
 {
 	down_write(&slabstat_sem);
@@ -1801,6 +1804,21 @@ struct seq_operations slabinfo_op = {
 
 #ifdef CONFIG_SMP
 
+static void for_all_slabs(void (*func)(struct kmem_cache *, int), int cpu)
+{
+	struct list_head *h;
+
+	down_read(&slabstat_sem);
+	list_for_each(h, &slab_caches) {
+		struct kmem_cache *s =
+			container_of(h, struct kmem_cache, list);
+
+		func(s, cpu);
+	}
+	up_read(&slabstat_sem);
+}
+
+
 /*
  * Use the cpu notifier to insure that the thresholds are recalculated
  * when necessary.