---
 include/linux/slub_def.h |    8 +++--
 mm/slub.c                |   68 +++++++++++++++++++++++++++++------------------
 2 files changed, 47 insertions(+), 29 deletions(-)

Index: slub/include/linux/slub_def.h
===================================================================
--- slub.orig/include/linux/slub_def.h	2007-05-20 18:07:07.000000000 -0700
+++ slub/include/linux/slub_def.h	2007-05-20 18:46:15.000000000 -0700
@@ -52,7 +52,6 @@ struct kmem_cache {
 
 #ifdef CONFIG_NUMA
 	int defrag_ratio;
-	struct kmem_cache_node *node[MAX_NUMNODES];
 #endif
 	struct page *cpu_slab[NR_CPUS];
 };
@@ -68,7 +67,10 @@ struct kmem_cache {
  * We keep the general caches in an array of slab caches that are used for
  * 2^x bytes of allocations.
  */
-extern struct kmem_cache kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
+extern struct kmalloc_cache {
+	struct kmem_cache_node nodes[MAX_NUMNODES];
+	struct kmem_cache cache;
+} kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
 
 /*
  * Determine the kmalloc array index given the object size.
@@ -129,7 +131,7 @@ static inline struct kmem_cache *kmalloc
 		extern void __kmalloc_size_too_large(void);
 		__kmalloc_size_too_large();
 	}
-	return &kmalloc_caches[index];
+	return &kmalloc_caches[index].cache;
 }
 
 #ifdef CONFIG_ZONE_DMA
Index: slub/mm/slub.c
===================================================================
--- slub.orig/mm/slub.c	2007-05-20 18:07:28.000000000 -0700
+++ slub/mm/slub.c	2007-05-20 19:28:59.000000000 -0700
@@ -220,7 +220,7 @@ static inline void ClearSlabDebug(struct
 #define cache_line_size() L1_CACHE_BYTES
 #endif
 
-static int kmem_size = sizeof(struct kmem_cache);
+static int kmem_size = sizeof(struct kmalloc_cache);
 
 #ifdef CONFIG_SMP
 static struct notifier_block slab_notifier;
@@ -268,10 +268,10 @@ int slab_is_available(void)
 	return slab_state >= UP;
 }
 
-static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
+struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
 {
 #ifdef CONFIG_NUMA
-	return s->node[node];
+	return ((struct kmem_cache_node **)s)[-(node + 1)];
 #else
 	return &s->local_node;
 #endif
@@ -1807,6 +1807,12 @@ static void init_kmem_cache_node(struct
 }
 
 #ifdef CONFIG_NUMA
+static inline void set_node(struct kmem_cache *s, int node,
+				struct kmem_cache_node *n)
+{
+	((struct kmem_cache_node **)s)[-1-node] = n;
+}
+
 /*
  * No kmalloc_node yet so do it by hand. We know that this is the first
  * slab on the node for this slabcache. There are no concurrent accesses
@@ -1820,20 +1826,21 @@ static struct kmem_cache_node * __init e
 {
 	struct page *page;
 	struct kmem_cache_node *n;
+	struct kmem_cache *s = &kmalloc_caches[0].cache;
 
-	BUG_ON(kmalloc_caches->size < sizeof(struct kmem_cache_node));
+	BUG_ON(s->size < sizeof(struct kmem_cache_node));
 
-	page = new_slab(kmalloc_caches, gfpflags | GFP_THISNODE, node);
+	page = new_slab(s, gfpflags | GFP_THISNODE, node);
 	/* new_slab() disables interupts */
 	local_irq_enable();
 
 	BUG_ON(!page);
 	n = page->freelist;
 	BUG_ON(!n);
-	page->freelist = get_freepointer(kmalloc_caches, n);
+	page->freelist = get_freepointer(s, n);
 	page->inuse++;
-	kmalloc_caches->node[node] = n;
-	setup_object_debug(kmalloc_caches, page, n);
+	set_node(s, node, n);
+	setup_object_debug(s, page, n);
 	init_kmem_cache_node(n);
 	atomic_long_inc(&n->nr_slabs);
 	add_partial(n, page);
@@ -1845,10 +1852,10 @@ static void free_kmem_cache_nodes(struct
 	int node;
 
 	for_each_online_node(node) {
-		struct kmem_cache_node *n = s->node[node];
+		struct kmem_cache_node *n = get_node(s, node);
 		if (n && n != &s->local_node)
-			kmem_cache_free(kmalloc_caches, n);
-		s->node[node] = NULL;
+			kmem_cache_free(&kmalloc_caches[0].cache, n);
+		set_node(s, node, NULL);
 	}
 }
 
@@ -1873,7 +1880,7 @@ static int init_kmem_cache_nodes(struct
 				node);
 			continue;
 		}
-		n = kmem_cache_alloc_node(kmalloc_caches,
+		n = kmem_cache_alloc_node(&kmalloc_caches[0].cache,
 							gfpflags, node);
 
 		if (!n) {
@@ -1882,7 +1889,7 @@ static int init_kmem_cache_nodes(struct
 			}
 
 		}
-		s->node[node] = n;
+		set_node(s, node, n);
 		init_kmem_cache_node(n);
 	}
 	return 1;
@@ -2138,11 +2145,15 @@ void kmem_cache_destroy(struct kmem_cach
 	down_write(&slub_lock);
 	s->refcount--;
 	if (!s->refcount) {
+		void *v = s;
+
+		v -= nr_node_ids * sizeof(struct kmem_cache_node *);
+
 		list_del(&s->list);
 		if (kmem_cache_close(s))
 			WARN_ON(1);
 		sysfs_slab_remove(s);
-		kfree(s);
+		kfree(v);
 	}
 	up_write(&slub_lock);
 }
@@ -2152,7 +2163,7 @@ EXPORT_SYMBOL(kmem_cache_destroy);
 *		Kmalloc subsystem
 *******************************************************************/
 
-struct kmem_cache kmalloc_caches[KMALLOC_SHIFT_HIGH + 1] __cacheline_aligned;
+struct kmalloc_cache kmalloc_caches[KMALLOC_SHIFT_HIGH + 1] __cacheline_aligned;
 EXPORT_SYMBOL(kmalloc_caches);
 
 #ifdef CONFIG_ZONE_DMA
@@ -2239,7 +2250,8 @@ static struct kmem_cache *get_slab(size_
 		return s;
 
 	/* Dynamically create dma cache */
-	x = kmalloc(kmem_size, flags & ~SLUB_DMA);
+	x = kmalloc(kmem_size, flags & ~SLUB_DMA)
+		+ nr_node_ids * sizeof(struct kmem_cache *);
 
 	if (!x)
 		panic("Unable to allocate memory for dma cache\n");
@@ -2259,7 +2271,7 @@ static struct kmem_cache *get_slab(size_
 		return s;
 	}
 #endif
-	return &kmalloc_caches[index];
+	return &kmalloc_caches[index].cache;
 }
 
 void *__kmalloc(size_t size, gfp_t flags)
@@ -2713,7 +2725,7 @@ void __init kmem_cache_init(void)
 	 * struct kmem_cache_node's. There is special bootstrap code in
	 * kmem_cache_open for slab_state == DOWN.
	 */
-	create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
+	create_kmalloc_cache(&kmalloc_caches[0].cache, "kmem_cache_node",
 		sizeof(struct kmem_cache_node), GFP_KERNEL);
 #endif
 
@@ -2721,20 +2733,20 @@ void __init kmem_cache_init(void)
 	slab_state = PARTIAL;
 
 	/* Caches that are not of the two-to-the-power-of size */
-	create_kmalloc_cache(&kmalloc_caches[1],
+	create_kmalloc_cache(&kmalloc_caches[1].cache,
 		"kmalloc-96", 96, GFP_KERNEL);
-	create_kmalloc_cache(&kmalloc_caches[2],
+	create_kmalloc_cache(&kmalloc_caches[2].cache,
 		"kmalloc-192", 192, GFP_KERNEL);
 
 	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
-		create_kmalloc_cache(&kmalloc_caches[i],
+		create_kmalloc_cache(&kmalloc_caches[i].cache,
 			"kmalloc", 1 << i, GFP_KERNEL);
 
 	slab_state = UP;
 
 	/* Provide the correct kmalloc names now that the caches are up */
 	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
-		kmalloc_caches[i]. name =
+		kmalloc_caches[i].cache.name =
 			kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
 
 #ifdef CONFIG_SMP
@@ -2742,7 +2754,8 @@ void __init kmem_cache_init(void)
 #endif
 
 	kmem_size = offsetof(struct kmem_cache, cpu_slab) +
-				nr_cpu_ids * sizeof(struct page *);
+				nr_cpu_ids * sizeof(struct page *)
+				+ nr_node_ids * sizeof(struct kmem_cache_node *);
 
 	printk(KERN_INFO "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
 		" Processors=%d, Nodes=%d\n",
@@ -2839,17 +2852,20 @@ struct kmem_cache *kmem_cache_create(con
 		if (sysfs_slab_alias(s, name))
 			goto err;
 	} else {
-		s = kmalloc(kmem_size, GFP_KERNEL);
+		void *v = kmalloc(kmem_size, GFP_KERNEL);
+
+		s = v + nr_node_ids * sizeof(struct kmem_cache_node *);
+
 		if (s && kmem_cache_open(s, GFP_KERNEL, name,
 				size, align, flags, ctor, ops)) {
 			if (sysfs_slab_add(s)) {
-				kfree(s);
+				kfree(v);
 				goto err;
 			}
 			list_add(&s->list, &slab_caches);
 			raise_kswapd_order(s->order);
 		} else
-			kfree(s);
+			kfree(v);
 	}
 	up_write(&slub_lock);
 	return s;
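For context, the trick the patch relies on is that the per-node pointers live in memory directly in front of the struct kmem_cache, so get_node() and set_node() can reach node N by indexing (N + 1) pointer slots backwards from the cache pointer, and kmem_cache_create()/kmem_cache_destroy() allocate and free the combined region. The sketch below is a minimal userspace illustration of that negative-indexing layout under simplified assumptions (a plain pointer array in front of the cache struct, made-up names such as fake_cache, fake_node and a fixed nr_node_ids); it is not the kernel code itself.

```c
/*
 * Userspace sketch of the layout trick: the node-pointer array is placed
 * directly before the cache structure, and node N is reached by indexing
 * (N + 1) pointers backwards from the cache.  All names are illustrative.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct fake_node { long nr_slabs; };
struct fake_cache { char name[32]; };

static int nr_node_ids = 4;	/* stand-in for the kernel's nr_node_ids */

/* Mirrors get_node(): read the pointer stored -(node + 1) slots before s. */
static struct fake_node *get_node(struct fake_cache *s, int node)
{
	return ((struct fake_node **)s)[-(node + 1)];
}

/* Mirrors set_node(): store the pointer -(node + 1) slots before s. */
static void set_node(struct fake_cache *s, int node, struct fake_node *n)
{
	((struct fake_node **)s)[-1 - node] = n;
}

int main(void)
{
	/* Like kmem_cache_create() in the patch: allocate room for the node
	 * pointers plus the cache, then point s just past the pointer array. */
	void *v = calloc(1, nr_node_ids * sizeof(struct fake_node *) +
			    sizeof(struct fake_cache));
	struct fake_cache *s = (struct fake_cache *)
			((char *)v + nr_node_ids * sizeof(struct fake_node *));
	struct fake_node n = { .nr_slabs = 3 };

	strcpy(s->name, "kmalloc-96");
	set_node(s, 0, &n);
	printf("%s: node 0 has %ld slabs\n", s->name, get_node(s, 0)->nr_slabs);

	/* Like kmem_cache_destroy(): free the original allocation, not s. */
	free(v);
	return 0;
}
```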