---
 mm/slub.c |   21 ++++++++++++---------
 1 file changed, 12 insertions(+), 9 deletions(-)

Index: slub/mm/slub.c
===================================================================
--- slub.orig/mm/slub.c	2007-06-02 16:05:50.000000000 -0700
+++ slub/mm/slub.c	2007-06-02 16:07:18.000000000 -0700
@@ -1850,6 +1850,8 @@ static void init_kmem_cache_node(struct
 }
 
 #ifdef CONFIG_NUMA
+static struct kmem_cache node_cache;
+
 /*
  * No kmalloc_node yet so do it by hand. We know that this is the first
  * slab on the node for this slabcache. There are no concurrent accesses
@@ -1864,19 +1866,19 @@ static struct kmem_cache_node * __init e
 	struct page *page;
 	struct kmem_cache_node *n;
 
-	BUG_ON(kmalloc_caches->size < sizeof(struct kmem_cache_node));
+	BUG_ON(node_cache.size < sizeof(struct kmem_cache_node));
 
-	page = new_slab(kmalloc_caches, gfpflags | GFP_THISNODE, node);
+	page = new_slab(&node_cache, gfpflags | GFP_THISNODE, node);
 	/* new_slab() disables interupts */
 	local_irq_enable();
 
 	BUG_ON(!page);
 	n = page->freelist;
 	BUG_ON(!n);
-	page->freelist = get_freepointer(kmalloc_caches, n);
+	page->freelist = get_freepointer(&node_cache, n);
 	page->inuse++;
-	kmalloc_caches->node[node] = n;
-	setup_object_debug(kmalloc_caches, page, n);
+	node_cache.node[node] = n;
+	setup_object_debug(&node_cache, page, n);
 	init_kmem_cache_node(n);
 	atomic_long_inc(&n->nr_slabs);
 	add_partial(n, page);
@@ -1889,8 +1891,9 @@ static void free_kmem_cache_nodes(struct
 	for_each_online_node(node) {
 		struct kmem_cache_node *n = s->node[node];
+
 		if (n && n != &s->local_node)
-			kmem_cache_free(kmalloc_caches, n);
+			kmem_cache_free(&node_cache, n);
 		s->node[node] = NULL;
 	}
 }
 
@@ -1916,7 +1919,7 @@ static int init_kmem_cache_nodes(struct
 				node);
 			continue;
 		}
-		n = kmem_cache_alloc_node(kmalloc_caches,
+		n = kmem_cache_alloc_node(&node_cache,
 					gfpflags, node);
 
 		if (!n) {
@@ -2516,9 +2519,9 @@ void __init kmem_cache_init(void)
	 * struct kmem_cache_node's. There is special bootstrap code in
	 * kmem_cache_open for slab_state == DOWN.
	 */
-	create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
+	create_kmalloc_cache(&node_cache, "kmem_cache_node",
 		sizeof(struct kmem_cache_node), GFP_KERNEL);
-	kmalloc_caches[0].refcount = -1;
+	node_cache.refcount = -1;
 
 #endif
 	/* Able to allocate the per node structures */
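
As background on why a separate, statically allocated kmem_cache is needed at all:
the cache that supplies struct kmem_cache_node objects needs a per-node structure
of its own before any cache can allocate one, so the first object is carved out of
a freshly allocated slab by hand. The userspace sketch below illustrates that
chicken-and-egg pattern under simplified assumptions; the mini-allocator and all of
its names (struct cache, node_meta, early_node_alloc) are illustrative stand-ins,
not kernel code.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for struct kmem_cache_node: per-node slab bookkeeping. */
struct node_meta {
	size_t nr_slabs;
	void *partial;
};

/* Stand-in for struct kmem_cache. */
struct cache {
	const char *name;
	size_t size;
	struct node_meta *node;		/* normally allocated from a cache */
};

/*
 * Analogue of the patch's node_cache: because it is statically allocated,
 * it exists before dynamic allocation is up, breaking the cycle
 * "allocating node metadata requires node metadata".
 */
static struct cache node_cache = {
	.name	= "node_meta",
	.size	= sizeof(struct node_meta),
};

/*
 * Mimics early_kmem_cache_node_alloc(): grab a raw "slab" (here just a
 * malloc'ed page) and hand-carve the first node_meta object out of it,
 * so the object describes the very slab it lives on.
 */
static void early_node_alloc(void)
{
	void *page = malloc(4096);	/* stand-in for new_slab() */
	struct node_meta *n = page;	/* first object on the "slab" */

	if (!page)
		abort();
	memset(n, 0, sizeof(*n));
	n->nr_slabs = 1;		/* it accounts for its own slab */
	node_cache.node = n;
}

int main(void)
{
	early_node_alloc();
	printf("%s: object size %zu, nr_slabs %zu\n",
		node_cache.name, node_cache.size, node_cache.node->nr_slabs);
	return 0;
}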