Index: linux-2.6.21-rc5-mm2/include/linux/slub_def.h
===================================================================
--- linux-2.6.21-rc5-mm2.orig/include/linux/slub_def.h	2007-03-30 14:46:07.000000000 -0700
+++ linux-2.6.21-rc5-mm2/include/linux/slub_def.h	2007-03-30 15:01:05.000000000 -0700
@@ -28,8 +28,11 @@ struct kmem_cache {
 	int objsize;		/* The size of an object without meta data */
 	int offset;		/* Free pointer offset. */
 	atomic_t cpu_slabs;	/* != 0 -> flusher scheduled. */
-	int defrag_ratio;
 	unsigned int order;
+#ifdef CONFIG_NUMA
+	int defrag_ratio;
+	struct kmem_cache_node **node;
+#endif
 
 	/*
 	 * Avoid an extra cache line for UP, SMP and for the node local to
@@ -52,9 +55,6 @@ struct kmem_cache {
 	struct delayed_work flush;
 	struct mutex flushing;
 #endif
-#ifdef CONFIG_NUMA
-	struct kmem_cache_node *node[MAX_NUMNODES];
-#endif
 	struct page *cpu_slab[NR_CPUS];
 };
 
Index: linux-2.6.21-rc5-mm2/mm/slub.c
===================================================================
--- linux-2.6.21-rc5-mm2.orig/mm/slub.c	2007-03-30 14:43:58.000000000 -0700
+++ linux-2.6.21-rc5-mm2/mm/slub.c	2007-03-30 15:00:19.000000000 -0700
@@ -141,10 +141,10 @@ static void sysfs_slab_remove(struct kme
 struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
 {
 #ifdef CONFIG_NUMA
-	return s->node[node];
-#else
-	return &s->local_node;
+	if (s->node)
+		return s->node[node];
 #endif
+	return &s->local_node;
 }
 
 /*
@@ -1413,12 +1413,33 @@ static int init_kmem_cache_nodes(struct
 #ifdef CONFIG_NUMA
 	int node;
 	int local_node;
+	int node_array_size = sizeof(struct kmem_cache_node *) * nr_node_ids;
 
 	if (slab_state >= UP)
 		local_node = page_to_nid(virt_to_page(s));
 	else
 		local_node = 0;
 
+	if (nr_node_ids > 1) {
+		if (slab_state > PARTIAL)
+			s->node = kmalloc(node_array_size, GFP_KERNEL);
+		else {
+			/* Hmmm... No kmalloc cache yet so allocate pages */
+			struct page *page;
+
+			page = alloc_pages(GFP_KERNEL,
+				get_order(node_array_size));
+			if (!page)
+				return 0;
+
+			s->node = page_address(page);
+		}
+		if (!s->node)
+			return 0;
+
+		memset(s->node, 0, node_array_size);
+	}
+
 	for_each_online_node(node) {
 		struct kmem_cache_node *n;