Index: linux-2.6.18-rc1-mm1/mm/slab.c
===================================================================
--- linux-2.6.18-rc1-mm1.orig/mm/slab.c	2006-07-10 10:33:44.362745604 -0700
+++ linux-2.6.18-rc1-mm1/mm/slab.c	2006-07-11 21:38:35.087397035 -0700
@@ -1030,16 +1030,34 @@ static inline int cache_free_alien(struc
 	int nodeid = slabp->nodeid;
 	struct kmem_list3 *l3;
 	struct array_cache *alien = NULL;
+	int node = numa_node_id();
 
 	/*
 	 * Make sure we are not freeing a object from another node to the array
 	 * cache on this cpu.
 	 */
-	if (likely(slabp->nodeid == numa_node_id()))
+	if (likely(slabp->nodeid == node))
 		return 0;
 
-	l3 = cachep->nodelists[numa_node_id()];
+	l3 = cachep->nodelists[node];
+	if (unlikely(!l3)) {
+		/* No gfp context in the free path; creation may fail. */
+		create_node(cachep, node, GFP_ATOMIC);
+		l3 = cachep->nodelists[node];
+		if (unlikely(!l3)) {
+			/* Fall back: free straight to the object's home node. */
+			STATS_INC_NODEFREES(cachep);
+			spin_lock(&cachep->nodelists[nodeid]->list_lock);
+			free_block(cachep, &objp, 1, nodeid);
+			spin_unlock(&cachep->nodelists[nodeid]->list_lock);
+			return 1;
+		}
+	}
 	STATS_INC_NODEFREES(cachep);
+
+	if (unlikely(!l3->alien || !l3->alien[nodeid]))
+		create_alien(cachep, node, GFP_ATOMIC, nodeid);
+
 	if (l3->alien && l3->alien[nodeid]) {
 		alien = l3->alien[nodeid];
 		spin_lock_nested(&alien->lock, nesting);
@@ -2797,11 +2815,45 @@ bad:
 #define check_slabp(x,y) do { } while(0)
 #endif
 
+static void create_node(struct kmem_cache *cachep, int node, gfp_t flags)
+{
+	struct kmem_list3 *l3;
+
+	l3 = kmalloc_node(sizeof(struct kmem_list3), flags, node);
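+	/* On failure, nodelists[node] stays NULL; callers must handle that. */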
+	if (!l3)
+		return;
+	kmem_list3_init(l3);
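+	/* Stagger next_reap so caches do not all reap at the same time. */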
+	l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
+		    ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
+
+	/*
+	 * The l3s are created on demand and never freed, so readers
+	 * need no locking.  Races between concurrent creators are
+	 * settled by the cmpxchg below.
+	 */
+	if (cmpxchg(cachep->nodelists + node, NULL, l3))
+		/* Someone else beat us to it */
+		kfree(l3);
+}
+
+static void create_shared(struct kmem_cache *cachep, int node, gfp_t flags)
+{
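+	/* Stub: would set up the per-node shared array cache (l3->shared). */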
+}
+
+static void create_alien(struct kmem_cache *cachep, int node, gfp_t flags, int rnode)
+{
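+	/* Stub: would set up l3->alien[rnode] for cache_free_alien(). */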
+}
+
 static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
 {
 	int batchcount;
 	struct kmem_list3 *l3;
 	struct array_cache *ac;
+	int node;
 
 	check_irq_off();
 	ac = cpu_cache_get(cachep);
@@ -2815,7 +2867,16 @@ retry:
 		 */
 		batchcount = BATCHREFILL_LIMIT;
 	}
-	l3 = cachep->nodelists[numa_node_id()];
+
+	node = numa_node_id();
+	l3 = cachep->nodelists[node];
+	if (!l3) {
+		create_node(cachep, node, flags);
+		l3 = cachep->nodelists[node];
+		/* Out of memory for the node structure: fail the allocation. */
+		if (!l3)
+			return NULL;
+	}
 
 	BUG_ON(ac->avail > 0 || !l3);
 	spin_lock(&l3->list_lock);