Index: linux-2.6.19-mm1/include/linux/slub_def.h
===================================================================
--- linux-2.6.19-mm1.orig/include/linux/slub_def.h	2006-12-13 16:25:42.000000000 -0800
+++ linux-2.6.19-mm1/include/linux/slub_def.h	2006-12-14 10:08:15.000000000 -0800
@@ -16,12 +16,15 @@
 struct active_slab {
 	struct page *page;
 	struct kmem_cache *slab;
+	void **freelist;
+	int nr_free;
 	int referenced;
 #ifdef CONFIG_SMP
 	int flush_active;
 	struct delayed_work flush;
 #endif
 }  ____cacheline_aligned_in_smp;
+
 /*
  * Slab cache management.
  */
Index: linux-2.6.19-mm1/mm/slub.c
===================================================================
--- linux-2.6.19-mm1.orig/mm/slub.c	2006-12-13 16:25:42.000000000 -0800
+++ linux-2.6.19-mm1/mm/slub.c	2006-12-14 10:08:18.000000000 -0800
@@ -459,9 +459,36 @@
 static void __always_inline deactivate_slab(struct active_slab *a)
 {
 	struct page *page = a->page;
+	struct kmem_cache *s = a->slab;
 
+	if (a->nr_free) {
+		if (unlikely(page->freelist)) {
+			/*
+			 * Deal with the rare case where we have two
+			 * freelists.
+			 *
+			 * Merge the two freelists. The freelist in the
+			 * active slab comes first.
+			 */
+			void **freelist = page->freelist;
+			void **p;
+
+			page->freelist = a->freelist;
+
+			page->inuse -= a->nr_free;
+			for (p = a->freelist; p[s->offset]; p = p[s->offset])
+				;
+			p[s->offset] = freelist;
+
+		} else {
+			page->freelist = a->freelist;
+			page->inuse -= a->nr_free;
+		}
+	}
 	a->page = NULL;
 	a->referenced = 0;
+	a->nr_free = 0;
+	a->freelist = NULL;
 	__ClearPageActive(page);
 
 	putback_slab(a->slab, page);
@@ -530,56 +557,45 @@
 	struct active_slab *a;
 	struct page *page;
 	void **object;
-	void *next_object;
 	unsigned long flags;
 
 	local_irq_save(flags);
 	a = ACTIVE_SLAB(s, smp_processor_id());
-	if (!a->page)
+	if (unlikely(!a->page))
 		goto new_slab;
 
+	if (likely(a->nr_free))
+		goto have_object;
+
 	slab_lock(a->page);
 	check_free_chain(s, a->page);
-	if (unlikely(!a->page->freelist))
-		goto another_slab;
-
-	if (unlikely(node != -1 && page_to_nid(a->page) != node))
-		goto another_slab;
-redo:
-	a->page->inuse++;
-	object = a->page->freelist;
-	a->page->freelist = next_object = object[a->page->offset];
-	a->referenced = 1;
-	slab_unlock(a->page);
-	local_irq_restore(flags);
-	return object;
+	if (a->page->freelist && (node == -1 || page_to_nid(a->page) == node))
+		goto switch_freelist;
 
-another_slab:
-	deactivate_slab(a);
+	/* Slab is locked but has no usable objects (or is on the wrong node) */
+	deactivate_slab(a);
 
 new_slab:
 	page = get_partial(s, gfpflags, node);
-	if (page)
-		goto gotpage;
-
-	page = new_slab(s, flags, node);
 	if (!page) {
-		local_irq_restore(flags);
-		return NULL;
-	}
+		page = new_slab(s, gfpflags, node);
+		if (!page) {
+			local_irq_restore(flags);
+			return NULL;
+		}
 
-	/*
-	 * There is no point in putting single object slabs
-	 * on an active list.
-	 */
-	if (unlikely(s->objects == 1)) {
-		local_irq_restore(flags);
-		return page_address(a->page);
-	}
+		/*
+		 * There is no point in putting single object slabs
+		 * on an active list.
+		 */
+		if (unlikely(s->objects == 1)) {
+			local_irq_restore(flags);
+			return page_address(page);
+		}
 
-	slab_lock(a->page);
+		slab_lock(page);
+	}
 
-gotpage:
 	if (a->page) {
 		slab_unlock(page);
 		discard_slab(s, page);
@@ -596,7 +612,22 @@
 		schedule_delayed_work(&a->flush, 2 * HZ);
 	}
 #endif
-	goto redo;
+
+switch_freelist:
+	a->freelist = a->page->freelist;
+	a->page->freelist = NULL;
+	a->nr_free = s->objects - a->page->inuse;
+	a->page->inuse += a->nr_free;
+	slab_unlock(a->page);
+
+have_object:
+	/* Fastpath */
+	object = a->freelist;
+	a->nr_free--;
+	a->referenced = 1;
+	a->freelist = object[a->page->offset];
+	local_irq_restore(flags);
+	return object;
 }
 
 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
@@ -710,7 +741,6 @@
 	dump_stack();
 	printk(KERN_CRIT "***** Trying to continue by not "
 			"freeing object.\n");
-	return;
 #endif
 }
 EXPORT_SYMBOL(kmem_cache_free);
@@ -1064,7 +1094,7 @@
 	unregister_slab(s);
 
 #ifdef CONFIG_NUMA
-	for_each_cpu(cpu)
+	for_each_online_cpu(cpu)
 		kfree(ACTIVE_SLAB(s, cpu));
 #endif
 	return 0;