---
 mm/slub.c |   11 +++++------
 1 file changed, 5 insertions(+), 6 deletions(-)

Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c	2007-11-06 16:19:36.000000000 -0800
+++ linux-2.6/mm/slub.c	2007-11-06 16:26:03.000000000 -0800
@@ -144,14 +144,14 @@
  * Mininum number of partial slabs. These will be left on the partial
  * lists even if they are empty. kmem_cache_shrink may reclaim them.
  */
-#define MIN_PARTIAL 2
+#define MIN_PARTIAL 5
 
 /*
  * Maximum number of desirable partial slabs.
- * The existence of more partial slabs makes kmem_cache_shrink
- * sort the partial list by the number of objects in the.
+ * More slabs cause kmem_cache_shrink to sort the slabs by objects
+ * and trigger slab defragmentation.
  */
-#define MAX_PARTIAL 10
+#define MAX_PARTIAL 20
 
 #define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \
 				SLAB_POISON | SLAB_STORE_USER)
@@ -3019,7 +3019,6 @@ static unsigned long __kmem_cache_shrink
 	struct page *page, *page2;
 	LIST_HEAD(zaplist);
 	int freed = 0;
-	int inuse;
 	unsigned long state;
 
 	spin_lock_irqsave(&n->list_lock, flags);
@@ -3100,10 +3099,10 @@ static unsigned long __kmem_cache_defrag
 	 * then no defragmentation is necessary.
 	 */
 	ratio = (objects_in_full_slabs + count_partial(n)) * 100 / capacity;
+	printk(KERN_INFO "defrag: %s ratio=%ld defrag_ratio=%d\n", s->name, ratio, s->defrag_ratio);
 	if (ratio > s->defrag_ratio)
 		return 0;
 
-	printk(KERN_INFO "defrag ratio=%d defrag_ratio=%d\n", ratio, s->defrag_ratio);
 	return __kmem_cache_shrink(s, n) << s->order;
 }