---
 mm/slub.c |   14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

Index: slub/mm/slub.c
===================================================================
--- slub.orig/mm/slub.c	2007-05-06 08:03:51.000000000 -0700
+++ slub/mm/slub.c	2007-05-06 08:17:40.000000000 -0700
@@ -136,14 +136,14 @@
  * Mininum number of partial slabs. These will be left on the partial
  * lists even if they are empty. kmem_cache_shrink may reclaim them.
  */
-#define MIN_PARTIAL 2
+#define MIN_PARTIAL 20
 
 /*
  * Maximum number of desirable partial slabs.
  * The existence of more partial slabs makes kmem_cache_shrink
  * sort the partial list by the number of objects in the.
  */
-#define MAX_PARTIAL 10
+#define MAX_PARTIAL 30
 
 #define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \
 				SLAB_POISON | SLAB_STORE_USER)
@@ -1133,8 +1133,12 @@ static struct page *get_partial(struct k
 {
 	struct page *page;
 	int searchnode = (node == -1) ? numa_node_id() : node;
+	struct kmem_cache_node *n = get_node(s, searchnode);
 
-	page = get_partial_node(get_node(s, searchnode));
+	if (n->nr_partial < MIN_PARTIAL)
+		return NULL;
+
+	page = get_partial_node(n);
 	if (page || (flags & __GFP_THISNODE))
 		return page;
 
@@ -1155,7 +1159,7 @@ static void putback_slab(struct kmem_cac
 
 	if (page->inuse) {
 		if (page->freelist)
-			add_partial(n, page);
+			add_partial_tail(n, page);
 		else if (DebugSlab(page) && (s->flags & SLAB_STORE_USER))
 			add_full(n, page);
 		slab_unlock(page);
@@ -1170,7 +1174,7 @@ static void putback_slab(struct kmem_cac
 		 * partial list stays small. kmem_cache_shrink can
 		 * reclaim empty slabs from the partial list.
 		 */
-		add_partial_tail(n, page);
+		add_partial(n, page);
 		slab_unlock(page);
 	} else {
 		slab_unlock(page);
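
For clarity, this is roughly what get_partial() looks like with the hunk
above applied. Only the lines inside the hunk come from the diff; the
full parameter list and the trailing fallback to get_any_partial() are
reconstructed from the surrounding slub.c context and should be read as
assumptions, not as part of this patch:

/* Sketch of get_partial() after this patch; lines outside the hunk are
 * reconstructed from context, not taken from the diff itself.
 */
static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
{
	struct page *page;
	int searchnode = (node == -1) ? numa_node_id() : node;
	struct kmem_cache_node *n = get_node(s, searchnode);

	/*
	 * New in this patch: if the node holds fewer than MIN_PARTIAL
	 * partial slabs, refuse to hand one out, so the caller allocates
	 * a fresh slab instead of draining the node's partial list.
	 */
	if (n->nr_partial < MIN_PARTIAL)
		return NULL;

	page = get_partial_node(n);
	if (page || (flags & __GFP_THISNODE))
		return page;

	/* Assumed from the surrounding code: fall back to other nodes. */
	return get_any_partial(s, flags);
}

The putback_slab() hunks complement this by swapping head and tail
insertion: slabs that still contain free objects now go to the tail of
the partial list (add_partial_tail), while empty slabs go to the head
(add_partial), keeping them visible to kmem_cache_shrink for reclaim.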