---
 mm/slub.c |   23 +++++++++++++++++------
 1 file changed, 17 insertions(+), 6 deletions(-)

Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c	2008-02-07 23:36:13.054981367 -0800
+++ linux-2.6/mm/slub.c	2008-02-07 23:36:14.379000835 -0800
@@ -179,14 +179,14 @@ static inline void ClearSlabDebug(struct
  * Mininum number of partial slabs. These will be left on the partial
  * lists even if they are empty. kmem_cache_shrink may reclaim them.
  */
-#define MIN_PARTIAL 5
+#define MIN_PARTIAL 20
 
 /*
  * Maximum number of desirable partial slabs.
  * The existence of more partial slabs makes kmem_cache_shrink
  * sort the partial list by the number of objects in the.
  */
-#define MAX_PARTIAL 10
+#define MAX_PARTIAL 40
 
 #define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \
 				SLAB_POISON | SLAB_STORE_USER)
@@ -246,6 +246,8 @@ struct track {
 
 enum track_item { TRACK_ALLOC, TRACK_FREE };
 
+static void add_partial(struct kmem_cache_node *n,
+				struct page *page, int tail);
 #if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
 static int sysfs_slab_add(struct kmem_cache *);
 static int sysfs_slab_alias(struct kmem_cache *, const char *);
@@ -1121,6 +1123,7 @@ static struct page *new_slab(struct kmem
 
 	BUG_ON(flags & GFP_SLAB_BUG_MASK);
 
+redo:
 	page = allocate_slab(s,
 		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
 	if (!page)
@@ -1152,6 +1155,10 @@ static struct page *new_slab(struct kmem
 
 	page->freelist = start;
 	page->inuse = 0;
+	if (n && n->nr_partial < MIN_PARTIAL) {
+		add_partial(n, page, 1);
+		goto redo;
+	}
 out:
 	return page;
 }
@@ -1245,11 +1252,9 @@ static void add_partial(struct kmem_cach
 	spin_unlock(&n->list_lock);
 }
 
-static void remove_partial(struct kmem_cache *s,
+static void remove_partial(struct kmem_cache_node *n,
 						struct page *page)
 {
-	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
-
 	spin_lock(&n->list_lock);
 	list_del(&page->lru);
 	n->nr_partial--;
@@ -1737,7 +1742,13 @@ slab_empty:
 		/*
 		 * Slab still on the partial list.
 		 */
-		remove_partial(s, page);
+		struct kmem_cache_node *n = get_node(s, page_to_nid(page));
+
+		/* If we are low on partials then leave the partial page alone */
+		if (n->nr_partial < MIN_PARTIAL)
+			goto out_unlock;
+
+		remove_partial(n, page);
 		stat(c, FREE_REMOVE_PARTIAL);
 	}
 	slab_unlock(page);
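
For readers following the logic outside the kernel tree, here is a minimal userspace sketch of the behaviour the patch gives new_slab() and the slab_empty path: a node's partial list is prefilled up to MIN_PARTIAL pages before an allocation is handed back, and an empty page is only discarded once the node already holds MIN_PARTIAL partial pages. This is an illustration, not kernel code; the node/page structures and the grow_slab()/free_empty_page() helpers are invented for it, and only MIN_PARTIAL and add_partial() correspond to names in the patch.

/*
 * Standalone sketch (not kernel code) of the partial-list prefill
 * behaviour introduced by this patch. All names here are made up for
 * the illustration except MIN_PARTIAL and add_partial().
 */
#include <stdio.h>
#include <stdlib.h>

#define MIN_PARTIAL 20

struct page {
	struct page *next;		/* partial-list linkage */
};

struct node {
	int nr_partial;			/* pages currently on the partial list */
	struct page *partial;		/* singly linked partial list */
};

/* Park a page on the node's partial list. */
static void add_partial(struct node *n, struct page *page)
{
	page->next = n->partial;
	n->partial = page;
	n->nr_partial++;
}

/*
 * Mirrors the patched new_slab(): keep allocating and parking pages
 * until the node has MIN_PARTIAL of them, then hand one to the caller.
 */
static struct page *grow_slab(struct node *n)
{
	struct page *page;

redo:
	page = calloc(1, sizeof(*page));
	if (!page)
		return NULL;
	if (n->nr_partial < MIN_PARTIAL) {
		add_partial(n, page);
		goto redo;
	}
	return page;
}

/*
 * Mirrors the patched slab_empty path: an empty page stays on the
 * partial list while the node is below MIN_PARTIAL, otherwise it is
 * released.
 */
static void free_empty_page(struct node *n, struct page *page)
{
	if (n->nr_partial < MIN_PARTIAL) {
		add_partial(n, page);
		return;
	}
	free(page);
}

int main(void)
{
	struct node n = { 0 };
	struct page *page = grow_slab(&n);

	printf("partial pages after first allocation: %d\n", n.nr_partial);
	free_empty_page(&n, page);
	printf("partial pages after freeing an empty page: %d\n", n.nr_partial);
	return 0;
}

Running the sketch shows the first allocation leaving MIN_PARTIAL (20) pages parked on the node, after which the freed empty page is actually released because the node is no longer low on partials.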