From d2a5526347b620d0a612fe85673addedccb79264 Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Thu, 9 Aug 2007 07:51:46 -0700
Subject: [PATCH] SLUB: Consolidate add_partial and add_partial_tail to one function

Add a parameter to add_partial instead of having separate functions.  That
allows detailed control, from multiple places, over where slabs are put
back on the partial list.

If we put slabs back at the front of the list then they are likely to be
used immediately for allocations.  If they are put at the end then we can
maximize the time that the partial slabs spend without allocations.

When deactivating a slab we can put the slabs that had remote objects
freed to them at the end of the list so that the cache lines can cool
down.  Slabs that had objects freed to them from the local cpu are put at
the front of the list to be reused ASAP in order to exploit the cache-hot
state.

[This patch is already in mm]

Signed-off-by: Christoph Lameter
---
 mm/slub.c | 29 ++++++++++++++---------------
 1 files changed, 14 insertions(+), 15 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 5cc4b7d..1ca0f55 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1195,19 +1195,15 @@ static __always_inline int slab_trylock(struct page *page)
 /*
  * Management of partially allocated slabs
  */
-static void add_partial_tail(struct kmem_cache_node *n, struct page *page)
+static void add_partial(struct kmem_cache_node *n,
+				struct page *page, int tail)
 {
 	spin_lock(&n->list_lock);
 	n->nr_partial++;
-	list_add_tail(&page->lru, &n->partial);
-	spin_unlock(&n->list_lock);
-}
-
-static void add_partial(struct kmem_cache_node *n, struct page *page)
-{
-	spin_lock(&n->list_lock);
-	n->nr_partial++;
-	list_add(&page->lru, &n->partial);
+	if (tail)
+		list_add_tail(&page->lru, &n->partial);
+	else
+		list_add(&page->lru, &n->partial);
 	spin_unlock(&n->list_lock);
 }
 
@@ -1335,7 +1331,7 @@ static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
  *
  * On exit the slab lock will have been dropped.
  */
-static void unfreeze_slab(struct kmem_cache *s, struct page *page)
+static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 {
 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
 
@@ -1343,7 +1339,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page)
 
 	if (page->inuse) {
 		if (page->freelist)
-			add_partial(n, page);
+			add_partial(n, page, tail);
 		else if (SlabDebug(page) && (s->flags & SLAB_STORE_USER))
 			add_full(n, page);
 		slab_unlock(page);
@@ -1358,7 +1354,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page)
 		 * partial list stays small. kmem_cache_shrink can
 		 * reclaim empty slabs from the partial list.
 		 */
-		add_partial_tail(n, page);
+		add_partial(n, page, 1);
 		slab_unlock(page);
 	} else {
 		slab_unlock(page);
@@ -1373,6 +1369,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page)
 static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 {
 	struct page *page = c->page;
+	int tail = 1;
 	/*
 	 * Merge cpu freelist into freelist. Typically we get here
 	 * because both freelists are empty. So this is unlikely
@@ -1381,6 +1378,8 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 	while (unlikely(c->freelist)) {
 		void **object;
 
+		tail = 0;	/* Hot objects. Put the slab first */
+
 		/* Retrieve object from cpu_freelist */
 		object = c->freelist;
 		c->freelist = c->freelist[c->offset];
@@ -1391,7 +1390,7 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 		page->inuse--;
 	}
 	c->page = NULL;
-	unfreeze_slab(s, page);
+	unfreeze_slab(s, page, tail);
 }
 
 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
@@ -2021,7 +2020,7 @@ static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags,
 #endif
 	init_kmem_cache_node(n);
 	atomic_long_inc(&n->nr_slabs);
-	add_partial(n, page);
+	add_partial(n, page, 0);
 	return n;
 }
-- 
debian.1.5.3.7.1-dirty
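
For readers who want to see the placement semantics in isolation, here is
a minimal userspace sketch of the tail flag.  It mimics the effect of
list_add() vs. list_add_tail() on a sentinel-headed circular doubly
linked list; struct slab and the partial list below are illustrative
stand-ins, not the kernel's types.

	#include <stdio.h>

	struct slab {
		int id;
		struct slab *prev, *next;
	};

	/* Sentinel head, analogous to the kernel's list_head. */
	static struct slab partial = { 0, &partial, &partial };

	/* One function, one flag: head insert (tail == 0) or tail insert (tail == 1). */
	static void add_partial(struct slab *s, int tail)
	{
		struct slab *pos = tail ? partial.prev : &partial;

		/* Link s in after pos. */
		s->prev = pos;
		s->next = pos->next;
		pos->next->prev = s;
		pos->next = s;
	}

	int main(void)
	{
		struct slab a = { 1, 0, 0 }, b = { 2, 0, 0 }, c = { 3, 0, 0 };

		add_partial(&a, 1);	/* remote frees: cool down at the tail */
		add_partial(&b, 0);	/* local frees: cache hot, reuse first */
		add_partial(&c, 1);

		for (struct slab *s = partial.next; s != &partial; s = s->next)
			printf("slab %d\n", s->id);	/* prints 2, 1, 3 */
		return 0;
	}

The flag selects between the same two positions the patch selects
between: tail == 0 puts a cache-hot slab at the front, where the partial
list is scanned first for the next allocation, and tail == 1 parks a
cold slab at the end, where it may sit long enough to be reclaimed by
kmem_cache_shrink.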