From 9ef78039b72f596f9e87115b57c1c4c7dedb978b Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Mon, 7 Jan 2008 23:20:28 -0800
Subject: [PATCH] Move kmem_cache_node determination into add_full/partial

The kmem_cache_node determination can be moved into add_full() and
add_partial(). This removes some code from the slab_free() slow path
and reduces the register overhead that has to be managed in the slow
path.

Signed-off-by: Christoph Lameter
Reviewed-by: Pekka Enberg
Signed-off-by: Andrew Morton
---
 mm/slub.c |   29 +++++++++++++++++------------
 1 files changed, 17 insertions(+), 12 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 1c58bce..68ca29f 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -803,8 +803,12 @@ static void trace(struct kmem_cache *s, struct page *page, void *object, int all
 /*
  * Tracking of fully allocated slabs for debugging purposes.
  */
-static void add_full(struct kmem_cache_node *n, struct page *page)
+static void add_full(struct kmem_cache *s, struct page *page)
 {
+	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
+
+	if (!SlabDebug(page) || !(s->flags & SLAB_STORE_USER))
+		return;
 	spin_lock(&n->list_lock);
 	list_add(&page->lru, &n->full);
 	spin_unlock(&n->list_lock);
@@ -1027,7 +1031,7 @@ static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
 			{ return 1; }
 static inline int check_object(struct kmem_cache *s, struct page *page,
 			void *object, int active) { return 1; }
-static inline void add_full(struct kmem_cache_node *n, struct page *page) {}
+static inline void add_full(struct kmem_cache *s, struct page *page) {}
 static inline unsigned long kmem_cache_flags(unsigned long objsize,
 	unsigned long flags, const char *name,
 	void (*ctor)(struct kmem_cache *, void *))
@@ -1198,9 +1202,11 @@ static __always_inline int slab_trylock(struct page *page)
 /*
  * Management of partially allocated slabs
  */
-static void add_partial(struct kmem_cache_node *n,
+static void add_partial(struct kmem_cache *s,
 				struct page *page, int tail)
 {
+	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
+
 	spin_lock(&n->list_lock);
 	n->nr_partial++;
 	if (tail)
@@ -1337,19 +1343,18 @@ static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
  */
 static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 {
-	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
-
 	ClearSlabFrozen(page);
 	if (page->inuse) {
 
 		if (page->freelist)
-			add_partial(n, page, tail);
-		else if (SlabDebug(page) && (s->flags & SLAB_STORE_USER))
-			add_full(n, page);
+			add_partial(s, page, tail);
+		else
+			add_full(s, page);
 		slab_unlock(page);
 
 	} else {
-		if (n->nr_partial < MIN_PARTIAL) {
+		if (get_node(s, page_to_nid(page))->nr_partial
+			< MIN_PARTIAL) {
 			/*
 			 * Adding an empty slab to the partial slabs in order
 			 * to avoid page allocator overhead. This slab needs
@@ -1358,7 +1363,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 			 * partial list stays small. kmem_cache_shrink can
 			 * reclaim empty slabs from the partial list.
 			 */
-			add_partial(n, page, 1);
+			add_partial(s, page, 1);
 			slab_unlock(page);
 		} else {
 			slab_unlock(page);
@@ -1616,7 +1621,7 @@ checks_ok:
 	 * then add it.
 	 */
 	if (unlikely(!prior))
-		add_partial(get_node(s, page_to_nid(page)), page, 1);
+		add_partial(s, page, 1);
 
 out_unlock:
 	slab_unlock(page);
@@ -2024,7 +2029,7 @@ static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags,
 #endif
	init_kmem_cache_node(n);
	atomic_long_inc(&n->nr_slabs);
-	add_partial(n, page, 0);
+	add_partial(kmalloc_caches, page, 0);
	return n;
 }
 
-- 
debian.1.5.3.7.1-dirty
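
For illustration, a minimal userspace sketch of the calling-convention change
(not kernel code and not part of the applied diff; struct cache, struct
cache_node, struct page_stub, add_partial_old/new and MAX_NODES are
hypothetical stand-ins for struct kmem_cache, struct kmem_cache_node,
struct page, add_partial() and get_node()/page_to_nid()): before the patch,
callers looked up the per-node structure and passed it in; afterwards the
helper derives it from the page, so the slab_free() slow path no longer has
to compute and carry that pointer.

#include <stdio.h>

#define MAX_NODES 2

struct cache_node {
	int nr_partial;		/* stand-in for the per-node partial count */
};

struct cache {
	struct cache_node node[MAX_NODES];	/* stand-in for s->node[] */
};

struct page_stub {
	int nid;		/* stand-in for page_to_nid(page) */
};

/* Before: the caller looked up the node and passed it in. */
static void add_partial_old(struct cache_node *n, struct page_stub *page)
{
	(void)page;		/* the real code also links page->lru */
	n->nr_partial++;
}

/* After: the helper derives the node from the page itself. */
static void add_partial_new(struct cache *s, struct page_stub *page)
{
	struct cache_node *n = &s->node[page->nid];

	n->nr_partial++;
}

int main(void)
{
	struct cache s = { { { 0 }, { 0 } } };
	struct page_stub page = { 1 };

	/* Old convention: caller computes and carries the node pointer. */
	add_partial_old(&s.node[page.nid], &page);

	/* New convention: only the cache and the page are passed. */
	add_partial_new(&s, &page);

	printf("node %d partial count: %d\n",
	       page.nid, s.node[page.nid].nr_partial);
	return 0;
}

Passing the kmem_cache rather than the kmem_cache_node means callers such as
__slab_free() keep one fewer live value across the slow path, which is where
the register-overhead saving mentioned in the changelog comes from.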