From: Christoph Lameter

The kmem_cache_node determination can be moved into add_full() and
add_partial().  This removes some code from the slab_free() slow path and
reduces the register overhead that has to be managed in the slow path.

Signed-off-by: Christoph Lameter
Reviewed-by: Pekka Enberg
Signed-off-by: Andrew Morton
---

 mm/slub.c |   29 +++++++++++++++++------------
 1 file changed, 17 insertions(+), 12 deletions(-)

diff -puN mm/slub.c~slub-move-kmem_cache_node-determination-into-add_full-and-add_partial mm/slub.c
--- a/mm/slub.c~slub-move-kmem_cache_node-determination-into-add_full-and-add_partial
+++ a/mm/slub.c
@@ -800,8 +800,12 @@ static void trace(struct kmem_cache *s,
 /*
  * Tracking of fully allocated slabs for debugging purposes.
  */
-static void add_full(struct kmem_cache_node *n, struct page *page)
+static void add_full(struct kmem_cache *s, struct page *page)
 {
+	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
+
+	if (!SlabDebug(page) || !(s->flags & SLAB_STORE_USER))
+		return;
 	spin_lock(&n->list_lock);
 	list_add(&page->lru, &n->full);
 	spin_unlock(&n->list_lock);
@@ -1024,7 +1028,7 @@ static inline int slab_pad_check(struct
 			{ return 1; }
 static inline int check_object(struct kmem_cache *s, struct page *page,
 			void *object, int active) { return 1; }
-static inline void add_full(struct kmem_cache_node *n, struct page *page) {}
+static inline void add_full(struct kmem_cache *s, struct page *page) {}
 static inline unsigned long kmem_cache_flags(unsigned long objsize,
 	unsigned long flags, const char *name,
 	void (*ctor)(struct kmem_cache *, void *))
@@ -1195,9 +1199,11 @@ static __always_inline int slab_trylock(
 /*
  * Management of partially allocated slabs
  */
-static void add_partial(struct kmem_cache_node *n,
+static void add_partial(struct kmem_cache *s,
 				struct page *page, int tail)
 {
+	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
+
 	spin_lock(&n->list_lock);
 	n->nr_partial++;
 	if (tail)
@@ -1334,19 +1340,18 @@ static struct page *get_partial(struct k
  */
 static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 {
-	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
-
 	ClearSlabFrozen(page);
 	if (page->inuse) {
 
 		if (page->freelist)
-			add_partial(n, page, tail);
-		else if (SlabDebug(page) && (s->flags & SLAB_STORE_USER))
-			add_full(n, page);
+			add_partial(s, page, tail);
+		else
+			add_full(s, page);
 		slab_unlock(page);
 
 	} else {
-		if (n->nr_partial < MIN_PARTIAL) {
+		if (get_node(s, page_to_nid(page))->nr_partial
+			< MIN_PARTIAL) {
 			/*
 			 * Adding an empty slab to the partial slabs in order
 			 * to avoid page allocator overhead. This slab needs
@@ -1355,7 +1360,7 @@ static void unfreeze_slab(struct kmem_ca
 			 * partial list stays small. kmem_cache_shrink can
 			 * reclaim empty slabs from the partial list.
 			 */
-			add_partial(n, page, 1);
+			add_partial(s, page, 1);
 			slab_unlock(page);
 		} else {
 			slab_unlock(page);
@@ -1613,7 +1618,7 @@ checks_ok:
 	 * then add it.
 	 */
 	if (unlikely(!prior))
-		add_partial(get_node(s, page_to_nid(page)), page, 0);
+		add_partial(s, page, 0);
 
 out_unlock:
 	slab_unlock(page);
@@ -2021,7 +2026,7 @@ static struct kmem_cache_node *early_kme
 #endif
 	init_kmem_cache_node(n);
 	atomic_long_inc(&n->nr_slabs);
-	add_partial(n, page, 0);
+	add_partial(kmalloc_caches, page, 0);
 	return n;
 }
 
_
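
Illustration only, not part of the patch: a minimal userspace C sketch of the
refactoring pattern, using made-up cache/node stand-in types (cache_node_stub,
get_node_stub, page_to_nid_stub are hypothetical, not SLUB's real structures).
It shows the idea of the change: the list helper derives the per-node structure
itself, so the caller on the free slow path no longer computes and carries it.

/*
 * Simplified sketch of "move the node lookup into the helper".
 * Compile with: cc -Wall sketch.c -o sketch
 */
#include <stdio.h>

struct cache_node_stub {
	int nr_partial;
};

struct cache_stub {
	struct cache_node_stub node[2];	/* one entry per fake "NUMA node" */
};

/* Stand-in for page_to_nid(): which node a page belongs to. */
static int page_to_nid_stub(int page)
{
	return page % 2;
}

/* Stand-in for get_node(): map (cache, node id) to the node structure. */
static struct cache_node_stub *get_node_stub(struct cache_stub *s, int nid)
{
	return &s->node[nid];
}

/*
 * After the change, the helper takes the cache itself, mirroring
 * add_partial(struct kmem_cache *s, ...) in the patched mm/slub.c.
 */
static void add_partial_stub(struct cache_stub *s, int page)
{
	struct cache_node_stub *n = get_node_stub(s, page_to_nid_stub(page));

	n->nr_partial++;
}

int main(void)
{
	struct cache_stub s = { .node = { { 0 }, { 0 } } };

	/* The caller (the "slow path") no longer needs the node lookup. */
	add_partial_stub(&s, 3);
	add_partial_stub(&s, 4);

	printf("node0 partial=%d node1 partial=%d\n",
	       s.node[0].nr_partial, s.node[1].nr_partial);
	return 0;
}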