From 7de63170255507dfa53dfb015c9e79de418c6df4 Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Sat, 27 Oct 2007 19:32:50 -0700
Subject: [PATCH] SLUB: Noinline some functions to avoid them being folded into alloc/free

Some functions tend to get folded into __slab_free and __slab_alloc
although they are rarely called. They cause register pressure that
leads to bad code generation.

Signed-off-by: Christoph Lameter
---
 mm/slub.c |   13 +++++++------
 1 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 1ca0f55..ad76744 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -831,8 +831,8 @@ static void setup_object_debug(struct kmem_cache *s, struct page *page,
 	init_tracking(s, object);
 }
 
-static int alloc_debug_processing(struct kmem_cache *s, struct page *page,
-						void *object, void *addr)
+static noinline int alloc_debug_processing(struct kmem_cache *s,
+			struct page *page, void *object, void *addr)
 {
 	if (!check_slab(s, page))
 		goto bad;
@@ -871,8 +871,8 @@ bad:
 	return 0;
 }
 
-static int free_debug_processing(struct kmem_cache *s, struct page *page,
-						void *object, void *addr)
+static noinline int free_debug_processing(struct kmem_cache *s,
+			struct page *page, void *object, void *addr)
 {
 	if (!check_slab(s, page))
 		goto fail;
@@ -1075,7 +1075,8 @@ static void setup_object(struct kmem_cache *s, struct page *page,
 		s->ctor(s, object);
 }
 
-static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
+static noinline struct page *new_slab(struct kmem_cache *s,
+						gfp_t flags, int node)
 {
 	struct page *page;
 	struct kmem_cache_node *n;
@@ -1207,7 +1208,7 @@ static void add_partial(struct kmem_cache_node *n,
 	spin_unlock(&n->list_lock);
 }
 
-static void remove_partial(struct kmem_cache *s,
+static noinline void remove_partial(struct kmem_cache *s,
 						struct page *page)
 {
 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
-- 
debian.1.5.3.7.1-dirty
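
Side note (not part of the patch): the `noinline` annotation used above expands, in the kernel, to GCC's `__attribute__((noinline))`. It keeps a rarely taken slow path from being folded into its hot caller, so the slow path's register usage does not degrade the hot path's code generation. Below is a minimal userspace sketch of the same idea; the function names are hypothetical and the attribute spelling assumes a GCC-compatible compiler.

	#include <stdio.h>

	/* Hypothetical cold error path: kept out of line so it does not
	 * add register pressure to the hot caller (same motivation as
	 * marking alloc_debug_processing() etc. noinline in the patch). */
	static __attribute__((noinline)) int report_error(int code)
	{
		fprintf(stderr, "error: %d\n", code);
		return -1;
	}

	/* Hot path: the common case stays compact because the rarely
	 * taken branch is a call rather than inlined code. */
	static int hot_path(int value)
	{
		if (value < 0)
			return report_error(value);
		return value * 2;
	}

	int main(void)
	{
		printf("%d\n", hot_path(21));
		return 0;
	}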