From c3b3f0e466687c595f112a0b326cea6954209e46 Mon Sep 17 00:00:00 2001
From: Satyam Sharma
Date: Wed, 15 Aug 2007 05:12:41 +0530
Subject: [PATCH] {slub, slob}: use unlikely() for kfree(ZERO_OR_NULL_PTR) check

kfree(NULL) would normally occur only in error paths, and
kfree(ZERO_SIZE_PTR) is uncommon as well, so let's use unlikely() for the
condition check in SLUB's and SLOB's kfree() to optimize for the common
case. SLAB already does this.

[clameter@sgi.com: Apply to all uses of ZERO_OR_NULL_PTR]
Signed-off-by: Satyam Sharma
Signed-off-by: Christoph Lameter
---
 mm/slob.c |  2 +-
 mm/slub.c | 12 ++++++------
 2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/mm/slob.c b/mm/slob.c
index ec33fcd..37a8b9a 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -466,7 +466,7 @@ void kfree(const void *block)
 {
 	struct slob_page *sp;
 
-	if (ZERO_OR_NULL_PTR(block))
+	if (unlikely(ZERO_OR_NULL_PTR(block)))
 		return;
 
 	sp = (struct slob_page *)virt_to_page(block);
diff --git a/mm/slub.c b/mm/slub.c
index 69d02e3..ac0728a 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2401,7 +2401,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 {
 	struct kmem_cache *s = get_slab(size, flags);
 
-	if (ZERO_OR_NULL_PTR(s))
+	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
 	return slab_alloc(s, flags, -1, __builtin_return_address(0));
@@ -2413,7 +2413,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	struct kmem_cache *s = get_slab(size, flags);
 
-	if (ZERO_OR_NULL_PTR(s))
+	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
 	return slab_alloc(s, flags, node, __builtin_return_address(0));
@@ -2426,7 +2426,7 @@ size_t ksize(const void *object)
 	struct page *page;
 	struct kmem_cache *s;
 
-	if (ZERO_OR_NULL_PTR(object))
+	if (unlikely(ZERO_OR_NULL_PTR(object)))
 		return 0;
 
 	page = get_object_page(object);
@@ -2467,7 +2467,7 @@ void kfree(const void *x)
 	 * this comparison would be true for all "negative" pointers
 	 * (which would cover the whole upper half of the address space).
 	 */
-	if (ZERO_OR_NULL_PTR(x))
+	if (unlikely(ZERO_OR_NULL_PTR(x)))
 		return;
 
 	page = virt_to_head_page(x);
@@ -2777,7 +2777,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
 {
 	struct kmem_cache *s = get_slab(size, gfpflags);
 
-	if (ZERO_OR_NULL_PTR(s))
+	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
 	return slab_alloc(s, gfpflags, -1, caller);
@@ -2788,7 +2788,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 {
 	struct kmem_cache *s = get_slab(size, gfpflags);
 
-	if (ZERO_OR_NULL_PTR(s))
+	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
 	return slab_alloc(s, gfpflags, node, caller);
-- 
1.5.2.4
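
For readers unfamiliar with the annotation: unlikely() is the kernel's
wrapper around GCC's __builtin_expect(), which tells the compiler which way
a branch is expected to go so the cold path can be laid out out of line.
Below is a minimal userspace sketch, not kernel code: the two macro
definitions mirror include/linux/compiler.h and include/linux/slab.h from
this kernel era, while demo_kfree() and its printf() body are hypothetical
stand-ins for the real kfree().

#include <stdio.h>

/* Mirrors include/linux/compiler.h: hint that the condition is false. */
#define unlikely(x)	__builtin_expect(!!(x), 0)

/* Mirrors include/linux/slab.h: ZERO_SIZE_PTR is returned for zero-sized
 * kmalloc() requests; the <= comparison catches it and NULL in one test. */
#define ZERO_SIZE_PTR		((void *)16)
#define ZERO_OR_NULL_PTR(x)	((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)

/* Hypothetical stand-in for kfree(), showing the pattern the patch adds. */
static void demo_kfree(const void *x)
{
	/*
	 * __builtin_expect(!!(cond), 0) marks the early return as the
	 * cold path, so the common case runs straight through.
	 */
	if (unlikely(ZERO_OR_NULL_PTR(x)))
		return;

	printf("would free %p here\n", (void *)x);
}

int main(void)
{
	int obj = 42;

	demo_kfree(NULL);		/* rare: typically an error path */
	demo_kfree(ZERO_SIZE_PTR);	/* rare: zero-sized kmalloc() result */
	demo_kfree(&obj);		/* common case: falls through */
	return 0;
}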