From: Pekka Enberg

Reduce the NUMA text size of mm/slab.o a little on x86 by using a local
variable to store the result of numa_node_id().

   text    data     bss     dec     hex filename
  16858    2584      16   19458    4c02 mm/slab.o (before)
  16804    2584      16   19404    4bcc mm/slab.o (after)

Cc: Christoph Lameter
Signed-off-by: Pekka Enberg
Signed-off-by: Andrew Morton
---

 mm/slab.c |   23 ++++++++++++++++-------
 1 file changed, 16 insertions(+), 7 deletions(-)

diff -puN mm/slab.c~slab-reduce-numa-text-size mm/slab.c
--- a/mm/slab.c~slab-reduce-numa-text-size
+++ a/mm/slab.c
@@ -1107,15 +1107,18 @@ static inline int cache_free_alien(struc
 	int nodeid = slabp->nodeid;
 	struct kmem_list3 *l3;
 	struct array_cache *alien = NULL;
+	int this_node;
+
+	this_node = numa_node_id();
 
 	/*
 	 * Make sure we are not freeing a object from another node to the array
 	 * cache on this cpu.
 	 */
-	if (likely(slabp->nodeid == numa_node_id()))
+	if (likely(slabp->nodeid == this_node))
 		return 0;
 
-	l3 = cachep->nodelists[numa_node_id()];
+	l3 = cachep->nodelists[this_node];
 	STATS_INC_NODEFREES(cachep);
 	if (l3->alien && l3->alien[nodeid]) {
 		alien = l3->alien[nodeid];
@@ -1353,6 +1356,7 @@ void __init kmem_cache_init(void)
 	struct cache_names *names;
 	int i;
 	int order;
+	int this_node;
 
 	for (i = 0; i < NUM_INIT_LISTS; i++) {
 		kmem_list3_init(&initkmem_list3[i]);
@@ -1387,12 +1391,14 @@ void __init kmem_cache_init(void)
 	 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
 	 */
 
+	this_node = numa_node_id();
+
 	/* 1) create the cache_cache */
 	INIT_LIST_HEAD(&cache_chain);
 	list_add(&cache_cache.next, &cache_chain);
 	cache_cache.colour_off = cache_line_size();
 	cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
-	cache_cache.nodelists[numa_node_id()] = &initkmem_list3[CACHE_CACHE];
+	cache_cache.nodelists[this_node] = &initkmem_list3[CACHE_CACHE];
 
 	cache_cache.buffer_size = ALIGN(cache_cache.buffer_size,
 					cache_line_size());
@@ -1501,7 +1507,7 @@ void __init kmem_cache_init(void)
 		int node;
 		/* Replace the static kmem_list3 structures for the boot cpu */
 		init_list(&cache_cache, &initkmem_list3[CACHE_CACHE],
-			  numa_node_id());
+			  this_node);
 
 		for_each_online_node(node) {
 			init_list(malloc_sizes[INDEX_AC].cs_cachep,
@@ -2922,6 +2928,9 @@ static void *cache_alloc_refill(struct k
 	int batchcount;
 	struct kmem_list3 *l3;
 	struct array_cache *ac;
+	int this_node;
+
+	this_node = numa_node_id();
 
 	check_irq_off();
 	ac = cpu_cache_get(cachep);
@@ -2935,7 +2944,7 @@ retry:
 		 */
 		batchcount = BATCHREFILL_LIMIT;
 	}
-	l3 = cachep->nodelists[numa_node_id()];
+	l3 = cachep->nodelists[this_node];
 
 	BUG_ON(ac->avail > 0 || !l3);
 	spin_lock(&l3->list_lock);
@@ -2965,7 +2974,7 @@ retry:
 			STATS_SET_HIGH(cachep);
 
 			ac->entry[ac->avail++] = slab_get_obj(cachep, slabp,
-							    numa_node_id());
+							    this_node);
 		}
 		check_slabp(cachep, slabp);
 
@@ -2984,7 +2993,7 @@ alloc_done:
 
 	if (unlikely(!ac->avail)) {
 		int x;
-		x = cache_grow(cachep, flags, numa_node_id());
+		x = cache_grow(cachep, flags, this_node);
 
 		/* cache_grow can reenable interrupts, then ac could change. */
 		ac = cpu_cache_get(cachep);
_