From: Eric Dumazet

We can avoid allocating empty shared caches and avoid an unnecessary check of
cache->limit.  We save some memory.  We avoid bringing unnecessary cache lines
into the CPU cache.

All accesses to l3->shared already check for NULL pointers, so this patch is
safe.

Signed-off-by: Eric Dumazet
Acked-by: Pekka Enberg
Cc: Christoph Lameter
Signed-off-by: Andrew Morton
---

 mm/slab.c |   26 +++++++++++++++-----------
 1 files changed, 15 insertions(+), 11 deletions(-)

diff -puN mm/slab.c~slab-dont-allocate-empty-shared-caches mm/slab.c
--- a/mm/slab.c~slab-dont-allocate-empty-shared-caches
+++ a/mm/slab.c
@@ -1223,19 +1223,20 @@ static int __cpuinit cpuup_callback(stru
 		 */
 		list_for_each_entry(cachep, &cache_chain, next) {
 			struct array_cache *nc;
-			struct array_cache *shared;
+			struct array_cache *shared = NULL;
 			struct array_cache **alien = NULL;
 
 			nc = alloc_arraycache(node, cachep->limit,
 						cachep->batchcount);
 			if (!nc)
 				goto bad;
-			shared = alloc_arraycache(node,
+			if (cachep->shared) {
+				shared = alloc_arraycache(node,
 					cachep->shared * cachep->batchcount,
 					0xbaadf00d);
-			if (!shared)
-				goto bad;
-
+				if (!shared)
+					goto bad;
+			}
 			if (use_alien_caches) {
 				alien = alloc_alien_cache(node, cachep->limit);
 				if (!alien)
@@ -1317,8 +1318,8 @@ static int __cpuinit cpuup_callback(stru
 
 		shared = l3->shared;
 		if (shared) {
-			free_block(cachep, l3->shared->entry,
-				   l3->shared->avail, node);
+			free_block(cachep, shared->entry,
+				   shared->avail, node);
 			l3->shared = NULL;
 		}
 
@@ -3867,12 +3868,15 @@ static int alloc_kmemlist(struct kmem_ca
 			goto fail;
 		}
 
-		new_shared = alloc_arraycache(node,
+		new_shared = NULL;
+		if (cachep->shared) {
+			new_shared = alloc_arraycache(node,
 				cachep->shared*cachep->batchcount,
 				0xbaadf00d);
-		if (!new_shared) {
-			free_alien_cache(new_alien);
-			goto fail;
+			if (!new_shared) {
+				free_alien_cache(new_alien);
+				goto fail;
+			}
 		}
 
 		l3 = cachep->nodelists[node];
_
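
As context for the safety claim in the changelog, here is a minimal
illustrative sketch (not mm/slab.c itself; the struct and function names
below are simplified stand-ins) of the consumer-side pattern the patch
relies on: every user of l3->shared tests the pointer before dereferencing
it, so leaving it NULL when cachep->shared is zero simply makes those paths
fall through to the slower allocation path.

/* Illustrative sketch only -- simplified stand-ins, not the kernel's code. */
struct array_cache {
	unsigned int avail;		/* objects currently cached */
	unsigned int limit;		/* capacity of entry[] */
	void *entry[];
};

struct node_list {
	struct array_cache *shared;	/* may now legitimately stay NULL */
};

/* Returns nonzero if an object could be taken from the shared cache. */
static int refill_from_shared(struct node_list *l3)
{
	/* The NULL test is what keeps an absent shared cache safe. */
	if (l3->shared && l3->shared->avail) {
		l3->shared->avail--;
		return 1;
	}
	return 0;	/* no shared cache (or it is empty): use the slow path */
}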