From: Christoph Lameter This patch moves the interrupt check from slab_node into ____cache_alloc and adds an "unlikely()" to avoid pipeline stalls on some architectures. Signed-off-by: Christoph Lameter Signed-off-by: Andrew Morton --- mm/mempolicy.c | 3 --- mm/slab.c | 2 +- 2 files changed, 1 insertion(+), 4 deletions(-) diff -puN mm/mempolicy.c~mm-optimize-numa-policy-handling-in-slab-allocator mm/mempolicy.c --- 25/mm/mempolicy.c~mm-optimize-numa-policy-handling-in-slab-allocator Tue Jan 17 16:24:07 2006 +++ 25-akpm/mm/mempolicy.c Tue Jan 17 16:24:07 2006 @@ -1107,9 +1107,6 @@ static unsigned interleave_nodes(struct */ unsigned slab_node(struct mempolicy *policy) { - if (in_interrupt()) - return numa_node_id(); - switch (policy->policy) { case MPOL_INTERLEAVE: return interleave_nodes(policy); diff -puN mm/slab.c~mm-optimize-numa-policy-handling-in-slab-allocator mm/slab.c --- 25/mm/slab.c~mm-optimize-numa-policy-handling-in-slab-allocator Tue Jan 17 16:24:07 2006 +++ 25-akpm/mm/slab.c Tue Jan 17 16:24:07 2006 @@ -2574,7 +2574,7 @@ static inline void *____cache_alloc(kmem struct array_cache *ac; #ifdef CONFIG_NUMA - if (current->mempolicy) { + if (unlikely(current->mempolicy && !in_interrupt())) { int nid = slab_node(current->mempolicy); if (nid != numa_node_id()) _