From: Christoph Lameter This patch moves the interrupt check from slab_node into ____cache_alloc and adds an "unlikely()" to avoid pipeline stalls on some architectures. Signed-off-by: Christoph Lameter Signed-off-by: Andrew Morton --- mm/mempolicy.c | 3 --- mm/slab.c | 2 +- 2 files changed, 1 insertion(+), 4 deletions(-) diff -puN mm/mempolicy.c~mm-optimize-numa-policy-handling-in-slab-allocator mm/mempolicy.c --- devel/mm/mempolicy.c~mm-optimize-numa-policy-handling-in-slab-allocator 2005-12-22 05:08:48.000000000 -0800 +++ devel-akpm/mm/mempolicy.c 2005-12-22 05:08:48.000000000 -0800 @@ -770,9 +770,6 @@ static unsigned interleave_nodes(struct */ unsigned slab_node(struct mempolicy *policy) { - if (in_interrupt()) - return numa_node_id(); - switch (policy->policy) { case MPOL_INTERLEAVE: return interleave_nodes(policy); diff -puN mm/slab.c~mm-optimize-numa-policy-handling-in-slab-allocator mm/slab.c --- devel/mm/slab.c~mm-optimize-numa-policy-handling-in-slab-allocator 2005-12-22 05:08:48.000000000 -0800 +++ devel-akpm/mm/slab.c 2005-12-22 05:08:48.000000000 -0800 @@ -2530,7 +2530,7 @@ static inline void *____cache_alloc(kmem struct array_cache *ac; #ifdef CONFIG_NUMA - if (current->mempolicy) { + if (unlikely(current->mempolicy && !in_interrupt())) { int nid = slab_node(current->mempolicy); if (nid != numa_node_id()) _