From: Christoph Lameter

Move irq handling out of new slab into __slab_alloc. That is useful for
Mathieu's cmpxchg_local patchset and also allows us to remove the crude
local_irq_off in early_kmem_cache_alloc().

Signed-off-by: Christoph Lameter
Signed-off-by: Andrew Morton
---

 mm/slub.c |   18 +++++++-----------
 1 file changed, 7 insertions(+), 11 deletions(-)

diff -puN mm/slub.c~slub-simplify-irq-off-handling mm/slub.c
--- a/mm/slub.c~slub-simplify-irq-off-handling
+++ a/mm/slub.c
@@ -1092,9 +1092,6 @@ static struct page *new_slab(struct kmem
 
 	BUG_ON(flags & GFP_SLAB_BUG_MASK);
 
-	if (flags & __GFP_WAIT)
-		local_irq_enable();
-
 	page = allocate_slab(s,
 		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
 	if (!page)
@@ -1127,8 +1124,6 @@ static struct page *new_slab(struct kmem
 	page->freelist = start;
 	page->inuse = 0;
 out:
-	if (flags & __GFP_WAIT)
-		local_irq_disable();
 	return page;
 }
 
@@ -1512,7 +1507,14 @@ new_slab:
 		goto load_freelist;
 	}
 
+	if (gfpflags & __GFP_WAIT)
+		local_irq_enable();
+
 	new = new_slab(s, gfpflags, node);
+
+	if (gfpflags & __GFP_WAIT)
+		local_irq_disable();
+
 	if (new) {
 		c = get_cpu_slab(s, smp_processor_id());
 		if (c->page) {
@@ -2045,12 +2047,6 @@ static struct kmem_cache_node *early_kme
 	init_kmem_cache_node(n);
 	atomic_long_inc(&n->nr_slabs);
 	add_partial(n, page);
-
-	/*
-	 * new_slab() disables interupts. If we do not reenable interrupts here
-	 * then bootup would continue with interrupts disabled.
-	 */
-	local_irq_enable();
 	return n;
 }
_
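
As a rough illustration of the pattern the patch applies (a stand-alone
userspace sketch with invented names, not the actual slub code): the
allocation helper no longer toggles interrupts itself; the slow-path
caller re-enables interrupts around the potentially sleeping allocation
and disables them again before touching per-cpu state, so callers that
must keep interrupts off (such as early boot) can use the helper
directly.

/*
 * Stand-alone sketch only -- not kernel code. helper_alloc(),
 * caller_alloc() and may_sleep_alloc() are invented names;
 * irq_enable()/irq_disable() stand in for local_irq_enable()/
 * local_irq_disable().
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static bool irqs_enabled;

static void irq_enable(void)  { irqs_enabled = true; }
static void irq_disable(void) { irqs_enabled = false; }

/* Stand-in for an allocation that may sleep (the allocate_slab() role). */
static void *may_sleep_alloc(void)
{
	return malloc(64);
}

/*
 * The helper (the new_slab() role) only allocates; it neither enables
 * nor disables interrupts, so callers that must keep interrupts off
 * can use it as-is.
 */
static void *helper_alloc(void)
{
	return may_sleep_alloc();
}

/*
 * The slow-path caller (the __slab_alloc() role) owns the interrupt
 * state: it re-enables interrupts around the sleeping allocation only
 * when sleeping is allowed, then disables them again before touching
 * per-cpu state.
 */
static void *caller_alloc(bool may_sleep)
{
	void *obj;

	if (may_sleep)
		irq_enable();

	obj = helper_alloc();

	if (may_sleep)
		irq_disable();

	return obj;
}

int main(void)
{
	void *obj = caller_alloc(true);

	printf("allocated %p, irqs %s\n", obj, irqs_enabled ? "on" : "off");
	free(obj);
	return 0;
}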