---
 mm/slub.c |   21 +++++++--------------
 1 file changed, 7 insertions(+), 14 deletions(-)

Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c	2007-10-18 13:31:28.000000000 -0700
+++ linux-2.6/mm/slub.c	2007-10-18 13:31:40.000000000 -0700
@@ -1669,16 +1669,12 @@ new_slab:
 		goto load_freelist;
 	}
 
-	if (gfpflags & __GFP_WAIT)
-		local_irq_enable();
+	put_cpu();
 
 	new = new_slab(s, gfpflags, node);
-
-	if (gfpflags & __GFP_WAIT)
-		local_irq_disable();
 
 	if (new) {
-		c = get_cpu_slab(s, smp_processor_id());
+		c = get_cpu_slab(s, get_cpu());
 		if (c->page) {
 			/*
 			 * Someone else populated the cpu_slab while we
@@ -1702,6 +1698,7 @@ new_slab:
 		c->page = new;
 		goto load_freelist;
 	}
+	get_cpu();
 	return NULL;
 debug:
 	/* FIXME: This only works on UP. There needs to be some way
@@ -1731,11 +1728,9 @@ static void __always_inline *slab_alloc(
 			gfp_t gfpflags, int node, void *addr)
 {
 	void **object;
-	unsigned long flags;
 	struct kmem_cache_cpu *c;
 
-	local_irq_save(flags);
-	c = get_cpu_slab(s, smp_processor_id());
+	c = get_cpu_slab(s, get_cpu());
 	if (unlikely(!node_match(c, node)))
 		goto slow;
 
@@ -1743,7 +1738,7 @@ static void __always_inline *slab_alloc(
 	if (unlikely(!object))
 		goto slow;
 back:
-	local_irq_restore(flags);
+	put_cpu();
 
 	if (unlikely((gfpflags & __GFP_ZERO)))
 		memset(object, 0, c->objsize);
@@ -1819,18 +1814,16 @@ static void __always_inline slab_free(st
 			struct page *page, void *x, void *addr)
 {
 	void **object = (void *)x;
-	unsigned long flags;
 	struct kmem_cache_cpu *c;
 
-	local_irq_save(flags);
+	c = get_cpu_slab(s, get_cpu());
 	debug_check_no_locks_freed(object, s->objsize);
-	c = get_cpu_slab(s, smp_processor_id());
 	if (likely(page == c->page && c->node >= 0))
 		free_cpu_object(c, object);
 	else
 		__slab_free(s, page, x, addr, c->offset);
 
-	local_irq_restore(flags);
+	put_cpu();
 }
 
 void kmem_cache_free(struct kmem_cache *s, void *x)
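
Note on the pattern above: get_cpu() disables preemption and returns the current
processor id, and put_cpu() re-enables preemption, so the per cpu slab structures
are now accessed with interrupts left enabled. That is also why the failure path
re-takes the cpu reference (the added get_cpu() before return NULL): __slab_alloc
must return with preemption disabled so that the put_cpu() at the back: label in
slab_alloc() stays balanced. A minimal sketch of the idiom, using a hypothetical
per cpu counter rather than the slub structures:

	#include <linux/percpu.h>
	#include <linux/smp.h>

	/* hypothetical per cpu counter, for illustration only */
	static DEFINE_PER_CPU(unsigned long, hits);

	static void count_hit(void)
	{
		int cpu = get_cpu();	/* disables preemption, returns cpu id */

		per_cpu(hits, cpu)++;	/* safe: no migration until put_cpu() */
		put_cpu();		/* re-enables preemption */
	}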