---
 mm/slub.c |   62 ++++++++++++++++++++++++++++++++++++++++----------------------
 1 file changed, 40 insertions(+), 22 deletions(-)

Index: linux-2.6.22-rc6-mm1/mm/slub.c
===================================================================
--- linux-2.6.22-rc6-mm1.orig/mm/slub.c	2007-07-09 15:04:46.000000000 -0700
+++ linux-2.6.22-rc6-mm1/mm/slub.c	2007-07-09 15:48:41.000000000 -0700
@@ -1467,13 +1467,17 @@ static void *__slab_alloc(struct kmem_ca
 {
 	void **object;
 	struct page *new;
+	unsigned long flags;
 
+	local_irq_save(flags);
 	if (!c->page)
 		goto new_slab;
 
 	slab_lock(c->page);
 	if (unlikely(!node_match(c, node)))
 		goto another_slab;
+	if (unlikely(c->freelist))
+		goto another_slab;
 load_freelist:
 	object = c->page->freelist;
 	if (unlikely(!object))
@@ -1486,7 +1490,14 @@ load_freelist:
 	c->page->inuse = s->objects;
 	c->page->freelist = NULL;
 	c->node = page_to_nid(c->page);
+out:
 	slab_unlock(c->page);
+	local_irq_restore(flags);
+	preempt_enable();
+
+	if (unlikely((gfpflags & __GFP_ZERO)))
+		memset(object, 0, c->objsize);
+
 	return object;
 
 another_slab:
@@ -1527,6 +1538,8 @@ new_slab:
 		c->page = new;
 		goto load_freelist;
 	}
+	local_irq_restore(flags);
+	preempt_enable();
 	return NULL;
debug:
 	c->freelist = NULL;
@@ -1536,8 +1549,7 @@ debug:
 
 	c->page->inuse++;
 	c->page->freelist = object[c->offset];
-	slab_unlock(c->page);
-	return object;
+	goto out;
 }
 
 /*
@@ -1554,23 +1566,20 @@ static void __always_inline *slab_alloc(
 		gfp_t gfpflags, int node, void *addr)
 {
 	void **object;
-	unsigned long flags;
 	struct kmem_cache_cpu *c;
 
-	local_irq_save(flags);
+	preempt_disable();
 	c = get_cpu_slab(s, smp_processor_id());
-	if (unlikely(!c->page || !c->freelist ||
-					!node_match(c, node)))
+redo:
+	object = c->freelist;
+	if (unlikely(!object || !node_match(c, node)))
+		return __slab_alloc(s, gfpflags, node, addr, c);
 
-		object = __slab_alloc(s, gfpflags, node, addr, c);
+	if (cmpxchg_local(&c->freelist, object, object[c->offset]) != object)
+		goto redo;
 
-	else {
-		object = c->freelist;
-		c->freelist = object[c->offset];
-	}
-	local_irq_restore(flags);
-
-	if (unlikely((gfpflags & __GFP_ZERO) && object))
+	preempt_enable();
+	if (unlikely((gfpflags & __GFP_ZERO)))
 		memset(object, 0, c->objsize);
 
 	return object;
@@ -1603,7 +1612,9 @@ static void __slab_free(struct kmem_cach
 {
 	void *prior;
 	void **object = (void *)x;
+	unsigned long flags;
 
+	local_irq_save(flags);
 	slab_lock(page);
 
 	if (unlikely(SlabDebug(page)))
@@ -1629,6 +1640,8 @@ checks_ok:
 
 out_unlock:
 	slab_unlock(page);
+	local_irq_restore(flags);
+	preempt_enable();
 	return;
 
 slab_empty:
@@ -1639,6 +1652,8 @@ slab_empty:
 		remove_partial(s, page);
 
 	slab_unlock(page);
+	local_irq_restore(flags);
+	preempt_enable();
 	discard_slab(s, page);
 	return;
 
@@ -1663,18 +1678,21 @@ static void __always_inline slab_free(st
 			struct page *page, void *x, void *addr)
 {
 	void **object = (void *)x;
-	unsigned long flags;
 	struct kmem_cache_cpu *c;
+	void **freelist;
 
-	local_irq_save(flags);
+	preempt_disable();
 	c = get_cpu_slab(s, smp_processor_id());
-	if (likely(page == c->page && c->freelist)) {
-		object[c->offset] = c->freelist;
-		c->freelist = object;
-	} else
-		__slab_free(s, page, x, addr, c->offset);
+redo:
+	freelist = c->freelist;
+	if (unlikely(page != c->page || !freelist))
+		return __slab_free(s, page, x, addr, c->offset);
+
+	object[c->offset] = freelist;
+	if (cmpxchg_local(&c->freelist, freelist, object) != freelist)
+		goto redo;
 
-	local_irq_restore(flags);
+	preempt_enable();
 }
 
 void kmem_cache_free(struct kmem_cache *s, void *x)
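
For reference, both fast paths above are instances of the classic compare-and-swap
list pop/push: read the per-CPU freelist head, compute the replacement
(object[c->offset] on alloc, the freed object on free), and retry from the redo:
label if cmpxchg_local() reports that the head changed in the meantime.
cmpxchg_local() only has to be atomic against interrupts and preemption on the
local CPU (an unlocked cmpxchg on x86), which is why preempt_disable() can replace
local_irq_save() on the fast path. Below is a minimal user-space sketch of the same
pop/push pattern, with C11 atomics standing in for cmpxchg_local(); the names
node, freelist_pop and freelist_push are illustrative only, and the sketch ignores
the ABA hazard that a freelist shared across CPUs would additionally have to address.

#include <stdatomic.h>

struct node {
	struct node *next;
};

static _Atomic(struct node *) freelist;

/*
 * Pop the head of the list. On failure, atomic_compare_exchange_weak()
 * reloads the current head into 'head', so the loop simply retries with
 * the freshly observed value, like the redo: loop in slab_alloc().
 */
static struct node *freelist_pop(void)
{
	struct node *head = atomic_load(&freelist);

	while (head &&
	       !atomic_compare_exchange_weak(&freelist, &head, head->next))
		;	/* head changed under us: retry with new head */
	return head;
}

/*
 * Push an object onto the list: link it in front of the observed head,
 * then publish it with a compare-exchange, like the redo: loop in
 * slab_free().
 */
static void freelist_push(struct node *object)
{
	struct node *head = atomic_load(&freelist);

	do {
		object->next = head;
	} while (!atomic_compare_exchange_weak(&freelist, &head, object));
}

As in the patch, a failed compare-exchange just restarts the loop; the slow
paths (__slab_alloc/__slab_free) continue to take slab_lock() with interrupts
disabled.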