SLUB: Single atomic instruction alloc/free using cmpxchg_local

A cmpxchg on the per-cpu freelist allows us to avoid disabling and
enabling interrupts on the allocation and free fast paths. We stay on
one processor by disabling preemption while still allowing concurrent
interrupts, and thus avoid the overhead of the interrupt disable/enable
pair.

Pro:
	- No need to disable interrupts.

Con:
	- Slightly more complex handling.

Signed-off-by: Christoph Lameter

---
 mm/slub.c |   59 +++++++++++++++++++++++++++++++++++++++--------------------
 1 file changed, 39 insertions(+), 20 deletions(-)

Index: linux-2.6.22-rc6-mm1/mm/slub.c
===================================================================
--- linux-2.6.22-rc6-mm1.orig/mm/slub.c	2007-07-12 19:44:21.000000000 -0700
+++ linux-2.6.22-rc6-mm1/mm/slub.c	2007-07-12 19:52:06.000000000 -0700
@@ -1467,12 +1467,14 @@ static void *__slab_alloc(struct kmem_ca
 {
 	void **object;
 	struct page *new;
+	unsigned long flags;
 
+	local_irq_save(flags);
 	if (!c->page)
 		goto new_slab;
 
 	slab_lock(c->page);
-	if (unlikely(!node_match(c, node)))
+	if (unlikely(!node_match(c, node) || c->freelist))
 		goto another_slab;
 load_freelist:
 	object = c->page->freelist;
@@ -1486,7 +1488,14 @@ load_freelist:
 	c->page->inuse = s->objects;
 	c->page->freelist = NULL;
 	c->node = page_to_nid(c->page);
+out:
 	slab_unlock(c->page);
+	local_irq_restore(flags);
+	preempt_enable();
+
+	if (unlikely((gfpflags & __GFP_ZERO)))
+		memset(object, 0, c->objsize);
+
 	return object;
 
 another_slab:
@@ -1527,6 +1536,8 @@ new_slab:
 		c->page = new;
 		goto load_freelist;
 	}
+	local_irq_restore(flags);
+	preempt_enable();
 	return NULL;
 debug:
 	c->freelist = NULL;
@@ -1536,8 +1547,7 @@ debug:
 
 	c->page->inuse++;
 	c->page->freelist = object[c->offset];
-	slab_unlock(c->page);
-	return object;
+	goto out;
 }
 
 /*
@@ -1557,20 +1567,19 @@ static void __always_inline *slab_alloc(
 	unsigned long flags;
 	struct kmem_cache_cpu *c;
 
-	local_irq_save(flags);
+	__local_begin(flags);
 	c = get_cpu_slab(s, smp_processor_id());
-	if (unlikely(!c->page || !c->freelist ||
-					!node_match(c, node)))
+redo:
+	object = c->freelist;
+	if (unlikely(!object || !node_match(c, node)))
+		return __slab_alloc(s, gfpflags, node, addr, c);
 
-		object = __slab_alloc(s, gfpflags, node, addr, c);
+	if (__local_cmpxchg(&c->freelist, object, object[c->offset]) != object)
+		goto redo;
 
-	else {
-		object = c->freelist;
-		c->freelist = object[c->offset];
-	}
-	local_irq_restore(flags);
+	__local_end(flags);
 
-	if (unlikely((gfpflags & __GFP_ZERO) && object))
+	if (unlikely((gfpflags & __GFP_ZERO)))
 		memset(object, 0, c->objsize);
 
 	return object;
@@ -1603,7 +1612,9 @@ static void __slab_free(struct kmem_cach
 {
 	void *prior;
 	void **object = (void *)x;
+	unsigned long flags;
 
+	local_irq_save(flags);
 	slab_lock(page);
 
 	if (unlikely(SlabDebug(page)))
@@ -1629,6 +1640,8 @@ checks_ok:
 
 out_unlock:
 	slab_unlock(page);
+	local_irq_restore(flags);
+	preempt_enable();
 	return;
 
 slab_empty:
@@ -1639,6 +1652,8 @@ slab_empty:
 		remove_partial(s, page);
 
 	slab_unlock(page);
+	local_irq_restore(flags);
+	preempt_enable();
 	discard_slab(s, page);
 	return;
 
@@ -1665,16 +1680,20 @@ static void __always_inline slab_free(st
 	void **object = (void *)x;
 	unsigned long flags;
 	struct kmem_cache_cpu *c;
+	void **freelist;
 
-	local_irq_save(flags);
+	__local_begin(flags);
 	c = get_cpu_slab(s, smp_processor_id());
-	if (likely(page == c->page && c->freelist)) {
-		object[c->offset] = c->freelist;
-		c->freelist = object;
-	} else
-		__slab_free(s, page, x, addr, c->offset);
+redo:
+	freelist = c->freelist;
+	if (unlikely(page != c->page || !freelist))
+		return __slab_free(s, page, x, addr, c->offset);
+
+	object[c->offset] = freelist;
+	if (__local_cmpxchg(&c->freelist, freelist, object) != freelist)
+		goto redo;
 
-	local_irq_restore(flags);
+	__local_end(flags);
 }
 
 void kmem_cache_free(struct kmem_cache *s, void *x)
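
For readers outside the kernel tree, the fast paths above boil down to a
lock-free pop/push on a singly linked per-cpu freelist that retries on a
failed compare-and-swap. The following userspace sketch models that
pattern only; struct cpu_slab, freelist_pop and freelist_push are made-up
names for illustration, and it uses GCC's fully atomic
__atomic_compare_exchange_n builtin where the patch relies on the cheaper
__local_cmpxchg, which only has to be atomic with respect to the local
processor.

#include <stdio.h>
#include <stdbool.h>

struct cpu_slab {
	void *freelist;		/* head of the linked list of free objects */
	unsigned int offset;	/* word offset of the next-free pointer */
};

/* Pop one object off the freelist; retry if another context raced us. */
static void *freelist_pop(struct cpu_slab *c)
{
	void *object;

	do {
		object = __atomic_load_n(&c->freelist, __ATOMIC_RELAXED);
		if (!object)
			return NULL;	/* the real code falls back to the slow path here */
	} while (!__atomic_compare_exchange_n(&c->freelist, &object,
					((void **)object)[c->offset],
					false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED));
	return object;
}

/* Push an object back: link it to the current head, then swap it in. */
static void freelist_push(struct cpu_slab *c, void *object)
{
	void *head;

	do {
		head = __atomic_load_n(&c->freelist, __ATOMIC_RELAXED);
		((void **)object)[c->offset] = head;
	} while (!__atomic_compare_exchange_n(&c->freelist, &head, object,
					false, __ATOMIC_RELEASE, __ATOMIC_RELAXED));
}

int main(void)
{
	/* Build a tiny freelist of three one-pointer objects. */
	void *objs[3] = { &objs[1], &objs[2], NULL };
	struct cpu_slab c = { .freelist = &objs[0], .offset = 0 };

	void *a = freelist_pop(&c);	/* &objs[0] */
	void *b = freelist_pop(&c);	/* &objs[1] */
	freelist_push(&c, a);		/* a becomes the new head */

	printf("popped %p then %p, head is now %p\n", a, b, c.freelist);
	return 0;
}

As in the patch, a failed compare-and-swap simply reloads the head and
retries, so the common case needs neither a lock nor an interrupt
disable; only the refill and slab-empty cases take the slab lock with
interrupts off.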