Index: linux-2.6.21-rc5-mm2/mm/slub.c
===================================================================
--- linux-2.6.21-rc5-mm2.orig/mm/slub.c	2007-03-30 10:11:21.000000000 -0700
+++ linux-2.6.21-rc5-mm2/mm/slub.c	2007-03-30 10:11:30.000000000 -0700
@@ -347,6 +347,9 @@ static void init_object(struct kmem_cach
 {
 	u8 *p = object;
 
+	if (s->objects == 1)
+		return;
+
 	if (s->flags & __OBJECT_POISON) {
 		memset(p, POISON_FREE, s->objsize - 1);
 		p[s->objsize -1] = POISON_END;
@@ -468,6 +471,10 @@ static int check_object(struct kmem_cach
 	u8 *p = object;
 	u8 *endobject = object + s->objsize;
 
+	/* Single object slabs do not get policed */
+	if (s->objects == 1)
+		return 1;
+
 	if (s->flags & SLAB_RED_ZONE) {
 		if (!check_bytes(endobject, active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE,
 			s->inuse - s->objsize)) {
@@ -556,6 +563,9 @@ static int on_freelist(struct kmem_cache
 	void *fp = page->freelist;
 	void *object = NULL;
 
+	if (s->objects == 1)
+		return 0;
+
 	while (fp && nr <= s->objects) {
 		if (fp == search)
 			return 1;
@@ -754,7 +764,8 @@ static struct page *new_slab(struct kmem
 	page->slab = s;
 	page->flags |= 1 << PG_slab;
 	if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON |
-			SLAB_STORE_USER | SLAB_TRACE))
+			SLAB_STORE_USER | SLAB_TRACE) ||
+			s->objects == 1)
 		page->flags |= 1 << PG_error;
 
 	if (s->objects > 1) {
@@ -1168,6 +1179,11 @@ new_slab:
 		return NULL;
 	}
 
+	if (s->objects == 1) {
+		local_irq_restore(flags);
+		return page_address(page);
+	}
+
 	if (s->cpu_slab[cpu]) {
 		/*
 		 * Someone else populated the cpu_slab while
@@ -1229,6 +1245,10 @@ static void slab_free(struct kmem_cache
 	unsigned long flags;
 
 	local_irq_save(flags);
+
+	if (unlikely(PageError(page)) && s->objects == 1)
+		goto single_object_slab;
+
 	slab_lock(page);
 
 	if (unlikely(PageError(page)) &&
@@ -1239,40 +1259,31 @@ static void slab_free(struct kmem_cache
 	page->freelist = object;
 	page->inuse--;
 
-	if (unlikely(PageActive(page)))
-		/*
-		 * Cpu slabs are never on partial lists and are
-		 * never freed.
-		 */
+	if (likely(PageActive(page) || (page->inuse && prior)))
 		goto out_unlock;
 
-	if (likely(page->inuse)) {
+	if (!prior) {
 		/*
-		 * Objects left in the slab. If it
-		 * was not on the partial list before
-		 * then add it.
+		 * The slab was full before. It will have one free
+		 * object now. So move to the partial list.
 		 */
-		if (unlikely(!prior))
-			add_partial(s, page);
-out_unlock:
-		slab_unlock(page);
-		local_irq_restore(flags);
-		return;
+		add_partial(s, page);
+		goto out_unlock;
 	}
 
 	/*
 	 * All object have been freed.
 	 */
-	if (prior)
-		/*
-		 * The slab was partially used before and is empty
-		 * now. So remove from partial list.
-		 */
-		remove_partial(s, page);
-
+	remove_partial(s, page);
 	slab_unlock(page);
+single_object_slab:
 	discard_slab(s, page);
 	local_irq_restore(flags);
+	return;
+
+out_unlock:
+	slab_unlock(page);
+	local_irq_restore(flags);
 }
 
 void kmem_cache_free(struct kmem_cache *s, void *x)
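
For reference, the decision logic in the rewritten tail of slab_free() can be
modeled in userspace. The sketch below is illustrative only, not kernel code:
toy_slab and toy_free are invented names, "prior" stands in for the pre-free
page->freelist check, and "single_object" stands in for the
PageError(page) && s->objects == 1 fast path that skips slab_lock() entirely.

/*
 * Illustrative userspace model of the slab_free() paths after this
 * patch. Not kernel code: toy_slab/toy_free are invented names.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_slab {
	int inuse;		/* allocated objects remaining after this free */
	bool prior;		/* freelist was non-empty before the free */
	bool active;		/* cpu slab (PageActive) */
	bool single_object;	/* s->objects == 1 (PG_error set at slab setup) */
};

static void toy_free(struct toy_slab *slab)
{
	if (slab->single_object) {
		/* New fast path: no freelist, no slab_lock(), just discard. */
		printf("discard_slab\n");
		return;
	}

	/* Cpu slabs, and still-used slabs already on partial: no list work. */
	if (slab->active || (slab->inuse && slab->prior)) {
		printf("out_unlock\n");
		return;
	}

	if (!slab->prior) {
		/* Slab was full before; it now has one free object. */
		printf("add_partial\n");
		return;
	}

	/* All objects freed: take it off the partial list and discard. */
	printf("remove_partial + discard_slab\n");
}

int main(void)
{
	struct toy_slab was_full = { .inuse = 3, .prior = false };
	struct toy_slab now_empty = { .inuse = 0, .prior = true };
	struct toy_slab one_object = { .single_object = true };

	toy_free(&was_full);	/* add_partial */
	toy_free(&now_empty);	/* remove_partial + discard_slab */
	toy_free(&one_object);	/* discard_slab */
	return 0;
}

Note how the single-object case returns before any lock is taken, matching
the goto single_object_slab placement between local_irq_save() and
slab_lock() in the patched function.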