---
 include/linux/slub_def.h |  6 ++-
 mm/slub.c                | 74 +++++++++++++++++++++++++++++++-----------------
 2 files changed, 52 insertions(+), 28 deletions(-)

Index: linux-2.6/include/linux/slub_def.h
===================================================================
--- linux-2.6.orig/include/linux/slub_def.h	2008-02-01 23:03:34.539675907 -0800
+++ linux-2.6/include/linux/slub_def.h	2008-02-01 23:17:46.131350723 -0800
@@ -32,8 +32,10 @@ enum stat_item {

 struct kmem_cache_cpu {
 	void **freelist;	/* Pointer to first free per cpu object */
-	struct page *page;	/* The slab from which we are allocating */
-	int node;		/* The node of the page (or -1 for debug) */
+	struct page *alloc;	/* The slab from which we are allocating */
+	void **prior;		/* Prior pointer taken at free activation */
+	struct page *free;	/* The slab to which we are freeing */
+	int node;		/* The node of the alloc page (or -1 for debug) */
 	unsigned int offset;	/* Freepointer offset (in word units) */
 	unsigned int objsize;	/* Size of an object (from kmem_cache) */
 #ifdef CONFIG_SLUB_STATS
Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c	2008-02-01 23:05:22.292054834 -0800
+++ linux-2.6/mm/slub.c	2008-02-01 23:24:31.186525509 -0800
@@ -1395,7 +1395,7 @@ static void unfreeze_slab(struct kmem_ca
  */
 static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 {
-	struct page *page = c->page;
+	struct page *page = c->alloc;
 	int tail = 1;

 	if (c->freelist)
@@ -1419,14 +1419,14 @@ static void deactivate_slab(struct kmem_
 		page->freelist = object;
 		page->inuse--;
 	}
-	c->page = NULL;
+	c->alloc = NULL;
 	unfreeze_slab(s, page, tail);
 }

 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 {
 	stat_cpu(c, CPUSLAB_FLUSH);
-	slab_lock(c->page);
+	slab_lock(c->alloc);
 	deactivate_slab(s, c);
 }

@@ -1438,7 +1438,7 @@ static inline void __flush_cpu_slab(stru
 {
 	struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);

-	if (likely(c && c->page))
+	if (likely(c && c->alloc))
 		flush_slab(s, c);
 }

@@ -1498,25 +1498,25 @@ static void *__slab_alloc(struct kmem_ca
 	void **object;
 	struct page *new;

-	if (!c->page)
+	if (!c->alloc)
 		goto new_slab;

-	slab_lock(c->page);
+	slab_lock(c->alloc);
 	if (unlikely(!node_match(c, node)))
 		goto another_slab;
 load_freelist:
-	object = c->page->freelist;
+	object = c->alloc->freelist;
 	if (unlikely(!object))
 		goto another_slab;
-	if (unlikely(SlabDebug(c->page)))
+	if (unlikely(SlabDebug(c->alloc)))
 		goto debug;

-	object = c->page->freelist;
+	object = c->alloc->freelist;
 	c->freelist = object[c->offset];
-	c->page->inuse = s->objects;
-	c->page->freelist = NULL;
-	c->node = page_to_nid(c->page);
-	slab_unlock(c->page);
+	c->alloc->inuse = s->objects;
+	c->alloc->freelist = NULL;
+	c->node = page_to_nid(c->alloc);
+	slab_unlock(c->alloc);
 	stat_cpu(c, ALLOC_SLOWPATH);
 	return object;

@@ -1526,7 +1526,7 @@ another_slab:
 new_slab:
 	new = get_partial(s, gfpflags, node);
 	if (new) {
-		c->page = new;
+		c->alloc = new;
 		stat_cpu(c, ALLOC_FROM_PARTIAL);
 		goto load_freelist;
 	}
@@ -1541,24 +1541,24 @@ new_slab:

 	if (new) {
 		c = get_cpu_slab(s, smp_processor_id());
-		if (c->page)
+		if (c->alloc)
 			flush_slab(s, c);
 		stat_cpu(c, ALLOC_SLAB);
 		slab_lock(new);
 		SetSlabFrozen(new);
-		c->page = new;
+		c->alloc = new;
 		goto load_freelist;
 	}
 	return NULL;
 debug:
-	object = c->page->freelist;
-	if (!alloc_debug_processing(s, c->page, object, addr))
+	object = c->alloc->freelist;
+	if (!alloc_debug_processing(s, c->alloc, object, addr))
 		goto another_slab;

-	c->page->inuse++;
-	c->page->freelist = object[c->offset];
+	c->alloc->inuse++;
+	c->alloc->freelist = object[c->offset];
 	c->node = -1;
-	slab_unlock(c->page);
+	slab_unlock(c->alloc);
 	return object;
 }

@@ -1699,13 +1699,34 @@ static __always_inline void slab_free(st
 	local_irq_save(flags);
 	debug_check_no_locks_freed(object, s->objsize);
 	c = get_cpu_slab(s, smp_processor_id());
-	if (likely(page == c->page && c->node >= 0)) {
+	if (likely(page == c->alloc)) {
+		/* Local alloc slab. */
 		object[c->offset] = c->freelist;
 		c->freelist = object;
 		stat_cpu(c, FREE_FASTPATH);
 	} else
-		__slab_free(s, page, x, addr, c->offset);
+	if (SlabFrozen(page)) {
+		/*
+		 * Remotely held slab. No need to check partial list stuff.
+		 */
+		slab_lock(page);
+		object[c->offset] = page->freelist;
+		page->freelist = object;
+		page->inuse--;
+		slab_unlock(page);
+	} else {
+		if (unlikely(page != c->free)) {
+			if (c->free) {
+				slab_lock(c->free);
+				ClearSlabFrozen(c->free);
+			}
+			c->free = page;
+		}
+		object[c->offset] = c->free->freelist;
+		c->free->freelist = object;
+		c->free->inuse--;
+	}

 	local_irq_restore(flags);
 }
@@ -1882,7 +1903,8 @@ static unsigned long calculate_alignment
 static void init_kmem_cache_cpu(struct kmem_cache *s,
 			struct kmem_cache_cpu *c)
 {
-	c->page = NULL;
+	c->alloc = NULL;
+	c->free = NULL;
 	c->freelist = NULL;
 	c->node = 0;
 	c->offset = s->offset / sizeof(void *);
@@ -3554,7 +3576,7 @@ static unsigned long slab_objects(struct
 		if (!c)
 			continue;

-		page = c->page;
+		page = c->alloc;
 		node = c->node;
 		if (node < 0)
 			continue;
@@ -3616,7 +3638,7 @@ static int any_slab_objects(struct kmem_
 	for_each_possible_cpu(cpu) {
 		struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);

-		if (c && c->page)
+		if (c && c->alloc)
 			return 1;
 	}
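
For reference, a rough standalone sketch of the freeing policy the alloc/free split above is aiming at, assuming only the fields added to kmem_cache_cpu in this patch. This is illustrative userspace code, not part of the patch; struct and helper names are simplified stand-ins for the SLUB structures.

#include <stdio.h>
#include <stdbool.h>

/* Simplified stand-ins for struct page and struct kmem_cache_cpu. */
struct slab {
	bool frozen;		/* slab owned by some cpu (SlabFrozen) */
};

struct cpu_slub {
	struct slab *alloc;	/* slab we are allocating from */
	struct slab *free;	/* slab we are batching frees into */
};

/* Mirrors the branch order of the patched slab_free(). */
static const char *free_path(struct cpu_slub *c, struct slab *page)
{
	if (page == c->alloc)
		return "fastpath: push onto the per cpu freelist, no lock";
	if (page->frozen)
		return "remote frozen slab: lock the page, return the object";
	if (page != c->free)
		return "activate: unfreeze the old c->free, make this slab c->free";
	return "free into the current per cpu free slab";
}

int main(void)
{
	struct slab a = { false }, b = { false }, r = { true };
	struct cpu_slub c = { &a, &b };

	printf("%s\n", free_path(&c, &a));	/* local alloc slab */
	printf("%s\n", free_path(&c, &r));	/* remotely frozen slab */
	printf("%s\n", free_path(&c, &b));	/* current free slab */
	return 0;
}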