---
 mm/slub.c |   13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)

Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c	2008-02-27 19:07:30.000000000 -0800
+++ linux-2.6/mm/slub.c	2008-02-27 19:10:06.000000000 -0800
@@ -315,6 +315,8 @@ static void *slab_address(struct page *p
 	return page->end - PAGE_MAPPING_ANON;
 }
 
+#define DEAD_END ((void *)PAGE_MAPPING_ANON)
+
 static inline int check_valid_pointer(struct kmem_cache *s,
 				struct page *page, const void *object)
 {
@@ -1530,6 +1532,8 @@ load_freelist:
 	object = c->page->freelist;
 	if (unlikely(object == c->page->end))
 		goto another_slab;
+	if (unlikely(is_end(c->freelist)))
+		goto slowalloc;
 	if (unlikely(SlabDebug(c->page)))
 		goto debug;
 
@@ -1598,6 +1602,7 @@ debug:
 	if (!alloc_debug_processing(s, c->page, object, addr))
 		goto another_slab;
 
+slowalloc:
 	c->page->inuse++;
 	c->page->freelist = object[c->offset];
 	c->node = -1;
@@ -1638,8 +1643,9 @@ static __always_inline void *slab_alloc(
 #ifdef SLUB_FASTPATH
 	c = get_cpu_slab(s, raw_smp_processor_id());
 	do {
-		object = c->freelist;
+		object = xchg(&c->freelist, DEAD_END);
 		if (unlikely(is_end(object) || !node_match(c, node))) {
+			c->freelist = object;
 			object = __slab_alloc(s, gfpflags, node, addr, c);
 			break;
 		}
@@ -1797,7 +1803,8 @@ static __always_inline void slab_free(st
	 * then any change of cpu_slab will cause the cmpxchg to fail
	 * since the freelist pointers are unique per slab.
	 */
-	if (unlikely(page != c->page || c->node < 0)) {
+	if (unlikely(page != c->page || c->node < 0 ||
+			freelist == DEAD_END)) {
 		__slab_free(s, page, x, addr, c->offset);
 		break;
 	}
@@ -1995,7 +2002,7 @@ static void init_kmem_cache_cpu(struct k
 			struct kmem_cache_cpu *c)
 {
 	c->page = NULL;
-	c->freelist = (void *)PAGE_MAPPING_ANON;
+	c->freelist = DEAD_END;
 	c->node = 0;
 	c->offset = s->offset / sizeof(void *);
 	c->objsize = s->objsize;
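
For illustration, here is a minimal user-space sketch of the sentinel
trick the patch applies to the SLUB fastpath: the allocator atomically
swaps DEAD_END into the freelist head, so a concurrent free that
observes the sentinel knows an allocation is in flight and must divert
to the slow path instead of touching the list. Everything below (struct
object, alloc_fast, free_fast, and the GCC __atomic builtins) is
illustrative scaffolding, not the kernel's code; SLUB's real fastpath
operates on per-cpu structures and uses xchg()/cmpxchg_local().

/*
 * Sketch of the DEAD_END sentinel pattern on a single lock-free
 * freelist (assumed names, not the kernel API).
 */
#include <stddef.h>
#include <stdio.h>

#define DEAD_END ((void *)0x1)	/* "fast path owns the list" marker */

struct object {
	struct object *next;
};

static struct object *freelist;

static struct object *alloc_fast(void)
{
	/* Claim the list: everyone else now sees DEAD_END. */
	struct object *head = __atomic_exchange_n(&freelist, DEAD_END,
						  __ATOMIC_ACQ_REL);

	if (head == DEAD_END)
		return NULL;	/* someone else owns it: take the slow path */

	if (head == NULL) {
		/*
		 * Empty list: hand it back before falling to the slow
		 * path, just as the patch restores c->freelist before
		 * calling __slab_alloc().
		 */
		__atomic_store_n(&freelist, NULL, __ATOMIC_RELEASE);
		return NULL;
	}

	/* Pop one object and publish the remainder of the list. */
	__atomic_store_n(&freelist, head->next, __ATOMIC_RELEASE);
	return head;
}

static int free_fast(struct object *obj)
{
	struct object *head = __atomic_load_n(&freelist, __ATOMIC_ACQUIRE);

	/*
	 * An allocation is in flight: refuse the fast path, the way
	 * slab_free() above checks freelist == DEAD_END before its
	 * cmpxchg and diverts to __slab_free().
	 */
	if (head == DEAD_END)
		return 0;

	/* Push obj, but only if the head did not change underneath us. */
	obj->next = head;
	return __atomic_compare_exchange_n(&freelist, &head, obj, 0,
					   __ATOMIC_ACQ_REL,
					   __ATOMIC_ACQUIRE);
}

int main(void)
{
	static struct object a, b;

	free_fast(&a);
	free_fast(&b);
	printf("popped %p\n", (void *)alloc_fast());
	return 0;
}

The xchg() in the patch plays the same double role: it reads the
freelist and poisons it in one atomic step, which is what lets the
free-side check detect a racing allocation.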