---
 mm/slub.c |   34 ++++++++++++++++++++++++----------
 1 file changed, 24 insertions(+), 10 deletions(-)

Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c	2008-03-14 03:48:56.000000000 -0700
+++ linux-2.6/mm/slub.c	2008-03-14 03:52:26.000000000 -0700
@@ -301,6 +301,16 @@ static inline unsigned long slab_objects
 	return s->max_objects;
 }
 
+static inline void **get_freelist(struct kmem_cache_cpu *c)
+{
+	return c->freelist;
+}
+
+static inline void set_freelist(struct kmem_cache_cpu *c, void **p)
+{
+	c->freelist = p;
+}
+
 /* Verify that a pointer has an address that is valid within a slab page */
 static inline int check_valid_pointer(struct kmem_cache *s,
 				struct page *page, const void *object)
@@ -1402,6 +1412,7 @@ static void deactivate_slab(struct kmem_
 {
 	struct page *page = c->page;
 	int tail = 1;
+	void **freelist;
 
 	if (page->freelist)
 		stat(c, DEACTIVATE_REMOTE_FREES);
@@ -1410,20 +1421,23 @@ static void deactivate_slab(struct kmem_
 	 * because both freelists are empty. So this is unlikely
 	 * to occur.
 	 */
-	while (unlikely(c->freelist)) {
+	freelist = c->freelist;
+	while (unlikely(freelist)) {
 		void **object;
 
 		tail = 0;	/* Hot objects. Put the slab first */
 
 		/* Retrieve object from cpu_freelist */
-		object = c->freelist;
-		c->freelist = c->freelist[c->offset];
+		object = freelist;
+		freelist = object[c->offset];
 
 		/* And put onto the regular freelist */
 		object[c->offset] = page->freelist;
 		page->freelist = object;
 		page->inuse--;
 	}
+	if (!tail)
+		set_freelist(c, NULL);
 	c->page = NULL;
 	unfreeze_slab(s, page, tail);
 }
@@ -1521,7 +1535,7 @@ load_freelist:
 	if (unlikely(SlabDebug(c->page)))
 		goto debug;
 
-	c->freelist = object[c->offset];
+	set_freelist(c, object[c->offset]);
 	c->page->inuse = slab_objects(s, c->page);
 	c->page->freelist = NULL;
 	c->node = page_to_nid(c->page);
@@ -1589,13 +1603,13 @@ static __always_inline void *slab_alloc(
 
 	local_irq_save(flags);
 	c = get_cpu_slab(s, smp_processor_id());
-	if (unlikely(!c->freelist || !node_match(c, node)))
+	if (unlikely(!get_freelist(c) || !node_match(c, node)))
 
 		object = __slab_alloc(s, gfpflags, node, addr, c);
 
 	else {
-		object = c->freelist;
-		c->freelist = object[c->offset];
+		object = get_freelist(c);
+		set_freelist(c, object[c->offset]);
 		stat(c, ALLOC_FASTPATH);
 	}
 	local_irq_restore(flags);
@@ -1709,8 +1723,8 @@ static __always_inline void slab_free(st
 	c = get_cpu_slab(s, smp_processor_id());
 	debug_check_no_locks_freed(object, c->objsize);
 	if (likely(page == c->page && c->node >= 0)) {
-		object[c->offset] = c->freelist;
-		c->freelist = object;
+		object[c->offset] = get_freelist(c);
+		set_freelist(c, object);
 		stat(c, FREE_FASTPATH);
 	} else
 		__slab_free(s, page, x, addr, c->offset);
@@ -1893,7 +1907,7 @@ static void init_kmem_cache_cpu(struct k
 			struct kmem_cache_cpu *c)
 {
 	c->page = NULL;
-	c->freelist = NULL;
+	set_freelist(c, NULL);
 	c->node = 0;
 	c->offset = s->offset / sizeof(void *);
 	c->objsize = s->objsize;
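
For readers unfamiliar with the freelist encoding manipulated above: SLUB keeps
each free object's next pointer inside the object itself, at word index
c->offset, which is what object[c->offset] dereferences in the fastpaths. A
minimal userspace sketch of that push/pop scheme follows; all names here are
illustrative toys, not kernel code.

#include <stdio.h>
#include <stdlib.h>

/* Toy model of the SLUB per-cpu freelist: a free object stores the
 * pointer to the next free object inside itself, at word index
 * 'offset' (c->offset in the kernel).
 */
struct toy_cpu {
	void **freelist;	/* first free object, or NULL */
	unsigned int offset;	/* word index of the embedded next pointer */
};

/* Pop: mirrors the slab_alloc() fastpath,
 * object = get_freelist(c); set_freelist(c, object[c->offset]);
 */
static void *toy_alloc(struct toy_cpu *c)
{
	void **object = c->freelist;

	if (object)
		c->freelist = object[c->offset];
	return object;
}

/* Push: mirrors the slab_free() fastpath,
 * object[c->offset] = get_freelist(c); set_freelist(c, object);
 */
static void toy_free(struct toy_cpu *c, void *p)
{
	void **object = p;

	object[c->offset] = c->freelist;
	c->freelist = object;
}

int main(void)
{
	struct toy_cpu c = { .freelist = NULL, .offset = 0 };
	void *a = calloc(4, sizeof(void *));
	void *b = calloc(4, sizeof(void *));

	toy_free(&c, a);
	toy_free(&c, b);	/* list is now b -> a -> NULL */
	printf("%p %p\n", toy_alloc(&c), toy_alloc(&c));	/* b, then a */
	free(a);
	free(b);
	return 0;
}

Note that the reworked deactivate_slab() walks a local copy of the list with
the same pointer-chasing pattern, and now touches c->freelist only once, at
the end, through set_freelist(), so every store to the per-cpu freelist goes
through the new accessors.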