From: Christoph Lameter
Subject: slub: add functions to set and get the kmem_cache_cpu freelist pointer

Add functions to set and retrieve the freelist pointer. The
cmpxchg_local logic will need to change these operations in order to
encode the version.

Signed-off-by: Christoph Lameter

---
 mm/slub.c |   34 ++++++++++++++++++++++++----------
 1 file changed, 24 insertions(+), 10 deletions(-)

Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c	2008-03-16 23:44:25.299952432 -0700
+++ linux-2.6/mm/slub.c	2008-03-16 23:59:21.828755150 -0700
@@ -273,6 +273,16 @@ static inline struct kmem_cache_cpu *get
 #endif
 }
 
+static inline void **get_freelist(struct kmem_cache_cpu *c)
+{
+	return c->freelist;
+}
+
+static inline void set_freelist(struct kmem_cache_cpu *c, void **p)
+{
+	c->freelist = p;
+}
+
 /* Verify that a pointer has an address that is valid within a slab page */
 static inline int check_valid_pointer(struct kmem_cache *s,
 				struct page *page, const void *object)
@@ -1384,6 +1394,7 @@ static void deactivate_slab(struct kmem_
 {
 	struct page *page = c->page;
 	int tail = 1;
+	void **freelist;
 
 	if (page->freelist)
 		stat(c, DEACTIVATE_REMOTE_FREES);
@@ -1392,20 +1403,23 @@ static void deactivate_slab(struct kmem_
 	 * because both freelists are empty. So this is unlikely
 	 * to occur.
 	 */
-	while (unlikely(c->freelist)) {
+	freelist = get_freelist(c);
+	while (unlikely(freelist)) {
 		void **object;
 
 		tail = 0;	/* Hot objects. Put the slab first */
 
 		/* Retrieve object from cpu_freelist */
-		object = c->freelist;
-		c->freelist = c->freelist[c->offset];
+		object = freelist;
+		freelist = object[c->offset];
 
 		/* And put onto the regular freelist */
 		object[c->offset] = page->freelist;
 		page->freelist = object;
 		page->inuse--;
 	}
+	if (!tail)
+		set_freelist(c, NULL);
 	c->page = NULL;
 	unfreeze_slab(s, page, tail);
 }
@@ -1503,7 +1517,7 @@ load_freelist:
 	if (unlikely(SlabDebug(c->page)))
 		goto debug;
 
-	c->freelist = object[c->offset];
+	set_freelist(c, object[c->offset]);
 	c->page->inuse = c->page->objects;
 	c->page->freelist = NULL;
 	c->node = page_to_nid(c->page);
@@ -1571,13 +1585,13 @@ static __always_inline void *slab_alloc(
 
 	local_irq_save(flags);
 	c = get_cpu_slab(s, smp_processor_id());
-	if (unlikely(!c->freelist || !node_match(c, node)))
+	if (unlikely(!get_freelist(c) || !node_match(c, node)))
 
 		object = __slab_alloc(s, gfpflags, node, addr, c);
 
 	else {
-		object = c->freelist;
-		c->freelist = object[c->offset];
+		object = get_freelist(c);
+		set_freelist(c, object[c->offset]);
 		stat(c, ALLOC_FASTPATH);
 	}
 	local_irq_restore(flags);
@@ -1691,8 +1705,8 @@ static __always_inline void slab_free(st
 	c = get_cpu_slab(s, smp_processor_id());
 	debug_check_no_locks_freed(object, c->objsize);
 	if (likely(page == c->page && c->node >= 0)) {
-		object[c->offset] = c->freelist;
-		c->freelist = object;
+		object[c->offset] = get_freelist(c);
+		set_freelist(c, object);
 		stat(c, FREE_FASTPATH);
 	} else
 		__slab_free(s, page, x, addr, c->offset);
@@ -1875,7 +1889,7 @@ static void init_kmem_cache_cpu(struct k
 					struct kmem_cache_cpu *c)
 {
 	c->page = NULL;
-	c->freelist = NULL;
+	set_freelist(c, NULL);
 	c->node = 0;
 	c->offset = s->offset / sizeof(void *);
 	c->objsize = s->objsize;
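
For context on the "encode the version" remark above, here is a rough
sketch of one way the accessors could later fold a version number into
the freelist word (illustrative only; VERSION_BITS, VERSION_MASK and
the low-bit encoding are assumptions of mine, not the actual
cmpxchg_local follow-up patch). Because SLUB objects are at least word
aligned, the low bits of a genuine object pointer are always zero, so
they can carry a small counter that is bumped on every store. A stale
freelist value then never compares equal in cmpxchg_local(), which
closes the ABA window on a lockless fastpath:

/*
 * Sketch only -- not the real follow-up patch. Keep a small version
 * number in the low bits of c->freelist. Assumes objects are aligned
 * to at least (1 << VERSION_BITS) bytes, so those bits of a genuine
 * object pointer are always zero.
 */
#define VERSION_BITS	2
#define VERSION_MASK	((1UL << VERSION_BITS) - 1)

static inline void **get_freelist(struct kmem_cache_cpu *c)
{
	/* Strip the version bits to recover the object pointer */
	return (void **)((unsigned long)c->freelist & ~VERSION_MASK);
}

static inline void set_freelist(struct kmem_cache_cpu *c, void **p)
{
	/*
	 * Bump the version on every store so an interrupted fastpath
	 * fails its cmpxchg_local() even if the pointer value itself
	 * has cycled back to the same object (ABA).
	 */
	unsigned long version = ((unsigned long)c->freelist + 1) & VERSION_MASK;

	c->freelist = (void **)((unsigned long)p | version);
}

With any such encoding every reader and writer must go through the
accessors, since a raw access would see (or clobber) the version bits;
that is why the patch converts the remaining direct c->freelist
accesses on the fastpaths first.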