---
 mm/slub.c |   63 ++++++++++++++++++++++++++++++++++++++++++++++----------------
 1 file changed, 47 insertions(+), 16 deletions(-)

Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c	2007-10-18 12:30:03.000000000 -0700
+++ linux-2.6/mm/slub.c	2007-10-18 12:31:31.000000000 -0700
@@ -1440,6 +1440,42 @@ static void unfreeze_slab(struct kmem_ca
 	slab_unlock(page);
 }
 
+/* Free an object in a slab and return the old freelist pointer */
+static inline void **free_object(struct page *page, void **object, int offset)
+{
+	void **prior;
+
+	prior = page->freelist;
+	object[offset] = prior;
+	page->freelist = object;
+	return prior;
+}
+
+/*
+ * Allocate object from a slab.
+ */
+static inline void **alloc_object(struct page *page, int offset)
+{
+	void **object;
+
+	object = page->freelist;
+	if (object)
+		page->freelist = object[offset];
+	return object;
+}
+
+/*
+ * Get all objects from a slab
+ */
+static inline void **get_freelist(struct page *page)
+{
+	void **list;
+
+	list = page->freelist;
+	page->freelist = NULL;
+	return list;
+}
+
 /*
  * Remove the cpu slab
  */
@@ -1461,9 +1497,7 @@ static void deactivate_slab(struct kmem_
 		object = c->freelist;
 		c->freelist = c->freelist[c->offset];
-		/* And put onto the regular freelist */
-		object[c->offset] = page->freelist;
-		page->freelist = object;
+		free_object(page, object, c->offset);
 	}
 	c->page = NULL;
 	unfreeze_slab(s, page, tail);
 }
@@ -1550,15 +1584,14 @@ static void *__slab_alloc(struct kmem_ca
 	if (unlikely(!node_match(c, node)))
 		goto another_slab;
 load_freelist:
-	object = c->page->freelist;
-	if (unlikely(!object))
-		goto another_slab;
 	if (unlikely(SlabDebug(c->page)))
 		goto debug;
 
-	object = c->page->freelist;
+	object = get_freelist(c->page);
+	if (unlikely(!object))
+		goto another_slab;
+
 	c->freelist = object[c->offset];
-	c->page->freelist = NULL;
 	c->node = page_to_nid(c->page);
 	slab_unlock(c->page);
 	return object;
@@ -1610,11 +1643,12 @@ new_slab:
 	}
 	return NULL;
 debug:
-	object = c->page->freelist;
-	if (!alloc_debug_processing(s, c->page, object, addr))
+	if (!c->page->freelist)
+		goto another_slab;
+	if (!alloc_debug_processing(s, c->page, c->page->freelist, addr))
 		goto another_slab;
 
-	c->page->freelist = object[c->offset];
+	object = alloc_object(c->page, c->offset);
 	c->node = -1;
 	slab_unlock(c->page);
 	return object;
@@ -1688,8 +1722,7 @@ static void __slab_free(struct kmem_cach
 	if (unlikely(SlabDebug(page)))
 		goto debug;
 checks_ok:
-	prior = object[offset] = page->freelist;
-	page->freelist = object;
+	prior = free_object(page, object, offset);
 
 	/*
 	 * The slab is now guaranteed to have free objects.
@@ -2082,9 +2115,7 @@ static struct kmem_cache_node *early_kme
 			"in order to be able to continue\n");
 	}
 
-	n = page->freelist;
-	BUG_ON(!n);
-	page->freelist = get_freepointer(kmalloc_caches, n);
+	n = (struct kmem_cache_node *)alloc_object(page, 0);
 	kmalloc_caches->node[node] = n;
#ifdef CONFIG_SLUB_DEBUG
 	init_object(kmalloc_caches, n, 1);
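
For reviewers who want to poke at the freelist technique these helpers
encapsulate, the sketch below models it in plain userspace C: each free
object stores the pointer to the next free object at word index `offset'
inside itself, so the list is threaded through the free objects with no
external metadata. This is an illustrative model only, not kernel code;
the toy_* names and the fixed object layout are made up for this sketch.

/*
 * Userspace model of a slab freelist threaded through free objects.
 * Illustrative only; toy_page, toy_free, toy_alloc, toy_get_freelist
 * are hypothetical names, not part of mm/slub.c.
 */
#include <assert.h>
#include <stdio.h>

#define OBJ_WORDS	4	/* object size: 4 pointer-sized words */
#define NR_OBJS		3	/* objects per "page" in this model */

struct toy_page {
	void **freelist;			/* first free object, or NULL */
	void *objects[NR_OBJS][OBJ_WORDS];	/* backing storage */
};

/* Mirrors free_object(): push object, return the old freelist head */
static void **toy_free(struct toy_page *page, void **object, int offset)
{
	void **prior = page->freelist;

	object[offset] = prior;		/* link object to previous head */
	page->freelist = object;	/* object becomes the new head */
	return prior;
}

/* Mirrors alloc_object(): pop the freelist head, if there is one */
static void **toy_alloc(struct toy_page *page, int offset)
{
	void **object = page->freelist;

	if (object)
		page->freelist = object[offset];
	return object;
}

/* Mirrors get_freelist(): detach the whole chain in one step */
static void **toy_get_freelist(struct toy_page *page)
{
	void **list = page->freelist;

	page->freelist = NULL;
	return list;
}

int main(void)
{
	struct toy_page page = { .freelist = NULL };
	int offset = 0;			/* free pointer kept in word 0 here */
	int i;

	/* Build the freelist by "freeing" every object on the page */
	for (i = 0; i < NR_OBJS; i++)
		toy_free(&page, page.objects[i], offset);

	/* LIFO order: the most recently freed object comes back first */
	assert(toy_alloc(&page, offset) == page.objects[2]);
	assert(toy_alloc(&page, offset) == page.objects[1]);

	/* Detaching the chain leaves the page looking fully allocated */
	assert(toy_get_freelist(&page) == page.objects[0]);
	assert(page.freelist == NULL);

	printf("freelist model behaves as expected\n");
	return 0;
}

Note the LIFO behavior: toy_alloc() always pops what toy_free() pushed
last, which is the same property __slab_alloc() relies on to hand out
recently freed (cache-warm) objects first.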