From 683d0baad3d6e18134927f8c28ee804dbe10fe71 Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Mon, 7 Jan 2008 23:20:29 -0800
Subject: [PATCH] SLUB: Use non NULL end pointer.

We use a NULL pointer on freelists to signal that there are no more
objects. However, the NULL pointers of all slabs match, in contrast to
the pointers to real objects, which lie in different address ranges for
different slab pages.

Change the end pointer to be a pointer to the slab's first object with
bit 0 set. Every slab then has a different end pointer. This is
necessary to ensure that end markers can be matched to their source
slab during cmpxchg_local.

Bring back the use of the mapping field by SLUB, since we would
otherwise have to call the relatively expensive page_address() in
__slab_alloc(). Using the mapping field also avoids calls to
page_address() in various other functions.

There is no need to change page_mapping(), since bit 0 is set on the
mapping field just as it is for anonymous pages. page_mapping(slab_page)
will therefore still return NULL even though the mapping field is
overloaded.

Signed-off-by: Christoph Lameter
Cc: Pekka Enberg
Signed-off-by: Andrew Morton
---
 mm/slub.c | 49 ++++++++++++++++++++++++++++++-------------------
 1 file changed, 30 insertions(+), 19 deletions(-)
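As a standalone illustration of the encoding described above (a
hypothetical sketch only; the helpers make_end(), is_end_marker() and
end_to_base() are invented here and do not appear in the diff below),
the end marker can be built by tagging the slab's first-object address
with bit 0:

	/* Hypothetical sketch of the bit-0 end-marker encoding. */
	static inline void *make_end(void *first_object)
	{
		/*
		 * Object pointers are at least word aligned, so bit 0 is
		 * never set in a real object pointer. Setting it yields a
		 * marker that is unique to this slab page.
		 */
		return (void *)((unsigned long)first_object | 1UL);
	}

	static inline int is_end_marker(const void *p)
	{
		return (unsigned long)p & 1UL;
	}

	static inline void *end_to_base(const void *end)
	{
		/* Recover the first-object address from the marker. */
		return (void *)((unsigned long)end & ~1UL);
	}

Because the marker embeds the slab's own address, a cmpxchg_local()
based fast path can tell the empty freelist of one slab apart from the
empty freelist of another, which a shared NULL sentinel cannot.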
Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c	2008-03-06 23:40:16.147321764 -0800
+++ linux-2.6/mm/slub.c	2008-03-07 00:18:15.229079641 -0800
@@ -273,6 +273,13 @@ static inline struct kmem_cache_cpu *get
 #endif
 }
 
+#define END NULL
+
+static inline int is_end(const void *x)
+{
+	return x == NULL;
+}
+
 /* Determine the maximum number of objects that a slab page can hold */
 static inline unsigned long slab_objects(struct kmem_cache *s, struct page *page)
 {
@@ -287,7 +294,7 @@ static inline int check_valid_pointer(st
 {
 	void *base;
 
-	if (!object)
+	if (is_end(object))
 		return 1;
 
 	base = page_address(page);
@@ -324,7 +331,8 @@ static inline void set_freepointer(struc
 
 /* Scan freelist */
 #define for_each_free_object(__p, __s, __free) \
-	for (__p = (__free); __p; __p = get_freepointer((__s), __p))
+	for (__p = (__free); !is_end(__p); __p = get_freepointer((__s),\
+			__p))
 
 /* Determine object index from a given position */
 static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
@@ -722,7 +730,7 @@ static int check_object(struct kmem_cach
 		 * of the free objects in this slab. May cause
 		 * another error because the object count is now wrong.
 		 */
-		set_freepointer(s, p, NULL);
+		set_freepointer(s, p, END);
 		return 0;
 	}
 	return 1;
 }
@@ -757,18 +765,18 @@ static int on_freelist(struct kmem_cache
 	void *object = NULL;
 	int objects = slab_objects(s, page);
 
-	while (fp && nr <= objects) {
+	while (!is_end(fp) && nr <= objects) {
 		if (fp == search)
 			return 1;
 		if (!check_valid_pointer(s, page, fp)) {
 			if (object) {
 				object_err(s, page, object,
 					"Freechain corrupt");
-				set_freepointer(s, object, NULL);
+				set_freepointer(s, object, END);
 				break;
 			} else {
 				slab_err(s, page, "Freepointer corrupt");
-				page->freelist = NULL;
+				page->freelist = END;
 				page->inuse = objects;
 				slab_fix(s, "Freelist cleared");
 				return 0;
@@ -874,7 +882,7 @@ bad:
 		 */
 		slab_fix(s, "Marking all objects used");
 		page->inuse = slab_objects(s, page);
-		page->freelist = NULL;
+		page->freelist = END;
 	}
 	return 0;
 }
@@ -914,7 +922,7 @@ static int free_debug_processing(struct
 	}
 
 	/* Special debug activities for freeing objects */
-	if (!SlabFrozen(page) && !page->freelist)
+	if (!SlabFrozen(page) && is_end(page->freelist))
 		remove_full(s, page);
 	if (s->flags & SLAB_STORE_USER)
 		set_track(s, object, TRACK_FREE, addr);
@@ -1120,7 +1128,7 @@ static struct page *new_slab(struct kmem
 		last = p;
 	}
 	setup_object(s, page, last);
-	set_freepointer(s, last, NULL);
+	set_freepointer(s, last, END);
 
 	page->freelist = start;
 	page->inuse = 0;
@@ -1355,7 +1363,7 @@ static void unfreeze_slab(struct kmem_ca
 	ClearSlabFrozen(page);
 	if (page->inuse) {
 
-		if (page->freelist) {
+		if (!is_end(page->freelist)) {
 			add_partial(n, page, tail);
 			stat(c, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
 		} else {
@@ -1393,6 +1401,7 @@ static void unfreeze_slab(struct kmem_ca
 static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 {
 	struct page *page = c->page;
+	void **freelist;
 	int tail = 1;
 
 	if (page->freelist)
@@ -1402,14 +1411,15 @@ static void deactivate_slab(struct kmem_
 	 * because both freelists are empty. So this is unlikely
 	 * to occur.
 	 */
-	while (unlikely(c->freelist)) {
+	freelist = c->freelist;
+	while (unlikely(freelist)) {
 		void **object;
 
 		tail = 0;	/* Hot objects. Put the slab first */
 
 		/* Retrieve object from cpu_freelist */
-		object = c->freelist;
-		c->freelist = c->freelist[c->offset];
+		object = freelist;
+		freelist = object[c->offset];
 
 		/* And put onto the regular freelist */
 		object[c->offset] = page->freelist;
@@ -1417,6 +1427,7 @@ static void deactivate_slab(struct kmem_
 		page->inuse--;
 	}
 	c->page = NULL;
+	c->freelist = NULL;
 	unfreeze_slab(s, page, tail);
 }
 
@@ -1508,14 +1519,14 @@ static void *__slab_alloc(struct kmem_ca
 
 load_freelist:
 	object = c->page->freelist;
-	if (unlikely(!object))
+	if (unlikely(is_end(object)))
 		goto another_slab;
 	if (unlikely(SlabDebug(c->page)))
 		goto debug;
 
 	c->freelist = object[c->offset];
 	c->page->inuse = slab_objects(s, c->page);
-	c->page->freelist = NULL;
+	c->page->freelist = END;
 	c->node = page_to_nid(c->page);
 unlock_out:
 	slab_unlock(c->page);
@@ -1582,7 +1593,7 @@ static __always_inline void *slab_alloc(
 
 	local_irq_save(flags);
 	c = get_cpu_slab(s, smp_processor_id());
-	if (unlikely(!c->freelist || !node_match(c, node)))
+	if (unlikely(is_end(c->freelist) || !node_match(c, node)))
 
 		object = __slab_alloc(s, gfpflags, node, addr, c);
 
@@ -1652,7 +1663,7 @@ checks_ok:
 	 * Objects left in the slab. If it was not on the partial list before
 	 * then add it.
 	 */
-	if (unlikely(!prior)) {
+	if (unlikely(is_end(prior))) {
 		add_partial(get_node(s, page_to_nid(page)), page, 1);
 		stat(c, FREE_ADD_PARTIAL);
 	}
@@ -1662,7 +1673,7 @@ out_unlock:
 	return;
 
 slab_empty:
-	if (prior) {
+	if (!is_end(prior)) {
 		/*
 		 * Slab still on the partial list.
 		 */
@@ -1886,7 +1897,7 @@ static void init_kmem_cache_cpu(struct k
 			struct kmem_cache_cpu *c)
 {
 	c->page = NULL;
-	c->freelist = NULL;
+	c->freelist = END;
 	c->node = 0;
 	c->offset = s->offset / sizeof(void *);
 	c->objsize = s->objsize;
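The changelog above notes that page_mapping(slab_page) keeps returning
NULL once the mapping field is overloaded. As a rough illustration
(a simplified sketch, not the kernel's actual page_mapping(), which also
handles the swap cache and other cases), the relevant check boils down
to testing bit 0:

	/*
	 * Simplified sketch: bit 0 of page->mapping (PAGE_MAPPING_ANON)
	 * marks anonymous pages, and an end marker stored in the field
	 * sets the same bit, so no struct address_space is returned in
	 * either case.
	 */
	static struct address_space *page_mapping_sketch(struct page *page)
	{
		unsigned long mapping = (unsigned long)page->mapping;

		if (mapping & PAGE_MAPPING_ANON)
			return NULL;
		return (struct address_space *)mapping;
	}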