---
 mm/slub.c |   52 ++++++++++++++++++++++++++++++++--------------------
 1 file changed, 32 insertions(+), 20 deletions(-)

Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c	2008-03-14 02:31:56.000000000 -0700
+++ linux-2.6/mm/slub.c	2008-03-14 02:33:07.000000000 -0700
@@ -293,6 +293,13 @@ static inline struct kmem_cache_cpu *get
 #endif
 }
 
+#define END NULL
+
+static inline int is_end(const void *freelist)
+{
+	return freelist == NULL;
+}
+
 /* Determine the maximum number of objects that a slab page can hold */
 static inline unsigned long slab_objects(struct kmem_cache *s, struct page *page)
 {
@@ -307,7 +314,7 @@ static inline int check_valid_pointer(st
 {
 	void *base;
 
-	if (!object)
+	if (is_end(object))
 		return 1;
 
 	base = page_address(page);
@@ -344,7 +351,8 @@ static inline void set_freepointer(struc
 
 /* Scan freelist */
 #define for_each_free_object(__p, __s, __free) \
-	for (__p = (__free); __p; __p = get_freepointer((__s), __p))
+	for (__p = (__free); !is_end(__p); __p = get_freepointer((__s),\
+		__p))
 
 /* Determine object index from a given position */
 static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
@@ -742,7 +750,7 @@ static int check_object(struct kmem_cach
 		 * of the free objects in this slab. May cause
 		 * another error because the object count is now wrong.
 		 */
-		set_freepointer(s, p, NULL);
+		set_freepointer(s, p, END);
 		return 0;
 	}
 	return 1;
@@ -777,18 +785,18 @@ static int on_freelist(struct kmem_cache
 	void *object = NULL;
 	int objects = slab_objects(s, page);
 
-	while (fp && nr <= objects) {
+	while (!is_end(fp) && nr <= objects) {
 		if (fp == search)
 			return 1;
 		if (!check_valid_pointer(s, page, fp)) {
 			if (object) {
 				object_err(s, page, object,
 					"Freechain corrupt");
-				set_freepointer(s, object, NULL);
+				set_freepointer(s, object, END);
 				break;
 			} else {
 				slab_err(s, page, "Freepointer corrupt");
-				page->freelist = NULL;
+				page->freelist = END;
 				page->inuse = objects;
 				slab_fix(s, "Freelist cleared");
 				return 0;
@@ -894,7 +902,7 @@ bad:
 		 */
 		slab_fix(s, "Marking all objects used");
 		page->inuse = slab_objects(s, page);
-		page->freelist = NULL;
+		page->freelist = END;
 	}
 	return 0;
 }
@@ -934,7 +942,7 @@ static int free_debug_processing(struct
 	}
 
 	/* Special debug activities for freeing objects */
-	if (!SlabFrozen(page) && !page->freelist)
+	if (!SlabFrozen(page) && is_end(page->freelist))
 		remove_full(s, page);
 	if (s->flags & SLAB_STORE_USER)
 		set_track(s, object, TRACK_FREE, addr);
@@ -1131,7 +1139,7 @@ static struct page *new_slab(struct kmem
 		last = p;
 	}
 	setup_object(s, page, last);
-	set_freepointer(s, last, NULL);
+	set_freepointer(s, last, END);
 
 	page->freelist = start;
 	page->inuse = 0;
@@ -1363,7 +1371,7 @@ static void unfreeze_slab(struct kmem_ca
 	ClearSlabFrozen(page);
 	if (page->inuse) {
 
-		if (page->freelist) {
+		if (!is_end(page->freelist)) {
 			add_partial(n, page, tail);
 			stat(c, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
 		} else {
@@ -1402,28 +1410,32 @@ static void deactivate_slab(struct kmem_
 {
 	struct page *page = c->page;
 	int tail = 1;
+	void **freelist;
 
-	if (page->freelist)
+	if (!is_end(page->freelist))
 		stat(c, DEACTIVATE_REMOTE_FREES);
 	/*
 	 * Merge cpu freelist into slab freelist. Typically we get here
 	 * because both freelists are empty. So this is unlikely
 	 * to occur.
 	 */
-	while (unlikely(c->freelist)) {
+	freelist = c->freelist;
+	while (unlikely(!is_end(freelist))) {
 		void **object;
 
 		tail = 0;	/* Hot objects. Put the slab first */
 
 		/* Retrieve object from cpu_freelist */
-		object = c->freelist;
-		c->freelist = c->freelist[c->offset];
+		object = freelist;
+		freelist = object[c->offset];
 
 		/* And put onto the regular freelist */
 		object[c->offset] = page->freelist;
 		page->freelist = object;
 		page->inuse--;
 	}
+	if (!tail)
+		c->freelist = END;
 	c->page = NULL;
 	unfreeze_slab(s, page, tail);
 }
@@ -1516,14 +1528,14 @@ static void *__slab_alloc(struct kmem_ca
 
 load_freelist:
 	object = c->page->freelist;
-	if (unlikely(!object))
+	if (unlikely(is_end(object)))
 		goto another_slab;
 	if (unlikely(SlabDebug(c->page)))
 		goto debug;
 
 	c->freelist = object[c->offset];
 	c->page->inuse = slab_objects(s, c->page);
-	c->page->freelist = NULL;
+	c->page->freelist = END;
 	c->node = page_to_nid(c->page);
 unlock_out:
 	slab_unlock(c->page);
@@ -1589,7 +1601,7 @@ static __always_inline void *slab_alloc(
 
 	local_irq_save(flags);
 	c = get_cpu_slab(s, smp_processor_id());
-	if (unlikely(!c->freelist || !node_match(c, node)))
+	if (unlikely(is_end(c->freelist) || !node_match(c, node)))
 
 		object = __slab_alloc(s, gfpflags, node, addr, c);
 
@@ -1659,7 +1671,7 @@ checks_ok:
 	 * Objects left in the slab. If it was not on the partial list before
 	 * then add it.
 	 */
-	if (unlikely(!prior)) {
+	if (unlikely(is_end(prior))) {
 		add_partial(get_node(s, page_to_nid(page)), page, 1);
 		stat(c, FREE_ADD_PARTIAL);
 	}
@@ -1669,7 +1681,7 @@ out_unlock:
 	return;
 
 slab_empty:
-	if (prior) {
+	if (!is_end(prior)) {
 		/*
 		 * Slab still on the partial list.
 		 */
@@ -1893,7 +1905,7 @@ static void init_kmem_cache_cpu(struct k
 			struct kmem_cache_cpu *c)
 {
 	c->page = NULL;
-	c->freelist = NULL;
+	c->freelist = END;
 	c->node = 0;
 	c->offset = s->offset / sizeof(void *);
 	c->objsize = s->objsize;
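
For readers outside mm/slub.c, the mechanical pattern this patch applies can be seen in isolation: every literal NULL used as a freelist terminator becomes the named sentinel END, and every termination test goes through is_end(), so a later change can redefine the sentinel in exactly one place without touching the walkers. Below is a minimal userspace sketch of that idea; the struct object type, the pool array, and main() are hypothetical illustrations for this write-up, not SLUB code.

#include <stddef.h>
#include <stdio.h>

/*
 * Stand-ins for the patch's END/is_end(): the terminator is named once,
 * and callers ask is_end() instead of comparing against a bare NULL.
 */
#define END NULL

static inline int is_end(const void *p)
{
	return p == NULL;
}

/* Toy free object: the first word of each free object links to the next. */
struct object {
	struct object *next;
	char payload[24];
};

int main(void)
{
	struct object pool[4];
	struct object *freelist = END;
	int n = 0;

	/* Build the freelist, terminating it with END rather than bare NULL. */
	for (size_t i = 0; i < 4; i++) {
		pool[i].next = freelist;
		freelist = &pool[i];
	}

	/* Walk it the way for_each_free_object() does after the patch. */
	for (struct object *p = freelist; !is_end(p); p = p->next)
		n++;

	printf("%d free objects\n", n);
	return 0;
}

As in the patch, is_end() takes a const void * so any freelist pointer can be tested without casts; the walker loop mirrors the post-patch shape of for_each_free_object().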