SLUB: Provide unique end marker for each slab

Currently we use the NULL pointer to signal that there are no more objects
on a freelist. However, the NULL pointers of all slabs match, in contrast
to the pointers to the real objects, which are distinct for each slab page.

Change the end pointer to simply be a pointer to the first object of the
slab with bit 0 set. That way the end pointers of all slabs are different.

Signed-off-by: Christoph Lameter

---
 mm/slub.c |   39 +++++++++++++++++++++++++++------------
 1 file changed, 27 insertions(+), 12 deletions(-)

Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c	2007-10-19 15:08:51.000000000 -0700
+++ linux-2.6/mm/slub.c	2007-10-19 15:11:13.000000000 -0700
@@ -276,12 +276,26 @@ static inline struct kmem_cache_cpu *get
 #endif
 }
 
+/*
+ * The last pointer in a slab is special. It points to the first object in the
+ * slab but has bit 0 set to mark it as the last one.
+ */
+static inline void **make_end(void *addr)
+{
+	return (void *)addr + 1;
+}
+
+static inline int is_end(const void *p)
+{
+	return (unsigned long)p & 1;
+}
+
 static inline int check_valid_pointer(struct kmem_cache *s,
 				struct page *page, const void *object)
 {
 	void *base;
 
-	if (!object)
+	if (is_end(object))
 		return 1;
 
 	base = page_address(page);
@@ -317,7 +331,7 @@ static inline void set_freepointer(struc
 
 /* Scan freelist */
 #define for_each_free_object(__p, __s, __free) \
-	for (__p = (__free); __p; __p = get_freepointer((__s), __p))
+	for (__p = (__free); !is_end(__p); __p = get_freepointer((__s), __p))
 
 /* Determine object index from a given position */
 static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
@@ -714,7 +728,7 @@ static int check_object(struct kmem_cach
 		 * of the free objects in this slab. May cause
 		 * another error because the object count is now wrong.
 		 */
-		set_freepointer(s, p, NULL);
+		set_freepointer(s, p, make_end(page_address(page)));
 		return 0;
 	}
 	return 1;
@@ -748,14 +762,15 @@ static int on_freelist(struct kmem_cache
 	void *fp = page->freelist;
 	void *object = NULL;
 
-	while (fp && nr <= s->objects) {
+	while (!is_end(fp) && nr <= s->objects) {
 		if (fp == search)
 			return 1;
 		if (!check_valid_pointer(s, page, fp)) {
 			if (object) {
 				object_err(s, page, object,
 					"Freechain corrupt");
-				set_freepointer(s, object, NULL);
+				set_freepointer(s, object,
+					make_end(page_address(page)));
 				break;
 			} else {
 				slab_err(s, page, "Freepointer corrupt");
@@ -1112,7 +1127,7 @@ static struct page *new_slab(struct kmem
 		last = p;
 	}
 	setup_object(s, page, last);
-	set_freepointer(s, last, NULL);
+	set_freepointer(s, last, make_end(start));
 
 	page->freelist = start;
 	page->inuse = 0;
@@ -1343,7 +1358,7 @@ static void unfreeze_slab(struct kmem_ca
 
 	ClearSlabFrozen(page);
 	if (page->inuse) {
-		if (page->freelist)
+		if (!is_end(page->freelist))
 			add_partial(n, page);
 		else if (SlabDebug(page) && (s->flags & SLAB_STORE_USER))
 			add_full(n, page);
@@ -1379,7 +1394,7 @@ static void deactivate_slab(struct kmem_
 	 * because both freelists are empty. So this is unlikely
 	 * to occur.
 	 */
-	while (unlikely(c->freelist)) {
+	while (unlikely(!is_end(c->freelist))) {
 		void **object;
 
 		/* Retrieve object from cpu_freelist */
@@ -1477,7 +1492,7 @@ static void *__slab_alloc(struct kmem_ca
 		goto another_slab;
 load_freelist:
 	object = c->page->freelist;
-	if (unlikely(!object))
+	if (unlikely(is_end(object)))
 		goto another_slab;
 	if (unlikely(SlabDebug(c->page)))
 		goto debug;
@@ -1569,7 +1584,7 @@ static void __always_inline *slab_alloc(
 
 	local_irq_save(flags);
 	c = get_cpu_slab(s, smp_processor_id());
-	if (unlikely(!c->freelist || !node_match(c, node))) {
+	if (unlikely(is_end(c->freelist) || !node_match(c, node))) {
 
 		object = __slab_alloc(s, gfpflags, node, addr, c);
 		if (unlikely(!object)) {
@@ -1636,7 +1651,7 @@ checks_ok:
 	 * was not on the partial list before
 	 * then add it.
 	 */
-	if (unlikely(!prior))
+	if (unlikely(is_end(prior)))
		add_partial(get_node(s, page_to_nid(page)), page);
 
 out_unlock:
@@ -1644,7 +1659,7 @@ out_unlock:
 	return;
 
 slab_empty:
-	if (prior)
+	if (!is_end(prior))
 		/*
 		 * Slab still on the partial list.
 		 */
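
For illustration, here is a minimal userspace sketch of the tagging scheme
(this harness is illustrative only, not kernel code: the names NR_OBJECTS and
the mock slab array are invented for the example, and it tags the pointer via
(uintptr_t)addr | 1 where the patch relies on addr + 1 over an aligned, even
address, which produces the same bit pattern):

#include <stdio.h>
#include <stdint.h>

#define NR_OBJECTS 4

/* Tag an aligned address as the end-of-freelist marker. */
static void *make_end(void *addr)
{
	return (void *)((uintptr_t)addr | 1);
}

/* Real objects are at least word aligned, so bit 0 set can only mean "end". */
static int is_end(const void *p)
{
	return (uintptr_t)p & 1;
}

int main(void)
{
	/* Each mock object holds just its free pointer, as in SLUB. */
	void *slab[NR_OBJECTS];
	void *p;
	int i;

	/* Chain object i to object i + 1; terminate with the tagged slab base. */
	for (i = 0; i < NR_OBJECTS - 1; i++)
		slab[i] = &slab[i + 1];
	slab[NR_OBJECTS - 1] = make_end(&slab[0]);

	/* Walk the chain the way for_each_free_object() would, stopping at
	 * the marker instead of at NULL. */
	for (p = &slab[0]; !is_end(p); p = *(void **)p)
		printf("free object at %p\n", p);

	return 0;
}

Because the marker encodes the address of its own slab's first object, every
slab terminates its freelist with a distinct value, whereas the old NULL
terminator was identical across all slabs.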