Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c	2007-10-20 12:28:28.000000000 -0700
+++ linux-2.6/mm/slub.c	2007-10-20 12:33:53.000000000 -0700
@@ -1590,6 +1590,7 @@

 		c->page->inuse++;
 		c->page->freelist = object[c->offset];
+		c->freelist = end(NULL);
 		c->node = -1;
 		goto unlock_out;
 	}
@@ -1683,6 +1684,9 @@

 	local_irq_save(flags);
 #endif
+	if (!page)
+		page = virt_to_head_page(addr);
+
 	slab_lock(page);

 	if (unlikely(SlabDebug(page)))
@@ -1733,6 +1737,10 @@
 	goto checks_ok;
 }

+static inline int same_page(void *p, void *q)
+{
+	return ((unsigned long)p ^ (unsigned long)q) < PAGE_SIZE;
+}
 /*
  * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
  * can perform fastpath freeing without additional function calls.
@@ -1769,7 +1777,7 @@
 		 * then any change of cpu_slab will cause the cmpxchg to fail
 		 * since the freelist pointers are unique per slab.
 		 */
-		if (unlikely(page != c->page || c->node < 0)) {
+		if (unlikely(!same_page(freelist, object))) {
 			__slab_free(s, page, x, addr, c->offset);
 			break;
 		}
@@ -1782,7 +1790,7 @@
 	local_irq_save(flags);
 	debug_check_no_locks_freed(object, s->objsize);
 	c = get_cpu_slab(s, smp_processor_id());
-	if (likely(page == c->page && c->node >= 0)) {
+	if (likely(same_page(freelist, object))) {
 		object[c->offset] = c->freelist;
 		c->freelist = object;
 	} else
@@ -1794,11 +1802,7 @@

 void kmem_cache_free(struct kmem_cache *s, void *x)
 {
-	struct page *page;
-
-	page = virt_to_head_page(x);
-
-	slab_free(s, page, x, __builtin_return_address(0));
+	slab_free(s, NULL, x, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(kmem_cache_free);
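
The fastpath change above leans entirely on same_page(): for a power-of-two PAGE_SIZE, XOR-ing two addresses clears every bit they have in common, so the result is below PAGE_SIZE exactly when both pointers fall within the same page; a mismatch only costs falling back to the locked __slab_free() path. Here is a minimal userspace sketch of that check, assuming a 4096-byte page and made-up addresses; the harness and constants are illustrative only and not part of the patch.

/*
 * Userspace sketch of the same_page() XOR test, assuming PAGE_SIZE is 4096.
 * The addresses below are hypothetical and are never dereferenced.
 */
#include <stdio.h>

#define PAGE_SIZE 4096UL

/* True iff p and q agree on every bit above the page-offset bits. */
static inline int same_page(void *p, void *q)
{
	return ((unsigned long)p ^ (unsigned long)q) < PAGE_SIZE;
}

int main(void)
{
	unsigned long base = 0x12345000UL;		/* hypothetical page-aligned address */

	void *freelist = (void *)(base + 16);			/* object on page 0 */
	void *obj_near = (void *)(base + 4032);			/* same page        */
	void *obj_far  = (void *)(base + PAGE_SIZE + 16);	/* next page        */

	printf("near: %d\n", same_page(freelist, obj_near));	/* prints 1 */
	printf("far:  %d\n", same_page(freelist, obj_far));	/* prints 0 */
	return 0;
}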