---
 include/linux/slub_def.h |    1 -
 mm/slub.c                |   35 ++++++++++++++---------------------
 2 files changed, 14 insertions(+), 22 deletions(-)

Index: linux-2.6/include/linux/slub_def.h
===================================================================
--- linux-2.6.orig/include/linux/slub_def.h	2007-10-30 16:00:01.000000000 -0700
+++ linux-2.6/include/linux/slub_def.h	2007-10-30 16:00:11.000000000 -0700
@@ -15,7 +15,6 @@ struct kmem_cache_cpu {
 	void **freelist;
 	struct page *page;
 	int node;
-	unsigned int offset;
 };
 
 struct kmem_cache_node {
Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c	2007-10-30 16:00:08.000000000 -0700
+++ linux-2.6/mm/slub.c	2007-10-30 16:00:11.000000000 -0700
@@ -282,13 +282,6 @@ static inline int check_valid_pointer(st
 	return 1;
 }
 
-/*
- * Slow version of get and set free pointer.
- *
- * This version requires touching the cache lines of kmem_cache which
- * we avoid to do in the fast alloc free paths. There we obtain the offset
- * from the page struct.
- */
 static inline void *get_freepointer(struct kmem_cache *s, void *object)
 {
 	return *(void **)(object + s->offset);
@@ -1446,10 +1439,10 @@ static void deactivate_slab(struct kmem_
 
 		/* Retrieve object from cpu_freelist */
 		object = c->freelist;
-		c->freelist = c->freelist[c->offset];
+		c->freelist = get_freepointer(s, c->freelist);
 
 		/* And put onto the regular freelist */
-		object[c->offset] = page->freelist;
+		set_freepointer(s, object, page->freelist);
 		page->freelist = object;
 		page->inuse--;
 	}
@@ -1606,7 +1599,7 @@ load_freelist:
 		goto debug;
 
 	object = c->page->freelist;
-	c->freelist = object[c->offset];
+	c->freelist = get_freepointer(s, object);
 	c->page->inuse = s->objects;
 	c->page->freelist = c->page->end;
 	c->node = page_to_nid(c->page);
@@ -1635,7 +1628,7 @@ debug:
 		goto another_slab;
 
 	c->page->inuse++;
-	c->page->freelist = object[c->offset];
+	c->page->freelist = get_freepointer(s, object);
 	c->node = -1;
 	goto unlock_out;
 }
@@ -1668,8 +1661,8 @@ static void __always_inline *slab_alloc(
 			}
 			break;
 		}
-	} while (cmpxchg_local(&c->freelist, object, object[c->offset])
-								!= object);
+	} while (cmpxchg_local(&c->freelist, object,
+			get_freepointer(s, object)) != object);
 	put_cpu();
 #else
 	unsigned long flags;
@@ -1685,7 +1678,7 @@ static void __always_inline *slab_alloc(
 		}
 	} else {
 		object = c->freelist;
-		c->freelist = object[c->offset];
+		c->freelist = get_freepointer(s, object);
 	}
 	local_irq_restore(flags);
 #endif
@@ -1719,7 +1712,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
  * handling required then we can return immediately.
  */
 static void __slab_free(struct kmem_cache *s, struct page *page,
-				void *x, void *addr, unsigned int offset)
+				void *x, void *addr)
 {
 	void *prior;
 	void **object = (void *)x;
@@ -1735,7 +1728,8 @@ static void __slab_free(struct kmem_cach
 	if (unlikely(state & SLABDEBUG))
 		goto debug;
 checks_ok:
-	prior = object[offset] = page->freelist;
+	prior = page->freelist;
+	set_freepointer(s, object, prior);
 	page->freelist = object;
 	page->inuse--;
 
@@ -1817,10 +1811,10 @@ static void __always_inline slab_free(st
 		 * since the freelist pointers are unique per slab.
 		 */
 		if (unlikely(page != c->page || c->node < 0)) {
-			__slab_free(s, page, x, addr, c->offset);
+			__slab_free(s, page, x, addr);
 			break;
 		}
-		object[c->offset] = freelist;
+		set_freepointer(s, object, freelist);
 	} while (cmpxchg_local(&c->freelist, freelist, object) != freelist);
 	put_cpu();
 #else
@@ -1830,10 +1824,10 @@ static void __always_inline slab_free(st
 	debug_check_no_locks_freed(object, s->objsize);
 	c = get_cpu_slab(s, smp_processor_id());
 	if (likely(page == c->page && c->node >= 0)) {
-		object[c->offset] = c->freelist;
+		set_freepointer(s, object, c->freelist);
 		c->freelist = object;
 	} else
-		__slab_free(s, page, x, addr, c->offset);
+		__slab_free(s, page, x, addr);
 
 	local_irq_restore(flags);
 #endif
@@ -2015,7 +2009,6 @@ static void init_kmem_cache_cpu(struct k
	c->page = NULL;
 	c->freelist = (void *)PAGE_MAPPING_ANON;
 	c->node = 0;
-	c->offset = s->offset / sizeof(void *);
 }
 
 static void init_kmem_cache_node(struct kmem_cache_node *n)
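For context, here is a minimal standalone sketch of the two access patterns this patch unifies. It is illustrative only, not kernel code: the structures are simplified stand-ins and the helper names old_get_freepointer/new_get_freepointer are made up for the example. The old fast paths indexed a free object as an array of pointers using a word offset cached per cpu in kmem_cache_cpu, while get_freepointer()/set_freepointer() derive the location from the byte offset kept in struct kmem_cache; both reach the same slot because the cached value was s->offset / sizeof(void *).

	/* Illustrative sketch; simplified stand-ins for the real structures. */
	#include <stddef.h>

	struct kmem_cache     { size_t offset; };       /* byte offset of the free pointer */
	struct kmem_cache_cpu { unsigned int offset; }; /* word offset: s->offset / sizeof(void *) */

	/* Old fast-path style: treat the free object as an array of pointers. */
	static inline void *old_get_freepointer(struct kmem_cache_cpu *c, void **object)
	{
		return object[c->offset];
	}

	/* New style: always compute the location from kmem_cache's byte offset. */
	static inline void *new_get_freepointer(struct kmem_cache *s, void *object)
	{
		return *(void **)((char *)object + s->offset);
	}

The trade-off, as the removed comment above notes, is one field less in kmem_cache_cpu at the cost of the alloc/free fast paths now reading s->offset, i.e. touching a kmem_cache cache line.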