---
 include/linux/slub_def.h |    3 ++-
 mm/slub.c                |    7 ++++---
 2 files changed, 6 insertions(+), 4 deletions(-)

Index: linux-2.6/include/linux/slub_def.h
===================================================================
--- linux-2.6.orig/include/linux/slub_def.h	2008-02-06 21:16:41.553735936 -0800
+++ linux-2.6/include/linux/slub_def.h	2008-02-06 21:17:54.440557180 -0800
@@ -15,8 +15,9 @@ struct kmem_cache_cpu {
 	unsigned long freemap;	/* Bitmap of free objects */
 	struct page *page;	/* The slab from which we are allocating */
 	void *address;		/* Page address */
-	unsigned int size;	/* Slab size */
 	int node;		/* The node of the page (or -1 for debug) */
+	unsigned int size;	/* Slab size */
+	u32 recip_val;		/* Reciprocal division value */
 	unsigned int objsize;	/* Size of an object (from kmem_cache) */
 };
 
Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c	2008-02-06 21:17:57.312509680 -0800
+++ linux-2.6/mm/slub.c	2008-02-06 22:03:49.287617682 -0800
@@ -21,6 +21,7 @@
 #include
 #include
 #include
+#include <linux/reciprocal_div.h>
 
 /*
  * Lock order:
@@ -1626,12 +1627,11 @@ static __always_inline void slab_free(st
 	unsigned long flags;
 	struct kmem_cache_cpu *c;
 	int index;
-	unsigned long offset;
+	unsigned long offset = object - page->address;
 
 	local_irq_save(flags);
 	c = get_cpu_slab(s, smp_processor_id());
-	offset = object - page->address;
-	index = offset / c->size;
+	index = reciprocal_divide(offset, c->recip_val);
 //	debug_check_no_locks_freed(x, s->objsize);
 	if (likely(page == c->page && c->node >= 0))
 		__set_bit(index, &c->freemap);
@@ -1819,6 +1819,7 @@ static void init_kmem_cache_cpu(struct k
 	c->node = 0;
 	c->objsize = s->objsize;
 	c->size = s->size;
+	c->recip_val = reciprocal_value(s->size);
 }
 
 static void init_kmem_cache_node(struct kmem_cache_node *n)