---
 mm/slub.c |   19 +++++++++++++++++--
 1 file changed, 17 insertions(+), 2 deletions(-)

Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c	2007-12-21 14:51:03.242407195 -0800
+++ linux-2.6/mm/slub.c	2007-12-21 15:06:52.145123303 -0800
@@ -1581,6 +1581,15 @@ void *kmem_cache_alloc_node(struct kmem_
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 #endif
 
+static void rcu_free_object(struct rcu_head *h)
+{
+	struct page *page = virt_to_head_page(h);
+	struct kmem_cache *s = page->slab;
+	void **object = (void *)h - s->offset;
+
+	kfree(object);
+}
+
 /*
  * Slow patch handling. This may still be called frequently since objects
  * have a longer lifetime than the cpu slabs in most processing loads.
@@ -1595,7 +1604,13 @@ static void __slab_free(struct kmem_cach
 	void *prior;
 	void **object = (void *)x;
 
-	slab_lock(page);
+	if (unlikely(!slab_trylock(page))) {
+		/* Free via RCU since the slab is contended */
+		struct rcu_head *h = (void *)(object + offset);
+
+		call_rcu(h, rcu_free_object);
+		return;
+	}
 
 	if (unlikely(SlabDebug(page)))
 		goto debug;
@@ -2142,7 +2157,7 @@ static int calculate_sizes(struct kmem_c
 		 * destructor or are poisoning the objects.
 		 */
 		s->offset = size;
-		size += sizeof(void *);
+		size += sizeof(struct rcu_head);
 	}
 
 #ifdef CONFIG_SLUB_DEBUG