Index: linux-2.6.16-rc4-mm2/mm/slab.c
===================================================================
--- linux-2.6.16-rc4-mm2.orig/mm/slab.c	2006-03-01 18:32:11.000000000 -0800
+++ linux-2.6.16-rc4-mm2/mm/slab.c	2006-03-01 18:35:15.000000000 -0800
@@ -3043,7 +3043,7 @@ static inline void __cache_free(struct k
 {
 	struct array_cache *ac = cpu_cache_get(cachep);
+	unsigned long flags;
 
-	check_irq_off();
 	objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
 
 	/* Make sure we are not freeing a object from another
@@ -3058,35 +3058,39 @@ static inline void __cache_free(struct k
 			int nodeid = slabp->nodeid;
 			struct kmem_list3 *l3;
 
+			preempt_disable();
 			l3 = cachep->nodelists[numa_node_id()];
 			STATS_INC_NODEFREES(cachep);
 			if (l3->alien && l3->alien[nodeid]) {
 				alien = l3->alien[nodeid];
-				spin_lock(&alien->lock);
+				spin_lock_irqsave(&alien->lock, flags);
 				if (unlikely(alien->avail == alien->limit))
 					__drain_alien_cache(cachep, alien, nodeid);
 				alien->entry[alien->avail++] = objp;
-				spin_unlock(&alien->lock);
+				spin_unlock_irqrestore(&alien->lock, flags);
 			} else {
-				spin_lock(&(cachep->nodelists[nodeid])->
-					  list_lock);
+				spin_lock_irqsave(&(cachep->nodelists[nodeid])->
+					  list_lock, flags);
 				free_block(cachep, &objp, 1, nodeid);
-				spin_unlock(&(cachep->nodelists[nodeid])->
-					   list_lock);
+				spin_unlock_irqrestore(&(cachep->nodelists[nodeid])->
+					   list_lock, flags);
 			}
+			preempt_enable();
 			return;
 		}
 	}
 #endif
-	if (likely(ac->avail < ac->limit)) {
+	local_irq_save(flags);
+	/* Re-fetch: we may have migrated to another cpu before irqs went off */
+	ac = cpu_cache_get(cachep);
+	if (likely(ac->avail < ac->limit))
 		STATS_INC_FREEHIT(cachep);
-		ac->entry[ac->avail++] = objp;
-		return;
-	} else {
+	else {
 		STATS_INC_FREEMISS(cachep);
 		cache_flusharray(cachep, ac);
-		ac->entry[ac->avail++] = objp;
 	}
+	ac->entry[ac->avail++] = objp;
+	local_irq_restore(flags);
 }
 
 /**
@@ -3310,9 +3314,5 @@ void kmem_cache_free(struct kmem_cache *
 {
-	unsigned long flags;
-
-	local_irq_save(flags);
 	__cache_free(cachep, objp);
-	local_irq_restore(flags);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 