slab: extract cache_free function

A simple, direct free of a slab object back to its slab, without the use
of any caches, is needed in several places. Extract the common code into
a cache_free() helper.

Allow cpu caches to not exist. In that case we go directly to the slab
lists, bypassing all caches.

Signed-off-by: Christoph Lameter

Index: linux-2.6.18-rc4/mm/slab.c
===================================================================
--- linux-2.6.18-rc4.orig/mm/slab.c	2006-08-08 18:46:07.668706369 -0700
+++ linux-2.6.18-rc4/mm/slab.c	2006-08-08 18:46:37.135634927 -0700
@@ -957,8 +957,19 @@ static int transfer_objects(struct array
 	return nr;
 }
 
-#ifdef CONFIG_NUMA
+/*
+ * Simple slow and direct free of a single object to the l3 lists of
+ * the indicated node.
+ */
+static void cache_free(struct kmem_cache *cachep, void *objp, int node)
+{
+	spin_lock(&(cachep->nodelists[node])->list_lock);
+	free_block(cachep, &objp, 1, node);
+	spin_unlock(&(cachep->nodelists[node])->list_lock);
+}
+
 static void *__cache_alloc_node(struct kmem_cache *, gfp_t, int);
+#ifdef CONFIG_NUMA
 static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
 
 static void free_alien_cache(struct kmem_list3 *l3)
@@ -1067,11 +1078,8 @@ static inline int cache_free_alien(struc
 		}
 		alien->entry[alien->avail++] = objp;
 		spin_unlock(&alien->lock);
-	} else {
-		spin_lock(&(cachep->nodelists[nodeid])->list_lock);
-		free_block(cachep, &objp, 1, nodeid);
-		spin_unlock(&(cachep->nodelists[nodeid])->list_lock);
-	}
+	} else
+		cache_free(cachep, objp, nodeid);
 	return 1;
 }
 
@@ -2265,6 +2273,8 @@ static void do_drain(void *arg)
 
 	check_irq_off();
 	ac = cpu_cache_get(cachep);
+	if (!ac)
+		return;
 	spin_lock(&cachep->nodelists[node]->list_lock);
 	free_block(cachep, ac->entry, ac->avail, node);
 	spin_unlock(&cachep->nodelists[node]->list_lock);
@@ -2834,6 +2844,9 @@ static void *cache_alloc_refill(struct k
 
 	check_irq_off();
 	ac = cpu_cache_get(cachep);
+	if (!ac)
+		return __cache_alloc_node(cachep, flags, numa_node_id());
+
 	batchcount = ac->batchcount;
 	if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
 		/*
@@ -2968,6 +2981,9 @@ static inline void *____cache_alloc(stru
 
 	check_irq_off();
 	ac = cpu_cache_get(cachep);
+	if (unlikely(!ac))
+		return __cache_alloc_node(cachep, flags, numa_node_id());
+
 	if (likely(ac->avail)) {
 		STATS_INC_ALLOCHIT(cachep);
 		ac->touched = 1;
@@ -3018,6 +3034,7 @@ static void *alternate_node_alloc(struct
 		return __cache_alloc_node(cachep, flags, nid_alloc);
 	return NULL;
 }
+#endif
 
 /*
  * A interface to enable slab creation on nodeid
@@ -3057,7 +3074,6 @@ static void *__cache_alloc_node(struct k
 	spin_unlock(&l3->list_lock);
 	return obj;
 }
-#endif
 
 /*
  * Caller needs to acquire correct kmem_list's list_lock
@@ -3169,6 +3185,10 @@ static inline void __cache_free(struct k
 {
 	struct array_cache *ac = cpu_cache_get(cachep);
 
+	if (unlikely(!ac)) {
+		cache_free(cachep, objp, numa_node_id());
+		return;
+	}
 	check_irq_off();
 	objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));