slab: put alien pointer array into the kmem_list3 structure

The alien pointer array is always allocated when the kmem_list3 structure
is allocated, so it is easiest to just put them together.  That saves us
the logic for a special allocation of the alien cache array, and it makes
kmem_list3 similar to the kmem_cache structure, which contains arrays of
per-cpu caches.  Plus we no longer have to check whether the alien pointer
is NULL.

Signed-off-by: Christoph Lameter

Index: linux-2.6.18-rc4/mm/slab.c
===================================================================
--- linux-2.6.18-rc4.orig/mm/slab.c	2006-08-08 16:18:33.629005636 -0700
+++ linux-2.6.18-rc4/mm/slab.c	2006-08-08 16:31:05.730000310 -0700
@@ -301,9 +301,9 @@ struct kmem_list3 {
 	unsigned int colour_next;	/* Per-node cache coloring */
 	spinlock_t list_lock;
 	struct array_cache *shared;	/* shared per node */
-	struct array_cache **alien;	/* on other nodes */
 	unsigned long next_reap;	/* updated without locking */
 	int free_touched;		/* updated without locking */
+	struct array_cache *alien[MAX_NUMNODES];
 };
 
 /*
@@ -961,46 +961,33 @@ static int transfer_objects(struct array
 static void *__cache_alloc_node(struct kmem_cache *, gfp_t, int);
 static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
 
-static void alloc_alien_cache(struct kmem_list3 *l3, int node)
+static void free_alien_cache(struct kmem_list3 *l3)
 {
-	struct array_cache **ac_ptr;
-	int memsize = sizeof(void *) * MAX_NUMNODES;
 	int i;
 
-	ac_ptr = kmalloc_node(memsize, GFP_KERNEL, node);
-	if (ac_ptr) {
-		for_each_node(i) {
-			if (i == node || !node_online(i)) {
-				ac_ptr[i] = NULL;
-				continue;
-			}
-			ac_ptr[i] = alloc_arraycache(node, ALIEN_LIMIT, 0xbaadf00d);
-			if (!ac_ptr[i]) {
-				for (i--; i <= 0; i--)
-					kfree(ac_ptr[i]);
-				kfree(ac_ptr);
-				goto fail;
-			}
-		}
-		l3->alien = ac_ptr;
-		return;
+	for_each_node(i) {
+		kfree(l3->alien[i]);
+		l3->alien[i] = NULL;
 	}
-fail:
-	printk(KERN_ERR "slab: alien cache alloc failed"
-		" continuing without alien cache.\n");
-	l3->alien = NULL;
 }
 
-static void free_alien_cache(struct kmem_list3 *l3)
+static void alloc_alien_cache(struct kmem_list3 *l3, int node)
 {
 	int i;
 
-	if (!l3->alien)
-		return;
-	for_each_node(i)
-		kfree(l3->alien[i]);
-	kfree(l3->alien);
-	l3->alien = NULL;
+	for_each_node(i) {
+		if (i == node || !node_online(i)) {
+			l3->alien[i] = NULL;
+			continue;
+		}
+		l3->alien[i] = alloc_arraycache(node, ALIEN_LIMIT, 0xbaadf00d);
+		if (!l3->alien[i]) {
+			free_alien_cache(l3);
+			printk(KERN_ERR "slab: alien cache alloc failed"
+				" continuing without alien cache.\n");
+			return;
+		}
+	}
 }
 
 static void __drain_alien_cache(struct kmem_cache *cachep,
@@ -1030,14 +1017,11 @@ static void __drain_alien_cache(struct k
 static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
 {
 	int node = __get_cpu_var(reap_node);
+	struct array_cache *ac = l3->alien[node];
 
-	if (l3->alien) {
-		struct array_cache *ac = l3->alien[node];
-
-		if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
-			__drain_alien_cache(cachep, ac, node);
-			spin_unlock_irq(&ac->lock);
-		}
+	if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
+		__drain_alien_cache(cachep, ac, node);
+		spin_unlock_irq(&ac->lock);
 	}
 }
 
@@ -1048,9 +1032,6 @@ static void drain_alien_cache(struct kme
 	struct array_cache *ac;
 	unsigned long flags;
 
-	if (!l3->alien)
-		return;
-
 	for_each_online_node(i) {
 		ac = l3->alien[i];
 		if (ac) {
@@ -1077,7 +1058,7 @@ static inline int cache_free_alien(struc
 
 	l3 = cachep->nodelists[numa_node_id()];
 	STATS_INC_NODEFREES(cachep);
-	if (l3->alien && l3->alien[nodeid]) {
+	if (l3->alien[nodeid]) {
 		alien = l3->alien[nodeid];
 		spin_lock(&alien->lock);
 		if (unlikely(alien->avail == alien->limit)) {
@@ -1101,7 +1082,6 @@ static inline int cache_free_alien(struc
 
 static inline void alloc_alien_cache(struct kmem_list3 *l3, int node)
 {
-	l3->alien = (struct array_cache **)0x01020304ul;
 }
 
 static inline void free_alien_cache(struct kmem_list3 *l3)
@@ -1219,7 +1199,6 @@ static int __cpuinit cpuup_callback(stru
 		list_for_each_entry(cachep, &cache_chain, next) {
 			struct array_cache *nc;
 			struct array_cache *shared;
-			struct array_cache **alien;
 			cpumask_t mask;
 
 			mask = node_to_cpumask(node);
@@ -1250,16 +1229,11 @@ static int __cpuinit cpuup_callback(stru
 				l3->shared = NULL;
 			}
 
-			alien = l3->alien;
-			l3->alien = NULL;
-
 			spin_unlock_irq(&l3->list_lock);
 
 			kfree(shared);
-			if (alien) {
-				drain_alien_cache(cachep, l3);
-				free_alien_cache(l3);
-			}
+			drain_alien_cache(cachep, l3);
+			free_alien_cache(l3);
 free_array_cache:
 			kfree(nc);
 		}
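
For reference, a minimal userspace sketch of the layout change (not kernel
code; NR_NODES and the struct names are simplified stand-ins for
MAX_NUMNODES, array_cache and kmem_list3) showing why the outer NULL check
can go away:

/*
 * Standalone sketch -- hypothetical names, not kernel code.
 */
#include <stdio.h>

#define NR_NODES 4

struct array_cache {
	unsigned int avail;
	unsigned int limit;
};

/* Before: a separately kmalloc'ed pointer array that may be NULL. */
struct list3_old {
	struct array_cache **alien;
};

/* After: the pointer array is embedded, so it exists whenever the
 * node structure itself exists. */
struct list3_new {
	struct array_cache *alien[NR_NODES];
};

int main(void)
{
	struct list3_new l3 = { { NULL } };
	int i;

	/* No "if (l3.alien)" guard is needed before indexing. */
	for (i = 0; i < NR_NODES; i++)
		if (l3.alien[i])
			printf("node %d has an alien cache\n", i);

	printf("embedding costs %zu bytes per node structure instead of "
	       "one pointer\n", sizeof(l3.alien));
	return 0;
}

The trade-off is that every kmem_list3 now carries MAX_NUMNODES pointers
even when most of them stay NULL, in exchange for dropping the separate
allocation and the NULL checks on every fast-path access.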