slab: remove alien cache pointer arguments

Simplify the slab code by removing the alien cache arguments. We can pass
an l3 pointer instead. Since alien caches are fixed in size, we will never
have to reallocate them during the resizing of other slab caches. Simplify
the reallocation logic accordingly.

Signed-off-by: Christoph Lameter
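---

The shape of the conversion at a call site, condensed from the
cpuup_callback hunks below (an illustrative sketch, not a verbatim quote
of the diff; error handling trimmed):

	/* Before: the caller owns the alien cache pointer, attaches it
	 * to the node list and frees any unused copy itself. */
	alien = alloc_alien_cache(node);
	if (!alien)
		goto bad;
	if (!l3->alien) {
		l3->alien = alien;
		alien = NULL;
	}
	free_alien_cache(alien);

	/* After: the caller just passes the l3. alloc_alien_cache()
	 * installs the alien cache itself (setting l3->alien to NULL
	 * and printing a warning if the allocation fails), and
	 * free_alien_cache(l3) both frees and clears l3->alien. */
	alloc_alien_cache(l3, node);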
Index: linux-2.6.18-rc4/mm/slab.c
===================================================================
--- linux-2.6.18-rc4.orig/mm/slab.c	2006-08-08 15:41:58.128883482 -0700
+++ linux-2.6.18-rc4/mm/slab.c	2006-08-08 16:09:16.418314747 -0700
@@ -965,7 +965,7 @@ static int transfer_objects(struct array
 static void *__cache_alloc_node(struct kmem_cache *, gfp_t, int);
 static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
 
-static struct array_cache **alloc_alien_cache(int node)
+static void alloc_alien_cache(struct kmem_list3 *l3, int node)
 {
 	struct array_cache **ac_ptr;
 	int memsize = sizeof(void *) * MAX_NUMNODES;
@@ -983,22 +983,28 @@ static struct array_cache **alloc_alien_
 				for (i--; i <= 0; i--)
 					kfree(ac_ptr[i]);
 				kfree(ac_ptr);
-				return NULL;
+				goto fail;
 			}
 		}
+		l3->alien = ac_ptr;
+		return;
 	}
-	return ac_ptr;
+fail:
+	printk(KERN_ERR "slab: alien cache alloc failed"
+		" continuing without alien cache.\n");
+	l3->alien = NULL;
 }
 
-static void free_alien_cache(struct array_cache **ac_ptr)
+static void free_alien_cache(struct kmem_list3 *l3)
 {
 	int i;
 
-	if (!ac_ptr)
+	if (!l3->alien)
 		return;
 	for_each_node(i)
-		kfree(ac_ptr[i]);
-	kfree(ac_ptr);
+		kfree(l3->alien[i]);
+	kfree(l3->alien);
+	l3->alien = NULL;
 }
 
 static void __drain_alien_cache(struct kmem_cache *cachep,
@@ -1046,6 +1052,9 @@ static void drain_alien_cache(struct kme
 	struct array_cache *ac;
 	unsigned long flags;
 
+	if (!alien)
+		return;
+
 	for_each_online_node(i) {
 		ac = alien[i];
 		if (ac) {
@@ -1094,12 +1103,12 @@ static inline int cache_free_alien(struc
 
 #define drain_alien_cache(cachep, alien) do { } while (0)
 #define reap_alien(cachep, l3) do { } while (0)
 
-static inline struct array_cache **alloc_alien_cache(int node)
+static inline void alloc_alien_cache(struct kmem_list3 *l3, int node)
 {
-	return (struct array_cache **) 0x01020304ul;
+	l3->alien = (struct array_cache **) 0x01020304ul;
 }
 
-static inline void free_alien_cache(struct array_cache **ac_ptr)
+static inline void free_alien_cache(struct kmem_list3 *l3)
 {
 }
@@ -1165,7 +1174,6 @@ static int __cpuinit cpuup_callback(stru
 		list_for_each_entry(cachep, &cache_chain, next) {
 			struct array_cache *nc;
 			struct array_cache *shared;
-			struct array_cache **alien;
 
 			nc = alloc_arraycache(node, cachep->limit,
 						cachep->batchcount);
@@ -1177,12 +1185,10 @@ static int __cpuinit cpuup_callback(stru
 			if (!shared)
 				goto bad;
 
-			alien = alloc_alien_cache(node);
-			if (!alien)
-				goto bad;
 			cachep->array[cpu] = nc;
 			l3 = cachep->nodelists[node];
 			BUG_ON(!l3);
+			alloc_alien_cache(l3, node);
 
 			spin_lock_irq(&l3->list_lock);
 			if (!l3->shared) {
@@ -1193,15 +1199,8 @@ static int __cpuinit cpuup_callback(stru
 				l3->shared = shared;
 				shared = NULL;
 			}
-#ifdef CONFIG_NUMA
-			if (!l3->alien) {
-				l3->alien = alien;
-				alien = NULL;
-			}
-#endif
 			spin_unlock_irq(&l3->list_lock);
 			kfree(shared);
-			free_alien_cache(alien);
 		}
 		mutex_unlock(&cache_chain_mutex);
 		break;
@@ -1263,7 +1262,7 @@ static int __cpuinit cpuup_callback(stru
 			kfree(shared);
 			if (alien) {
 				drain_alien_cache(cachep, alien);
-				free_alien_cache(alien);
+				free_alien_cache(l3);
 			}
free_array_cache:
 			kfree(nc);
@@ -1852,7 +1851,7 @@ static void __kmem_cache_destroy(struct
 		l3 = cachep->nodelists[i];
 		if (l3) {
 			kfree(l3->shared);
-			free_alien_cache(l3->alien);
+			free_alien_cache(l3);
 			kfree(l3);
 		}
 	}
@@ -2311,7 +2310,7 @@ static void drain_cpu_caches(struct kmem
 	check_irq_on();
 	for_each_online_node(node) {
 		l3 = cachep->nodelists[node];
-		if (l3 && l3->alien)
+		if (l3)
 			drain_alien_cache(cachep, l3->alien);
 	}
 
@@ -3531,21 +3530,14 @@ static int alloc_kmemlist(struct kmem_ca
 	int node;
 	struct kmem_list3 *l3;
 	struct array_cache *new_shared;
-	struct array_cache **new_alien;
 
 	for_each_online_node(node) {
-		new_alien = alloc_alien_cache(node);
-		if (!new_alien)
-			goto fail;
-
 		new_shared = alloc_arraycache(node,
 				cachep->shared*cachep->batchcount,
 				0xbaadf00d);
-		if (!new_shared) {
-			free_alien_cache(new_alien);
+		if (!new_shared)
 			goto fail;
-		}
 
 		l3 = cachep->nodelists[node];
 		if (l3) {
@@ -3558,20 +3550,15 @@ static int alloc_kmemlist(struct kmem_ca
 					shared->avail, node);
 
 			l3->shared = new_shared;
-			if (!l3->alien) {
-				l3->alien = new_alien;
-				new_alien = NULL;
-			}
 			l3->free_limit = (1 + nr_cpus_node(node)) *
 					cachep->batchcount + cachep->num;
 			spin_unlock_irq(&l3->list_lock);
+			free_alien_cache(l3);
 			kfree(shared);
-			free_alien_cache(new_alien);
 			continue;
 		}
 		l3 = kmalloc_node(sizeof(struct kmem_list3),
 				GFP_KERNEL, node);
 		if (!l3) {
-			free_alien_cache(new_alien);
 			kfree(new_shared);
 			goto fail;
 		}
@@ -3580,10 +3567,11 @@ static int alloc_kmemlist(struct kmem_ca
 
 		l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
 				((unsigned long)cachep) % REAPTIMEOUT_LIST3;
 		l3->shared = new_shared;
-		l3->alien = new_alien;
 		l3->free_limit = (1 + nr_cpus_node(node)) *
 					cachep->batchcount + cachep->num;
 		cachep->nodelists[node] = l3;
+		alloc_alien_cache(l3, node);
+
 	}
 	return 0;
@@ -3596,7 +3584,7 @@ fail:
 				l3 = cachep->nodelists[node];
 
 				kfree(l3->shared);
-				free_alien_cache(l3->alien);
+				free_alien_cache(l3);
 				kfree(l3);
 				cachep->nodelists[node] = NULL;
 			}