Index: linux-2.6.18-rc1/mm/slab.c
===================================================================
--- linux-2.6.18-rc1.orig/mm/slab.c	2006-07-11 22:27:03.604860712 -0700
+++ linux-2.6.18-rc1/mm/slab.c	2006-07-11 22:54:58.913368309 -0700
@@ -295,9 +295,9 @@ struct kmem_list3 {
         unsigned int colour_next;       /* Per-node cache coloring */
         spinlock_t list_lock;
         struct array_cache *shared;     /* shared per node */
-        struct array_cache **alien;     /* on other nodes */
         unsigned long next_reap;        /* updated without locking */
         int free_touched;               /* updated without locking */
+        struct array_cache *alien[MAX_NUMNODES];
 };
 
 /*
@@ -351,7 +351,7 @@ static void kmem_list3_init(struct kmem_
         INIT_LIST_HEAD(&parent->slabs_partial);
         INIT_LIST_HEAD(&parent->slabs_free);
         parent->shared = NULL;
-        parent->alien = NULL;
+        memset(parent->alien, 0, sizeof(parent->alien));
         parent->colour_next = 0;
         spin_lock_init(&parent->list_lock);
         parent->free_objects = 0;
@@ -928,42 +928,14 @@ static int transfer_objects(struct array
 static void *__cache_alloc_node(struct kmem_cache *, gfp_t, int);
 static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
 
-static struct array_cache **alloc_alien_cache(int node, int limit)
+static void free_alien_cache(struct kmem_list3 *l3)
 {
-        struct array_cache **ac_ptr;
-        int memsize = sizeof(void *) * MAX_NUMNODES;
         int i;
 
-        if (limit > 1)
-                limit = 12;
-        ac_ptr = kmalloc_node(memsize, GFP_KERNEL, node);
-        if (ac_ptr) {
-                for_each_node(i) {
-                        if (i == node || !node_online(i)) {
-                                ac_ptr[i] = NULL;
-                                continue;
-                        }
-                        ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d);
-                        if (!ac_ptr[i]) {
-                                for (i--; i <= 0; i--)
-                                        kfree(ac_ptr[i]);
-                                kfree(ac_ptr);
-                                return NULL;
-                        }
-                }
+        for_each_node(i) {
+                kfree(l3->alien[i]);
+                l3->alien[i] = NULL;
         }
-        return ac_ptr;
-}
-
-static void free_alien_cache(struct array_cache **ac_ptr)
-{
-        int i;
-
-        if (!ac_ptr)
-                return;
-        for_each_node(i)
-                kfree(ac_ptr[i]);
-        kfree(ac_ptr);
 }
 
 static void __drain_alien_cache(struct kmem_cache *cachep,
@@ -993,26 +965,22 @@ static void __drain_alien_cache(struct k
 static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
 {
         int node = __get_cpu_var(reap_node);
+        struct array_cache *ac = l3->alien[node];
 
-        if (l3->alien) {
-                struct array_cache *ac = l3->alien[node];
-
-                if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
-                        __drain_alien_cache(cachep, ac, node);
-                        spin_unlock_irq(&ac->lock);
-                }
+        if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
+                __drain_alien_cache(cachep, ac, node);
+                spin_unlock_irq(&ac->lock);
         }
 }
 
-static void drain_alien_cache(struct kmem_cache *cachep,
-                                struct array_cache **alien)
+static void drain_alien_cache(struct kmem_cache *cache, struct kmem_list3 *l3)
 {
         int i = 0;
         struct array_cache *ac;
         unsigned long flags;
 
         for_each_online_node(i) {
-                ac = alien[i];
+                ac = l3->alien[i];
                 if (ac) {
                         spin_lock_irqsave(&ac->lock, flags);
                         __drain_alien_cache(cachep, ac, i);
@@ -1039,7 +1007,15 @@ static inline int cache_free_alien(struc
 
         l3 = cachep->nodelists[node];
         STATS_INC_NODEFREES(cachep);
-        if (l3->alien && l3->alien[nodeid]) {
+
+        if (!l3->alien[nodeid])
+                /*
+                 * WARNING: We are allocating with GFP_KERNEL!
+                 * We need to support different allocation flags!
+                 */
+                l3->alien[nodeid] = alloc_arraycache(node, 12, 0xbaadf00d);
+
+        if (l3->alien[nodeid]) {
                 alien = l3->alien[nodeid];
                 spin_lock_nested(&alien->lock, nesting);
                 if (unlikely(alien->avail == alien->limit)) {
@@ -1061,12 +1037,7 @@ static inline int cache_free_alien(struc
 #define drain_alien_cache(cachep, alien) do { } while (0)
 #define reap_alien(cachep, l3) do { } while (0)
 
-static inline struct array_cache **alloc_alien_cache(int node, int limit)
-{
-        return (struct array_cache **) 0x01020304ul;
-}
-
-static inline void free_alien_cache(struct array_cache **ac_ptr)
+static inline void free_alien_cache(struct kmem_list3 *l3)
 {
 }
 
@@ -1133,7 +1104,6 @@ static int __devinit cpuup_callback(stru
                 list_for_each_entry(cachep, &cache_chain, next) {
                         struct array_cache *nc;
                         struct array_cache *shared;
-                        struct array_cache **alien;
 
                         nc = alloc_arraycache(node, cachep->limit,
                                                 cachep->batchcount);
@@ -1145,9 +1115,6 @@ static int __devinit cpuup_callback(stru
                         if (!shared)
                                 goto bad;
 
-                        alien = alloc_alien_cache(node, cachep->limit);
-                        if (!alien)
-                                goto bad;
                         cachep->array[cpu] = nc;
                         l3 = cachep->nodelists[node];
                         BUG_ON(!l3);
@@ -1161,15 +1128,8 @@ static int __devinit cpuup_callback(stru
                                 l3->shared = shared;
                                 shared = NULL;
                         }
-#ifdef CONFIG_NUMA
-                        if (!l3->alien) {
-                                l3->alien = alien;
-                                alien = NULL;
-                        }
-#endif
                         spin_unlock_irq(&l3->list_lock);
                         kfree(shared);
-                        free_alien_cache(alien);
                 }
                 mutex_unlock(&cache_chain_mutex);
                 break;
@@ -1192,7 +1152,6 @@ static int __devinit cpuup_callback(stru
                 list_for_each_entry(cachep, &cache_chain, next) {
                         struct array_cache *nc;
                         struct array_cache *shared;
-                        struct array_cache **alien;
                         cpumask_t mask;
 
                         mask = node_to_cpumask(node);
@@ -1223,16 +1182,11 @@ static int __devinit cpuup_callback(stru
                                 l3->shared = NULL;
                         }
 
-                        alien = l3->alien;
-                        l3->alien = NULL;
-
+                        drain_alien_cache(l3);
+                        free_alien_cache(l3);
                         spin_unlock_irq(&l3->list_lock);
 
                         kfree(shared);
-                        if (alien) {
-                                drain_alien_cache(cachep, alien);
-                                free_alien_cache(alien);
-                        }
 free_array_cache:
                         kfree(nc);
                 }
@@ -2264,8 +2218,8 @@ static void drain_cpu_caches(struct kmem
         check_irq_on();
         for_each_online_node(node) {
                 l3 = cachep->nodelists[node];
-                if (l3 && l3->alien)
-                        drain_alien_cache(cachep, l3->alien);
+                if (l3)
+                        drain_alien_cache(l3);
         }
 
         for_each_online_node(node) {
@@ -2407,7 +2361,7 @@ int kmem_cache_destroy(struct kmem_cache
                 l3 = cachep->nodelists[i];
                 if (l3) {
                         kfree(l3->shared);
-                        free_alien_cache(l3->alien);
+                        free_alien_cache(l3);
                         kfree(l3);
                 }
         }
@@ -3491,28 +3445,21 @@ const char *kmem_cache_name(struct kmem_
 EXPORT_SYMBOL_GPL(kmem_cache_name);
 
 /*
- * This initializes kmem_list3 or resizes varioius caches for all nodes.
+ * This initializes kmem_list3 or resizes various caches for all nodes.
  */
 static int alloc_kmemlist(struct kmem_cache *cachep)
 {
         int node;
         struct kmem_list3 *l3;
         struct array_cache *new_shared;
-        struct array_cache **new_alien;
 
         for_each_online_node(node) {
 
-                new_alien = alloc_alien_cache(node, cachep->limit);
-                if (!new_alien)
-                        goto fail;
-
                 new_shared = alloc_arraycache(node,
                                 cachep->shared*cachep->batchcount,
                                         0xbaadf00d);
-                if (!new_shared) {
-                        free_alien_cache(new_alien);
+                if (!new_shared)
                         goto fail;
-                }
 
                 l3 = cachep->nodelists[node];
                 if (l3) {
@@ -3525,20 +3472,14 @@ static int alloc_kmemlist(struct kmem_ca
                                         shared->avail, node);
                         l3->shared = new_shared;
-                        if (!l3->alien) {
-                                l3->alien = new_alien;
-                                new_alien = NULL;
-                        }
                         l3->free_limit = (1 + nr_cpus_node(node)) *
                                         cachep->batchcount + cachep->num;
                         spin_unlock_irq(&l3->list_lock);
                         kfree(shared);
-                        free_alien_cache(new_alien);
                         continue;
                 }
                 l3 = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, node);
                 if (!l3) {
-                        free_alien_cache(new_alien);
                         kfree(new_shared);
                         goto fail;
                 }
 
@@ -3547,7 +3488,6 @@ static int alloc_kmemlist(struct kmem_ca
                 l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
                                 ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
                 l3->shared = new_shared;
-                l3->alien = new_alien;
                 l3->free_limit = (1 + nr_cpus_node(node)) *
                                         cachep->batchcount + cachep->num;
                 cachep->nodelists[node] = l3;
@@ -3563,7 +3503,6 @@ fail:
                                 l3 = cachep->nodelists[node];
 
                                 kfree(l3->shared);
-                                free_alien_cache(l3->alien);
                                 kfree(l3);
                                 cachep->nodelists[node] = NULL;
                         }
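
The sketch below (not part of the patch) models in plain user-space C the pattern the patch introduces in cache_free_alien(): a fixed array with one slot per possible node, where each alien cache is allocated lazily on the first cross-node free instead of up front at CPU bring-up, and drained when it fills. All names here (node_list, alien_cache, remote_free, NODES) are invented for illustration and are not taken from mm/slab.c.

/*
 * Illustrative sketch only -- models the lazy per-node alien-cache
 * allocation done by the patched cache_free_alien().
 */
#include <stdio.h>
#include <stdlib.h>

#define NODES           4       /* stands in for MAX_NUMNODES */
#define ALIEN_LIMIT     12      /* the fixed limit the patch passes to alloc_arraycache() */

struct alien_cache {
        unsigned int avail;
        unsigned int limit;
        void *objs[ALIEN_LIMIT];
};

struct node_list {                      /* stands in for struct kmem_list3 */
        struct alien_cache *alien[NODES];
};

/* In the kernel this would hand the batched objects back to their home node. */
static void drain(struct alien_cache *ac, int nodeid)
{
        printf("draining %u objects back to node %d\n", ac->avail, nodeid);
        ac->avail = 0;
}

/* Model of the patched cache_free_alien(): allocate the slot on first use. */
static int remote_free(struct node_list *l3, int nodeid, void *obj)
{
        struct alien_cache *ac = l3->alien[nodeid];

        if (!ac) {
                /* lazy allocation; the real patch still uses GFP_KERNEL here */
                ac = calloc(1, sizeof(*ac));
                if (!ac)
                        return -1;      /* caller would fall back to a local free */
                ac->limit = ALIEN_LIMIT;
                l3->alien[nodeid] = ac;
        }
        if (ac->avail == ac->limit)
                drain(ac, nodeid);
        ac->objs[ac->avail++] = obj;
        return 0;
}

int main(void)
{
        struct node_list l3 = { { NULL } };
        int i;

        for (i = 0; i < 30; i++)        /* 30 frees destined for "node 2" */
                remote_free(&l3, 2, &i);
        for (i = 0; i < NODES; i++)     /* mirrors free_alien_cache() */
                free(l3.alien[i]);
        return 0;
}

The point of the structure is that the pointer array formerly kmalloc'd per node (alloc_alien_cache) disappears; the fixed alien[] array lives inside the per-node list structure, so setup and teardown reduce to memset() and a loop of kfree() calls.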