Index: linux-2.6.18-rc1/mm/slab.c
===================================================================
--- linux-2.6.18-rc1.orig/mm/slab.c	2006-07-11 23:02:44.201977426 -0700
+++ linux-2.6.18-rc1/mm/slab.c	2006-07-11 23:20:36.970565363 -0700
@@ -310,8 +310,7 @@ struct kmem_list3 __initdata initkmem_li
 #define	SIZE_AC 1
 #define	SIZE_L3 (1 + MAX_NUMNODES)
 
-static int drain_freelist(struct kmem_cache *cache,
-			struct kmem_list3 *l3, int tofree);
+static int drain_freelist(struct kmem_list3 *l3, int tofree);
 static void free_block(struct kmem_cache *cachep, void **objpp, int len,
 			int node);
 static void enable_cpucache(struct kmem_cache *cachep);
@@ -991,7 +990,7 @@ static void __drain_alien_cache(struct k
 /*
  * Called from cache_reap() to regularly drain alien caches round robin.
  */
-static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
+static void reap_alien(struct kmem_list3 *l3)
 {
 	int node = __get_cpu_var(reap_node);
 
@@ -999,7 +998,7 @@ static void reap_alien(struct kmem_cache
 		struct array_cache *ac = l3->alien[node];
 
 		if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
-			__drain_alien_cache(cachep, ac, node);
+			__drain_alien_cache(l3->cache, ac, node);
 			spin_unlock_irq(&ac->lock);
 		}
 	}
@@ -1266,24 +1265,22 @@ static struct notifier_block __cpuinitda
 /*
  * swap the static kmem_list3 with kmalloced memory
  */
-static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list,
-			int nodeid)
+static void copy_list(struct kmem_list3 *l3, int nodeid)
 {
-	struct kmem_list3 *ptr;
+	struct kmem_list3 *l3_new;
 
-	BUG_ON(cachep->nodelists[nodeid] != list);
-	ptr = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, nodeid);
-	BUG_ON(!ptr);
+	l3_new = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, nodeid);
+	BUG_ON(!l3_new);
 
 	local_irq_disable();
-	memcpy(ptr, list, sizeof(struct kmem_list3));
+	memcpy(l3_new, l3, sizeof(struct kmem_list3));
 
 	/*
 	 * Do not assume that spinlocks can be initialized via memcpy:
 	 */
-	spin_lock_init(&ptr->list_lock);
+	spin_lock_init(&l3_new->list_lock);
 
-	MAKE_ALL_LISTS(cachep, ptr, nodeid);
-	cachep->nodelists[nodeid] = ptr;
+	MAKE_ALL_LISTS(l3->cache, l3_new, nodeid);
+	l3->cache->nodelists[nodeid] = l3_new;
 	local_irq_enable();
 }
@@ -1444,16 +1441,13 @@ void __init kmem_cache_init(void)
 	{
 		int node;
 		/* Replace the static kmem_list3 structures for the boot cpu */
-		init_list(&cache_cache, &initkmem_list3[CACHE_CACHE],
-			  numa_node_id());
+		copy_list(&initkmem_list3[CACHE_CACHE], numa_node_id());
 
 		for_each_online_node(node) {
-			init_list(malloc_sizes[INDEX_AC].cs_cachep,
-				  &initkmem_list3[SIZE_AC + node], node);
+			copy_list(&initkmem_list3[SIZE_AC + node], node);
 
 			if (INDEX_AC != INDEX_L3) {
-				init_list(malloc_sizes[INDEX_L3].cs_cachep,
-					  &initkmem_list3[SIZE_L3 + node],
+				copy_list(&initkmem_list3[SIZE_L3 + node],
 					  node);
 			}
 		}
@@ -1811,10 +1805,13 @@ static void set_up_list3s(struct kmem_ca
 	int node;
 
 	for_each_online_node(node) {
-		cachep->nodelists[node] = &initkmem_list3[index + node];
-		cachep->nodelists[node]->next_reap = jiffies +
-		    REAPTIMEOUT_LIST3 +
+		struct kmem_list3 *l3 = &initkmem_list3[index + node];
+
+		l3->cache = cachep;
+		l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
 		    ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
+		cachep->nodelists[node] = l3;
+
 	}
 }
 
@@ -2240,7 +2237,7 @@ static void check_spinlock_acquired_node
 #define check_spinlock_acquired_node(x, y) do { } while(0)
 #endif
 
-static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
+static void drain_array(struct kmem_list3 *l3,
 			struct array_cache *ac,
 			int force, int node);
 
@@ -2274,7 +2271,7 @@ static void drain_cpu_caches(struct kmem
 	for_each_online_node(node) {
 		l3 = cachep->nodelists[node];
 		if (l3)
-			drain_array(cachep, l3, l3->shared, 1, node);
+			drain_array(l3, l3->shared, 1, node);
 	}
 }
 
@@ -2284,8 +2281,7 @@ static void drain_cpu_caches(struct kmem
  *
  * Returns the actual number of slabs released.
  */
-static int drain_freelist(struct kmem_cache *cache,
-		struct kmem_list3 *l3, int tofree)
+static int drain_freelist(struct kmem_list3 *l3, int tofree)
 {
 	struct list_head *p;
 	int nr_freed;
@@ -2310,9 +2306,9 @@ static int drain_freelist(struct kmem_ca
 		 * Safe to drop the lock. The slab is no longer linked
 		 * to the cache.
 		 */
-		l3->free_objects -= cache->num;
+		l3->free_objects -= l3->cache->num;
 		spin_unlock_irq(&l3->list_lock);
-		slab_destroy(cache, slabp);
+		slab_destroy(l3->cache, slabp);
 		nr_freed++;
 	}
 out:
@@ -2332,7 +2328,7 @@ static int __cache_shrink(struct kmem_ca
 		if (!l3)
 			continue;
 
-		drain_freelist(cachep, l3, l3->free_objects);
+		drain_freelist(l3, l3->free_objects);
 
 		ret += !list_empty(&l3->slabs_full) ||
 			!list_empty(&l3->slabs_partial);
@@ -3699,7 +3695,7 @@ static void enable_cpucache(struct kmem_
  * necessary. Note that the l3 listlock also protects the array_cache
  * if drain_array() is used on the shared array.
  */
-void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
+void drain_array(struct kmem_list3 *l3,
 			 struct array_cache *ac, int force, int node)
 {
 	int tofree;
@@ -3714,7 +3710,7 @@ void drain_array(struct kmem_cach
 		tofree = force ? ac->avail : (ac->limit + 4) / 5;
 		if (tofree > ac->avail)
 			tofree = (ac->avail + 1) / 2;
-		free_block(cachep, ac->entry, tofree, node);
+		free_block(l3->cache, ac->entry, tofree, node);
 		ac->avail -= tofree;
 		memmove(ac->entry, &(ac->entry[tofree]),
 			sizeof(void *) * ac->avail);
@@ -3758,9 +3754,9 @@ static void cache_reap(void *unused)
 		 */
 		l3 = searchp->nodelists[node];
 
-		reap_alien(searchp, l3);
+		reap_alien(l3);
 
-		drain_array(searchp, l3, cpu_cache_get(searchp), 0, node);
+		drain_array(l3, cpu_cache_get(searchp), 0, node);
 
 		/*
 		 * These are racy checks but it does not matter
@@ -3771,14 +3767,14 @@ static void cache_reap(void *unused)
 
 		l3->next_reap = jiffies + REAPTIMEOUT_LIST3;
 
-		drain_array(searchp, l3, l3->shared, 0, node);
+		drain_array(l3, l3->shared, 0, node);
 
 		if (l3->free_touched)
 			l3->free_touched = 0;
 		else {
 			int freed;
 
-			freed = drain_freelist(searchp, l3, (l3->free_limit +
+			freed = drain_freelist(l3, (l3->free_limit +
 				5 * searchp->num - 1) / (5 * searchp->num));
 			STATS_ADD_REAPED(searchp, freed);
 		}
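Note on context: every converted caller now recovers the kmem_cache through l3->cache, and set_up_list3s() stores it, but none of the hunks above touch the struct kmem_list3 definition itself, so the back-pointer is presumably added elsewhere in the series. A rough sketch of the assumed field follows; the existing members are elided and only the name "cache" is taken from the uses above, everything else is illustrative:

/* Sketch only -- assumed companion change, not part of the hunks above. */
struct kmem_list3 {
	struct list_head slabs_partial;	/* existing fields unchanged */
	struct list_head slabs_full;
	struct list_head slabs_free;
	/* ... */
	struct kmem_cache *cache;	/* back-pointer to the owning cache;
					 * set in set_up_list3s() and carried
					 * into the kmalloc'ed copy by the
					 * memcpy() in copy_list()
					 */
};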