---
 mm/slub.c |   53 +++++------------------------------------------------
 1 file changed, 5 insertions(+), 48 deletions(-)

Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c	2008-04-02 18:59:06.933756608 -0700
+++ linux-2.6/mm/slub.c	2008-04-02 18:59:30.937505873 -0700
@@ -2309,7 +2309,7 @@ static int kmem_cache_open(struct kmem_c
 		goto error;
 
 	s->refcount = 1;
-	s->defrag_ratio = 30;
+	s->defrag_ratio = 20;
 #ifdef CONFIG_NUMA
 	s->remote_node_defrag_ratio = 100;
 #endif
@@ -2927,50 +2927,6 @@ static unsigned long __kmem_cache_shrink
 	return freed;
 }
 
-static unsigned long __kmem_cache_defrag(struct kmem_cache *s, int node)
-{
-	unsigned long guestimate_objects_in_full_slabs;
-	unsigned long ratio;
-	struct kmem_cache_node *n = get_node(s, node);
-#ifdef CONFIG_SLUB_DEBUG
-	unsigned long nr_slabs = atomic_long_read(&n->nr_slabs);
-	unsigned long total = atomic_long_read(&n->total_objects);
-#else
-	unsigned long nr_slabs = 2 * n->nr_partial;
-	unsigned long total = nr_slabs * oo_objects(s->oo);
-#endif
-	/*
-	 * An insignificant number of partial slabs means that the
-	 * slab cache does not need any defragmentation.
-	 */
-	if (n->nr_partial <= MAX_PARTIAL)
-		return 0;
-
-	guestimate_objects_in_full_slabs = (nr_slabs - n->nr_partial)
-						* oo_objects(s->oo);
-	/*
-	 * Worst case calculation: If we would be over the ratio
-	 * even if all partial slabs would only have one object
-	 * then we can skip the next test that requires a scan
-	 * through all the partial page structs to sum up the actual
-	 * number of objects in the partial slabs.
-	 */
-	ratio = (guestimate_objects_in_full_slabs + 1 * n->nr_partial) * 100
-							/ total;
-	if (ratio > s->defrag_ratio)
-		return 0;
-
-	/*
-	 * Now for the real calculation. If usage ratio is more than required
-	 * then no defragmentation is necessary.
-	 */
-	ratio = (total - count_partial(n, count_free)) * 100 / total;
-	if (ratio > s->defrag_ratio)
-		return 0;
-
-	return __kmem_cache_shrink(s, n);
-}
-
 /*
  * Defrag slabs conditional on the amount of fragmentation on each node.
  */
@@ -2993,9 +2949,10 @@ int kmem_cache_defrag(int node)
 			int nid;
 
 			for_each_node_state(nid, N_NORMAL_MEMORY)
-				slabs += __kmem_cache_defrag(s, nid);
+				slabs += __kmem_cache_shrink(s, nid,
+								MAX_PARTIAL);
 		} else
-			slabs += __kmem_cache_defrag(s, node);
+			slabs += __kmem_cache_shrink(s, node, MAX_PARTIAL);
 	}
 	up_read(&slub_lock);
 	return slabs;
@@ -3013,7 +2970,7 @@ int kmem_cache_shrink(struct kmem_cache
 
 	flush_all(s);
 	for_each_node_state(node, N_NORMAL_MEMORY)
-		__kmem_cache_shrink(s, get_node(s, node));
+		__kmem_cache_shrink(s, node, 0);
 
 	return 0;
 }
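
Note, as illustration only and not part of the patch: the removed __kmem_cache_defrag() gated shrinking on an estimated object-usage ratio compared against s->defrag_ratio, while the replacement simply passes a minimum partial-list length to __kmem_cache_shrink() (MAX_PARTIAL from kmem_cache_defrag(), 0 from kmem_cache_shrink()); the changed __kmem_cache_shrink() prototype itself is not shown in these hunks. The standalone userspace sketch below contrasts only the worst-case pre-check of the removed heuristic with the new threshold test; the values and the helper names old_would_defrag()/new_would_shrink() are made up for the demo, and MAX_PARTIAL is a stand-in for SLUB's internal constant.

#include <stdio.h>

#define MAX_PARTIAL 10	/* stand-in for SLUB's internal constant */

/*
 * Old gate, following the removed worst-case estimate: assume every
 * partial slab holds a single object; skip defrag if the usage ratio
 * would still exceed defrag_ratio.
 */
static int old_would_defrag(unsigned long nr_slabs, unsigned long nr_partial,
		unsigned long objs_per_slab, unsigned long defrag_ratio)
{
	unsigned long total = nr_slabs * objs_per_slab;
	unsigned long full_objs = (nr_slabs - nr_partial) * objs_per_slab;
	unsigned long ratio;

	if (nr_partial <= MAX_PARTIAL)
		return 0;
	ratio = (full_objs + 1 * nr_partial) * 100 / total;
	return ratio <= defrag_ratio;
}

/* New gate: shrink whenever the partial list exceeds the threshold. */
static int new_would_shrink(unsigned long nr_partial, unsigned long min_partial)
{
	return nr_partial > min_partial;
}

int main(void)
{
	unsigned long nr_slabs = 100, nr_partial = 40, objs_per_slab = 32;

	printf("old gate: %d, new gate: %d\n",
		old_would_defrag(nr_slabs, nr_partial, objs_per_slab, 20),
		new_would_shrink(nr_partial, MAX_PARTIAL));
	return 0;
}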