---
 mm/slub.c |   39 ++++++++++++++++++++++-----------------
 1 file changed, 22 insertions(+), 17 deletions(-)

Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c	2008-03-26 18:02:14.376668605 -0700
+++ linux-2.6/mm/slub.c	2008-03-26 18:06:44.207121229 -0700
@@ -2731,6 +2731,8 @@ void kmem_cache_setup_defrag(struct kmem
 	void *(*get)(struct kmem_cache *, int nr, void **),
 	void (*kick)(struct kmem_cache *, int nr, void **, void *private))
 {
+	int max_objects = oo_objects(s->max);
+
 	/*
 	 * Defragmentable slabs must have a ctor otherwise objects may be
 	 * in an undetermined state after they are allocated.
@@ -2740,8 +2742,8 @@ void kmem_cache_setup_defrag(struct kmem
 	s->kick = kick;
 	down_write(&slub_lock);
 	list_move(&s->list, &slab_caches);
-	if (s->max_objects > max_defrag_slab_objects)
-		max_defrag_slab_objects = s->max_objects;
+	if (max_objects > max_defrag_slab_objects)
+		max_defrag_slab_objects = max_objects;
 	up_write(&slub_lock);
 }
 EXPORT_SYMBOL(kmem_cache_setup_defrag);
@@ -2803,7 +2805,7 @@ static int kmem_cache_vacate(struct page
 	BUG_ON(!SlabFrozen(page));
 
 	s = page->slab;
-	objects = slab_objects(s, page);
+	objects = page->objects;
 	map = scratch + max_defrag_slab_objects * sizeof(void **);
 	if (!page->inuse || !s->kick)
 		goto out;
@@ -2889,7 +2891,7 @@ static unsigned long __kmem_cache_shrink
 
 	spin_lock_irqsave(&n->list_lock, flags);
 	list_for_each_entry_safe(page, page2, &n->partial, lru) {
-		if (page->inuse > slab_objects(s, page) / 4)
+		if (page->inuse > page->objects / 4)
 			continue;
 		if (!slab_trylock(page))
 			continue;
@@ -2929,7 +2931,13 @@ static unsigned long __kmem_cache_defrag
 	unsigned long guestimate_objects_in_full_slabs;
 	unsigned long ratio;
 	struct kmem_cache_node *n = get_node(s, node);
-
+#ifdef CONFIG_SLUB_DEBUG
+	unsigned long nr_slabs = atomic_long_read(&n->nr_slabs);
+	unsigned long total = atomic_long_read(&n->total_objects);
+#else
+	unsigned long nr_slabs = 2 * n->nr_partial;
+	unsigned long total = nr_slabs * oo_objects(s->oo);
+#endif
 	/*
 	 * An insignificant number of partial slabs means that the
 	 * slab cache does not need any defragmentation.
@@ -2937,9 +2945,8 @@ static unsigned long __kmem_cache_defrag
 	if (n->nr_partial <= MAX_PARTIAL)
 		return 0;
 
-	guestimate_objects_in_full_slabs =
-			(atomic_long_read(&n->nr_slabs) - n->nr_partial)
-						* s->max_objects;
+	guestimate_objects_in_full_slabs = (nr_slabs - n->nr_partial)
+						* oo_objects(s->oo);
 	/*
 	 * Worst case calculation: If we would be over the ratio
 	 * even if all partial slabs would only have one object
@@ -2948,7 +2955,7 @@ static unsigned long __kmem_cache_defrag
 	 * number of objects in the partial slabs.
 	 */
 	ratio = (guestimate_objects_in_full_slabs + 1 * n->nr_partial) * 100
-			/ atomic_long_read(&n->total_objects);
+			/ total;
 	if (ratio > s->defrag_ratio)
 		return 0;
 
@@ -2956,13 +2963,11 @@ static unsigned long __kmem_cache_defrag
 	 * Now for the real calculation. If usage ratio is more than required
 	 * then no defragmentation is necessary.
 	 */
-	ratio = (atomic_long_read(&n->total_objects) -
-				count_partial(n, count_free)) * 100
-			/ atomic_long_read(&n->total_objects);
+	ratio = (total - count_partial(n, count_free)) * 100 / total;
 	if (ratio > s->defrag_ratio)
 		return 0;
 
-	return __kmem_cache_shrink(s, n) << s->order;
+	return __kmem_cache_shrink(s, n);
 }
 
 /*
@@ -2971,7 +2976,7 @@ static unsigned long __kmem_cache_defrag
 int kmem_cache_defrag(int node)
 {
 	struct kmem_cache *s;
-	unsigned long pages = 0;
+	unsigned long slabs = 0;
 
 	/*
 	 * kmem_cache_defrag may be called from the reclaim path which may be
@@ -2987,12 +2992,12 @@ int kmem_cache_defrag(int node)
 			int nid;
 
 			for_each_node_state(nid, N_NORMAL_MEMORY)
-				pages += __kmem_cache_defrag(s, nid);
+				slabs += __kmem_cache_defrag(s, nid);
 
 		} else
-			pages += __kmem_cache_defrag(s, node);
+			slabs += __kmem_cache_defrag(s, node);
 	}
 	up_read(&slub_lock);
-	return pages;
+	return slabs;
 }
 EXPORT_SYMBOL(kmem_cache_defrag);
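
For review, the two-stage usage-ratio heuristic that __kmem_cache_defrag() computes
with the new oo_objects()/page->objects based values can be illustrated with the
standalone userspace sketch below. All numbers and the simplified variables are
invented for the example; the real code takes nr_slabs, nr_partial and the object
counts from struct kmem_cache_node and the threshold from s->defrag_ratio.

/*
 * Illustration of the usage-ratio checks in __kmem_cache_defrag().
 * Values are made up for the example; not taken from the patch.
 */
#include <stdio.h>

int main(void)
{
	unsigned long nr_slabs = 100;		/* slabs on this node */
	unsigned long nr_partial = 80;		/* partially filled slabs */
	unsigned long objects_per_slab = 32;	/* oo_objects(s->oo) */
	unsigned long total = nr_slabs * objects_per_slab;
	unsigned long free_in_partial = 2400;	/* count_partial(n, count_free) */
	unsigned long defrag_ratio = 30;	/* s->defrag_ratio, in percent */
	unsigned long guess, ratio;

	/*
	 * Stage 1: worst case. Assume every partial slab holds only a
	 * single object. If usage is already above the ratio we can
	 * skip the more expensive exact count.
	 */
	guess = (nr_slabs - nr_partial) * objects_per_slab;
	ratio = (guess + 1 * nr_partial) * 100 / total;
	if (ratio > defrag_ratio) {
		printf("worst-case usage %lu%% > %lu%%, skip node\n",
			ratio, defrag_ratio);
		return 0;
	}

	/* Stage 2: exact ratio of allocated objects to total capacity. */
	ratio = (total - free_in_partial) * 100 / total;
	printf("exact usage %lu%%: %s\n", ratio,
		ratio > defrag_ratio ? "skip node" : "defragment node");
	return 0;
}

With these example values the worst-case estimate (22%) stays below the 30%
threshold, so the exact ratio is computed; at 25% usage the node would be
passed to __kmem_cache_shrink(), which after this patch reports the number of
slabs freed rather than pages.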