diff --git a/mm/slub.c b/mm/slub.c
index 39ebe57..2f44a75 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2727,13 +2727,13 @@ void kmem_cache_setup_defrag(struct kmem_cache *s,
 	s->kick = kick;
 	down_write(&slub_lock);
 	list_move(&s->list, &slab_caches);
-	if (s->objects > max_defrag_slab_objects)
-		max_defrag_slab_objects = s->objects;
+	if (s->max_objects > max_defrag_slab_objects)
+		max_defrag_slab_objects = s->max_objects;
 	up_write(&slub_lock);
 }
 EXPORT_SYMBOL(kmem_cache_setup_defrag);
 
-static unsigned long count_partial(struct kmem_cache_node *n)
+static unsigned long count_partial_inuse(struct kmem_cache_node *n)
 {
 	unsigned long flags;
 	unsigned long x = 0;
@@ -2789,7 +2789,7 @@ static int kmem_cache_vacate(struct page *page, void *scratch)
 	BUG_ON(!SlabFrozen(page));
 
 	s = page->slab;
-	objects = s->objects;
+	objects = slab_objects(s, page);
 	map = scratch + max_defrag_slab_objects * sizeof(void **);
 	if (!page->inuse || !s->kick || !SlabKickable(page))
 		goto out;
@@ -2801,7 +2801,7 @@ static int kmem_cache_vacate(struct page *page, void *scratch)
 
 	count = 0;
 	memset(vector, 0, objects * sizeof(void **));
-	for_each_object(p, s, addr)
+	for_each_object(p, s, addr, objects)
 		if (test_bit(slab_index(p, s, addr), map))
 			vector[count++] = p;
 
@@ -2877,7 +2877,7 @@ static unsigned long __kmem_cache_shrink(struct kmem_cache *s,
 
 	spin_lock_irqsave(&n->list_lock, flags);
 	list_for_each_entry_safe(page, page2, &n->partial, lru) {
-		if (page->inuse > s->objects / 4 ||
+		if (page->inuse > slab_objects(s, page) / 4 ||
 			(s->kick && !SlabKickable(page)))
 			continue;
 		if (!slab_trylock(page))
@@ -2926,10 +2926,10 @@ static unsigned long __kmem_cache_defrag(struct kmem_cache *s, int node)
 	if (n->nr_partial <= MAX_PARTIAL)
 		return 0;
 
-	capacity = atomic_long_read(&n->nr_slabs) * s->objects;
+	capacity = atomic_long_read(&n->nr_slabs) * s->max_objects;
 	objects_in_full_slabs =
 			(atomic_long_read(&n->nr_slabs) - n->nr_partial)
-							* s->objects;
+							* s->max_objects;
 	/*
 	 * Worst case calculation: If we would be over the ratio
 	 * even if all partial slabs would only have one object
@@ -2945,7 +2945,8 @@ static unsigned long __kmem_cache_defrag(struct kmem_cache *s, int node)
 	 * Now for the real calculation. If usage ratio is more than required
 	 * then no defragmentation is necessary.
	 */
-	ratio = (objects_in_full_slabs + count_partial(n)) * 100 / capacity;
+	ratio = (objects_in_full_slabs +
+			count_partial_inuse(n)) * 100 / capacity;
 	if (ratio > s->defrag_ratio)
 		return 0;
 