Variable Page Cache Size: Fix up reclaim counters

We can now reclaim larger pages. Adjust the VM counters to deal with it.

Signed-off-by: Christoph Lameter

---
 mm/vmscan.c |   17 +++++++++--------
 1 file changed, 9 insertions(+), 8 deletions(-)

Index: linux-2.6.21-rc7-mm1/mm/vmscan.c
===================================================================
--- linux-2.6.21-rc7-mm1.orig/mm/vmscan.c	2007-04-24 13:19:13.000000000 -0700
+++ linux-2.6.21-rc7-mm1/mm/vmscan.c	2007-04-25 08:06:30.000000000 -0700
@@ -458,7 +458,7 @@ static unsigned long shrink_page_list(st
 
 		VM_BUG_ON(PageActive(page));
 
-		sc->nr_scanned++;
+		sc->nr_scanned += compound_pages(page);
 
 		/*
 		 * MADV_DONTNEED pages get reclaimed lazily, unless the
@@ -483,7 +483,7 @@ static unsigned long shrink_page_list(st
 
 		/* Double the slab pressure for mapped and swapcache pages */
 		if (page_mapped(page) || PageSwapCache(page))
-			sc->nr_scanned++;
+			sc->nr_scanned += compound_pages(page);
 
 		if (PageWriteback(page))
 			goto keep_locked;
@@ -587,7 +587,7 @@ static unsigned long shrink_page_list(st
 
 free_it:
 		unlock_page(page);
-		nr_reclaimed++;
+		nr_reclaimed += compound_pages(page);
 		if (!pagevec_add(&freed_pvec, page))
 			__pagevec_release_nonlru(&freed_pvec);
 		continue;
@@ -671,7 +671,7 @@ static unsigned long isolate_lru_pages(u
 	unsigned long nr_taken = 0;
 	unsigned long scan;
 
-	for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
+	for (scan = 0; scan < nr_to_scan && !list_empty(src); ) {
 		struct page *page;
 		unsigned long pfn;
 		unsigned long end_pfn;
@@ -686,7 +686,7 @@ static unsigned long isolate_lru_pages(u
 		switch (__isolate_lru_page(page, active)) {
 		case 0:
 			list_move(&page->lru, dst);
-			nr_taken++;
+			nr_taken += compound_pages(page);
 			break;
 
 		case -EBUSY:
@@ -732,7 +732,7 @@ static unsigned long isolate_lru_pages(u
 			switch (__isolate_lru_page(cursor_page, active)) {
 			case 0:
 				list_move(&cursor_page->lru, dst);
-				nr_taken++;
+				nr_taken += compound_pages(cursor_page);
 				scan++;
 				break;
 
@@ -743,6 +743,7 @@ static unsigned long isolate_lru_pages(u
 				break;
 			}
 		}
+		scan += compound_pages(page);
 	}
 
 	*scanned = scan;
@@ -978,7 +979,7 @@ force_reclaim_mapped:
 		ClearPageActive(page);
 
 		list_move(&page->lru, &zone->inactive_list);
-		pgmoved++;
+		pgmoved += compound_pages(page);
 		if (!pagevec_add(&pvec, page)) {
 			__mod_zone_page_state(zone, NR_INACTIVE, pgmoved);
 			spin_unlock_irq(&zone->lru_lock);
@@ -1006,7 +1007,7 @@ force_reclaim_mapped:
 		SetPageLRU(page);
 		VM_BUG_ON(!PageActive(page));
 		list_move(&page->lru, &zone->active_list);
-		pgmoved++;
+		pgmoved += compound_pages(page);
 		if (!pagevec_add(&pvec, page)) {
 			__mod_zone_page_state(zone, NR_ACTIVE, pgmoved);
 			pgmoved = 0;
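
Note for reviewers (not part of the patch): compound_pages() is introduced
earlier in this series. The idea is that it returns the number of base pages
backing a (possibly compound) page, so every counter above that used to be
bumped by one is now bumped by the real number of pages scanned, isolated,
moved or reclaimed. A minimal sketch of the assumed semantics, using helper
names as they appear in later mainline kernels (illustrative only, not the
exact definition from this series):

static inline int compound_pages(struct page *page)
{
	/* An ordinary order-0 page accounts for exactly one page. */
	if (!PageCompound(page))
		return 1;
	/* A compound page accounts for 2^order base pages. */
	return 1 << compound_order(page);
}

With that, isolating e.g. one order-2 page cache page adds 4 to nr_taken
instead of 1, which keeps the NR_ACTIVE/NR_INACTIVE updates and the
scan/reclaim ratios in shrink_page_list()/isolate_lru_pages() consistent
with the actual amount of memory being handled.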