From 1694080e59c40ad78ae7d9dccb26c1926e9f2453 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Wed, 25 Jul 2007 20:26:59 -0700 Subject: [PATCH] Large blocksize support: Fix up reclaim counters We now have to reclaim compound pages of arbitrary order. Adjust the counting in vmscan.c to count the number of base pages. Also change the active and inactive accounting to do the same. Signed-off-by: Christoph Lameter --- include/linux/mm_inline.h | 36 +++++++++++++++++++++++++++--------- mm/vmscan.c | 22 ++++++++++++---------- 2 files changed, 39 insertions(+), 19 deletions(-) diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h index 895bc4e..5bf34aa 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h @@ -2,39 +2,57 @@ static inline void add_page_to_active_list(struct zone *zone, struct page *page) { list_add(&page->lru, &zone->active_list); - __inc_zone_state(zone, NR_ACTIVE); + if (!PageHead(page)) + __inc_zone_state(zone, NR_ACTIVE); + else + __inc_zone_page_state(page, NR_ACTIVE); } static inline void add_page_to_inactive_list(struct zone *zone, struct page *page) { list_add(&page->lru, &zone->inactive_list); - __inc_zone_state(zone, NR_INACTIVE); + if (!PageHead(page)) + __inc_zone_state(zone, NR_INACTIVE); + else + __inc_zone_page_state(page, NR_INACTIVE); } static inline void del_page_from_active_list(struct zone *zone, struct page *page) { list_del(&page->lru); - __dec_zone_state(zone, NR_ACTIVE); + if (!PageHead(page)) + __dec_zone_state(zone, NR_ACTIVE); + else + __dec_zone_page_state(page, NR_ACTIVE); } static inline void del_page_from_inactive_list(struct zone *zone, struct page *page) { list_del(&page->lru); - __dec_zone_state(zone, NR_INACTIVE); + if (!PageHead(page)) + __dec_zone_state(zone, NR_INACTIVE); + else + __dec_zone_page_state(page, NR_INACTIVE); } static inline void del_page_from_lru(struct zone *zone, struct page *page) { + enum zone_stat_item counter = NR_ACTIVE; + list_del(&page->lru); - if 
(PageActive(page)) { + if (PageActive(page)) __ClearPageActive(page); - __dec_zone_state(zone, NR_ACTIVE); - } else { - __dec_zone_state(zone, NR_INACTIVE); - } + else + counter = NR_INACTIVE; + + if (!PageHead(page)) + __dec_zone_state(zone, counter); + else + __dec_zone_page_state(page, counter); } + diff --git a/mm/vmscan.c b/mm/vmscan.c index d419e10..eb5ec4b 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -449,14 +449,14 @@ static unsigned long shrink_page_list(struct list_head *page_list, VM_BUG_ON(PageActive(page)); - sc->nr_scanned++; + sc->nr_scanned += compound_pages(page); if (!sc->may_swap && page_mapped(page)) goto keep_locked; /* Double the slab pressure for mapped and swapcache pages */ if (page_mapped(page) || PageSwapCache(page)) - sc->nr_scanned++; + sc->nr_scanned += compound_pages(page); if (PageWriteback(page)) goto keep_locked; @@ -560,7 +560,7 @@ static unsigned long shrink_page_list(struct list_head *page_list, free_it: unlock_page(page); - nr_reclaimed++; + nr_reclaimed += compound_pages(page); if (!pagevec_add(&freed_pvec, page)) __pagevec_release_nonlru(&freed_pvec); continue; @@ -652,22 +652,23 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan, unsigned long nr_taken = 0; unsigned long scan; - for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) { + for (scan = 0; scan < nr_to_scan && !list_empty(src); ) { struct page *page; unsigned long pfn; unsigned long end_pfn; unsigned long page_pfn; + int pages; int zone_id; page = lru_to_page(src); prefetchw_prev_lru_page(page, src, flags); - + pages = compound_pages(page); VM_BUG_ON(!PageLRU(page)); switch (__isolate_lru_page(page, mode)) { case 0: list_move(&page->lru, dst); - nr_taken++; + nr_taken += pages; break; case -EBUSY: @@ -713,8 +714,8 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan, switch (__isolate_lru_page(cursor_page, mode)) { case 0: list_move(&cursor_page->lru, dst); - nr_taken++; - scan++; + nr_taken += compound_pages(cursor_page); + 
scan += compound_pages(cursor_page); break; case -EBUSY: @@ -724,6 +725,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan, break; } } + scan += pages; } *scanned = scan; @@ -957,7 +959,7 @@ force_reclaim_mapped: ClearPageActive(page); list_move(&page->lru, &zone->inactive_list); - pgmoved++; + pgmoved += compound_pages(page); if (!pagevec_add(&pvec, page)) { __mod_zone_page_state(zone, NR_INACTIVE, pgmoved); spin_unlock_irq(&zone->lru_lock); @@ -985,7 +987,7 @@ force_reclaim_mapped: SetPageLRU(page); VM_BUG_ON(!PageActive(page)); list_move(&page->lru, &zone->active_list); - pgmoved++; + pgmoved += compound_pages(page); if (!pagevec_add(&pvec, page)) { __mod_zone_page_state(zone, NR_ACTIVE, pgmoved); pgmoved = 0; -- 1.4.4.4