From: Andrew Morton Change all the vmscan functions to return the number-of-reclaimed pages and remove scan_control.nr_reclaimed. Saves ten-odd bytes of text and makes things clearer and more consistent. The patch also changes the behaviour of zone_reclaim() when it falls back to slab shrinking. Christoph says "Setting this to one means that we will rescan and shrink the slab for each allocation if we are out of zone memory and RECLAIM_SLAB is set. Plus if we do an order 0 allocation we do not go off node as intended. "We better set this to zero. This means the allocation will go offnode despite us having potentially freed lots of memory on the zone. Future allocations can then again be done from this zone." Cc: Nick Piggin Cc: Christoph Lameter Signed-off-by: Andrew Morton --- mm/vmscan.c | 77 ++++++++++++++++++++++++-------------------------- 1 files changed, 38 insertions(+), 39 deletions(-) diff -puN mm/vmscan.c~vmscan-return-nr_reclaimed mm/vmscan.c --- devel/mm/vmscan.c~vmscan-return-nr_reclaimed 2006-02-27 20:57:54.000000000 -0800 +++ devel-akpm/mm/vmscan.c 2006-02-27 20:57:54.000000000 -0800 @@ -55,9 +55,6 @@ struct scan_control { /* Incremented by the number of inactive pages that were scanned */ unsigned long nr_scanned; - /* Incremented by the number of pages reclaimed */ - unsigned long nr_reclaimed; - unsigned long nr_mapped; /* From page_state */ /* This context's GFP mask */ @@ -409,7 +406,7 @@ cannot_free: } /* - * shrink_list adds the number of reclaimed pages to sc->nr_reclaimed + * shrink_list return the number of reclaimed pages */ static unsigned long shrink_list(struct list_head *page_list, struct scan_control *sc) @@ -417,7 +414,7 @@ static unsigned long shrink_list(struct LIST_HEAD(ret_pages); struct pagevec freed_pvec; int pgactivate = 0; - unsigned long reclaimed = 0; + unsigned long nr_reclaimed = 0; cond_resched(); @@ -557,7 +554,7 @@ static unsigned long shrink_list(struct free_it: unlock_page(page); - reclaimed++; + nr_reclaimed++; if 
(!pagevec_add(&freed_pvec, page)) __pagevec_release_nonlru(&freed_pvec); continue; @@ -575,8 +572,7 @@ keep: if (pagevec_count(&freed_pvec)) __pagevec_release_nonlru(&freed_pvec); mod_page_state(pgactivate, pgactivate); - sc->nr_reclaimed += reclaimed; - return reclaimed; + return nr_reclaimed; } #ifdef CONFIG_MIGRATION @@ -1101,14 +1097,15 @@ static unsigned long isolate_lru_pages(u } /* - * shrink_cache() adds the number of pages reclaimed to sc->nr_reclaimed + * shrink_cache() return the number of reclaimed pages */ -static void shrink_cache(unsigned long max_scan, struct zone *zone, - struct scan_control *sc) +static unsigned long shrink_cache(unsigned long max_scan, struct zone *zone, + struct scan_control *sc) { LIST_HEAD(page_list); struct pagevec pvec; unsigned long nr_scanned = 0; + unsigned long nr_reclaimed = 0; pagevec_init(&pvec, 1); @@ -1132,7 +1129,7 @@ static void shrink_cache(unsigned long m nr_scanned += nr_scan; nr_freed = shrink_list(&page_list, sc); - + nr_reclaimed += nr_freed; local_irq_disable(); if (current_is_kswapd()) { __mod_page_state_zone(zone, pgscan_kswapd, nr_scan); @@ -1164,6 +1161,7 @@ static void shrink_cache(unsigned long m spin_unlock_irq(&zone->lru_lock); done: pagevec_release(&pvec); + return nr_reclaimed; } /* @@ -1323,12 +1321,13 @@ refill_inactive_zone(unsigned long nr_pa /* * This is a basic per-zone page freer. Used by both kswapd and direct reclaim. 
*/ -static void shrink_zone(int priority, struct zone *zone, - struct scan_control *sc) +static unsigned long shrink_zone(int priority, struct zone *zone, + struct scan_control *sc) { unsigned long nr_active; unsigned long nr_inactive; unsigned long nr_to_scan; + unsigned long nr_reclaimed = 0; atomic_inc(&zone->reclaim_in_progress); @@ -1362,13 +1361,14 @@ static void shrink_zone(int priority, st nr_to_scan = min(nr_inactive, (unsigned long)sc->swap_cluster_max); nr_inactive -= nr_to_scan; - shrink_cache(nr_to_scan, zone, sc); + nr_reclaimed += shrink_cache(nr_to_scan, zone, sc); } } throttle_vm_writeout(); atomic_dec(&zone->reclaim_in_progress); + return nr_reclaimed; } /* @@ -1387,9 +1387,10 @@ static void shrink_zone(int priority, st * If a zone is deemed to be full of pinned pages then just give it a light * scan then give up on it. */ -static void shrink_caches(int priority, struct zone **zones, - struct scan_control *sc) +static unsigned long shrink_caches(int priority, struct zone **zones, + struct scan_control *sc) { + unsigned long nr_reclaimed = 0; int i; for (i = 0; zones[i] != NULL; i++) { @@ -1408,8 +1409,9 @@ static void shrink_caches(int priority, if (zone->all_unreclaimable && priority != DEF_PRIORITY) continue; /* Let kswapd poll it */ - shrink_zone(priority, zone, sc); + nr_reclaimed += shrink_zone(priority, zone, sc); } + return nr_reclaimed; } /* @@ -1430,7 +1432,7 @@ unsigned long try_to_free_pages(struct z int priority; int ret = 0; unsigned long total_scanned = 0; - unsigned long total_reclaimed = 0; + unsigned long nr_reclaimed = 0; struct reclaim_state *reclaim_state = current->reclaim_state; unsigned long lru_pages = 0; int i; @@ -1456,18 +1458,16 @@ unsigned long try_to_free_pages(struct z for (priority = DEF_PRIORITY; priority >= 0; priority--) { sc.nr_mapped = read_page_state(nr_mapped); sc.nr_scanned = 0; - sc.nr_reclaimed = 0; if (!priority) disable_swap_token(); - shrink_caches(priority, zones, &sc); + nr_reclaimed += 
shrink_caches(priority, zones, &sc); shrink_slab(sc.nr_scanned, gfp_mask, lru_pages); if (reclaim_state) { - sc.nr_reclaimed += reclaim_state->reclaimed_slab; + nr_reclaimed += reclaim_state->reclaimed_slab; reclaim_state->reclaimed_slab = 0; } total_scanned += sc.nr_scanned; - total_reclaimed += sc.nr_reclaimed; - if (total_reclaimed >= sc.swap_cluster_max) { + if (nr_reclaimed >= sc.swap_cluster_max) { ret = 1; goto out; } @@ -1534,7 +1534,7 @@ static unsigned long balance_pgdat(pg_da int priority; int i; unsigned long total_scanned; - unsigned long total_reclaimed; + unsigned long nr_reclaimed; struct reclaim_state *reclaim_state = current->reclaim_state; struct scan_control sc = { .gfp_mask = GFP_KERNEL, @@ -1544,7 +1544,7 @@ static unsigned long balance_pgdat(pg_da loop_again: total_scanned = 0; - total_reclaimed = 0; + nr_reclaimed = 0; sc.may_writepage = !laptop_mode, sc.nr_mapped = read_page_state(nr_mapped); @@ -1626,13 +1626,11 @@ scan: if (zone->prev_priority > priority) zone->prev_priority = priority; sc.nr_scanned = 0; - sc.nr_reclaimed = 0; - shrink_zone(priority, zone, &sc); + nr_reclaimed += shrink_zone(priority, zone, &sc); reclaim_state->reclaimed_slab = 0; nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL, lru_pages); - sc.nr_reclaimed += reclaim_state->reclaimed_slab; - total_reclaimed += sc.nr_reclaimed; + nr_reclaimed += reclaim_state->reclaimed_slab; total_scanned += sc.nr_scanned; if (zone->all_unreclaimable) continue; @@ -1645,10 +1643,10 @@ scan: * even in laptop mode */ if (total_scanned > SWAP_CLUSTER_MAX * 2 && - total_scanned > total_reclaimed+total_reclaimed/2) + total_scanned > nr_reclaimed + nr_reclaimed / 2) sc.may_writepage = 1; } - if (nr_pages && to_free > total_reclaimed) + if (nr_pages && to_free > nr_reclaimed) continue; /* swsusp: need to do more work */ if (all_zones_ok) break; /* kswapd: all done */ @@ -1665,7 +1663,7 @@ scan: * matches the direct reclaim path behaviour in terms of impact * on zone->*_priority. 
*/ - if ((total_reclaimed >= SWAP_CLUSTER_MAX) && (!nr_pages)) + if ((nr_reclaimed >= SWAP_CLUSTER_MAX) && !nr_pages) break; } out: @@ -1679,7 +1677,7 @@ out: goto loop_again; } - return total_reclaimed; + return nr_reclaimed; } /* @@ -1887,6 +1885,7 @@ int zone_reclaim(struct zone *zone, gfp_ cpumask_t mask; int node_id; int priority; + unsigned long nr_reclaimed = 0; struct scan_control sc = { .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE), .may_swap = !!(zone_reclaim_mode & RECLAIM_SWAP), @@ -1928,11 +1927,11 @@ int zone_reclaim(struct zone *zone, gfp_ */ priority = ZONE_RECLAIM_PRIORITY; do { - shrink_zone(priority, zone, &sc); + nr_reclaimed += shrink_zone(priority, zone, &sc); priority--; - } while (priority >= 0 && sc.nr_reclaimed < nr_pages); + } while (priority >= 0 && nr_reclaimed < nr_pages); - if (sc.nr_reclaimed < nr_pages && (zone_reclaim_mode & RECLAIM_SLAB)) { + if (nr_reclaimed < nr_pages && (zone_reclaim_mode & RECLAIM_SLAB)) { /* * shrink_slab does not currently allow us to determine * how many pages were freed in the zone. So we just @@ -1947,9 +1946,9 @@ int zone_reclaim(struct zone *zone, gfp_ p->reclaim_state = NULL; current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE); - if (sc.nr_reclaimed == 0) + if (nr_reclaimed == 0) zone->last_unsuccessful_zone_reclaim = jiffies; - return sc.nr_reclaimed >= nr_pages; + return nr_reclaimed >= nr_pages; } #endif _