Index: linux-2.6.16-rc1-mm1/mm/vmscan.c =================================================================== --- linux-2.6.16-rc1-mm1.orig/mm/vmscan.c 2006-01-19 20:07:42.000000000 -0800 +++ linux-2.6.16-rc1-mm1/mm/vmscan.c 2006-01-19 20:16:56.000000000 -0800 @@ -409,7 +409,7 @@ cannot_free: /* * shrink_list adds the number of reclaimed pages to sc->nr_reclaimed */ -static int shrink_list(struct list_head *page_list, struct scan_control *sc) +static int shrink_list(struct list_head *page_list, struct scan_control *sc, int may_writepage) { LIST_HEAD(ret_pages); struct pagevec freed_pvec; @@ -485,7 +485,7 @@ static int shrink_list(struct list_head goto keep_locked; if (!may_enter_fs) goto keep_locked; - if (!sc->may_writepage) + if (!may_writepage) goto keep_locked; /* Page is dirty, try to write it out here */ @@ -1078,7 +1078,7 @@ static int isolate_lru_pages(int nr_to_s /* * shrink_cache() adds the number of pages reclaimed to sc->nr_reclaimed */ -static void shrink_cache(int max_scan, struct zone *zone, struct scan_control *sc) +static void shrink_cache(int max_scan, struct zone *zone, struct scan_control *sc, int may_writepage) { LIST_HEAD(page_list); struct pagevec pvec; @@ -1104,7 +1104,7 @@ static void shrink_cache(int max_scan, s goto done; max_scan -= nr_scan; - nr_freed = shrink_list(&page_list, sc); + nr_freed = shrink_list(&page_list, sc, may_writepage); local_irq_disable(); if (current_is_kswapd()) { @@ -1288,7 +1288,7 @@ refill_inactive_zone(int nr_pages, struc * This is a basic per-zone page freer. Used by both kswapd and direct reclaim. 
*/ static void -shrink_zone(int priority, struct zone *zone, struct scan_control *sc) +shrink_zone(int priority, struct zone *zone, struct scan_control *sc, int may_writepage) { unsigned long nr_active; unsigned long nr_inactive; @@ -1326,7 +1326,7 @@ shrink_zone(int priority, struct zone *z nr_to_scan = min(nr_inactive, (unsigned long)sc->swap_cluster_max); nr_inactive -= nr_to_scan; - shrink_cache(nr_to_scan, zone, sc); + shrink_cache(nr_to_scan, zone, sc, may_writepage); } } @@ -1352,7 +1352,7 @@ shrink_zone(int priority, struct zone *z * scan then give up on it. */ static void -shrink_caches(int priority, struct zone **zones, struct scan_control *sc) +shrink_caches(int priority, struct zone **zones, struct scan_control *sc, int write) { int i; @@ -1372,7 +1372,7 @@ shrink_caches(int priority, struct zone if (zone->all_unreclaimable && priority != DEF_PRIORITY) continue; /* Let kswapd poll it */ - shrink_zone(priority, zone, sc); + shrink_zone(priority, zone, sc, write); } } @@ -1398,9 +1398,9 @@ int try_to_free_pages(struct zone **zone struct scan_control sc; unsigned long lru_pages = 0; int i; + int write = !laptop_mode; sc.gfp_mask = gfp_mask; - sc.may_writepage = !laptop_mode; sc.may_swap = 1; inc_page_state(allocstall); @@ -1422,7 +1422,7 @@ int try_to_free_pages(struct zone **zone sc.swap_cluster_max = SWAP_CLUSTER_MAX; if (!priority) disable_swap_token(); - shrink_caches(priority, zones, &sc); + shrink_caches(priority, zones, &sc, write); shrink_slab(sc.nr_scanned, gfp_mask, lru_pages); if (reclaim_state) { sc.nr_reclaimed += reclaim_state->reclaimed_slab; @@ -1444,7 +1444,7 @@ int try_to_free_pages(struct zone **zone */ if (total_scanned > sc.swap_cluster_max + sc.swap_cluster_max/2) { wakeup_pdflush(laptop_mode ? 
0 : total_scanned); - sc.may_writepage = 1; + write = 1; } /* Take a nap, wait for some writeback to complete */ @@ -1497,12 +1497,12 @@ static int balance_pgdat(pg_data_t *pgda int total_scanned, total_reclaimed; struct reclaim_state *reclaim_state = current->reclaim_state; struct scan_control sc; + int write = !laptop_mode; loop_again: total_scanned = 0; total_reclaimed = 0; sc.gfp_mask = GFP_KERNEL; - sc.may_writepage = 1; sc.may_swap = 1; sc.nr_mapped = read_page_state(nr_mapped); @@ -1587,7 +1587,7 @@ scan: sc.nr_reclaimed = 0; sc.swap_cluster_max = nr_pages? nr_pages : SWAP_CLUSTER_MAX; atomic_inc(&zone->reclaim_in_progress); - shrink_zone(priority, zone, &sc); + shrink_zone(priority, zone, &sc, write); atomic_dec(&zone->reclaim_in_progress); reclaim_state->reclaimed_slab = 0; nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL, @@ -1607,7 +1607,7 @@ scan: */ if (total_scanned > SWAP_CLUSTER_MAX * 2 && total_scanned > total_reclaimed+total_reclaimed/2) - sc.may_writepage = 1; + write = 1; } if (nr_pages && to_free > total_reclaimed) continue; /* swsusp: need to do more work */ @@ -1839,7 +1839,6 @@ int zone_reclaim(struct zone *zone, gfp_ atomic_read(&zone->reclaim_in_progress) > 0) return 0; - sc.may_writepage = 0; sc.may_swap = 0; sc.nr_scanned = 0; sc.nr_reclaimed = 0; @@ -1858,7 +1857,7 @@ int zone_reclaim(struct zone *zone, gfp_ p->flags |= PF_MEMALLOC; reclaim_state.reclaimed_slab = 0; p->reclaim_state = &reclaim_state; - shrink_zone(0, zone, &sc); + shrink_zone(0, zone, &sc, 0); p->reclaim_state = NULL; current->flags &= ~PF_MEMALLOC;