Index: linux-2.6.16-rc1-mm1/mm/vmscan.c
===================================================================
--- linux-2.6.16-rc1-mm1.orig/mm/vmscan.c	2006-01-19 20:16:56.000000000 -0800
+++ linux-2.6.16-rc1-mm1/mm/vmscan.c	2006-01-19 20:22:01.000000000 -0800
@@ -63,11 +63,6 @@ struct scan_control {
 	/* This context's GFP mask */
 	gfp_t gfp_mask;
 
-	int may_writepage;
-
-	/* Can pages be swapped as part of reclaim? */
-	int may_swap;
-
 	/* This context's SWAP_CLUSTER_MAX. If freeing memory for
 	 * suspend, we effectively ignore SWAP_CLUSTER_MAX.
 	 * In this context, it doesn't matter that we scan the
@@ -409,7 +404,8 @@ cannot_free:
 /*
  * shrink_list adds the number of reclaimed pages to sc->nr_reclaimed
  */
-static int shrink_list(struct list_head *page_list, struct scan_control *sc, int may_writepage)
+static int shrink_list(struct list_head *page_list, struct scan_control *sc,
+			int swap, int write)
 {
 	LIST_HEAD(ret_pages);
 	struct pagevec freed_pvec;
@@ -454,7 +450,7 @@ static int shrink_list(struct list_head
 		 * Try to allocate it some swap space here.
 		 */
 		if (PageAnon(page) && !PageSwapCache(page)) {
-			if (!sc->may_swap)
+			if (!swap)
 				goto keep_locked;
 			if (!add_to_swap(page, GFP_ATOMIC))
 				goto activate_locked;
@@ -485,7 +481,7 @@ static int shrink_list(struct list_head
 			goto keep_locked;
 		if (!may_enter_fs)
 			goto keep_locked;
-		if (may_writepage)
+		if (!write)
 			goto keep_locked;
 
 		/* Page is dirty, try to write it out here */
@@ -1078,7 +1074,8 @@ static int isolate_lru_pages(int nr_to_s
 /*
  * shrink_cache() adds the number of pages reclaimed to sc->nr_reclaimed
  */
-static void shrink_cache(int max_scan, struct zone *zone, struct scan_control *sc, int may_writepage)
+static void shrink_cache(int max_scan, struct zone *zone, struct scan_control *sc,
+			int swap, int write)
 {
 	LIST_HEAD(page_list);
 	struct pagevec pvec;
@@ -1104,7 +1101,7 @@ static void shrink_cache(int max_scan, s
 			goto done;
 
 		max_scan -= nr_scan;
-		nr_freed = shrink_list(&page_list, sc, may_writepage);
+		nr_freed = shrink_list(&page_list, sc, swap, write);
 		local_irq_disable();
 		if (current_is_kswapd()) {
@@ -1288,7 +1285,7 @@ refill_inactive_zone(int nr_pages, struc
  * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
  */
 static void
-shrink_zone(int priority, struct zone *zone, struct scan_control *sc, int may_writepage)
+shrink_zone(int priority, struct zone *zone, struct scan_control *sc, int swap, int write)
 {
 	unsigned long nr_active;
 	unsigned long nr_inactive;
@@ -1326,7 +1323,7 @@ shrink_zone(int priority, struct zone *z
 			nr_to_scan = min(nr_inactive,
 					(unsigned long)sc->swap_cluster_max);
 			nr_inactive -= nr_to_scan;
-			shrink_cache(nr_to_scan, zone, sc, may_writepage);
+			shrink_cache(nr_to_scan, zone, sc, swap, write);
 		}
 	}
 
@@ -1352,7 +1349,8 @@ shrink_zone(int priority, struct zone *z
  * scan then give up on it.
  */
 static void
-shrink_caches(int priority, struct zone **zones, struct scan_control *sc, int write)
+shrink_caches(int priority, struct zone **zones, struct scan_control *sc,
+		int swap, int write)
 {
 	int i;
 
@@ -1372,7 +1370,7 @@ shrink_caches(int priority, struct zone
 		if (zone->all_unreclaimable && priority != DEF_PRIORITY)
 			continue;	/* Let kswapd poll it */
 
-		shrink_zone(priority, zone, sc, write);
+		shrink_zone(priority, zone, sc, swap, write);
 	}
 }
 
@@ -1401,7 +1399,6 @@ int try_to_free_pages(struct zone **zone
 	int write = !laptop_mode;
 
 	sc.gfp_mask = gfp_mask;
-	sc.may_swap = 1;
 
 	inc_page_state(allocstall);
 
@@ -1422,7 +1419,7 @@ int try_to_free_pages(struct zone **zone
 		sc.swap_cluster_max = SWAP_CLUSTER_MAX;
 		if (!priority)
 			disable_swap_token();
-		shrink_caches(priority, zones, &sc, write);
+		shrink_caches(priority, zones, &sc, 1, write);
 		shrink_slab(sc.nr_scanned, gfp_mask, lru_pages);
 		if (reclaim_state) {
 			sc.nr_reclaimed += reclaim_state->reclaimed_slab;
@@ -1503,7 +1500,6 @@ loop_again:
 	total_scanned = 0;
 	total_reclaimed = 0;
 	sc.gfp_mask = GFP_KERNEL;
-	sc.may_swap = 1;
 	sc.nr_mapped = read_page_state(nr_mapped);
 
 	inc_page_state(pageoutrun);
@@ -1587,7 +1583,7 @@ scan:
 			sc.nr_reclaimed = 0;
 			sc.swap_cluster_max = nr_pages? nr_pages : SWAP_CLUSTER_MAX;
 			atomic_inc(&zone->reclaim_in_progress);
-			shrink_zone(priority, zone, &sc, write);
+			shrink_zone(priority, zone, &sc, 1, write);
 			atomic_dec(&zone->reclaim_in_progress);
 			reclaim_state->reclaimed_slab = 0;
 			nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
@@ -1839,7 +1835,6 @@ int zone_reclaim(struct zone *zone, gfp_
 			atomic_read(&zone->reclaim_in_progress) > 0)
 		return 0;
 
-	sc.may_swap = 0;
 	sc.nr_scanned = 0;
 	sc.nr_reclaimed = 0;
 	sc.nr_mapped = read_page_state(nr_mapped);
@@ -1857,7 +1852,7 @@ int zone_reclaim(struct zone *zone, gfp_
 	p->flags |= PF_MEMALLOC;
 	reclaim_state.reclaimed_slab = 0;
 	p->reclaim_state = &reclaim_state;
-	shrink_zone(0, zone, &sc, 0);
+	shrink_zone(0, zone, &sc, 0, 0);
 	p->reclaim_state = NULL;
 	current->flags &= ~PF_MEMALLOC;
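
As a reading aid only (not part of the patch), below is a minimal userspace
sketch of the gating that the new swap and write arguments perform in
shrink_list(): anonymous pages not yet in the swap cache are kept locked when
swap is 0, and dirty pages are kept locked when write is 0.  The fake_page
struct and classify() helper are made-up stand-ins for illustration; only the
flag logic mirrors the patched code.

/*
 * Illustrative stand-ins only -- not kernel code.  "anon",
 * "in_swap_cache" and "dirty" model PageAnon(), PageSwapCache()
 * and PageDirty() on a struct page.
 */
#include <stdio.h>

struct fake_page {
	int anon;
	int in_swap_cache;
	int dirty;
};

/*
 * Mirrors the shrink_list() checks above: the swap flag gates
 * add_to_swap() for anonymous pages, the write flag gates
 * writeout of dirty pages.
 */
static const char *classify(const struct fake_page *p, int swap, int write)
{
	if (p->anon && !p->in_swap_cache && !swap)
		return "keep_locked (swapping not allowed)";
	if (p->dirty && !write)
		return "keep_locked (writeback not allowed)";
	return "reclaim candidate";
}

int main(void)
{
	struct fake_page dirty_file = { 0, 0, 1 };
	struct fake_page clean_anon = { 1, 0, 0 };

	/* zone_reclaim() now calls shrink_zone(..., 0, 0) */
	printf("zone_reclaim, dirty page: %s\n",
	       classify(&dirty_file, 0, 0));

	/* try_to_free_pages()/kswapd pass swap=1, write=!laptop_mode */
	printf("kswapd, clean anon page:  %s\n",
	       classify(&clean_anon, 1, 1));
	return 0;
}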