Index: linux-2.6.8.1-ck/include/linux/mmzone.h
===================================================================
--- linux-2.6.8.1-ck.orig/include/linux/mmzone.h	2004-08-23 13:19:00.000000000 +1000
+++ linux-2.6.8.1-ck/include/linux/mmzone.h	2004-08-27 10:31:23.622446302 +1000
@@ -276,7 +276,7 @@ typedef struct pglist_data {
 	struct pglist_data *pgdat_next;
 	wait_queue_head_t kswapd_wait;
 	struct task_struct *kswapd;
-	int maplimit;
+	unsigned long mapped_nrpages;
 } pg_data_t;
 
 #define node_present_pages(nid)	(NODE_DATA(nid)->node_present_pages)
Index: linux-2.6.8.1-ck/mm/page_alloc.c
===================================================================
--- linux-2.6.8.1-ck.orig/mm/page_alloc.c	2004-08-27 10:15:59.129917917 +1000
+++ linux-2.6.8.1-ck/mm/page_alloc.c	2004-08-27 10:47:36.681445492 +1000
@@ -727,8 +727,11 @@ __alloc_pages(unsigned int gfp_mask, uns
 			min -= z->pages_low >> 1;
 		else if (vm_mapped && wait &&
 			z->free_pages < z->pages_unmapped &&
-			z->free_pages > z->pages_low)
+			z->free_pages > z->pages_low) {
+			z->zone_pgdat->mapped_nrpages =
+				z->pages_unmapped - z->free_pages;
 			wakeup_kswapd(z);
+		}
 
 		if (z->free_pages >= min ||
 				(!wait && z->free_pages >= z->pages_high)) {
Index: linux-2.6.8.1-ck/mm/vmscan.c
===================================================================
--- linux-2.6.8.1-ck.orig/mm/vmscan.c	2004-08-27 10:15:59.132917435 +1000
+++ linux-2.6.8.1-ck/mm/vmscan.c	2004-08-27 10:49:25.340139901 +1000
@@ -592,7 +592,7 @@ static void shrink_cache(struct zone *zo
 		max_scan -= nr_scan;
 		if (current_is_kswapd()) {
 			mod_page_state_zone(zone, pgscan_kswapd, nr_scan);
-			maplimit = zone->zone_pgdat->maplimit;
+			maplimit = !!zone->zone_pgdat->mapped_nrpages;
 		} else
 			mod_page_state_zone(zone, pgscan_direct, nr_scan);
 		nr_freed = shrink_list(&page_list, sc, maplimit);
@@ -688,7 +688,7 @@ refill_inactive_zone(struct zone *zone, 
 		page = lru_to_page(&l_hold);
 		list_del(&page->lru);
 		if (page_mapped(page)) {
-			if (zone->zone_pgdat->maplimit) {
+			if (zone->zone_pgdat->mapped_nrpages) {
 				list_add(&page->lru, &l_active);
 				continue;
 			}
@@ -953,11 +953,12 @@ out:
  * the page allocator fallback scheme to ensure that aging of pages is balanced
  * across the zones.
  */
-static int balance_pgdat(pg_data_t *pgdat, int nr_pages, int maplimit)
+static int
+balance_pgdat(pg_data_t *pgdat, int nr_pages, unsigned long mapped_nrpages)
 {
 	int to_free = nr_pages;
 	int priority;
-	int i;
+	int i, maplimit = 0;
 	int total_scanned = 0, total_reclaimed = 0;
 	struct reclaim_state *reclaim_state = current->reclaim_state;
 	struct scan_control sc;
@@ -970,8 +971,11 @@ static int balance_pgdat(pg_data_t *pgda
 	 * Sanity check to ensure we don't have a stale maplimit set
 	 * and are calling balance_pgdat for a different reason.
 	 */
-	if (nr_pages)
-		maplimit = 0;
+	if (!nr_pages && mapped_nrpages) {
+		maplimit = 1;
+		nr_pages = mapped_nrpages;
+	}
+
 	/*
 	 * kswapd does a light balance_pgdat() when there is less than 1/3
 	 * ram free provided there is less than vm_mapped % of that ram
@@ -1143,7 +1147,7 @@ int kswapd(void *p)
 		schedule();
 		finish_wait(&pgdat->kswapd_wait, &wait);
 
-		balance_pgdat(pgdat, 0, pgdat->maplimit);
+		balance_pgdat(pgdat, 0, pgdat->mapped_nrpages);
 	}
 	return 0;
 }
@@ -1154,14 +1158,12 @@ int kswapd(void *p)
 void wakeup_kswapd(struct zone *zone)
 {
 	if (zone->free_pages > zone->pages_unmapped)
-		goto out;
-	if (zone->free_pages > zone->pages_low)
-		zone->zone_pgdat->maplimit = 1;
+		return;
+	if (zone->free_pages <= zone->pages_low)
+		zone->zone_pgdat->mapped_nrpages = 0;
 	if (!waitqueue_active(&zone->zone_pgdat->kswapd_wait))
-		goto out;
+		return;
 	wake_up_interruptible(&zone->zone_pgdat->kswapd_wait);
-out:
-	zone->zone_pgdat->maplimit = 0;
 }
 
 #ifdef CONFIG_PM