Index: linux-2.6.19-rc6-mm1/mm/page-writeback.c
===================================================================
--- linux-2.6.19-rc6-mm1.orig/mm/page-writeback.c	2006-11-27 18:42:36.000000000 -0800
+++ linux-2.6.19-rc6-mm1/mm/page-writeback.c	2006-11-27 18:56:27.000000000 -0800
@@ -102,6 +102,19 @@
 static void background_writeout(unsigned long _min_pages);
 
+/*
+ * Indices into the array filled by get_dirty_limits().  The first two
+ * entries are thresholds; the remaining entries are current page counts.
+ */
+enum dl_items {
+	DL_BACKGROUND,			/* background writeout threshold */
+	DL_DIRTY,			/* dirty throttling threshold */
+	DL_DIRTY_UNSTABLE,		/* NR_FILE_DIRTY + NR_UNSTABLE_NFS */
+	DL_DIRTY_UNSTABLE_WRITEBACK,	/* DL_DIRTY_UNSTABLE + NR_WRITEBACK */
+	DL_UNSTABLE_WRITEBACK,		/* NR_UNSTABLE_NFS + NR_WRITEBACK */
+	NR_DL_ITEMS
+};
+
 /*
  * Work out the current dirty-memory clamping and background writeout
  * thresholds.
 */
@@ -177,6 +190,5 @@
 static void balance_dirty_pages(struct address_space *mapping)
 {
 	long nr_reclaimable;
-	long background_thresh;
-	long dirty_thresh;
+	long thresh[NR_DL_ITEMS];
 	unsigned long pages_written = 0;
@@ -193,10 +205,8 @@
 			.range_cyclic = 1,
 		};
 
-		get_dirty_limits(&background_thresh, &dirty_thresh, mapping);
-		nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
-			global_page_state(NR_UNSTABLE_NFS);
-		if (nr_reclaimable + global_page_state(NR_WRITEBACK) <=
-			dirty_thresh)
+		get_dirty_limits(thresh, mapping);
+		nr_reclaimable = thresh[DL_DIRTY_UNSTABLE];
+		if (thresh[DL_DIRTY_UNSTABLE_WRITEBACK] <= thresh[DL_DIRTY])
 			break;
 
@@ -211,13 +221,9 @@
 		 */
 		if (nr_reclaimable) {
 			writeback_inodes(&wbc);
-			get_dirty_limits(&background_thresh,
-					&dirty_thresh, mapping);
-			nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
-				global_page_state(NR_UNSTABLE_NFS);
-			if (nr_reclaimable +
-			    global_page_state(NR_WRITEBACK)
-				<= dirty_thresh)
+			get_dirty_limits(thresh, mapping);
+			nr_reclaimable = thresh[DL_DIRTY_UNSTABLE];
+			if (thresh[DL_DIRTY_UNSTABLE_WRITEBACK] <= thresh[DL_DIRTY])
 				break;
 			pages_written += write_chunk - wbc.nr_to_write;
 			if (pages_written >= write_chunk)
@@ -226,8 +232,8 @@
 		congestion_wait(WRITE, HZ/10);
 	}
 
-	if (nr_reclaimable + global_page_state(NR_WRITEBACK)
-			<= dirty_thresh && dirty_exceeded)
+	if (thresh[DL_DIRTY_UNSTABLE_WRITEBACK] <= thresh[DL_DIRTY]
+			&& dirty_exceeded)
 		dirty_exceeded = 0;
 
 	if (writeback_in_progress(bdi))
@@ -300,20 +302,19 @@
 void throttle_vm_writeout(void)
 {
-	long background_thresh;
+	long thresh[NR_DL_ITEMS];
 	long dirty_thresh;
 
 	for ( ; ; ) {
-		get_dirty_limits(&background_thresh, &dirty_thresh, NULL);
+		get_dirty_limits(thresh, NULL);
 
 		/*
 		 * Boost the allowable dirty threshold a bit for page
 		 * allocators so they don't get DoS'ed by heavy writers
 		 */
-		dirty_thresh += dirty_thresh / 10;	/* wheeee... */
+		dirty_thresh = thresh[DL_DIRTY] + thresh[DL_DIRTY] / 10; /* wheeee... */
 
-		if (global_page_state(NR_UNSTABLE_NFS) +
-		    global_page_state(NR_WRITEBACK) <= dirty_thresh)
+		if (thresh[DL_UNSTABLE_WRITEBACK] <= dirty_thresh)
 			break;
 		congestion_wait(WRITE, HZ/10);
 	}
 }
@@ -337,12 +338,10 @@
 	};
 
 	for ( ; ; ) {
-		long background_thresh;
-		long dirty_thresh;
+		long thresh[NR_DL_ITEMS];
 
-		get_dirty_limits(&background_thresh, &dirty_thresh, NULL);
-		if (global_page_state(NR_FILE_DIRTY) +
-		    global_page_state(NR_UNSTABLE_NFS) < background_thresh
+		get_dirty_limits(thresh, NULL);
+		if (thresh[DL_DIRTY_UNSTABLE] < thresh[DL_BACKGROUND]
 			&& min_pages <= 0)
 			break;
 		wbc.encountered_congestion = 0;