Index: linux-2.6.19-rc6-mm1/mm/page-writeback.c
===================================================================
--- linux-2.6.19-rc6-mm1.orig/mm/page-writeback.c	2006-11-27 19:13:14.000000000 -0800
+++ linux-2.6.19-rc6-mm1/mm/page-writeback.c	2006-11-27 19:14:09.000000000 -0800
@@ -102,6 +102,11 @@
 static void background_writeout(unsigned long _min_pages);
 
+struct dirty_limits {
+	long background;
+	long dirty;
+};
+
 /*
  * Work out the current dirty-memory clamping and background writeout
  * thresholds.
@@ -120,8 +125,7 @@
  * clamping level.
  */
 static void
-get_dirty_limits(long *pbackground, long *pdirty,
-		 struct address_space *mapping)
+get_dirty_limits(struct dirty_limits *dl, struct address_space *mapping)
 {
 	int background_ratio;		/* Percentages */
 	int dirty_ratio;
@@ -163,8 +167,8 @@
 		background += background / 4;
 		dirty += dirty / 4;
 	}
-	*pbackground = background;
-	*pdirty = dirty;
+	dl->background = background;
+	dl->dirty = dirty;
 }
 
 /*
@@ -177,8 +181,7 @@
 static void balance_dirty_pages(struct address_space *mapping)
 {
 	long nr_reclaimable;
-	long background_thresh;
-	long dirty_thresh;
+	struct dirty_limits dl;
 	unsigned long pages_written = 0;
 	unsigned long write_chunk = sync_writeback_pages();
 
@@ -193,11 +196,11 @@
 			.range_cyclic	= 1,
 		};
 
-		get_dirty_limits(&background_thresh, &dirty_thresh, mapping);
+		get_dirty_limits(&dl, mapping);
 		nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
 					global_page_state(NR_UNSTABLE_NFS);
 		if (nr_reclaimable + global_page_state(NR_WRITEBACK) <=
-			dirty_thresh)
+			dl.dirty)
 				break;
 
 		if (!dirty_exceeded)
@@ -211,13 +214,12 @@
 		 */
 		if (nr_reclaimable) {
 			writeback_inodes(&wbc);
-			get_dirty_limits(&background_thresh,
-					 &dirty_thresh, mapping);
+			get_dirty_limits(&dl, mapping);
 			nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
 					global_page_state(NR_UNSTABLE_NFS);
 			if (nr_reclaimable + global_page_state(NR_WRITEBACK)
-				<= dirty_thresh)
+				<= dl.dirty)
 					break;
 			pages_written += write_chunk - wbc.nr_to_write;
 			if (pages_written >= write_chunk)
@@ -227,7 +229,7 @@
 	}
 
 	if (nr_reclaimable + global_page_state(NR_WRITEBACK)
-		<= dirty_thresh && dirty_exceeded)
+		<= dl.dirty && dirty_exceeded)
 			dirty_exceeded = 0;
 
 	if (writeback_in_progress(bdi))
@@ -242,7 +244,7 @@
 	 * background_thresh, to keep the amount of dirty memory low.
 	 */
 	if ((laptop_mode && pages_written) ||
-	    (!laptop_mode && (nr_reclaimable > background_thresh)))
+	    (!laptop_mode && (nr_reclaimable > dl.background)))
 		pdflush_operation(background_writeout, 0);
 }
 
@@ -300,20 +302,19 @@
 
 void throttle_vm_writeout(void)
 {
-	long background_thresh;
-	long dirty_thresh;
+	struct dirty_limits dl;
 
 	for ( ; ; ) {
-		get_dirty_limits(&background_thresh, &dirty_thresh, NULL);
+		get_dirty_limits(&dl, NULL);
 
 		/*
 		 * Boost the allowable dirty threshold a bit for page
 		 * allocators so they don't get DoS'ed by heavy writers
 		 */
-		dirty_thresh += dirty_thresh / 10;	/* wheeee... */
+		dl.dirty += dl.dirty / 10;	/* wheeee... */
 
 		if (global_page_state(NR_UNSTABLE_NFS) +
-			global_page_state(NR_WRITEBACK) <= dirty_thresh)
+			global_page_state(NR_WRITEBACK) <= dl.dirty)
 				break;
 		congestion_wait(WRITE, HZ/10);
 	}
@@ -337,12 +338,11 @@
 	};
 
 	for ( ; ; ) {
-		long background_thresh;
-		long dirty_thresh;
+		struct dirty_limits dl;
 
-		get_dirty_limits(&background_thresh, &dirty_thresh, NULL);
+		get_dirty_limits(&dl, NULL);
 		if (global_page_state(NR_FILE_DIRTY) +
-			global_page_state(NR_UNSTABLE_NFS) < background_thresh
+			global_page_state(NR_UNSTABLE_NFS) < dl.background
				&& min_pages <= 0)
			break;
		wbc.encountered_congestion = 0;
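
For reference, below is a minimal standalone sketch of the calling-convention
change the patch makes: get_dirty_limits() now fills in a single
struct dirty_limits out-parameter instead of two separate long* out-parameters.
This is userspace illustration code, not kernel code; the threshold arithmetic
and the total_pages parameter are invented placeholders, while the struct
layout and the &dl call pattern mirror the patch above.

#include <stdio.h>

struct dirty_limits {
	long background;
	long dirty;
};

/*
 * After the patch: one out-parameter carries both thresholds.  The
 * 40%/20% split below is a made-up placeholder, not the kernel's real
 * computation (which scales with available and mapped memory).
 */
static void get_dirty_limits(struct dirty_limits *dl, long total_pages)
{
	dl->dirty = total_pages * 40 / 100;
	dl->background = total_pages * 20 / 100;
}

int main(void)
{
	struct dirty_limits dl;

	get_dirty_limits(&dl, 1L << 20);	/* pretend 1M pages */
	printf("background=%ld dirty=%ld\n", dl.background, dl.dirty);

	/* The same post-call boost throttle_vm_writeout() applies. */
	dl.dirty += dl.dirty / 10;
	printf("boosted dirty=%ld\n", dl.dirty);
	return 0;
}

Grouping the two limits in one struct also means later callers can grow the
struct (rather than every call site's argument list) if more per-call state is
ever needed.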