From: Peter Zijlstra

Delay clearing the dirty page state till after we've invalidated the page in
invalidate_complete_page2().  This gives try_to_release_page() a chance to
flush dirty data.

Signed-off-by: Peter Zijlstra
Cc: Trond Myklebust
Signed-off-by: Andrew Morton
---

 fs/nfs/file.c |    2 --
 mm/truncate.c |   14 ++------------
 2 files changed, 2 insertions(+), 14 deletions(-)

diff -puN fs/nfs/file.c~nfs-fix-nr_file_dirty-underflow fs/nfs/file.c
--- a/fs/nfs/file.c~nfs-fix-nr_file_dirty-underflow
+++ a/fs/nfs/file.c
@@ -320,8 +320,6 @@ static int nfs_release_page(struct page
 	 */
 	if (!(gfp & __GFP_FS))
 		return 0;
-	/* Hack... Force nfs_wb_page() to write out the page */
-	SetPageDirty(page);
 	return !nfs_wb_page(page->mapping->host, page);
 }
 
diff -puN mm/truncate.c~nfs-fix-nr_file_dirty-underflow mm/truncate.c
--- a/mm/truncate.c~nfs-fix-nr_file_dirty-underflow
+++ a/mm/truncate.c
@@ -306,19 +306,14 @@ invalidate_complete_page2(struct address
 	if (PagePrivate(page) && !try_to_release_page(page, GFP_KERNEL))
 		return 0;
 
+	test_clear_page_dirty(page);
 	write_lock_irq(&mapping->tree_lock);
-	if (PageDirty(page))
-		goto failed;
-
 	BUG_ON(PagePrivate(page));
 	__remove_from_page_cache(page);
 	write_unlock_irq(&mapping->tree_lock);
 	ClearPageUptodate(page);
 	page_cache_release(page);	/* pagecache ref */
 	return 1;
-failed:
-	write_unlock_irq(&mapping->tree_lock);
-	return 0;
 }
 
 /**
@@ -350,7 +345,6 @@ int invalidate_inode_pages2_range(struct
 		for (i = 0; !ret && i < pagevec_count(&pvec); i++) {
 			struct page *page = pvec.pages[i];
 			pgoff_t page_index;
-			int was_dirty;
 
 			lock_page(page);
 			if (page->mapping != mapping) {
@@ -386,12 +380,8 @@ int invalidate_inode_pages2_range(struct
 						  PAGE_CACHE_SIZE, 0);
 				}
 			}
-			was_dirty = test_clear_page_dirty(page);
-			if (!invalidate_complete_page2(mapping, page)) {
-				if (was_dirty)
-					set_page_dirty(page);
+			if (!invalidate_complete_page2(mapping, page))
 				ret = -EIO;
-			}
 			unlock_page(page);
 		}
 		pagevec_release(&pvec);
_