From: Peter Zijlstra

Now that we can detect writers of shared mappings, throttle them.
Avoids OOM by surprise.

Signed-off-by: Peter Zijlstra
Cc: Hugh Dickins
Signed-off-by: Andrew Morton
---

 include/linux/writeback.h |    1 +
 mm/memory.c               |    5 +++--
 mm/page-writeback.c       |   10 ++++++++++
 3 files changed, 14 insertions(+), 2 deletions(-)

diff -puN include/linux/writeback.h~mm-balance-dirty-pages include/linux/writeback.h
--- a/include/linux/writeback.h~mm-balance-dirty-pages
+++ a/include/linux/writeback.h
@@ -118,6 +118,7 @@ int sync_page_range(struct inode *inode,
 		loff_t pos, loff_t count);
 int sync_page_range_nolock(struct inode *inode, struct address_space *mapping,
 		loff_t pos, loff_t count);
+void set_page_dirty_balance(struct page *page);
 
 /* pdflush.c */
 extern int nr_pdflush_threads;	/* Global so it can be exported to sysctl
diff -puN mm/memory.c~mm-balance-dirty-pages mm/memory.c
--- a/mm/memory.c~mm-balance-dirty-pages
+++ a/mm/memory.c
@@ -49,6 +49,7 @@
 #include <linux/module.h>
 #include <linux/delayacct.h>
 #include <linux/init.h>
+#include <linux/writeback.h>
 
 #include <asm/pgalloc.h>
 #include <asm/uaccess.h>
@@ -1571,7 +1572,7 @@ gotten:
 unlock:
 	pte_unmap_unlock(page_table, ptl);
 	if (dirty_page) {
-		set_page_dirty(dirty_page);
+		set_page_dirty_balance(dirty_page);
 		put_page(dirty_page);
 	}
 	return ret;
@@ -2218,7 +2219,7 @@ retry:
 unlock:
 	pte_unmap_unlock(page_table, ptl);
 	if (dirty_page) {
-		set_page_dirty(dirty_page);
+		set_page_dirty_balance(dirty_page);
 		put_page(dirty_page);
 	}
 	return ret;
diff -puN mm/page-writeback.c~mm-balance-dirty-pages mm/page-writeback.c
--- a/mm/page-writeback.c~mm-balance-dirty-pages
+++ a/mm/page-writeback.c
@@ -246,6 +246,16 @@ static void balance_dirty_pages(struct a
 		pdflush_operation(background_writeout, 0);
 }
 
+void set_page_dirty_balance(struct page *page)
+{
+	if (set_page_dirty(page)) {
+		struct address_space *mapping = page_mapping(page);
+
+		if (mapping)
+			balance_dirty_pages_ratelimited(mapping);
+	}
+}
+
 /**
  * balance_dirty_pages_ratelimited_nr - balance dirty memory state
  * @mapping:	address_space which was dirtied
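
For illustration only (not part of the patch): the new path is exercised
whenever a task write-faults pages of a shared mapping.  A hypothetical
userspace writer like the sketch below dirties a large MAP_SHARED region;
the first write to each page takes a write fault, which now ends in
set_page_dirty_balance() and can throttle the task in
balance_dirty_pages_ratelimited() instead of letting it dirty memory
without bound.

/* Hypothetical test, not part of the patch: dirty 1GiB of a file
 * through a MAP_SHARED mapping so every page goes through the
 * write-fault path. */
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t len = 1UL << 30;			/* 1GiB */
	long pagesize = sysconf(_SC_PAGESIZE);
	int fd = open("dirty-test", O_RDWR | O_CREAT | O_TRUNC, 0644);

	if (pagesize <= 0)
		pagesize = 4096;
	if (fd < 0 || ftruncate(fd, len) < 0)
		return 1;

	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;

	/* The first store to each page takes a write fault; with this
	 * patch the fault handler calls set_page_dirty_balance(), which
	 * may block us in balance_dirty_pages_ratelimited(). */
	for (size_t off = 0; off < len; off += pagesize)
		p[off] = 1;

	munmap(p, len);
	close(fd);
	return 0;
}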