Kick write-back I/O against dirty pages before memory migration if the pages have PG_private and do not have a migrate_page method. It does not wait for the write-back I/Os to complete. This is done purely to improve the performance of memory migration. Signed-off-by: Hirokazu Takahashi Signed-off-by: Dave Hansen --- memhotplug-dave/mm/mmigrate.c | 48 +++++++++++++++++++++++++++++++++++++++--- 1 files changed, 45 insertions(+), 3 deletions(-) diff -puN mm/mmigrate.c~AA-PM-12.1-mmigrate-pre_writeback mm/mmigrate.c --- memhotplug/mm/mmigrate.c~AA-PM-12.1-mmigrate-pre_writeback 2005-07-28 13:50:42.000000000 -0700 +++ memhotplug-dave/mm/mmigrate.c 2005-07-28 13:50:42.000000000 -0700 @@ -396,12 +396,55 @@ migrate_onepage(struct page *page) int try_to_migrate_pages(struct list_head *page_list) { struct page *page, *page2, *newpage; - LIST_HEAD(rest_list); + struct address_space* mapping; + LIST_HEAD(pass1_list); + LIST_HEAD(pass2_list); int nr_busy = 0; int nr_noswap = 0; current->flags |= PF_KSWAPD; /* It's fake */ list_for_each_entry_safe(page, page2, page_list, lru) { + /* + * Start writeback I/O if it's a dirty page with buffers + */ + if (PageDirty(page) && PagePrivate(page)) { + if (!TestSetPageLocked(page)) { + mapping = page_mapping(page); + if (!mapping || + !PageDirty(page) || !PagePrivate(page) || + PageWriteback(page) || + pageout(page, mapping) != PAGE_SUCCESS) { + unlock_page(page); + } + } + } + list_del(&page->lru); + list_add(&page->lru, &pass1_list); + } + + /* + * Try to migrate easily movable pages first. 
+ */ + list_for_each_entry_safe(page, page2, &pass1_list, lru) { + list_del(&page->lru); + if (PageLocked(page) || PageWriteback(page) || + IS_ERR(newpage = migrate_onepage(page))) { + if (page_count(page) == 1) { + /* the page is already unused */ + putback_page_to_lru(page_zone(page), page); + page_cache_release(page); + } else { + list_add(&page->lru, &pass2_list); + } + } else { + putback_page_to_lru(page_zone(newpage), newpage); + page_cache_release(newpage); + } + } + /* + * Try to migrate the rest of them. + */ + list_for_each_entry_safe(page, page2, &pass2_list, lru) { list_del(&page->lru); if (IS_ERR(newpage = migrate_onepage(page))) { if (page_count(page) == 1) { @@ -413,14 +456,13 @@ int try_to_migrate_pages(struct list_hea nr_busy++; if (PTR_ERR(newpage) == -ENOSPC) nr_noswap++; - list_add(&page->lru, &rest_list); + list_add(&page->lru, page_list); } } else { putback_page_to_lru(page_zone(newpage), newpage); page_cache_release(newpage); } } - list_splice(&rest_list, page_list); current->flags &= ~PF_KSWAPD; if (nr_noswap) { if (printk_ratelimit()) _