From: Christoph Lameter Let's go back to the way the old hotplug code did it. Modify try_to_unmap() to take an additional parameter so that the reference bit in the ptes does not cause a SWAP_FAIL. Another option would be to modify try_to_unmap to return an additional status SWAP_REFERENCE and call try_to_unmap until another status is returned. Add a parameter to try_to_unmap and friends to not return SWAP_FAIL if a newly referenced pte is encountered. Then replace the loop in migrate_page_remove_references() with an invocation of try_to_unmap with that parameter. Signed-off-by: Christoph Lameter Signed-off-by: Andrew Morton --- include/linux/rmap.h | 4 ++-- mm/rmap.c | 20 +++++++++++--------- mm/vmscan.c | 17 +++-------------- 3 files changed, 16 insertions(+), 25 deletions(-) diff -puN include/linux/rmap.h~direct-migration-v9-migrate_pages-extension-fix2 include/linux/rmap.h --- 25/include/linux/rmap.h~direct-migration-v9-migrate_pages-extension-fix2 Tue Jan 24 14:25:12 2006 +++ 25-akpm/include/linux/rmap.h Tue Jan 24 14:25:12 2006 @@ -91,7 +91,7 @@ static inline void page_dup_rmap(struct * Called from mm/vmscan.c to handle paging out */ int page_referenced(struct page *, int is_locked); -int try_to_unmap(struct page *); +int try_to_unmap(struct page *, int ignore_refs); /* * Called from mm/filemap_xip.c to unmap empty zero page @@ -111,7 +111,7 @@ unsigned long page_address_in_vma(struct #define anon_vma_link(vma) do {} while (0) #define page_referenced(page,l) TestClearPageReferenced(page) -#define try_to_unmap(page) SWAP_FAIL +#define try_to_unmap(page, refs) SWAP_FAIL #endif /* CONFIG_MMU */ diff -puN mm/rmap.c~direct-migration-v9-migrate_pages-extension-fix2 mm/rmap.c --- 25/mm/rmap.c~direct-migration-v9-migrate_pages-extension-fix2 Tue Jan 24 14:25:12 2006 +++ 25-akpm/mm/rmap.c Tue Jan 24 14:25:12 2006 @@ -541,7 +541,8 @@ void page_remove_rmap(struct page *page) * Subfunctions of try_to_unmap: try_to_unmap_one called * repeatedly from either 
try_to_unmap_anon or try_to_unmap_file. */ -static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma) +static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, + int ignore_refs) { struct mm_struct *mm = vma->vm_mm; unsigned long address; @@ -564,7 +565,8 @@ static int try_to_unmap_one(struct page * skipped over this mm) then we should reactivate it. */ if ((vma->vm_flags & VM_LOCKED) || - ptep_clear_flush_young(vma, address, pte)) { + (ptep_clear_flush_young(vma, address, pte) + && !ignore_refs)) { ret = SWAP_FAIL; goto out_unmap; } @@ -698,7 +700,7 @@ static void try_to_unmap_cluster(unsigne pte_unmap_unlock(pte - 1, ptl); } -static int try_to_unmap_anon(struct page *page) +static int try_to_unmap_anon(struct page *page, int ignore_refs) { struct anon_vma *anon_vma; struct vm_area_struct *vma; @@ -709,7 +711,7 @@ static int try_to_unmap_anon(struct page return ret; list_for_each_entry(vma, &anon_vma->head, anon_vma_node) { - ret = try_to_unmap_one(page, vma); + ret = try_to_unmap_one(page, vma, ignore_refs); if (ret == SWAP_FAIL || !page_mapped(page)) break; } @@ -726,7 +728,7 @@ static int try_to_unmap_anon(struct page * * This function is only called from try_to_unmap for object-based pages. 
*/ -static int try_to_unmap_file(struct page *page) +static int try_to_unmap_file(struct page *page, int ignore_refs) { struct address_space *mapping = page->mapping; pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); @@ -740,7 +742,7 @@ static int try_to_unmap_file(struct page spin_lock(&mapping->i_mmap_lock); vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) { - ret = try_to_unmap_one(page, vma); + ret = try_to_unmap_one(page, vma, ignore_refs); if (ret == SWAP_FAIL || !page_mapped(page)) goto out; } @@ -825,16 +827,16 @@ out: * SWAP_AGAIN - we missed a mapping, try again later * SWAP_FAIL - the page is unswappable */ -int try_to_unmap(struct page *page) +int try_to_unmap(struct page *page, int ignore_refs) { int ret; BUG_ON(!PageLocked(page)); if (PageAnon(page)) - ret = try_to_unmap_anon(page); + ret = try_to_unmap_anon(page, ignore_refs); else - ret = try_to_unmap_file(page); + ret = try_to_unmap_file(page, ignore_refs); if (!page_mapped(page)) ret = SWAP_SUCCESS; diff -puN mm/vmscan.c~direct-migration-v9-migrate_pages-extension-fix2 mm/vmscan.c --- 25/mm/vmscan.c~direct-migration-v9-migrate_pages-extension-fix2 Tue Jan 24 14:25:12 2006 +++ 25-akpm/mm/vmscan.c Tue Jan 24 14:25:24 2006 @@ -483,7 +483,7 @@ static int shrink_list(struct list_head if (!sc->may_swap) goto keep_locked; - switch (try_to_unmap(page)) { + switch (try_to_unmap(page, 0)) { case SWAP_FAIL: goto activate_locked; case SWAP_AGAIN: @@ -623,7 +623,7 @@ static int swap_page(struct page *page) struct address_space *mapping = page_mapping(page); if (page_mapped(page) && mapping) - if (try_to_unmap(page) != SWAP_SUCCESS) + if (try_to_unmap(page, 0) != SWAP_SUCCESS) goto unlock_retry; if (PageDirty(page)) { @@ -679,7 +679,6 @@ static int migrate_page_remove_reference { struct address_space *mapping = page_mapping(page); struct page **radix_pointer; - int i; /* * Avoid doing any of the following work if the page count @@ -708,17 +707,7 @@ static int 
migrate_page_remove_reference * If the page was not migrated then the PageSwapCache bit * is still set and the operation may continue. */ - for (i = 0; i < 10 && page_mapped(page); i++) { - int rc = try_to_unmap(page); - - if (rc == SWAP_SUCCESS) - break; - /* - * If there are other runnable processes then running - * them may make it possible to unmap the page - */ - schedule(); - } + try_to_unmap(page, 1); /* * Give up if we were unable to remove all mappings. _