Distinguish in try_to_unmap_one between the case when the page is truly
unswappable and the case when the page was merely recently referenced. The
page migration code uses try_to_unmap_one and may want to retry immediately
in the referenced case, since it needs to move the page regardless of how
recently it was used.

Signed-off-by: Christoph Lameter

Index: linux-2.6.14-rc5-mm1/include/linux/rmap.h
===================================================================
--- linux-2.6.14-rc5-mm1.orig/include/linux/rmap.h	2005-10-24 10:27:11.000000000 -0700
+++ linux-2.6.14-rc5-mm1/include/linux/rmap.h	2005-10-26 09:50:02.000000000 -0700
@@ -119,6 +119,7 @@ unsigned long page_address_in_vma(struct
  */
 #define SWAP_SUCCESS	0
 #define SWAP_AGAIN	1
-#define SWAP_FAIL	2
+#define SWAP_REFERENCE	2
+#define SWAP_FAIL	3
 
 #endif	/* _LINUX_RMAP_H */
Index: linux-2.6.14-rc5-mm1/mm/rmap.c
===================================================================
--- linux-2.6.14-rc5-mm1.orig/mm/rmap.c	2005-10-24 10:27:12.000000000 -0700
+++ linux-2.6.14-rc5-mm1/mm/rmap.c	2005-10-26 09:50:02.000000000 -0700
@@ -527,16 +527,20 @@ static int try_to_unmap_one(struct page
 
 	/*
 	 * If the page is mlock()d, we cannot swap it out.
-	 * If it's recently referenced (perhaps page_referenced
-	 * skipped over this mm) then we should reactivate it.
-	 *
 	 * Pages belonging to VM_RESERVED regions should not happen here.
 	 */
-	if ((vma->vm_flags & (VM_LOCKED|VM_RESERVED)) ||
-			ptep_clear_flush_young(vma, address, pte)) {
+	if (vma->vm_flags & (VM_LOCKED|VM_RESERVED)) {
 		ret = SWAP_FAIL;
 		goto out_unmap;
 	}
+	/*
+	 * If the page is recently referenced (perhaps page_referenced
+	 * skipped over this mm) then we may want to reactivate it.
+	 */
+	if (ptep_clear_flush_young(vma, address, pte)) {
+		ret = SWAP_REFERENCE;
+		goto out_unmap;
+	}
 
 	/* Nuke the page table entry. */
 	flush_cache_page(vma, address, page_to_pfn(page));
@@ -687,7 +691,9 @@ static int try_to_unmap_anon(struct page
 
 	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
 		ret = try_to_unmap_one(page, vma);
-		if (ret == SWAP_FAIL || !page_mapped(page))
+		if (ret == SWAP_FAIL ||
+			ret == SWAP_REFERENCE ||
+			!page_mapped(page))
 			break;
 	}
 	spin_unlock(&anon_vma->lock);
@@ -718,7 +724,9 @@ static int try_to_unmap_file(struct page
 
 	spin_lock(&mapping->i_mmap_lock);
 	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
 		ret = try_to_unmap_one(page, vma);
-		if (ret == SWAP_FAIL || !page_mapped(page))
+		if (ret == SWAP_FAIL ||
+			ret == SWAP_REFERENCE ||
+			!page_mapped(page))
 			goto out;
 	}
 
@@ -803,7 +811,9 @@ out:
  *
  * SWAP_SUCCESS	- we succeeded in removing all mappings
  * SWAP_AGAIN	- we missed a mapping, try again later
+ * SWAP_REFERENCE - the page was recently referenced
  * SWAP_FAIL	- the page is unswappable
+ *
  */
 int try_to_unmap(struct page *page)
 {
Index: linux-2.6.14-rc5-mm1/mm/vmscan.c
===================================================================
--- linux-2.6.14-rc5-mm1.orig/mm/vmscan.c	2005-10-26 09:47:33.000000000 -0700
+++ linux-2.6.14-rc5-mm1/mm/vmscan.c	2005-10-26 09:50:02.000000000 -0700
@@ -472,6 +472,7 @@ static int shrink_list(struct list_head
 		 */
 		if (page_mapped(page) && mapping) {
 			switch (try_to_unmap(page)) {
+			case SWAP_REFERENCE:
 			case SWAP_FAIL:
 				goto activate_locked;
 			case SWAP_AGAIN:
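
To illustrate the intent, here is a minimal, hypothetical caller sketch of
how migration code could react to the new SWAP_REFERENCE return value. The
names migrate_unmap_page() and NR_UNMAP_RETRIES are made up for this
example; they are not part of this patch or of the kernel tree.

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/rmap.h>

/* Illustrative retry bound, not a tuned value. */
#define NR_UNMAP_RETRIES	5

static int migrate_unmap_page(struct page *page)
{
	int tries;

	for (tries = 0; tries < NR_UNMAP_RETRIES; tries++) {
		switch (try_to_unmap(page)) {
		case SWAP_SUCCESS:
			/* All mappings removed; the page can be moved. */
			return 0;
		case SWAP_REFERENCE:
		case SWAP_AGAIN:
			/*
			 * Recently referenced, or a mapping was missed:
			 * retry right away, because migration must move
			 * the page no matter how hot it is.
			 */
			continue;
		case SWAP_FAIL:
			/* Truly unswappable (e.g. mlock()ed): give up. */
			return -EPERM;
		}
	}
	return -EAGAIN;
}

The point of the split is visible here: the old combined SWAP_FAIL return
could not tell a permanent failure apart from a transient one, so a caller
like this had no way to know whether an immediate retry was worthwhile.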