Distinguish in try_to_unmap_one between the case in which the page is truly
unswappable and the case in which it was merely referenced recently. The page
migration code uses try_to_unmap and can now avoid calling it again when the
failure is persistent (SWAP_FAIL) rather than transient (SWAP_AGAIN,
SWAP_REFERENCE).

Signed-off-by: Christoph Lameter

Index: linux-2.6.15-rc1-mm2/include/linux/rmap.h
===================================================================
--- linux-2.6.15-rc1-mm2.orig/include/linux/rmap.h	2005-11-18 12:28:37.000000000 -0800
+++ linux-2.6.15-rc1-mm2/include/linux/rmap.h	2005-11-18 12:28:44.000000000 -0800
@@ -122,6 +122,7 @@ unsigned long page_address_in_vma(struct
  */
 #define SWAP_SUCCESS	0
 #define SWAP_AGAIN	1
-#define SWAP_FAIL	2
+#define SWAP_REFERENCE	2
+#define SWAP_FAIL	3
 
 #endif /* _LINUX_RMAP_H */
Index: linux-2.6.15-rc1-mm2/mm/rmap.c
===================================================================
--- linux-2.6.15-rc1-mm2.orig/mm/rmap.c	2005-11-18 12:28:37.000000000 -0800
+++ linux-2.6.15-rc1-mm2/mm/rmap.c	2005-11-18 12:28:44.000000000 -0800
@@ -552,15 +552,20 @@ static int try_to_unmap_one(struct page
 
 	/*
 	 * If the page is mlock()d, we cannot swap it out.
-	 * If it's recently referenced (perhaps page_referenced
-	 * skipped over this mm) then we should reactivate it.
 	 */
-	if ((vma->vm_flags & VM_LOCKED) ||
-			ptep_clear_flush_young(vma, address, pte)) {
+	if (vma->vm_flags & VM_LOCKED) {
 		ret = SWAP_FAIL;
 		goto out_unmap;
 	}
 
+	/* If the page is recently referenced (perhaps page_referenced
+	 * skipped over this mm) then we should reactivate it.
+	 */
+	if (ptep_clear_flush_young(vma, address, pte)) {
+		ret = SWAP_REFERENCE;
+		goto out_unmap;
+	}
+
 	/* Nuke the page table entry. */
 	flush_cache_page(vma, address, page_to_pfn(page));
 	pteval = ptep_clear_flush(vma, address, pte);
@@ -710,7 +715,9 @@ static int try_to_unmap_anon(struct page
 
 	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
 		ret = try_to_unmap_one(page, vma);
-		if (ret == SWAP_FAIL || !page_mapped(page))
+		if (ret == SWAP_FAIL ||
+		    ret == SWAP_REFERENCE ||
+		    !page_mapped(page))
 			break;
 	}
 	spin_unlock(&anon_vma->lock);
@@ -741,7 +748,9 @@ static int try_to_unmap_file(struct page
 	spin_lock(&mapping->i_mmap_lock);
 	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
 		ret = try_to_unmap_one(page, vma);
-		if (ret == SWAP_FAIL || !page_mapped(page))
+		if (ret == SWAP_FAIL ||
+		    ret == SWAP_REFERENCE ||
+		    !page_mapped(page))
 			goto out;
 	}
 
@@ -823,7 +832,9 @@ out:
  *
  * SWAP_SUCCESS	- we succeeded in removing all mappings
  * SWAP_AGAIN	- we missed a mapping, try again later
+ * SWAP_REFERENCE - the page was recently referenced
  * SWAP_FAIL	- the page is unswappable
+ *
  */
 int try_to_unmap(struct page *page)
 {
Index: linux-2.6.15-rc1-mm2/mm/vmscan.c
===================================================================
--- linux-2.6.15-rc1-mm2.orig/mm/vmscan.c	2005-11-18 12:28:41.000000000 -0800
+++ linux-2.6.15-rc1-mm2/mm/vmscan.c	2005-11-18 12:28:44.000000000 -0800
@@ -470,6 +470,7 @@ static int shrink_list(struct list_head
 		 */
 		if (page_mapped(page) && mapping) {
 			switch (try_to_unmap(page)) {
+			case SWAP_REFERENCE:
 			case SWAP_FAIL:
 				goto activate_locked;
 			case SWAP_AGAIN:
@@ -709,8 +710,9 @@ int migrate_page_remove_references(struc
 
 	for(i = 0; i < 10 && page_mapped(page); i++) {
 		int rc = try_to_unmap(page);
 
-		if (rc == SWAP_SUCCESS)
+		if (rc == SWAP_SUCCESS || rc == SWAP_FAIL)
 			break;
+
 		/*
 		 * If there are other runnable processes then running
 		 * them may make it possible to unmap the page
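
For illustration only, not part of the patch: a minimal userspace C sketch of
the caller-side policy the new return code enables. mock_try_to_unmap() and
its scripted results are invented stand-ins for the kernel's try_to_unmap();
only the SWAP_* values mirror include/linux/rmap.h after this patch. The loop
retries on the transient results (SWAP_AGAIN, SWAP_REFERENCE) but stops at
once on SWAP_SUCCESS or on the now-persistent SWAP_FAIL, which is what the
migrate_page_remove_references() hunk above does.

#include <stdio.h>

/* Return codes mirroring include/linux/rmap.h after this patch. */
#define SWAP_SUCCESS	0
#define SWAP_AGAIN	1
#define SWAP_REFERENCE	2
#define SWAP_FAIL	3

/* Invented stand-in for the kernel's try_to_unmap(); the scripted
 * result sequence is purely illustrative. */
static int mock_try_to_unmap(int attempt)
{
	static const int script[] = {
		SWAP_REFERENCE, SWAP_AGAIN, SWAP_SUCCESS
	};

	return script[attempt % 3];
}

int main(void)
{
	int i, rc = SWAP_AGAIN;

	for (i = 0; i < 10; i++) {
		rc = mock_try_to_unmap(i);
		printf("attempt %d -> rc %d\n", i, rc);
		/* Stop on success, or give up immediately on a
		 * persistent failure (e.g. an mlock()ed VMA will not
		 * unlock itself); only transient results are retried. */
		if (rc == SWAP_SUCCESS || rc == SWAP_FAIL)
			break;
	}
	return rc == SWAP_SUCCESS ? 0 : 1;
}

Before this patch a recently referenced page and an mlock()ed page both came
back as SWAP_FAIL, so a caller had no way to tell which failures were worth
retrying.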