---

 memhotplug-dave/include/linux/rmap.h |    6 +-
 memhotplug-dave/mm/mmigrate.c        |    4 -
 memhotplug-dave/mm/rmap.c            |   98 ++++++++++++++++++++++++++++++-----
 memhotplug-dave/mm/vmscan.c          |    2 
 4 files changed, 93 insertions(+), 17 deletions(-)

diff -puN include/linux/rmap.h~AA-PM-14-try_to_unmap_force include/linux/rmap.h
--- memhotplug/include/linux/rmap.h~AA-PM-14-try_to_unmap_force	2005-07-28 13:50:47.000000000 -0700
+++ memhotplug-dave/include/linux/rmap.h	2005-07-28 13:50:47.000000000 -0700
@@ -90,7 +90,9 @@ static inline void page_dup_rmap(struct
  * Called from mm/vmscan.c to handle paging out
  */
 int page_referenced(struct page *, int is_locked, int ignore_token);
-int try_to_unmap(struct page *);
+int try_to_unmap(struct page *, struct list_head *);
+int touch_unmapped_address(struct list_head *);
+
 
 /*
  * Called from mm/filemap_xip.c to unmap empty zero page
@@ -110,7 +112,7 @@ unsigned long page_address_in_vma(struct
 #define anon_vma_link(vma)	do {} while (0)
 
 #define page_referenced(page,l,i) TestClearPageReferenced(page)
-#define try_to_unmap(page)	SWAP_FAIL
+#define try_to_unmap(page, force)	SWAP_FAIL
 
 #endif	/* CONFIG_MMU */
 
diff -puN mm/mmigrate.c~AA-PM-14-try_to_unmap_force mm/mmigrate.c
--- memhotplug/mm/mmigrate.c~AA-PM-14-try_to_unmap_force	2005-07-28 13:50:47.000000000 -0700
+++ memhotplug-dave/mm/mmigrate.c	2005-07-28 13:50:47.000000000 -0700
@@ -111,7 +111,7 @@ page_migratable(struct page *page, struc
 	int truncated;
 
 	if (page_mapped(page)) {
-		switch (try_to_unmap(page)) {
+		switch (try_to_unmap(page, NULL)) {
 		case SWAP_FAIL:
 			return -EBUSY;
 		case SWAP_AGAIN:
@@ -257,7 +257,7 @@ generic_migrate_page(struct page *page,
 	 * can be caught and blocked in a pagefault handler.
 	 */
 	if (page_mapped(page)) {
-		while ((ret = try_to_unmap(page)) == SWAP_AGAIN)
+		while ((ret = try_to_unmap(page, NULL)) == SWAP_AGAIN)
 			msleep(1);
 		if (ret != SWAP_SUCCESS) {
 			ret = -EBUSY;
diff -puN mm/rmap.c~AA-PM-14-try_to_unmap_force mm/rmap.c
--- memhotplug/mm/rmap.c~AA-PM-14-try_to_unmap_force	2005-07-28 13:50:47.000000000 -0700
+++ memhotplug-dave/mm/rmap.c	2005-07-28 13:50:47.000000000 -0700
@@ -46,6 +46,7 @@
  */
 
 #include
+#include
 #include
 #include
 #include
@@ -506,11 +507,81 @@ void page_remove_rmap(struct page *page)
 	}
 }
 
+struct page_va_list {
+	struct mm_struct *mm;
+	unsigned long addr;
+	struct list_head list;
+};
+
+/*
+ * This function is invoked to record an address space and a mapped address
+ * to which a target page belongs, when it is unmapped forcibly.
+ */
+static int
+record_unmapped_address(struct list_head *force, struct mm_struct *mm,
+			unsigned long address)
+{
+	struct page_va_list *vlist;
+
+	vlist = kmalloc(sizeof(struct page_va_list), GFP_KERNEL);
+	if (vlist == NULL)
+		return -ENOMEM;
+	spin_lock(&mmlist_lock);
+	if (!atomic_read(&mm->mm_users))
+		vlist->mm = NULL;
+	else {
+		vlist->mm = mm;
+		atomic_inc(&mm->mm_users);
+	}
+	spin_unlock(&mmlist_lock);
+
+	if (vlist->mm == NULL)
+		kfree(vlist);
+	else {
+		vlist->addr = address;
+		list_add(&vlist->list, force);
+	}
+	return 0;
+}
+
+/*
+ * This function touches an address recorded in the vlist to map
+ * a page into an address space again.
+ */
+int
+touch_unmapped_address(struct list_head *vlist)
+{
+	struct page_va_list *v1, *v2;
+	struct vm_area_struct *vma;
+	int ret = 0;
+	int error;
+
+	list_for_each_entry_safe(v1, v2, vlist, list) {
+		list_del(&v1->list);
+		down_read(&v1->mm->mmap_sem);
+		if (atomic_read(&v1->mm->mm_users) == 1)
+			goto out;
+		vma = find_vma(v1->mm, v1->addr);
+		if (vma == NULL)
+			goto out;
+		error = get_user_pages(current, v1->mm, v1->addr, PAGE_SIZE,
+					0, 0, NULL, NULL);
+		if (error < 0)
+			ret = error;
+	out:
+		up_read(&v1->mm->mmap_sem);
+		mmput(v1->mm);
+		kfree(v1);
+	}
+	return ret;
+}
+
 /*
  * Subfunctions of try_to_unmap: try_to_unmap_one called
  * repeatedly from either try_to_unmap_anon or try_to_unmap_file.
  */
-static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma)
+static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
+			    struct list_head *force)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	unsigned long address;
@@ -528,15 +599,18 @@ static int try_to_unmap_one(struct page
 	if (IS_ERR(pte))
 		goto out;
 
+	if (force && record_unmapped_address(force, mm, address))
+		goto out_unmap;
+
 	/*
 	 * If the page is mlock()d, we cannot swap it out.
	 * If it's recently referenced (perhaps page_referenced
	 * skipped over this mm) then we should reactivate it.
	 */
-	if ((vma->vm_flags & (VM_LOCKED|VM_RESERVED)) ||
-			ptep_clear_flush_young(vma, address, pte)) {
-		ret = SWAP_FAIL;
-		goto out_unmap;
+	if (((vma->vm_flags & (VM_LOCKED|VM_RESERVED)) ||
+		ptep_clear_flush_young(vma, address, pte)) && force == NULL) {
+			ret = SWAP_FAIL;
+			goto out_unmap;
 	}
 
 	/* Nuke the page table entry. */
@@ -678,7 +752,7 @@ out_unlock:
 	spin_unlock(&mm->page_table_lock);
 }
 
-static int try_to_unmap_anon(struct page *page)
+static int try_to_unmap_anon(struct page *page, struct list_head *force)
 {
 	struct anon_vma *anon_vma;
 	struct vm_area_struct *vma;
@@ -689,7 +763,7 @@ static int try_to_unmap_anon(struct page
 		return ret;
 
 	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
-		ret = try_to_unmap_one(page, vma);
+		ret = try_to_unmap_one(page, vma, force);
 		if (ret == SWAP_FAIL || !page_mapped(page))
 			break;
 	}
@@ -706,7 +780,7 @@ static int try_to_unmap_anon(struct page
  *
 * This function is only called from try_to_unmap for object-based pages.
 */
-static int try_to_unmap_file(struct page *page)
+static int try_to_unmap_file(struct page *page, struct list_head *force)
 {
 	struct address_space *mapping = page->mapping;
 	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
@@ -720,7 +794,7 @@ static int try_to_unmap_file(struct page
 
 	spin_lock(&mapping->i_mmap_lock);
 	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
-		ret = try_to_unmap_one(page, vma);
+		ret = try_to_unmap_one(page, vma, force);
 		if (ret == SWAP_FAIL || !page_mapped(page))
 			goto out;
 	}
@@ -809,7 +883,7 @@ out:
 * SWAP_AGAIN	- we missed a mapping, try again later
 * SWAP_FAIL	- the page is unswappable
 */
-int try_to_unmap(struct page *page)
+int try_to_unmap(struct page *page, struct list_head *force)
 {
 	int ret;
 
@@ -817,9 +891,9 @@ int try_to_unmap(struct page *page)
 	BUG_ON(!PageLocked(page));
 
 	if (PageAnon(page))
-		ret = try_to_unmap_anon(page);
+		ret = try_to_unmap_anon(page, force);
 	else
-		ret = try_to_unmap_file(page);
+		ret = try_to_unmap_file(page, force);
 
 	if (!page_mapped(page))
 		ret = SWAP_SUCCESS;
diff -puN mm/vmscan.c~AA-PM-14-try_to_unmap_force mm/vmscan.c
--- memhotplug/mm/vmscan.c~AA-PM-14-try_to_unmap_force	2005-07-28 13:50:47.000000000 -0700
+++ memhotplug-dave/mm/vmscan.c	2005-07-28 13:50:47.000000000 -0700
@@ -420,7 +420,7 @@ static int shrink_list(struct list_head
 		 * processes. Try to unmap it here.
 		 */
 		if (page_mapped(page) && mapping) {
-			switch (try_to_unmap(page)) {
+			switch (try_to_unmap(page, NULL)) {
 			case SWAP_FAIL:
 				goto activate_locked;
 			case SWAP_AGAIN:
_
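For context, a minimal sketch (not part of the patch) of how a caller might use the new interface: passing a non-NULL list head to try_to_unmap() makes the unmap forced, and each cleared mm/address pair is recorded on the list via record_unmapped_address(); once the page has been dealt with (e.g. migrated), touch_unmapped_address() walks the list and faults the recorded addresses back in through get_user_pages(). The function name force_unmap_example() and its calling context are assumed for illustration; the page must be locked, as try_to_unmap() BUG()s otherwise.

/* Illustrative sketch only; force_unmap_example() is not part of the patch. */
static int force_unmap_example(struct page *page)
{
	LIST_HEAD(force_list);	/* collects struct page_va_list entries */
	int ret;

	/* Caller holds the page lock, as for any try_to_unmap() call.
	 * A non-NULL list forces the unmap even for mlock()ed or
	 * recently referenced mappings, recording each one. */
	ret = try_to_unmap(page, &force_list);

	/*
	 * ... the page would be migrated or otherwise replaced here ...
	 */

	/* Re-establish the mappings that were forcibly removed above. */
	touch_unmapped_address(&force_list);

	return ret;
}

In the patch itself the existing callers in mm/vmscan.c and mm/mmigrate.c pass NULL, so their behaviour is unchanged; only a forced-unmap path would pass a real list.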