Index: linux-2.6.17-rc1-mm2/mm/memory.c
===================================================================
--- linux-2.6.17-rc1-mm2.orig/mm/memory.c	2006-04-11 12:14:34.000000000 -0700
+++ linux-2.6.17-rc1-mm2/mm/memory.c	2006-04-13 13:10:43.000000000 -0700
@@ -1902,12 +1902,12 @@ static int do_swap_page(struct mm_struct
 	mark_page_accessed(page);
 	lock_page(page);
 
-	if (!PageSwapCache(page)) {
-		/* Page migration has occured */
-		unlock_page(page);
-		page_cache_release(page);
-		goto out;
-	}
+//	if (!PageSwapCache(page)) {
+//		/* Page migration has occured */
+//		unlock_page(page);
+//		page_cache_release(page);
+//		goto out;
+//	}
 
 	/*
 	 * Back out if somebody else already faulted in this pte.
@@ -1916,6 +1916,8 @@ static int do_swap_page(struct mm_struct
 	if (unlikely(!pte_same(*page_table, orig_pte)))
 		goto out_nomap;
 
+	BUG_ON(is_migration_entry(entry));
+
 	if (unlikely(!PageUptodate(page))) {
 		ret = VM_FAULT_SIGBUS;
 		goto out_nomap;
Index: linux-2.6.17-rc1-mm2/mm/rmap.c
===================================================================
--- linux-2.6.17-rc1-mm2.orig/mm/rmap.c	2006-04-13 13:07:40.000000000 -0700
+++ linux-2.6.17-rc1-mm2/mm/rmap.c	2006-04-13 13:24:05.000000000 -0700
@@ -283,7 +283,8 @@ pte_t *page_check_address(struct page *p
 			*ptlp = ptl;
 			return ptep;
 		}
-	} else {
+	} else
+	if (!pte_none(pte) && !pte_file(pte)) {
 		/* Could still be a migration entry pointing to the page */
 		swp_entry_t entry = pte_to_swp_entry(pte);
 
@@ -302,24 +303,27 @@ pte_t *page_check_address(struct page *p
  * Restore a potential migration pte to a working pte entry for
  * anonymous pages.
  */
-static void remove_migration_pte(struct vm_area_struct *vma, unsigned long addr,
+static int remove_migration_pte(struct vm_area_struct *vma, unsigned long addr,
 	struct page *old, struct page *new)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	pte_t *ptep;
 	spinlock_t *ptl;
+	int done = 0;
 
 	ptep = page_check_address(old, mm, addr, &ptl);
 	if (!ptep)
-		return;
+		return 0;
 
 	if (!pte_present(*ptep)) {
 		inc_mm_counter(vma->vm_mm, anon_rss);
 		get_page(new);
 		set_pte_at(mm, addr, ptep, pte_mkold(mk_pte(new, vma->vm_page_prot)));
 		page_add_anon_rmap(new, vma, addr);
+		done = 1;
 	}
 	spin_unlock(ptl);
+	return done;
 }
 
 /*
@@ -334,6 +338,8 @@ void remove_migration_ptes(struct page *
 	struct anon_vma *anon_vma;
 	struct vm_area_struct *vma;
 	unsigned long mapping;
+	int nr_removed;
+	int pass = 0;
 
 	mapping = (unsigned long)newpage->mapping;
 
@@ -344,13 +350,25 @@ void remove_migration_ptes(struct page *
 	 * We hold the mmap_sem lock. So no need to call page_lock_anon_vma.
 	 */
 	anon_vma = (struct anon_vma *) (mapping - PAGE_MAPPING_ANON);
-	spin_lock(&anon_vma->lock);
-	list_for_each_entry(vma, &anon_vma->head, anon_vma_node)
-		remove_migration_pte(vma, page_address_in_vma(newpage, vma),
+	/*
+	 * May race with copy_page_range. We can only be sure that all
+	 * entries have been taken care of if we did not encounter
+	 * any entries on one pass.
+	 */
+	do {
+		nr_removed = 0;
+		spin_lock(&anon_vma->lock);
+
+		list_for_each_entry(vma, &anon_vma->head, anon_vma_node)
+			nr_removed += remove_migration_pte(vma, page_address_in_vma(newpage, vma),
 				page, newpage);
-	spin_unlock(&anon_vma->lock);
+
+		spin_unlock(&anon_vma->lock);
+		if (pass && nr_removed)
+			printk("Pass %d removed=%d %p->%p\n", pass, nr_removed, page, newpage);
+		pass++;
+	} while (nr_removed);
 }
 #endif
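
For readers skimming the diff, here is a minimal userspace sketch of the retry idiom the new do/while loop in remove_migration_ptes() relies on. The needs_fixup[] array and fixup_one() are hypothetical stand-ins for "this pte still holds a migration entry" and remove_migration_pte(); this is an illustration of the loop's termination logic, not kernel code.

/*
 * Sketch: because a concurrent producer (copy_page_range in the patch)
 * may create new entries while we scan, a pass that removed anything
 * proves nothing. Only a full pass that removes zero entries shows
 * that we are done, so we repeat until nr_removed is 0.
 */
#include <stdio.h>

#define NENTRIES 8

/* Hypothetical: nonzero means this slot still needs fixing up. */
static int needs_fixup[NENTRIES] = { 1, 0, 1, 1, 0, 0, 1, 0 };

/* Stand-in for remove_migration_pte(): returns 1 if it fixed a slot. */
static int fixup_one(int i)
{
	if (!needs_fixup[i])
		return 0;
	needs_fixup[i] = 0;
	return 1;
}

int main(void)
{
	int nr_removed, pass = 0;

	/* Mirrors the patch: rescan until one pass removes nothing. */
	do {
		int i;

		nr_removed = 0;
		for (i = 0; i < NENTRIES; i++)
			nr_removed += fixup_one(i);
		if (pass && nr_removed)
			printf("Pass %d removed=%d\n", pass, nr_removed);
		pass++;
	} while (nr_removed);

	return 0;
}

In the patch itself the same shape appears with the anon_vma lock taken around each scan, and the printk only fires on passes after the first, i.e. exactly when the suspected race was observed.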