---
 include/linux/mmu_notifier.h |   11 +++++++----
 mm/filemap_xip.c             |    4 ++--
 mm/hugetlb.c                 |    4 ++--
 mm/memory.c                  |    7 ++++---
 mm/rmap.c                    |   11 ++++++-----
 5 files changed, 21 insertions(+), 16 deletions(-)

Index: linux-2.6/include/linux/mmu_notifier.h
===================================================================
--- linux-2.6.orig/include/linux/mmu_notifier.h	2008-01-31 18:15:06.000000000 -0800
+++ linux-2.6/include/linux/mmu_notifier.h	2008-01-31 18:17:42.000000000 -0800
@@ -51,6 +51,9 @@ struct mmu_notifier {
 	const struct mmu_notifier_ops *ops;
 };
 
+#define MMU_ATOMIC	1	/* Called in atomic context */
+#define MMU_WRITABLE	2	/* Only evict writable entries */
+
 struct mmu_notifier_ops {
 	/*
 	 * The release notifier is called when no other execution threads
@@ -69,7 +72,7 @@ struct mmu_notifier_ops {
 	/* invalidate_page is called from contexts where the pte_lock is held */
 	void (*invalidate_page)(struct mmu_notifier *mn,
 				struct mm_struct *mm,
-				unsigned long address);
+				unsigned long address, unsigned long flags);
 
 	/*
 	 * invalidate_range_begin() and invalidate_range_end() must paired.
@@ -98,10 +101,10 @@ struct mmu_notifier_ops {
 	void (*invalidate_range_begin)(struct mmu_notifier *mn,
 				 struct mm_struct *mm,
 				 unsigned long start, unsigned long end,
-				 int atomic);
+				 int flags);
 
 	void (*invalidate_range_end)(struct mmu_notifier *mn,
-				 struct mm_struct *mm, int atomic);
+				 struct mm_struct *mm, int flags);
 };
 
 struct mmu_rmap_notifier_ops;
@@ -118,7 +121,7 @@ struct mmu_rmap_notifier_ops {
	 * mapping a page.
	 */
 	void (*invalidate_page)(struct mmu_rmap_notifier *mrn,
-				struct page *page);
+				struct page *page, unsigned long flags);
 };
 
 #ifdef CONFIG_MMU_NOTIFIER

Index: linux-2.6/mm/filemap_xip.c
===================================================================
--- linux-2.6.orig/mm/filemap_xip.c	2008-01-31 18:18:07.000000000 -0800
+++ linux-2.6/mm/filemap_xip.c	2008-01-31 18:18:33.000000000 -0800
@@ -191,7 +191,7 @@ __xip_unmap (struct address_space * mapp
 			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
 		BUG_ON(address < vma->vm_start || address >= vma->vm_end);
 		mmu_notifier(invalidate_range_begin, mm, address,
-					address + PAGE_SIZE - 1, 1);
+					address + PAGE_SIZE - 1, MMU_ATOMIC);
 		pte = page_check_address(page, mm, address, &ptl);
 		if (pte) {
 			/* Nuke the page table entry. */
@@ -203,7 +203,7 @@ __xip_unmap (struct address_space * mapp
 			pte_unmap_unlock(pte, ptl);
 			page_cache_release(page);
 		}
-		mmu_notifier(invalidate_range_end, mm, 1);
+		mmu_notifier(invalidate_range_end, mm, MMU_ATOMIC);
 	}
 	spin_unlock(&mapping->i_mmap_lock);
 }

Index: linux-2.6/mm/hugetlb.c
===================================================================
--- linux-2.6.orig/mm/hugetlb.c	2008-01-31 18:19:06.000000000 -0800
+++ linux-2.6/mm/hugetlb.c	2008-01-31 18:19:35.000000000 -0800
@@ -744,7 +744,7 @@ void __unmap_hugepage_range(struct vm_ar
 	BUG_ON(start & ~HPAGE_MASK);
 	BUG_ON(end & ~HPAGE_MASK);
 
-	mmu_notifier(invalidate_range_begin, mm, start, end, 1);
+	mmu_notifier(invalidate_range_begin, mm, start, end, MMU_ATOMIC);
 	spin_lock(&mm->page_table_lock);
 	for (address = start; address < end; address += HPAGE_SIZE) {
 		ptep = huge_pte_offset(mm, address);
@@ -765,7 +765,7 @@ void __unmap_hugepage_range(struct vm_ar
 	}
 	spin_unlock(&mm->page_table_lock);
 	flush_tlb_range(vma, start, end);
-	mmu_notifier(invalidate_range_end, mm, 1);
+	mmu_notifier(invalidate_range_end, mm, MMU_ATOMIC);
 	list_for_each_entry_safe(page, tmp, &page_list, lru) {
 		list_del(&page->lru);
 		put_page(page);

Index: linux-2.6/mm/memory.c
===================================================================
--- linux-2.6.orig/mm/memory.c	2008-01-31 18:19:43.000000000 -0800
+++ linux-2.6/mm/memory.c	2008-01-31 18:21:51.000000000 -0800
@@ -603,7 +603,8 @@ int copy_page_range(struct mm_struct *ds
 		return copy_hugetlb_page_range(dst_mm, src_mm, vma);
 
 	if (is_cow_mapping(vma->vm_flags))
-		mmu_notifier(invalidate_range_begin, src_mm, addr, end, 0);
+		mmu_notifier(invalidate_range_begin, src_mm, addr, end,
+			     MMU_WRITABLE);
 
 	dst_pgd = pgd_offset(dst_mm, addr);
 	src_pgd = pgd_offset(src_mm, addr);
@@ -617,7 +618,7 @@ int copy_page_range(struct mm_struct *ds
 	} while (dst_pgd++, src_pgd++, addr = next, addr != end);
 
 	if (is_cow_mapping(vma->vm_flags))
-		mmu_notifier(invalidate_range_end, src_mm, 0);
+		mmu_notifier(invalidate_range_end, src_mm, MMU_WRITABLE);
 	return 0;
 }
 
@@ -891,7 +892,7 @@ unsigned long zap_page_range(struct vm_a
 	struct mmu_gather *tlb;
 	unsigned long end = address + size;
 	unsigned long nr_accounted = 0;
-	int atomic = details ? (details->i_mmap_lock != 0) : 0;
+	int atomic = (details && details->i_mmap_lock != 0) ? MMU_ATOMIC : 0;
 
 	lru_add_drain();
 	tlb = tlb_gather_mmu(mm, 0);

Index: linux-2.6/mm/rmap.c
===================================================================
--- linux-2.6.orig/mm/rmap.c	2008-01-31 18:22:38.000000000 -0800
+++ linux-2.6/mm/rmap.c	2008-01-31 18:24:15.000000000 -0800
@@ -436,7 +436,7 @@ static int page_mkclean_one(struct page
 
 		flush_cache_page(vma, address, pte_pfn(*pte));
 		entry = ptep_clear_flush(vma, address, pte);
-		mmu_notifier(invalidate_page, mm, address);
+		mmu_notifier(invalidate_page, mm, address, MMU_WRITABLE);
 		entry = pte_wrprotect(entry);
 		entry = pte_mkclean(entry);
 		set_pte_at(mm, address, pte, entry);
@@ -477,7 +477,8 @@ int page_mkclean(struct page *page)
 	if (mapping) {
 		ret = page_mkclean_file(mapping, page);
 		if (unlikely(PageExternalRmap(page))) {
-			mmu_rmap_notifier(invalidate_page, page);
+			mmu_rmap_notifier(invalidate_page, page,
+						MMU_WRITABLE);
 			ClearPageExternalRmap(page);
 		}
 		if (page_test_dirty(page)) {
@@ -693,7 +694,7 @@ static int try_to_unmap_one(struct page
 	/* Nuke the page table entry. */
 	flush_cache_page(vma, address, page_to_pfn(page));
 	pteval = ptep_clear_flush(vma, address, pte);
-	mmu_notifier(invalidate_page, mm, address);
+	mmu_notifier(invalidate_page, mm, address, 0);
 
 	/* Move the dirty bit to the physical page now the pte is gone. */
 	if (pte_dirty(pteval))
@@ -825,7 +826,7 @@ static void try_to_unmap_cluster(unsigne
 		/* Nuke the page table entry. */
 		flush_cache_page(vma, address, pte_pfn(*pte));
 		pteval = ptep_clear_flush(vma, address, pte);
-		mmu_notifier(invalidate_page, mm, address);
+		mmu_notifier(invalidate_page, mm, address, 0);
 
 		/* If nonlinear, store the file page offset in the pte. */
 		if (page->index != linear_page_index(vma, address))
@@ -983,7 +984,7 @@ int try_to_unmap(struct page *page, int
 		ret = try_to_unmap_file(page, migration);
 
 	if (unlikely(PageExternalRmap(page))) {
-		mmu_rmap_notifier(invalidate_page, page);
+		mmu_rmap_notifier(invalidate_page, page, 0);
 		ClearPageExternalRmap(page);
 	}
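
Not part of the patch: a rough sketch of how a driver's notifier might act
on the new flags, for reviewers who want to see the intended use. The
struct my_mmio_dev and all my_dev_* helpers below are invented for
illustration; only the ops signatures, MMU_ATOMIC and MMU_WRITABLE come
from the patch itself.

struct my_mmio_dev {
	struct mmu_notifier mn;
	/* ... device-private mapping state ... */
};

static void my_invalidate_page(struct mmu_notifier *mn,
			       struct mm_struct *mm,
			       unsigned long address, unsigned long flags)
{
	struct my_mmio_dev *dev = container_of(mn, struct my_mmio_dev, mn);

	if (flags & MMU_WRITABLE) {
		/*
		 * Caller (e.g. page_mkclean_one) only needs writable
		 * references gone; write protecting is enough.
		 */
		my_dev_wrprotect_mapping(dev, address);
		return;
	}
	/* Full eviction of the external mapping for this address. */
	my_dev_drop_mapping(dev, address);
}

static void my_invalidate_range_begin(struct mmu_notifier *mn,
				      struct mm_struct *mm,
				      unsigned long start, unsigned long end,
				      int flags)
{
	struct my_mmio_dev *dev = container_of(mn, struct my_mmio_dev, mn);

	if (flags & MMU_ATOMIC)
		/* Caller holds a spinlock (e.g. i_mmap_lock); cannot sleep. */
		my_dev_evict_range_nosleep(dev, start, end);
	else
		/* Safe to block while the device flushes its mappings. */
		my_dev_evict_range(dev, start, end);
}

static void my_invalidate_range_end(struct mmu_notifier *mn,
				    struct mm_struct *mm, int flags)
{
	/* Nothing to do in this sketch; eviction happened in _begin. */
}

static const struct mmu_notifier_ops my_notifier_ops = {
	.invalidate_page	= my_invalidate_page,
	.invalidate_range_begin	= my_invalidate_range_begin,
	.invalidate_range_end	= my_invalidate_range_end,
};

The point of splitting the old boolean into two flags: page_mkclean()
only needs the external reference made read-only (MMU_WRITABLE), while
try_to_unmap() needs it dropped entirely, and MMU_ATOMIC tells the driver
whether it may sleep while flushing.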