---
 include/linux/mm_types.h     |    3 +
 include/linux/mmu_notifier.h |  126 +++++++++++++++++++++++++++++++++++++++++++
 kernel/fork.c                |    2 
 mm/Kconfig                   |    4 +
 mm/Makefile                  |    1 
 mm/filemap_xip.c             |    3 +
 mm/fremap.c                  |    3 +
 mm/hugetlb.c                 |    3 +
 mm/memory.c                  |   34 +++++++++--
 mm/mmap.c                    |    2 
 mm/mmu_notifier.c            |  111 +++++++++++++++++++++++++++++++++++++
 mm/mprotect.c                |    3 +
 mm/mremap.c                  |    5 +
 mm/rmap.c                    |   34 ++++++++---
 14 files changed, 320 insertions(+), 14 deletions(-)

Index: linux-2.6/include/linux/mm_types.h
===================================================================
--- linux-2.6.orig/include/linux/mm_types.h	2008-04-18 11:05:57.000000000 -0700
+++ linux-2.6/include/linux/mm_types.h	2008-04-18 14:09:26.000000000 -0700
@@ -225,6 +225,9 @@ struct mm_struct {
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
 	struct mem_cgroup *mem_cgroup;
 #endif
+#ifdef CONFIG_MMU_NOTIFIER
+	struct list_head mmu_notifier_list;
+#endif
 };
 
 #endif /* _LINUX_MM_TYPES_H */
Index: linux-2.6/include/linux/mmu_notifier.h
===================================================================
--- /dev/null	1970-01-01 00:00:00.000000000 +0000
+++ linux-2.6/include/linux/mmu_notifier.h	2008-04-18 14:09:26.000000000 -0700
@@ -0,0 +1,126 @@
+#ifndef _LINUX_MMU_NOTIFIER_H
+#define _LINUX_MMU_NOTIFIER_H
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/mm_types.h>
+
+struct mmu_notifier;
+struct mmu_notifier_ops;
+
+#ifdef CONFIG_MMU_NOTIFIER
+
+struct mmu_notifier_ops {
+	/*
+	 * Called when nobody can register any more notifiers in the mm
+	 * and after the "mn" notifier has already been disarmed.
+	 */
+	void (*release)(struct mmu_notifier *mn,
+			struct mm_struct *mm);
+
+	/*
+	 * clear_flush_young is called after the VM test-and-clears the
+	 * young/accessed bitflag in the pte. This way the VM will
+	 * provide proper aging to the accesses to the page through the
+	 * secondary MMUs and not only to the ones through the Linux pte.
+	 */
+	int (*clear_flush_young)(struct mmu_notifier *mn,
+				 struct mm_struct *mm,
+				 unsigned long address);
+
+	/*
+	 * start() and end() must be paired. Multiple start/ends may be
+	 * nested or called concurrently.
+	 */
+	void (*start)(struct mmu_notifier *mn, struct mm_struct *mm,
+		      unsigned long start, unsigned long end);
+	void (*end)(struct mmu_notifier *mn, struct mm_struct *mm,
+		    unsigned long start, unsigned long end);
+};
+
+struct mmu_notifier {
+	struct list_head list;
+	const struct mmu_notifier_ops *ops;
+};
+
+static inline int mm_has_notifiers(struct mm_struct *mm)
+{
+	return unlikely(!list_empty(&mm->mmu_notifier_list));
+}
+
+extern int mmu_notifier_register(struct mmu_notifier *mn,
+				 struct mm_struct *mm);
+extern int mmu_notifier_unregister(struct mmu_notifier *mn,
+				   struct mm_struct *mm);
+extern void __mmu_notifier_release(struct mm_struct *mm);
+extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
+					    unsigned long address);
+extern void __mmu_notifier_start(struct mm_struct *mm,
+				 unsigned long start, unsigned long end);
+extern void __mmu_notifier_end(struct mm_struct *mm,
+			       unsigned long start, unsigned long end);
+
+
+static inline void mmu_notifier_release(struct mm_struct *mm)
+{
+	if (mm_has_notifiers(mm))
+		__mmu_notifier_release(mm);
+}
+
+static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
+						 unsigned long address)
+{
+	if (mm_has_notifiers(mm))
+		return __mmu_notifier_clear_flush_young(mm, address);
+	return 0;
+}
+
+static inline void mmu_notifier_start(struct mm_struct *mm,
+				      unsigned long start, unsigned long end)
+{
+	if (mm_has_notifiers(mm))
+		__mmu_notifier_start(mm, start, end);
+}
+
+static inline void mmu_notifier_end(struct mm_struct *mm,
+				    unsigned long start, unsigned long end)
+{
+	if (mm_has_notifiers(mm))
+		__mmu_notifier_end(mm, start, end);
+}
+
+static inline void mmu_notifier_mm_init(struct mm_struct *mm)
+{
+	INIT_LIST_HEAD(&mm->mmu_notifier_list);
+}
+
+#else /* CONFIG_MMU_NOTIFIER */
+
+static inline void mmu_notifier_release(struct mm_struct *mm)
+{
+}
+
+static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
+						 unsigned long address)
+{
+	return 0;
+}
+
+static inline void mmu_notifier_start(struct mm_struct *mm,
+				      unsigned long start, unsigned long end)
+{
+}
+
+static inline void mmu_notifier_end(struct mm_struct *mm,
+				    unsigned long start, unsigned long end)
+{
+}
+
+static inline void mmu_notifier_mm_init(struct mm_struct *mm)
+{
+}
+
+#endif /* CONFIG_MMU_NOTIFIER */
+
+#endif /* _LINUX_MMU_NOTIFIER_H */
Index: linux-2.6/kernel/fork.c
===================================================================
--- linux-2.6.orig/kernel/fork.c	2008-04-18 14:06:14.000000000 -0700
+++ linux-2.6/kernel/fork.c	2008-04-18 14:09:26.000000000 -0700
@@ -53,6 +53,7 @@
 #include
 #include
 #include
+#include <linux/mmu_notifier.h>
 #include
 #include
@@ -362,6 +363,7 @@ static struct mm_struct * mm_init(struct
 	if (likely(!mm_alloc_pgd(mm))) {
 		mm->def_flags = 0;
+		mmu_notifier_mm_init(mm);
 		return mm;
 	}
Index: linux-2.6/mm/Kconfig
===================================================================
--- linux-2.6.orig/mm/Kconfig	2008-04-18 11:05:57.000000000 -0700
+++ linux-2.6/mm/Kconfig	2008-04-18 14:09:26.000000000 -0700
@@ -193,3 +193,7 @@ config NR_QUICK
 config VIRT_TO_BUS
 	def_bool y
 	depends on !ARCH_NO_VIRT_TO_BUS
+
+config MMU_NOTIFIER
+	def_bool y
+	bool "MMU notifier, for paging KVM/RDMA"
Index: linux-2.6/mm/Makefile
===================================================================
--- linux-2.6.orig/mm/Makefile	2008-04-18 11:06:07.000000000 -0700
+++ linux-2.6/mm/Makefile	2008-04-18 14:09:26.000000000 -0700
@@ -33,4 +33,5 @@ obj-$(CONFIG_MIGRATION) += migrate.o
 obj-$(CONFIG_SMP) += allocpercpu.o
 obj-$(CONFIG_QUICKLIST) += quicklist.o
 obj-$(CONFIG_CGROUP_MEM_RES_CTLR) += memcontrol.o
+obj-$(CONFIG_MMU_NOTIFIER) += mmu_notifier.o
Index: linux-2.6/mm/filemap_xip.c
===================================================================
--- linux-2.6.orig/mm/filemap_xip.c	2008-04-18 14:06:14.000000000 -0700
+++ linux-2.6/mm/filemap_xip.c	2008-04-18 14:09:26.000000000 -0700
@@ -15,6 +15,7 @@
 #include
 #include
 #include
+#include <linux/mmu_notifier.h>
 
 /*
  * We do use our own empty page to avoid interference with other users
@@ -192,6 +193,7 @@ __xip_unmap (struct address_space * mapp
 		BUG_ON(address < vma->vm_start || address >= vma->vm_end);
 		pte = page_check_address(page, mm, address, &ptl);
 		if (pte) {
+			mmu_notifier_start(mm, address, address + PAGE_SIZE);
 			/* Nuke the page table entry. */
 			flush_cache_page(vma, address, pte_pfn(*pte));
 			pteval = ptep_clear_flush(vma, address, pte);
@@ -200,6 +202,7 @@ __xip_unmap (struct address_space * mapp
 			BUG_ON(pte_dirty(pteval));
 			pte_unmap_unlock(pte, ptl);
 			page_cache_release(page);
+			mmu_notifier_end(mm, address, address + PAGE_SIZE);
 		}
 	}
 	up_read(&mapping->i_mmap_sem);
Index: linux-2.6/mm/fremap.c
===================================================================
--- linux-2.6.orig/mm/fremap.c	2008-04-18 14:06:14.000000000 -0700
+++ linux-2.6/mm/fremap.c	2008-04-18 14:09:26.000000000 -0700
@@ -15,6 +15,7 @@
 #include
 #include
 #include
+#include <linux/mmu_notifier.h>
 #include
 #include
@@ -214,7 +215,9 @@ asmlinkage long sys_remap_file_pages(uns
 		up_write(&mapping->i_mmap_sem);
 	}
 
+	mmu_notifier_start(mm, start, start + size);
 	err = populate_range(mm, vma, start, size, pgoff);
+	mmu_notifier_end(mm, start, start + size);
 	if (!err && !(flags & MAP_NONBLOCK)) {
 		if (unlikely(has_write_lock)) {
 			downgrade_write(&mm->mmap_sem);
Index: linux-2.6/mm/hugetlb.c
===================================================================
--- linux-2.6.orig/mm/hugetlb.c	2008-04-18 14:06:14.000000000 -0700
+++ linux-2.6/mm/hugetlb.c	2008-04-18 14:09:26.000000000 -0700
@@ -14,6 +14,7 @@
 #include
 #include
 #include
+#include <linux/mmu_notifier.h>
 #include
 #include
@@ -799,6 +800,7 @@ void __unmap_hugepage_range(struct vm_ar
 	BUG_ON(start & ~HPAGE_MASK);
 	BUG_ON(end & ~HPAGE_MASK);
 
+	mmu_notifier_start(mm, start, end);
 	spin_lock(&mm->page_table_lock);
 	for (address = start; address < end; address += HPAGE_SIZE) {
 		ptep = huge_pte_offset(mm, address);
@@ -819,6 +821,7 @@ void __unmap_hugepage_range(struct vm_ar
 	}
 	spin_unlock(&mm->page_table_lock);
 	flush_tlb_range(vma, start, end);
+	mmu_notifier_end(mm, start, end);
 	list_for_each_entry_safe(page, tmp, &page_list, lru) {
 		list_del(&page->lru);
 		put_page(page);
Index: linux-2.6/mm/memory.c
===================================================================
--- linux-2.6.orig/mm/memory.c	2008-04-18 14:08:23.000000000 -0700
+++ linux-2.6/mm/memory.c	2008-04-18 14:10:18.000000000 -0700
@@ -51,6 +51,7 @@
 #include
 #include
 #include
+#include <linux/mmu_notifier.h>
 #include
 #include
@@ -616,6 +617,9 @@ int copy_page_range(struct mm_struct *ds
 	if (is_vm_hugetlb_page(vma))
 		return copy_hugetlb_page_range(dst_mm, src_mm, vma);
 
+	if (is_cow_mapping(vma->vm_flags))
+		mmu_notifier_start(src_mm, addr, end);
+
 	dst_pgd = pgd_offset(dst_mm, addr);
 	src_pgd = pgd_offset(src_mm, addr);
 	do {
@@ -626,6 +630,10 @@ int copy_page_range(struct mm_struct *ds
 						vma, addr, next))
 			return -ENOMEM;
 	} while (dst_pgd++, src_pgd++, addr = next, addr != end);
+
+	if (is_cow_mapping(vma->vm_flags))
+		mmu_notifier_end(src_mm, vma->vm_start, end);
+
 	return 0;
 }
@@ -829,6 +837,7 @@ unsigned long unmap_vmas(struct vm_area_
 	tlb = tlb_gather_mmu(mm, 0);
 	update_hiwater_rss(mm);
 	fullmm = tlb->fullmm;
+	mmu_notifier_start(mm, start_addr, end_addr);
 
 	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) {
 		unsigned long end;
@@ -879,8 +888,9 @@ unsigned long unmap_vmas(struct vm_area_
 				zap_work = ZAP_BLOCK_SIZE;
 			}
 		}
-	tlb_finish_mmu(tlb, start_addr, end_addr);\
+	tlb_finish_mmu(tlb, start_addr, end_addr);
 out:
+	mmu_notifier_end(mm, start_addr, end_addr);
 	return start;	/* which is now the end (or restart) address */
 }
@@ -1461,10 +1471,11 @@ int apply_to_page_range(struct mm_struct
 {
 	pgd_t *pgd;
 	unsigned long next;
-	unsigned long end = addr + size;
+	unsigned long start = addr, end = addr + size;
 	int err;
 
 	BUG_ON(addr >= end);
+	mmu_notifier_start(mm, start, end);
 	pgd = pgd_offset(mm, addr);
 	do {
 		next = pgd_addr_end(addr, end);
@@ -1472,6 +1483,7 @@ int apply_to_page_range(struct mm_struct
 		if (err)
 			break;
 	} while (pgd++, addr = next, addr != end);
+	mmu_notifier_end(mm, start, end);
 	return err;
 }
 EXPORT_SYMBOL_GPL(apply_to_page_range);
@@ -1611,9 +1623,10 @@ static int do_wp_page(struct mm_struct *
 		 */
 		page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
-		page_cache_release(old_page);
+		new_page = NULL;
 		if (!pte_same(*page_table, orig_pte))
 			goto unlock;
+		page_cache_release(old_page);
 
 		page_mkwrite = 1;
 	}
@@ -1629,6 +1642,7 @@ static int do_wp_page(struct mm_struct *
 		if (ptep_set_access_flags(vma, address, page_table, entry,1))
 			update_mmu_cache(vma, address, entry);
 		ret |= VM_FAULT_WRITE;
+		old_page = new_page = NULL;
 		goto unlock;
 	}
@@ -1685,12 +1699,20 @@ gotten:
 	} else
 		mem_cgroup_uncharge_page(new_page);
 
-	if (new_page)
+unlock:
+	pte_unmap_unlock(page_table, ptl);
+
+	if (new_page) {
+		if (new_page == old_page) {
+			/* cow happened, notify before releasing old_page */
+			mmu_notifier_start(mm, address, address + PAGE_SIZE);
+			mmu_notifier_end(mm, address, address + PAGE_SIZE);
+		}
 		page_cache_release(new_page);
+	}
 	if (old_page)
 		page_cache_release(old_page);
-unlock:
-	pte_unmap_unlock(page_table, ptl);
+
 	if (dirty_page) {
 		if (vma->vm_file)
 			file_update_time(vma->vm_file);
Index: linux-2.6/mm/mmap.c
===================================================================
--- linux-2.6.orig/mm/mmap.c	2008-04-18 14:08:50.000000000 -0700
+++ linux-2.6/mm/mmap.c	2008-04-18 14:09:26.000000000 -0700
@@ -28,6 +28,7 @@
 #include
 #include
 #include
+#include <linux/mmu_notifier.h>
 #include
 #include
@@ -2033,6 +2034,7 @@ void exit_mmap(struct mm_struct *mm)
 	unsigned long end;
 
 	/* mm's last user has gone, and its about to be pulled down */
+	mmu_notifier_release(mm);
 	arch_exit_mmap(mm);
 
 	lru_add_drain();
Index: linux-2.6/mm/mmu_notifier.c
===================================================================
--- /dev/null	1970-01-01 00:00:00.000000000 +0000
+++ linux-2.6/mm/mmu_notifier.c	2008-04-18 14:09:26.000000000 -0700
@@ -0,0 +1,111 @@
+/*
+ *  linux/mm/mmu_notifier.c
+ *
+ *  Copyright (C) 2008  Qumranet, Inc.
+ *  Copyright (C) 2008  SGI
+ *             Christoph Lameter
+ *
+ *  This work is licensed under the terms of the GNU GPL, version 2. See
+ *  the COPYING file in the top-level directory.
+ */
+
+#include <linux/mmu_notifier.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/err.h>
+
+/*
+ * No synchronization. This function can only be called when only a single
+ * process remains that performs teardown.
+ */
+void __mmu_notifier_release(struct mm_struct *mm)
+{
+	struct mmu_notifier *mn;
+
+	while (unlikely(!list_empty(&mm->mmu_notifier_list))) {
+		mn = list_entry(mm->mmu_notifier_list.next,
+				struct mmu_notifier,
+				list);
+		list_del(&mn->list);
+		if (mn->ops->release)
+			mn->ops->release(mn, mm);
+	}
+}
+
+/*
+ * If no young bitflag is supported by the hardware, ->clear_flush_young can
+ * unmap the address and return 1 or 0 depending on whether the mapping
+ * previously existed or not.
+ */
+int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
+				     unsigned long address)
+{
+	struct mmu_notifier *mn;
+	int young = 0;
+
+	list_for_each_entry(mn, &mm->mmu_notifier_list, list) {
+		if (mn->ops->clear_flush_young)
+			young |= mn->ops->clear_flush_young(mn, mm, address);
+	}
+
+	return young;
+}
+
+void __mmu_notifier_start(struct mm_struct *mm,
+			  unsigned long start, unsigned long end)
+{
+	struct mmu_notifier *mn;
+
+	list_for_each_entry(mn, &mm->mmu_notifier_list, list) {
+		if (mn->ops->start)
+			mn->ops->start(mn, mm, start, end);
+	}
+}
+
+void __mmu_notifier_end(struct mm_struct *mm,
+			unsigned long start, unsigned long end)
+{
+	struct mmu_notifier *mn;
+
+	list_for_each_entry(mn, &mm->mmu_notifier_list, list) {
+		if (mn->ops->end)
+			mn->ops->end(mn, mm, start, end);
+	}
+}
+
+/*
+ * Must not hold mmap_sem nor any other VM related lock when calling
+ * this registration function.
+ */
+int mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
+{
+	int rc;
+
+	rc = mm_lock(mm);
+	if (rc)
+		return rc;
+	list_add(&mn->list, &mm->mmu_notifier_list);
+	mm_unlock(mm);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mmu_notifier_register);
+
+/*
+ * mm_users can't go down to zero while mmu_notifier_unregister()
+ * runs or it can race with ->release. So a mm_users pin must
+ * be taken by the caller (if mm can be different from current->mm).
+ */
+int mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
+{
+	int rc;
+
+	BUG_ON(!atomic_read(&mm->mm_users));
+
+	rc = mm_lock(mm);
+	if (unlikely(rc))
+		return rc;
+	list_del(&mn->list);
+	mm_unlock(mm);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mmu_notifier_unregister);
Index: linux-2.6/mm/mprotect.c
===================================================================
--- linux-2.6.orig/mm/mprotect.c	2008-04-18 11:05:57.000000000 -0700
+++ linux-2.6/mm/mprotect.c	2008-04-18 14:09:26.000000000 -0700
@@ -21,6 +21,7 @@
 #include
 #include
 #include
+#include <linux/mmu_notifier.h>
 #include
 #include
 #include
@@ -198,10 +199,12 @@ success:
 		dirty_accountable = 1;
 	}
 
+	mmu_notifier_start(mm, start, end);
 	if (is_vm_hugetlb_page(vma))
 		hugetlb_change_protection(vma, start, end, vma->vm_page_prot);
 	else
 		change_protection(vma, start, end, vma->vm_page_prot, dirty_accountable);
+	mmu_notifier_end(mm, start, end);
 	vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
 	vm_stat_account(mm, newflags, vma->vm_file, nrpages);
 	return 0;
Index: linux-2.6/mm/mremap.c
===================================================================
--- linux-2.6.orig/mm/mremap.c	2008-04-18 14:06:14.000000000 -0700
+++ linux-2.6/mm/mremap.c	2008-04-18 14:09:26.000000000 -0700
@@ -18,6 +18,7 @@
 #include
 #include
 #include
+#include <linux/mmu_notifier.h>
 #include
 #include
@@ -74,7 +75,10 @@ static void move_ptes(struct vm_area_str
 	struct mm_struct *mm = vma->vm_mm;
 	pte_t *old_pte, *new_pte, pte;
 	spinlock_t *old_ptl, *new_ptl;
+	unsigned long old_start;
 
+	old_start = old_addr;
+	mmu_notifier_start(vma->vm_mm, old_start, old_end);
 	if (vma->vm_file) {
 		/*
 		 * Subtle point from Rajesh Venkatasubramanian: before
@@ -114,6 +118,7 @@ static void move_ptes(struct vm_area_str
 		spin_unlock(new_ptl);
 	pte_unmap_nested(new_pte - 1);
 	pte_unmap_unlock(old_pte - 1, old_ptl);
+	mmu_notifier_end(vma->vm_mm, old_start, old_end);
 	if (mapping)
 		up_write(&mapping->i_mmap_sem);
 }
Index: linux-2.6/mm/rmap.c
===================================================================
--- linux-2.6.orig/mm/rmap.c	2008-04-18 14:08:31.000000000 -0700
+++ linux-2.6/mm/rmap.c	2008-04-18 14:09:26.000000000 -0700
@@ -49,6 +49,7 @@
 #include
 #include
 #include
+#include <linux/mmu_notifier.h>
 
 #include
 
@@ -283,7 +284,7 @@ static int page_referenced_one(struct pa
 	unsigned long address;
 	pte_t *pte;
 	spinlock_t *ptl;
-	int referenced = 0;
+	int referenced = 0, clear_flush_young = 0;
 
 	address = vma_address(page, vma);
 	if (address == -EFAULT)
 		goto out;
@@ -296,8 +297,11 @@ static int page_referenced_one(struct pa
 	if (vma->vm_flags & VM_LOCKED) {
 		referenced++;
 		*mapcount = 1;	/* break early from loop */
-	} else if (ptep_clear_flush_young(vma, address, pte))
-		referenced++;
+	} else {
+		clear_flush_young = 1;
+		if (ptep_clear_flush_young(vma, address, pte))
+			referenced++;
+	}
 
 	/* Pretend the page is referenced if the task has the
 	   swap token and is in the middle of a page fault.
 	 */
@@ -307,6 +311,10 @@ static int page_referenced_one(struct pa
 	(*mapcount)--;
 	pte_unmap_unlock(pte, ptl);
+
+	if (clear_flush_young)
+		referenced += mmu_notifier_clear_flush_young(mm, address);
+
 out:
 	return referenced;
 }
@@ -457,9 +465,10 @@ static int page_mkclean_one(struct page
 	if (address == -EFAULT)
 		goto out;
 
+	mmu_notifier_start(mm, address, address + PAGE_SIZE);
 	pte = page_check_address(page, mm, address, &ptl);
 	if (!pte)
-		goto out;
+		goto out_notifier;
 
 	if (pte_dirty(*pte) || pte_write(*pte)) {
 		pte_t entry;
@@ -473,6 +482,10 @@ static int page_mkclean_one(struct page
 	}
 
 	pte_unmap_unlock(pte, ptl);
+
+out_notifier:
+	mmu_notifier_end(mm, address, address + PAGE_SIZE);
+
 out:
 	return ret;
 }
@@ -716,17 +729,17 @@ static int try_to_unmap_one(struct page
 	if (address == -EFAULT)
 		goto out;
 
+	mmu_notifier_start(mm, address, address + PAGE_SIZE);
 	pte = page_check_address(page, mm, address, &ptl);
 	if (!pte)
-		goto out;
+		goto out_notifier;
 
 	/*
 	 * If the page is mlock()d, we cannot swap it out.
 	 * If it's recently referenced (perhaps page_referenced
 	 * skipped over this mm) then we should reactivate it.
 	 */
-	if (!migration && ((vma->vm_flags & VM_LOCKED) ||
-			(ptep_clear_flush_young(vma, address, pte)))) {
+	if (!migration && (vma->vm_flags & VM_LOCKED)) {
 		ret = SWAP_FAIL;
 		goto out_unmap;
 	}
@@ -788,6 +801,8 @@ static int try_to_unmap_one(struct page
 
 out_unmap:
 	pte_unmap_unlock(pte, ptl);
+out_notifier:
+	mmu_notifier_end(mm, address, address + PAGE_SIZE);
 out:
 	return ret;
 }
@@ -826,7 +841,7 @@ static void try_to_unmap_cluster(unsigne
 	spinlock_t *ptl;
 	struct page *page;
 	unsigned long address;
-	unsigned long end;
+	unsigned long start, end;
 
 	address = (vma->vm_start + cursor) & CLUSTER_MASK;
 	end = address + CLUSTER_SIZE;
@@ -847,6 +862,8 @@ static void try_to_unmap_cluster(unsigne
 	if (!pmd_present(*pmd))
 		return;
 
+	start = address;
+	mmu_notifier_start(mm, start, end);
 	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
 
 	/* Update high watermark before we lower rss */
@@ -879,6 +896,7 @@ static void try_to_unmap_cluster(unsigne
 		(*mapcount)--;
 	}
 	pte_unmap_unlock(pte - 1, ptl);
+	mmu_notifier_end(mm, start, end);
 }
 
 static int try_to_unmap_anon(struct page *page, int migration)
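
[Illustrative only - not part of the patch.] To make the API above concrete, here is a minimal sketch of how a hypothetical secondary-MMU driver could hook into these notifiers. Everything named demo_* is invented for the example; the only interfaces assumed are the mmu_notifier_ops, mmu_notifier_register() and mmu_notifier_unregister() definitions introduced by this patch. Because ->start/->end pairs may be nested or run concurrently, the sketch keeps a simple depth counter that the driver's secondary fault handler (not shown) would wait on before re-establishing any mapping.

/* Hypothetical example - not part of this patch. */
#include <linux/kernel.h>
#include <linux/mmu_notifier.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct demo_smmu {
	struct mmu_notifier mn;		/* embedded notifier */
	spinlock_t lock;
	int invalidate_depth;		/* nested start/end count */
	wait_queue_head_t wq;		/* secondary faults wait here */
};

/* Tear down the secondary (device) mappings covering [start, end). */
static void demo_drop_range(struct demo_smmu *s,
			    unsigned long start, unsigned long end)
{
	/* device specific */
}

static void demo_start(struct mmu_notifier *mn, struct mm_struct *mm,
		       unsigned long start, unsigned long end)
{
	struct demo_smmu *s = container_of(mn, struct demo_smmu, mn);

	spin_lock(&s->lock);
	s->invalidate_depth++;		/* hold off new secondary faults */
	spin_unlock(&s->lock);
	demo_drop_range(s, start, end);
}

static void demo_end(struct mmu_notifier *mn, struct mm_struct *mm,
		     unsigned long start, unsigned long end)
{
	struct demo_smmu *s = container_of(mn, struct demo_smmu, mn);

	spin_lock(&s->lock);
	if (!--s->invalidate_depth)
		wake_up(&s->wq);	/* faults may be serviced again */
	spin_unlock(&s->lock);
}

static int demo_clear_flush_young(struct mmu_notifier *mn,
				  struct mm_struct *mm, unsigned long address)
{
	/*
	 * Return 1 if the secondary MMU accessed the page since the last
	 * check, so the VM ages the page correctly; hardware without an
	 * accessed bit could instead unmap the page here and report
	 * whether a mapping existed.
	 */
	return 0;
}

static void demo_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct demo_smmu *s = container_of(mn, struct demo_smmu, mn);

	/* The address space is going away: drop everything. */
	demo_drop_range(s, 0, ~0UL);
}

static const struct mmu_notifier_ops demo_ops = {
	.release		= demo_release,
	.clear_flush_young	= demo_clear_flush_young,
	.start			= demo_start,
	.end			= demo_end,
};

static int demo_attach(struct demo_smmu *s, struct mm_struct *mm)
{
	spin_lock_init(&s->lock);
	init_waitqueue_head(&s->wq);
	s->invalidate_depth = 0;
	s->mn.ops = &demo_ops;
	/* Must not hold mmap_sem or any other VM lock here. */
	return mmu_notifier_register(&s->mn, mm);
}

A real driver's secondary fault path would then do something along the lines of wait_event(s->wq, s->invalidate_depth == 0) (under whatever lock it uses to order against ->start) before instantiating a mapping, so it never re-reads page table contents between a start and the matching end.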
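
Also illustrative only: the comment above mmu_notifier_unregister() requires the caller to hold an mm_users pin whenever mm may differ from current->mm. A hypothetical detach path could look like the following; it assumes the driver already keeps the mm_struct itself alive (e.g. by holding an mm_count reference taken at attach time), so dereferencing mm here is safe even if mm_users has dropped to zero.

/* Hypothetical teardown path - not part of this patch. */
static void demo_detach(struct demo_smmu *s, struct mm_struct *mm)
{
	/*
	 * Pin mm_users so exit_mmap() - and therefore ->release - cannot
	 * run while we unregister. If mm_users already dropped to zero,
	 * ->release has done (or will do) the cleanup for us.
	 */
	if (!atomic_inc_not_zero(&mm->mm_users))
		return;
	/* mm_lock() may fail (e.g. -ENOMEM); a real driver needs a fallback */
	WARN_ON(mmu_notifier_unregister(&s->mn, mm));
	mmput(mm);
}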