From: Christoph Lameter

1. Introduce a new function make_migration_entry_read() to isolate common
   code between copy_pte_range() and change_pte_range().

2. Modify change_pte_range() to check for a migration entry.  If a write
   migration entry is found and there is a request for READ permissions,
   then convert it to a read migration entry.

I am a bit concerned about the check of newprot.  Are there other values
than PAGE_READONLY that indicate read-only access?

Signed-off-by: Christoph Lameter
Signed-off-by: Andrew Morton
---

 include/linux/swapops.h |    6 ++++++
 mm/memory.c             |    7 ++-----
 mm/mprotect.c           |   21 ++++++++++++++++++---
 3 files changed, 26 insertions(+), 8 deletions(-)

diff -puN include/linux/swapops.h~read-write-migration-entries-make-mprotect-convert-write-migration include/linux/swapops.h
--- devel/include/linux/swapops.h~read-write-migration-entries-make-mprotect-convert-write-migration	2006-04-22 01:39:30.000000000 -0700
+++ devel-akpm/include/linux/swapops.h	2006-04-22 01:39:30.000000000 -0700
@@ -98,6 +98,11 @@ static inline struct page *migration_ent
 	return p;
 }
 
+static inline void make_migration_entry_read(swp_entry_t *entry)
+{
+	*entry = swp_entry(SWP_MIGRATION_READ, swp_offset(*entry));
+}
+
 extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
 					unsigned long address);
 #else
@@ -105,6 +110,7 @@ extern void migration_entry_wait(struct
 #define make_migration_entry(page, write) swp_entry(0, 0)
 #define is_migration_entry(swp) 0
 #define migration_entry_to_page(swp) NULL
+static inline void make_migration_entry_read(swp_entry_t *entryp) { }
 static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
 					 unsigned long address) { }
 
diff -puN mm/memory.c~read-write-migration-entries-make-mprotect-convert-write-migration mm/memory.c
--- devel/mm/memory.c~read-write-migration-entries-make-mprotect-convert-write-migration	2006-04-22 01:39:30.000000000 -0700
+++ devel-akpm/mm/memory.c	2006-04-22 01:39:30.000000000 -0700
@@ -447,14 +447,11 @@ copy_one_pte(struct mm_struct *dst_mm, s
 		}
 		if (is_migration_entry(entry) &&
 				is_cow_mapping(vm_flags)) {
-			page = migration_entry_to_page(entry);
-
 			/*
 			 * COW mappings require pages in both parent
-			 * and child to be set to read.
+			 * and child to be set to read.
 			 */
-			entry = make_migration_entry(page,
-							SWP_MIGRATION_READ);
+			make_migration_entry_read(&entry);
 			pte = swp_entry_to_pte(entry);
 			set_pte_at(src_mm, addr, src_pte, pte);
 		}
diff -puN mm/mprotect.c~read-write-migration-entries-make-mprotect-convert-write-migration mm/mprotect.c
--- devel/mm/mprotect.c~read-write-migration-entries-make-mprotect-convert-write-migration	2006-04-22 01:39:30.000000000 -0700
+++ devel-akpm/mm/mprotect.c	2006-04-22 01:39:30.000000000 -0700
@@ -19,6 +19,8 @@
 #include <linux/mempolicy.h>
 #include <linux/personality.h>
 #include <linux/syscalls.h>
+#include <linux/swap.h>
+#include <linux/swapops.h>
 
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
@@ -28,22 +30,35 @@
 static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
 		unsigned long addr, unsigned long end, pgprot_t newprot)
 {
-	pte_t *pte;
+	pte_t *pte, oldpte;
 	spinlock_t *ptl;
 
 	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 	do {
-		if (pte_present(*pte)) {
+		oldpte = *pte;
+		if (pte_present(oldpte)) {
 			pte_t ptent;
 
 			/* Avoid an SMP race with hardware updated dirty/clean
 			 * bits by wiping the pte and then setting the new pte
 			 * into place.
 			 */
-			ptent = pte_modify(ptep_get_and_clear(mm, addr, pte), newprot);
+			ptent = pte_modify(ptep_get_and_clear(mm, addr, pte),
+						newprot);
 			set_pte_at(mm, addr, pte, ptent);
 			lazy_mmu_prot_update(ptent);
+		} else
+		if (!pte_file(oldpte) && pgprot_val(newprot) ==
+					pgprot_val(PAGE_READONLY)) {
+			swp_entry_t entry = pte_to_swp_entry(oldpte);
+
+			if (is_write_migration_entry(entry)) {
+				make_migration_entry_read(&entry);
+				set_pte_at(mm, addr, pte,
+					swp_entry_to_pte(entry));
+			}
 		}
+
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 	pte_unmap_unlock(pte - 1, ptl);
 }
_
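
For anyone reading the patch outside the tree, the sketch below is a minimal
stand-alone illustration of what the conversion does; it is not kernel code.
swp_entry_t is modelled here as a plain struct and the SWP_MIGRATION_READ /
SWP_MIGRATION_WRITE values are made up for the example, whereas in the kernel
the type and offset are packed into a single word stored in a non-present pte.

/*
 * Stand-alone sketch of the read/write migration-entry idea.
 * Not the kernel implementation: swp_entry_t is a simplified
 * struct and the type values below are illustrative only.
 */
#include <stdio.h>

#define SWP_MIGRATION_READ	30	/* illustrative values only */
#define SWP_MIGRATION_WRITE	31

typedef struct {
	unsigned long type;	/* read or write migration entry */
	unsigned long offset;	/* identifies the page under migration */
} swp_entry_t;

static swp_entry_t swp_entry(unsigned long type, unsigned long offset)
{
	swp_entry_t e = { .type = type, .offset = offset };
	return e;
}

static unsigned long swp_offset(swp_entry_t e)
{
	return e.offset;
}

static int is_write_migration_entry(swp_entry_t e)
{
	return e.type == SWP_MIGRATION_WRITE;
}

/* Mirrors the helper added by the patch: keep the offset (the page the
 * entry refers to) and drop the write permission. */
static void make_migration_entry_read(swp_entry_t *entry)
{
	*entry = swp_entry(SWP_MIGRATION_READ, swp_offset(*entry));
}

int main(void)
{
	/* A write migration entry for some page. */
	swp_entry_t entry = swp_entry(SWP_MIGRATION_WRITE, 0x1234);

	/* What change_pte_range() does when the new protection is
	 * read-only: downgrade the entry to a read migration entry. */
	if (is_write_migration_entry(entry))
		make_migration_entry_read(&entry);

	printf("type=%lu offset=%#lx\n", entry.type, entry.offset);
	return 0;
}

Running the sketch prints the same offset with the read type, which is the
same downgrade the patch applies in change_pte_range() when a write migration
entry is found and the new protection is PAGE_READONLY.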