From: "Chen, Kenneth W" On i386 and x86-64, pte flag _PAGE_PSE collides with _PAGE_PROTNONE. The identify of hugetlb pte is lost when changing page protection via mprotect. A page fault occurs later will trigger a bug check in huge_pte_alloc(). The fix is to always make new pte a hugetlb pte and also to clean up legacy code where _PAGE_PRESENT is forced on in the pre-faulting day. Signed-off-by: Ken Chen Signed-off-by: Nishanth Aravamudan Cc: Andi Kleen Signed-off-by: Andrew Morton --- include/asm-i386/pgtable.h | 5 ++--- include/asm-ia64/pgtable.h | 2 +- include/asm-x86_64/pgtable.h | 4 ++-- mm/hugetlb.c | 2 +- 4 files changed, 6 insertions(+), 7 deletions(-) diff -puN include/asm-i386/pgtable.h~fix-i386-x86-64-_page_pse-bit-when-changing-page-protection include/asm-i386/pgtable.h --- devel/include/asm-i386/pgtable.h~fix-i386-x86-64-_page_pse-bit-when-changing-page-protection 2006-03-16 02:03:54.000000000 -0800 +++ devel-akpm/include/asm-i386/pgtable.h 2006-03-16 02:03:54.000000000 -0800 @@ -219,13 +219,12 @@ extern unsigned long pg0[]; * The following only work if pte_present() is true. * Undefined behaviour if not.. */ -#define __LARGE_PTE (_PAGE_PSE | _PAGE_PRESENT) static inline int pte_user(pte_t pte) { return (pte).pte_low & _PAGE_USER; } static inline int pte_read(pte_t pte) { return (pte).pte_low & _PAGE_USER; } static inline int pte_dirty(pte_t pte) { return (pte).pte_low & _PAGE_DIRTY; } static inline int pte_young(pte_t pte) { return (pte).pte_low & _PAGE_ACCESSED; } static inline int pte_write(pte_t pte) { return (pte).pte_low & _PAGE_RW; } -static inline int pte_huge(pte_t pte) { return ((pte).pte_low & __LARGE_PTE) == __LARGE_PTE; } +static inline int pte_huge(pte_t pte) { return (pte).pte_low & _PAGE_PSE; } /* * The following only works if pte_present() is not true. @@ -242,7 +241,7 @@ static inline pte_t pte_mkexec(pte_t pte static inline pte_t pte_mkdirty(pte_t pte) { (pte).pte_low |= _PAGE_DIRTY; return pte; } static inline pte_t pte_mkyoung(pte_t pte) { (pte).pte_low |= _PAGE_ACCESSED; return pte; } static inline pte_t pte_mkwrite(pte_t pte) { (pte).pte_low |= _PAGE_RW; return pte; } -static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= __LARGE_PTE; return pte; } +static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return pte; } #ifdef CONFIG_X86_PAE # include diff -puN include/asm-ia64/pgtable.h~fix-i386-x86-64-_page_pse-bit-when-changing-page-protection include/asm-ia64/pgtable.h --- devel/include/asm-ia64/pgtable.h~fix-i386-x86-64-_page_pse-bit-when-changing-page-protection 2006-03-16 02:03:54.000000000 -0800 +++ devel-akpm/include/asm-ia64/pgtable.h 2006-03-16 02:03:54.000000000 -0800 @@ -314,7 +314,7 @@ ia64_phys_addr_valid (unsigned long addr #define pte_mkyoung(pte) (__pte(pte_val(pte) | _PAGE_A)) #define pte_mkclean(pte) (__pte(pte_val(pte) & ~_PAGE_D)) #define pte_mkdirty(pte) (__pte(pte_val(pte) | _PAGE_D)) -#define pte_mkhuge(pte) (__pte(pte_val(pte) | _PAGE_P)) +#define pte_mkhuge(pte) (__pte(pte_val(pte))) /* * Macro to a page protection value as "uncacheable". 
Note that "protection" is really a diff -puN include/asm-x86_64/pgtable.h~fix-i386-x86-64-_page_pse-bit-when-changing-page-protection include/asm-x86_64/pgtable.h --- devel/include/asm-x86_64/pgtable.h~fix-i386-x86-64-_page_pse-bit-when-changing-page-protection 2006-03-16 02:03:54.000000000 -0800 +++ devel-akpm/include/asm-x86_64/pgtable.h 2006-03-16 02:03:54.000000000 -0800 @@ -273,7 +273,7 @@ static inline int pte_dirty(pte_t pte) static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; } static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; } -static inline int pte_huge(pte_t pte) { return (pte_val(pte) & __LARGE_PTE) == __LARGE_PTE; } +static inline int pte_huge(pte_t pte) { return pte_val(pte) & _PAGE_PSE; } static inline pte_t pte_rdprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; } static inline pte_t pte_exprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; } @@ -285,7 +285,7 @@ static inline pte_t pte_mkexec(pte_t pte static inline pte_t pte_mkdirty(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; } static inline pte_t pte_mkyoung(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; } static inline pte_t pte_mkwrite(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_RW)); return pte; } -static inline pte_t pte_mkhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | __LARGE_PTE)); return pte; } +static inline pte_t pte_mkhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_PSE)); return pte; } struct vm_area_struct; diff -puN mm/hugetlb.c~fix-i386-x86-64-_page_pse-bit-when-changing-page-protection mm/hugetlb.c --- devel/mm/hugetlb.c~fix-i386-x86-64-_page_pse-bit-when-changing-page-protection 2006-03-16 02:03:54.000000000 -0800 +++ devel-akpm/mm/hugetlb.c 2006-03-16 02:03:54.000000000 -0800 @@ -584,7 +584,7 @@ void hugetlb_change_protection(struct vm continue; if (!pte_none(*ptep)) { pte = huge_ptep_get_and_clear(mm, address, ptep); - pte = pte_modify(pte, newprot); + pte = pte_mkhuge(pte_modify(pte, newprot)); set_huge_pte_at(mm, address, ptep, pte); lazy_mmu_prot_update(pte); } _