From: Christoph Lameter
Subject: [HUGE] Support VM_PFNMAP

Support VM_PFNMAP in hugetlbfs. This is useful for drivers that want to
map large contiguous regions of memory.

Signed-off-by: Christoph Lameter

Index: linux-2.6/mm/hugetlb.c
===================================================================
--- linux-2.6.orig/mm/hugetlb.c	2008-04-10 13:06:32.000000000 -0700
+++ linux-2.6/mm/hugetlb.c	2008-04-10 13:06:58.000000000 -0700
@@ -811,6 +811,9 @@
 		if (pte_none(pte))
 			continue;
 
+		if (vma->vm_flags & VM_PFNMAP)
+			continue;
+
 		page = pte_page(pte);
 		if (pte_dirty(pte))
 			set_page_dirty(page);
@@ -965,6 +968,65 @@
 		goto out;
 }
 
+static int hugetlb_no_pfn(struct mm_struct *mm, struct vm_area_struct *vma,
+	unsigned long address, pte_t *ptep, int write_access)
+{
+	unsigned long pfn;
+
+	BUG_ON(!(vma->vm_flags & VM_PFNMAP));
+
+	pfn = vma->vm_ops->nopfn(vma, address & HPAGE_MASK);
+	if (unlikely(pfn == NOPFN_OOM))
+		return VM_FAULT_OOM;
+	else if (unlikely(pfn == NOPFN_SIGBUS))
+		return VM_FAULT_SIGBUS;
+	else if (unlikely(pfn == NOPFN_REFAULT))
+		return 0;
+
+	spin_lock(&mm->page_table_lock);
+	if (pte_none(*ptep)) {
+		pte_t new_pte;
+
+		new_pte = make_huge_pte(vma, pfn, ((vma->vm_flags & VM_WRITE)
+				&& (vma->vm_flags & VM_SHARED)));
+		set_huge_pte_at(mm, address, ptep, new_pte);
+	}
+	spin_unlock(&mm->page_table_lock);
+	return 0;
+}
+
+/**
+ * huge_insert_pfn - insert single huge pfn into user vma
+ * @vma: user vma to map to
+ * @addr: target user address of this page
+ * @pfn: source kernel pfn (must be properly aligned for a huge page)
+ */
+int huge_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
+	unsigned long pfn)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	pte_t *ptep;
+
+	BUG_ON(!(vma->vm_flags & VM_PFNMAP));
+	BUG_ON(addr & ~HPAGE_MASK);
+
+	ptep = huge_pte_alloc(mm, addr);
+	if (!ptep)
+		return VM_FAULT_OOM;
+
+	spin_lock(&mm->page_table_lock);
+	if (pte_none(*ptep)) {
+		pte_t new_pte;
+
+		new_pte = make_huge_pte(vma, pfn, ((vma->vm_flags & VM_WRITE)
+				&& (vma->vm_flags & VM_SHARED)));
+		set_huge_pte_at(mm, addr, ptep, new_pte);
+	}
+	spin_unlock(&mm->page_table_lock);
+	return 0;
+}
+EXPORT_SYMBOL(huge_insert_pfn);
+
 int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 			unsigned long address, int write_access)
 {
@@ -985,7 +1047,13 @@
 	mutex_lock(&hugetlb_instantiation_mutex);
 	entry = *ptep;
 	if (pte_none(entry)) {
-		ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
+		if (vma->vm_ops->nopfn)
+			ret = hugetlb_no_pfn(mm, vma, address, ptep,
+							write_access);
+		else
+			ret = hugetlb_no_page(mm, vma, address, ptep,
+							write_access);
+
 		mutex_unlock(&hugetlb_instantiation_mutex);
 		return ret;
 	}
Index: linux-2.6/include/linux/hugetlb.h
===================================================================
--- linux-2.6.orig/include/linux/hugetlb.h	2008-04-10 13:05:40.000000000 -0700
+++ linux-2.6/include/linux/hugetlb.h	2008-04-10 13:06:58.000000000 -0700
@@ -16,6 +16,9 @@
 	return vma->vm_flags & VM_HUGETLB;
 }
 
+int huge_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
+	unsigned long pfn);
+
 int hugetlb_sysctl_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *);
 int hugetlb_overcommit_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *);
 int hugetlb_treat_movable_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *);