===== arch/ia64/mm/hugetlbpage.c 1.18 vs edited =====
--- 1.18/arch/ia64/mm/hugetlbpage.c	Thu Feb 26 12:18:51 2004
+++ edited/arch/ia64/mm/hugetlbpage.c	Fri Apr  2 11:41:06 2004
@@ -162,8 +162,10 @@
 			goto nomem;
 		src_pte = huge_pte_offset(src, addr);
 		entry = *src_pte;
-		ptepage = pte_page(entry);
-		get_page(ptepage);
+		if (!pte_none(entry)) {
+			ptepage = pte_page(entry);
+			get_page(ptepage);
+		}
 		set_pte(dst_pte, entry);
 		dst->rss += (HPAGE_SIZE / PAGE_SIZE);
 		addr += HPAGE_SIZE;
@@ -187,6 +189,12 @@
 	do {
 		pstart = start & HPAGE_MASK;
 		ptep = huge_pte_offset(mm, start);
+
+		if (!ptep || pte_none(*ptep)) {
+			hugetlb_fault(mm, vma, 0, start);
+			ptep = huge_pte_offset(mm, start);
+		}
+
 		pte = *ptep;

 back1:
@@ -228,6 +236,12 @@
 		pte_t *ptep;

 		ptep = huge_pte_offset(mm, addr);
+
+		if (!ptep || pte_none(*ptep)) {
+			hugetlb_fault(mm, vma, 0, addr);
+			ptep = huge_pte_offset(mm, addr);
+		}
+
 		page = pte_page(*ptep);
 		page += ((addr & ~HPAGE_MASK) >> PAGE_SHIFT);
 		get_page(page);
@@ -238,7 +252,8 @@
 	return 0;
 }
 struct page *
-follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write)
+follow_huge_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
+		unsigned long address, pmd_t *pmd, int write)
 {
 	return NULL;
 }
@@ -349,6 +364,8 @@

 int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
 {
+	return 0;
+#if 0
 	struct mm_struct *mm = current->mm;
 	unsigned long addr;
 	int ret = 0;
@@ -397,6 +414,7 @@
 out:
 	spin_unlock(&mm->page_table_lock);
 	return ret;
+#endif
 }

 unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
@@ -590,6 +608,48 @@
 	if (size > (htlbpagemem << HPAGE_SHIFT))
 		return 0;
 	return 1;
+}
+
+int arch_hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+		int write_access, unsigned long address)
+{
+	struct file *file = vma->vm_file;
+	struct address_space *mapping = file->f_dentry->d_inode->i_mapping;
+	struct page *page;
+	unsigned long idx;
+	pte_t *pte;
+	int ret = VM_FAULT_MINOR;
+
+	BUG_ON(vma->vm_start & ~HPAGE_MASK);
+	BUG_ON(vma->vm_end & ~HPAGE_MASK);
+
+	spin_lock(&mm->page_table_lock);
+
+	idx = ((address - vma->vm_start) >> HPAGE_SHIFT)
+		+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
+	page = find_get_page(mapping, idx);
+
+	if (!page) {
+		page = alloc_hugetlb_page();
+		if (!page) {
+			ret = VM_FAULT_SIGBUS;
+			goto out;
+		}
+		ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
+		unlock_page(page);
+		if (ret) {
+			free_huge_page(page);
+			ret = VM_FAULT_SIGBUS;
+			goto out;
+		}
+	}
+	pte = huge_pte_alloc(mm, address);
+	set_huge_pte(mm, vma, page, pte, vma->vm_flags & VM_WRITE);
+	flush_tlb_range(vma, address, address + HPAGE_SIZE);
+	update_mmu_cache(vma, address, *pte);
+ out:
+	spin_unlock(&mm->page_table_lock);
+	return ret;
+}

 static struct page *hugetlb_nopage(struct vm_area_struct * area, unsigned long address, int *unused)
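
The follow_hugetlb_page() and follow_huge_addr() hunks above call hugetlb_fault(), which this file never defines; presumably it is a generic entry point supplied elsewhere in the patch series that lands in the arch_hugetlb_fault() handler added at the bottom. A minimal sketch of what such a wrapper might look like, assuming exactly the arch_hugetlb_fault() signature from this patch (the wrapper itself is hypothetical, not part of the diff):

	/*
	 * Hypothetical generic dispatcher, for illustration only: takes a
	 * fault on a hugetlb VMA and hands it to the per-arch handler.
	 * Propagates the usual VM_FAULT_* codes (VM_FAULT_MINOR on
	 * success, VM_FAULT_SIGBUS when no huge page can be provided).
	 */
	int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			  int write_access, unsigned long address)
	{
		BUG_ON(!is_vm_hugetlb_page(vma));
		return arch_hugetlb_fault(mm, vma, write_access, address);
	}

Note that both call sites in the diff invoke hugetlb_fault() with write_access == 0 and ignore its return value before re-reading the PTE, so a failed fault still leaves ptep NULL or pte_none() for the surrounding code to cope with.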