Index: linux-2.6.18-mm3/arch/ia64/mm/init.c
===================================================================
--- linux-2.6.18-mm3.orig/arch/ia64/mm/init.c	2006-10-07 20:02:57.100209838 -0700
+++ linux-2.6.18-mm3/arch/ia64/mm/init.c	2006-10-07 20:48:08.908456268 -0700
@@ -402,7 +402,7 @@ ia64_mmu_init (void *my_cpu_data)
 	ia64_tlb_init();
 
 #ifdef CONFIG_HUGETLB_PAGE
-	ia64_set_rr(HPAGE_REGION_BASE, HPAGE_SHIFT << 2);
+	ia64_set_rr(HPAGE_REGION_BASE, HPAGE_SHIFT << 2 | VHPT_ENABLE_BIT);
 	ia64_srlz_d();
 #endif
 }
@@ -469,6 +469,11 @@ retry_pte:
 #define VMEM_MAP_PAGE_SIZE	PAGE_SIZE
 #endif
 
+static void * __init alloc_vmem_page(int node, unsigned long size)
+{
+	return __alloc_bootmem_node(NODE_DATA(node), size, size, __pa(MAX_DMA_ADDRESS));
+}
+
 int __init
 create_mem_map_page_table (u64 start, u64 end, void *arg)
 {
@@ -488,28 +493,32 @@ create_mem_map_page_table (u64 start, u6
 	node = paddr_to_nid(__pa(start));
 
 	for (address = start_page; address < end_page; address += VMEM_MAP_PAGE_SIZE) {
-		pgd = pgd_offset_k(address);
+#ifdef CONFIG_VIRTUAL_MEM_MAP_HUGE
+		unsigned long taddr = htlbpage_to_page(address);
+		/* Keep region so that lookups can properly occur */
+		pgd = pgd_offset(&init_mm, taddr);
+#else
+		unsigned long taddr = address;
+		pgd = pgd_offset_k(taddr);
+#endif
 		if (pgd_none(*pgd))
-			pgd_populate(&init_mm, pgd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
-		pud = pud_offset(pgd, address);
+			pgd_populate(&init_mm, pgd, alloc_vmem_page(node, PAGE_SIZE));
+		pud = pud_offset(pgd, taddr);
 
 		if (pud_none(*pud))
-			pud_populate(&init_mm, pud, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
-		pmd = pmd_offset(pud, address);
+			pud_populate(&init_mm, pud, alloc_vmem_page(node, PAGE_SIZE));
+		pmd = pmd_offset(pud, taddr);
 
 		if (pmd_none(*pmd))
-			pmd_populate_kernel(&init_mm, pmd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
-		pte = pte_offset_kernel(pmd, address);
+			pmd_populate_kernel(&init_mm, pmd, alloc_vmem_page(node, PAGE_SIZE));
+		pte = pte_offset_kernel(pmd, taddr);
 
 		if (pte_none(*pte)) {
 			unsigned long addr;
 
-			addr = __pa(__alloc_bootmem_node(NODE_DATA(node),
-					VMEM_MAP_PAGE_SIZE,
-					VMEM_MAP_PAGE_SIZE,
-					__pa(MAX_DMA_ADDRESS)));
+			addr = __pa(alloc_vmem_page(node, VMEM_MAP_PAGE_SIZE));
 			set_pte(pte, mk_pte_phys(addr, PAGE_KERNEL));
-			printk(KERN_CRIT "Virtual mmap range %lx-%lx page @%lx:%lx pte=%lx size=%lu node=%d\n", start, end, address, addr, pte_val(*pte), VMEM_MAP_PAGE_SIZE, node);
+			printk(KERN_CRIT "Virtual mmap range %lx-%lx page @%lx:%lx:%lx pte=%lx size=%lu node=%d\n", start, end, address, taddr, addr, pte_val(*pte), VMEM_MAP_PAGE_SIZE, node);
 		} else
 			printk(KERN_CRIT "Virtual mmap %lx-%lx @%lx node %d already present.\n",
Index: linux-2.6.18-mm3/arch/ia64/mm/fault.c
===================================================================
--- linux-2.6.18-mm3.orig/arch/ia64/mm/fault.c	2006-10-07 20:02:57.102162843 -0700
+++ linux-2.6.18-mm3/arch/ia64/mm/fault.c	2006-10-07 20:18:15.975156359 -0700
@@ -65,6 +65,12 @@ mapped_kernel_page_is_present (unsigned
 	pmd_t *pmd;
 	pte_t *ptep, pte;
 
+#ifdef CONFIG_VIRTUAL_MEM_MAP_HUGE
+	if (REGION_NUMBER(address) == RGN_HPAGE && address >= VIRTUAL_MEM_MAP) {
+		address = htlbpage_to_page(address);
+		pgd = pgd_offset(&init_mm, address);
+	} else
+#endif
 	pgd = pgd_offset_k(address);
 	if (pgd_none(*pgd) || pgd_bad(*pgd))
 		return 0;
@@ -112,7 +118,7 @@ ia64_do_page_fault (unsigned long addres
 	 * code.
 	 */
-	if ((REGION_NUMBER(address) == VIRTUAL_MEM_MAP_REGION) && !user_mode(regs))
+	if (REGION_NUMBER(address) == RGN_GATE && !user_mode(regs))
 		goto bad_area_no_up;
 #endif
@@ -257,8 +263,10 @@ ia64_do_page_fault (unsigned long addres
 	 * translation, which fixed the problem.  So, we check to see if the translation is
 	 * valid, and return if it is.
 	 */
-	if (REGION_NUMBER(address) == 5 && mapped_kernel_page_is_present(address))
-		return;
+	if ((REGION_NUMBER(address) == RGN_GATE ||
+	     REGION_NUMBER(address) == VIRTUAL_MEM_MAP_REGION) &&
+	    mapped_kernel_page_is_present(address))
+		return;
 
 	if (ia64_done_with_exception(regs))
 		return;
Index: linux-2.6.18-mm3/include/asm-ia64/page.h
===================================================================
--- linux-2.6.18-mm3.orig/include/asm-ia64/page.h	2006-10-07 20:02:57.101186341 -0700
+++ linux-2.6.18-mm3/include/asm-ia64/page.h	2006-10-07 21:00:53.322220788 -0700
@@ -54,7 +54,7 @@
 # define HPAGE_REGION_BASE	RGN_BASE(RGN_HPAGE)
 # define HPAGE_SHIFT	hpage_shift
 #ifdef CONFIG_VIRTUAL_MEM_MAP_HUGE
-# define HPAGE_SHIFT_DEFAULT	24	/* Reduce memory overhead for virtual mem_map */
+# define HPAGE_SHIFT_DEFAULT	20	/* Reduce memory overhead for virtual mem_map */
 #else
 # define HPAGE_SHIFT_DEFAULT	28	/* check ia64 SDM for architecture supported size */
 #endif
Index: linux-2.6.18-mm3/arch/ia64/Kconfig
===================================================================
--- linux-2.6.18-mm3.orig/arch/ia64/Kconfig	2006-10-07 20:02:57.102162843 -0700
+++ linux-2.6.18-mm3/arch/ia64/Kconfig	2006-10-07 20:06:11.741520292 -0700
@@ -383,6 +383,7 @@ config VIRTUAL_MEM_MAP
 
 config VIRTUAL_MEM_MAP_HUGE
 	depends on VIRTUAL_MEM_MAP
+	bool "Virtual mem map uses Huge pages"
 	help
 	  By default we map the virtual memory map using the default page size
 	  and take a part of VMALLOC space for the map. This option
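
The hunks above hinge on htlbpage_to_page(): both create_mem_map_page_table() and
mapped_kernel_page_is_present() use it to fold an address in the huge-page region
down to base-page granularity while keeping the region number in the top three
bits, which is what the "Keep region so that lookups can properly occur" comment
refers to. Below is a minimal standalone sketch of that folding, assuming the
usual ia64 region layout (RGN_SHIFT of 61, RGN_HPAGE of 4, mirroring
include/asm-ia64/page.h), a 64-bit unsigned long, and, purely for illustration,
this patch's HPAGE_SHIFT_DEFAULT of 20 against a 14-bit (16 KB) base page shift:

	#include <stdio.h>

	/* Assumed ia64 region layout: the top three bits of a virtual
	 * address select one of eight 2^61-byte regions. */
	#define RGN_SHIFT		61
	#define RGN_HPAGE		4UL	/* huge-page region */
	#define REGION_NUMBER(x)	((unsigned long)(x) >> RGN_SHIFT)
	#define REGION_OFFSET(x)	((unsigned long)(x) & ((1UL << RGN_SHIFT) - 1))

	/* Illustrative sizes: 1 MB huge pages (shift 20) over 16 KB base
	 * pages (shift 14). */
	#define HPAGE_SHIFT		20
	#define PAGE_SHIFT		14

	/* Fold a huge-page-region address to base-page granularity while
	 * preserving the region number, so a walk of init_mm's page tables
	 * resolves in the same region. */
	#define htlbpage_to_page(x)					\
		((REGION_NUMBER(x) << RGN_SHIFT) |			\
		 (REGION_OFFSET(x) >> (HPAGE_SHIFT - PAGE_SHIFT)))

	int main(void)
	{
		unsigned long addr = (RGN_HPAGE << RGN_SHIFT) | 0x12345678UL;

		printf("region %lu: %#lx -> %#lx\n",
		       REGION_NUMBER(addr), addr, htlbpage_to_page(addr));
		return 0;
	}

Lowering HPAGE_SHIFT_DEFAULT from 24 to 20 in the page.h hunk fits the same
theme: with the mem_map mapped at huge-page granularity, a smaller huge page
presumably wastes less memory per mapping, per the "Reduce memory overhead for
virtual mem_map" comment.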