Index: linux-2.6.18-mm3/include/asm-ia64/page.h
===================================================================
--- linux-2.6.18-mm3.orig/include/asm-ia64/page.h	2006-10-08 18:30:12.454578641 -0700
+++ linux-2.6.18-mm3/include/asm-ia64/page.h	2006-10-09 02:28:01.420453902 -0700
@@ -53,11 +53,7 @@
 #ifdef CONFIG_HUGETLB_PAGE
 # define HPAGE_REGION_BASE	RGN_BASE(RGN_HPAGE)
 # define HPAGE_SHIFT	hpage_shift
-#ifdef CONFIG_VIRTUAL_MEM_MAP_HUGE
-# define HPAGE_SHIFT_DEFAULT	20	/* Reduce memory overhead for virtual mem_map */
-#else
 # define HPAGE_SHIFT_DEFAULT	28	/* check ia64 SDM for architecture supported size */
-#endif
 # define HPAGE_SIZE	(__IA64_UL_CONST(1) << HPAGE_SHIFT)
 # define HPAGE_MASK	(~(HPAGE_SIZE - 1))
 
@@ -115,14 +111,16 @@ do { \
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP_HUGE
 /*
- * Use huge pages for the virtual memory map. Since we have separate
- * huge page region we can use the whole range and leave VMALLOC
- * untouched.
+ * Use huge pages for the virtual memory map. We place the virtual
+ * addresses beyond RGN_MAP_LIMIT in order to be able to distinguish
+ * between user space and kernel access to the huge area.
  */
 #define VIRTUAL_MEM_MAP_REGION	RGN_HPAGE
-#define VIRTUAL_MEM_MAP	RGN_BASE(VIRTUAL_MEM_MAP_REGION)
+#define VIRTUAL_MEM_MAP	(RGN_BASE(VIRTUAL_MEM_MAP_REGION) + RGN_MAP_LIMIT)
 #define VMALLOC_START	(RGN_BASE(RGN_GATE) + 0x200000000UL)
-
+#define VIRTUAL_MEM_MAP_PAGE_SHIFT	20
+#define VIRTUAL_MEM_MAP_PAGE_SIZE	(1UL << 20)
+#define VIRTMM_PAGE_TO_PAGE(x)	(((x) & (RGN_MAP_LIMIT-1)) >> (VIRTUAL_MEM_MAP_PAGE_SHIFT - PAGE_SHIFT))
 #else
 /*
  * Place the virtual memory map in the VMALLOC area reducing the
@@ -130,7 +128,11 @@ do { \
  */
 #define VIRTUAL_MEM_MAP_REGION	RGN_GATE
 #define VIRTUAL_MEM_MAP	(RGN_BASE(VIRTUAL_MEM_MAP_REGION) + 0x200000000UL)
+#define VIRTUAL_MEM_MAP_PAGE_SHIFT	PAGE_SHIFT
+#define VIRTUAL_MEM_MAP_PAGE_SIZE	PAGE_SIZE
+#define VIRTMM_PAGE_TO_PAGE(x)	(x)
 #define VMALLOC_START	(VIRTUAL_MEM_MAP + VIRTUAL_MEM_MAP_SIZE)
+
 #endif
 #define VMALLOC_END	(RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 9)))
 
Index: linux-2.6.18-mm3/arch/ia64/kernel/ivt.S
===================================================================
--- linux-2.6.18-mm3.orig/arch/ia64/kernel/ivt.S	2006-10-09 01:30:14.019055428 -0700
+++ linux-2.6.18-mm3/arch/ia64/kernel/ivt.S	2006-10-09 02:28:01.422406907 -0700
@@ -427,26 +427,27 @@ ENTRY(nested_dtlb_miss)
 	 */
 	rsm psr.dt				// switch to using physical data addressing
 	mov r19=IA64_KR(PT_BASE)		// get the page table base address
-	shl r21=r16,3				// shift bit 60 into sign bit
 	mov r18=cr.itir
 	shr.u r17=r16,61			// get the region number into r17
+	tbit.nz p9,p0=r16,IA64_RGN_MAP_SHIFT	// Check for virtual MMAP flag set
+(p9)	br.cond.spnt memmap			// Do potentially necessary mmap processing
+	srlz.d
 	;;
+nested_dtlb_cont:
+	shl r21=r16,3				// shift bit 60 into sign bit
 	cmp.eq p6,p7=5,r17			// kernel region?
-	cmp.eq p9,p0=4,r17			// select other pgd for vmmap huge region?
 	extr.u r18=r18,2,6			// get the faulting page size
 	;;
-(p7)	cmp.eq p6,p7=4,r17			// huge region switches processing
+	LOAD_PHYSICAL(p6, r19, swapper_pg_dir)	// region 5 is rooted at swapper_pg_dir
+	;;
+nested_dtlb:
 	add r22=-PAGE_SHIFT,r18			// page order
 	add r18=PGDIR_SHIFT-PAGE_SHIFT+3,r18	// Calculate shift to get to nr of pgd
 	;;
 	shr.u r22=r16,r22
 	shr.u r18=r21,r18
 (p7)	dep r17=r17,r19,(PAGE_SHIFT-3),3	// put region number bits in place
-
-	srlz.d
-	LOAD_PHYSICAL(p6, r19, swapper_pg_dir)	// region 5 is rooted at swapper_pg_dir
 	;;
-	LOAD_PHYSICAL(p9, r19, memmap_pg_dir)	// region 4 may also be rooted at memmap_pg_dir
 	.pred.rel "mutex", p6, p7
 (p6)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
 (p7)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
@@ -483,6 +484,43 @@ ENTRY(nested_dtlb_miss)
 	br.sptk.many b0				// return to continuation point
END(nested_dtlb_miss)
 
+	/*
+	 * Virtual memory map helper.
+	 *
+	 * Check if we are truly in a virtual memory map and then
+	 * set the corresponding registers to switch the MMU to use the desired
+	 * page size and continue in the nested_dtlb handler.
+	 *
+	 * Entry:
+	 *	r17 region number
+	 *	r18 itir
+	 *	p9 set
+	 *	r19 pgdir of process
+	 *
+	 * Exit:
+	 *	All registers preserved
+	 *	p9 clear
+	 *	r19 pgdir of process or memmap_pg_dir
+	 *
+	 * Clobbered:
+	 *	r22
+	 */
+ENTRY(memmap)
+	cmp.ne p9,p0=4,r17			// This only works on the hugetlb region
+	mov r22=r18
+(p9)	br.cond.spnt nested_dtlb_cont
+	;;
+	mov r18=IA64_VIRTUAL_MEM_MAP_PAGE_SHIFT
+	dep r16=0,r16,IA64_RGN_MAP_SHIFT,1	// Clear VIRTUAL_MEM_MAP flag
+	cmp.eq p6,p7=r0,r0			// Set proper flags for kernel map processing
+	;;
+	dep r22=r18,r22,2,6			// Fix up the page size
+	LOAD_PHYSICAL(p0, r19, memmap_pg_dir)	// Get the right page table
+	;;
+	mov cr.itir=r22				// Set it up for the next itc
+	br.sptk.many nested_dtlb
+END(memmap)
+
 	.org ia64_ivt+0x1800
 /////////////////////////////////////////////////////////////////////////////////////////
 // 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24)
@@ -1183,7 +1221,6 @@ ENTRY(dispatch_to_fault_handler)
 	mov rp=r14
 	br.call.sptk.many b6=ia64_fault
 END(dispatch_to_fault_handler)
-
 //
 // --- End of long entries, Beginning of short entries
 //
Index: linux-2.6.18-mm3/arch/ia64/kernel/asm-offsets.c
===================================================================
--- linux-2.6.18-mm3.orig/arch/ia64/kernel/asm-offsets.c	2006-10-06 18:11:41.094742499 -0700
+++ linux-2.6.18-mm3/arch/ia64/kernel/asm-offsets.c	2006-10-09 02:28:01.422406907 -0700
@@ -268,4 +268,8 @@ void foo(void)
 	DEFINE(IA64_TIME_SOURCE_MMIO64, TIME_SOURCE_MMIO64);
 	DEFINE(IA64_TIME_SOURCE_MMIO32, TIME_SOURCE_MMIO32);
 	DEFINE(IA64_TIMESPEC_TV_NSEC_OFFSET, offsetof (struct timespec, tv_nsec));
+	BLANK();
+
+	DEFINE(IA64_RGN_MAP_SHIFT, RGN_MAP_SHIFT);
+	DEFINE(IA64_VIRTUAL_MEM_MAP_PAGE_SHIFT, VIRTUAL_MEM_MAP_PAGE_SHIFT);
 }
Index: linux-2.6.18-mm3/arch/ia64/mm/init.c
===================================================================
--- linux-2.6.18-mm3.orig/arch/ia64/mm/init.c	2006-10-09 01:30:14.022961437 -0700
+++ linux-2.6.18-mm3/arch/ia64/mm/init.c	2006-10-09 02:29:22.734774195 -0700
@@ -463,12 +463,6 @@ retry_pte:
 	return hole_next_pfn - pgdat->node_start_pfn;
 }
 
-#ifdef CONFIG_VIRTUAL_MEM_MAP_HUGE
-#define VMEM_MAP_PAGE_SIZE	(1UL << hpage_shift)
-#else
-#define VMEM_MAP_PAGE_SIZE	PAGE_SIZE
-#endif
-
 static void * __init alloc_vmem_page(int node, unsigned long size)
 {
 	return __alloc_bootmem_node(NODE_DATA(node), size, size, __pa(MAX_DMA_ADDRESS));
@@ -488,15 +482,15 @@ create_mem_map_page_table (u64 start, u6
 	map_start = virt_to_page(start);
 	map_end   = virt_to_page(end);
 
-	start_page = (unsigned long) map_start & ~(VMEM_MAP_PAGE_SIZE - 1);
-	end_page = ALIGN((unsigned long) map_end, VMEM_MAP_PAGE_SIZE);
+	start_page = (unsigned long) map_start & ~(VIRTUAL_MEM_MAP_PAGE_SIZE - 1);
+	end_page = ALIGN((unsigned long) map_end, VIRTUAL_MEM_MAP_PAGE_SIZE);
 	node = paddr_to_nid(__pa(start));
 
-	for (address = start_page; address < end_page; address += VMEM_MAP_PAGE_SIZE) {
-#ifdef CONFIG_VIRTUAL_MEM_MAP_HUGE
-		unsigned long taddr = htlbpage_to_page(address);
-		/* Keep region so that lookups can properly occur */
-		pgd = pgd_offset_memmap(taddr);
+	for (address = start_page; address < end_page; address += VIRTUAL_MEM_MAP_PAGE_SIZE) {
+#ifdef CONFIG_VIRTUAL_MEM_MAP_HUGE
+		unsigned long taddr = VIRTMM_PAGE_TO_PAGE(address);
+
+		pgd = pgd_offset_memmap(taddr);
 #else
 		unsigned long taddr = address;
 		pgd = pgd_offset_k(taddr);
@@ -516,9 +510,9 @@ create_mem_map_page_table (u64 start, u6
 		if (pte_none(*pte)) {
 			unsigned long addr;
 
-			addr = __pa(alloc_vmem_page(node, VMEM_MAP_PAGE_SIZE));
+			addr = __pa(alloc_vmem_page(node, VIRTUAL_MEM_MAP_PAGE_SIZE));
 			set_pte(pte, mk_pte_phys(addr, PAGE_KERNEL));
-			printk(KERN_CRIT "Virtual mmap range %lx-%lx page @%lx:%lx:%lx pte=%lx size=%lu node=%d\n", start, end, address, taddr, addr, pte_val(*pte), VMEM_MAP_PAGE_SIZE, node);
+			printk(KERN_CRIT "Virtual mmap range %lx-%lx page @%lx:%lx:%lx pte=%lx size=%lu node=%d\n", start, end, address, taddr, addr, pte_val(*pte), VIRTUAL_MEM_MAP_PAGE_SIZE, node);
 		} else
 			printk(KERN_CRIT "Virtual mmap %lx-%lx @%lx node %d already present.\n",
 				start, end, address, node);
Index: linux-2.6.18-mm3/arch/ia64/mm/fault.c
===================================================================
--- linux-2.6.18-mm3.orig/arch/ia64/mm/fault.c	2006-10-09 01:30:14.022961437 -0700
+++ linux-2.6.18-mm3/arch/ia64/mm/fault.c	2006-10-09 02:30:45.848787734 -0700
@@ -66,12 +66,12 @@ mapped_kernel_page_is_present (unsigned
 	pte_t *ptep, pte;
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP_HUGE
-	if (REGION_NUMBER(address) == RGN_HPAGE) {
-		address = htlbpage_to_page(address);
+	if (REGION_NUMBER(address) == RGN_HPAGE && (REGION_OFFSET(address) >= RGN_MAP_LIMIT)) {
+		address = VIRTMM_PAGE_TO_PAGE(address);
 		pgd = pgd_offset_memmap(address);
 	} else
 #endif
-	pgd = pgd_offset_k(address);
+		pgd = pgd_offset_k(address);
 
 	if (pgd_none(*pgd) || pgd_bad(*pgd))
 		return 0;
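
For reference, a minimal user-space sketch (not part of the patch) of the CONFIG_VIRTUAL_MEM_MAP_HUGE address arithmetic added to page.h above. The concrete values used here (PAGE_SHIFT of 14, RGN_MAP_SHIFT of 44, a power-of-two RGN_MAP_LIMIT, region number in bits 61-63) are illustrative assumptions; the kernel takes the real definitions from asm-ia64/page.h and asm-ia64/pgtable.h.

#include <stdio.h>

#define PAGE_SHIFT			14			/* assumed: 16KB base pages */
#define RGN_MAP_SHIFT			44			/* assumed value */
#define RGN_MAP_LIMIT			(1ULL << RGN_MAP_SHIFT)	/* simplified to a power of two */
#define RGN_HPAGE			4ULL
#define RGN_BASE(r)			((r) << 61)
#define VIRTUAL_MEM_MAP			(RGN_BASE(RGN_HPAGE) + RGN_MAP_LIMIT)
#define VIRTUAL_MEM_MAP_PAGE_SHIFT	20			/* 1MB mappings for the mem_map */

/*
 * Same expression as in the patch: strip the region and flag bits, then
 * rescale the offset from 1MB mapping granularity down to base-page
 * granularity so the normal page-table walkers can index memmap_pg_dir.
 */
#define VIRTMM_PAGE_TO_PAGE(x) \
	(((x) & (RGN_MAP_LIMIT - 1)) >> (VIRTUAL_MEM_MAP_PAGE_SHIFT - PAGE_SHIFT))

int main(void)
{
	unsigned long long addr = VIRTUAL_MEM_MAP + 0x12345678ULL;

	/* nested_dtlb_miss tests this bit (tbit.nz on RGN_MAP_SHIFT) to tell
	 * a virtual mem_map reference from a hugetlb user access. */
	int is_memmap = (addr >> RGN_MAP_SHIFT) & 1;

	printf("addr=%#llx memmap=%d walk address=%#llx\n",
	       addr, is_memmap, VIRTMM_PAGE_TO_PAGE(addr));
	return 0;
}

Placing the map at RGN_BASE(RGN_HPAGE) + RGN_MAP_LIMIT means a single bit of the offset distinguishes a kernel reference into the virtual mem_map from a hugetlb user access, which is what the tbit.nz test added to nested_dtlb_miss relies on before redirecting the walk to memmap_pg_dir with the smaller 1MB page size.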