Index: linux-2.6.18-mm3/include/asm-ia64/pgtable.h
===================================================================
--- linux-2.6.18-mm3.orig/include/asm-ia64/pgtable.h	2006-10-05 17:07:56.417726941 -0500
+++ linux-2.6.18-mm3/include/asm-ia64/pgtable.h	2006-10-05 17:10:55.782761590 -0500
@@ -19,8 +19,6 @@
 #include
 #include
 
-#define IA64_MAX_PHYS_BITS	50	/* max. number of physical address bits (architected) */
-
 /*
  * First, define the various bits in a PTE.  Note that the PTE format
  * matches the VHPT short format, the firt doubleword of the VHPD long
@@ -230,15 +228,6 @@ ia64_phys_addr_valid (unsigned long addr
 #define set_pte(ptep, pteval)	(*(ptep) = (pteval))
 #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
 
-#define VMALLOC_START		(RGN_BASE(RGN_GATE) + 0x200000000UL)
-#ifdef CONFIG_VIRTUAL_MEM_MAP
-# define VMALLOC_END_INIT	(RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 9)))
-# define VMALLOC_END		vmalloc_end
-  extern unsigned long vmalloc_end;
-#else
-# define VMALLOC_END		(RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 9)))
-#endif
-
 /* fs/proc/kcore.c */
 #define	kc_vaddr_to_offset(v) ((v) - RGN_BASE(RGN_GATE))
 #define	kc_offset_to_vaddr(o) ((o) + RGN_BASE(RGN_GATE))
Index: linux-2.6.18-mm3/mm/memory.c
===================================================================
--- linux-2.6.18-mm3.orig/mm/memory.c	2006-10-05 17:07:56.430423346 -0500
+++ linux-2.6.18-mm3/mm/memory.c	2006-10-05 22:38:08.182900097 -0500
@@ -62,7 +62,7 @@
 
 #include
 
-#if defined(CONFIG_VIRTUAL_MEM_MAP) || !defined(CONFIG_NEED_MULTIPLE_NODES)
+#if !defined(CONFIG_VIRTUAL_MEM_MAP) || !defined(CONFIG_NEED_MULTIPLE_NODES)
 /*
  * The memory map is either directly mapped without holes or a virtual
  * memory map. Then mem_map may have holes that __pfn_valid can check for.
@@ -75,18 +75,22 @@ struct page *mem_map;
 
 EXPORT_SYMBOL(max_mapnr);
 EXPORT_SYMBOL(mem_map);
+#endif
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
 int __pfn_valid (unsigned long pfn)
 {
 	char byte;
+	int result;
 	struct page *page = pfn_to_page(pfn);
 
+	BUILD_BUG_ON(sizeof(struct page) > (1UL << STRUCT_PAGE_ORDER));
+
 	/*
 	 * A pfn is valid if we can read the first byte of the
 	 * page struct it points to.
 	 */
-	int result = __get_user(byte, (char __user *)page) == 0;
+	result = __get_user(byte, (char __user *)page) == 0;
 
 	/*
 	 * If the struct page may cross a page boundary then we also
@@ -110,7 +114,6 @@ int __pfn_valid (unsigned long pfn)
 }
 EXPORT_SYMBOL(__pfn_valid);
 #endif
-#endif
 
 unsigned long num_physpages;
 /*
Index: linux-2.6.18-mm3/arch/ia64/mm/discontig.c
===================================================================
--- linux-2.6.18-mm3.orig/arch/ia64/mm/discontig.c	2006-10-05 17:07:30.000000000 -0500
+++ linux-2.6.18-mm3/arch/ia64/mm/discontig.c	2006-10-05 22:38:08.157507353 -0500
@@ -506,9 +506,6 @@ void __init find_memory(void)
 	initialize_pernode_data();
 
 	max_pfn = max_low_pfn;
-#ifdef CONFIG_VIRTUAL_MEM_MAP
-	max_mapnr = max_low_pfn;
-#endif
 
 	find_initrd();
 }
@@ -691,18 +688,12 @@ void __init paging_init(void)
 	int node;
 	unsigned long max_zone_pfns[MAX_NR_ZONES];
 
-	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
-
 	arch_sparse_init();
 
 	efi_memmap_walk(filter_rsvd_memory, count_node_pages);
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
-	vmalloc_end -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
-		sizeof(struct page));
-	mem_map = (struct page *) vmalloc_end;
 	efi_memmap_walk(create_mem_map_page_table, NULL);
-	printk("Virtual mem_map starts at 0x%p\n", mem_map);
 #endif
 
 	for_each_online_node(node) {
@@ -713,6 +704,7 @@ void __init paging_init(void)
 	}
 
 #ifdef CONFIG_ZONE_DMA
+	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
 	max_zone_pfns[ZONE_DMA] = max_dma;
 #endif
 	max_zone_pfns[ZONE_NORMAL] = max_pfn;
Index: linux-2.6.18-mm3/arch/ia64/mm/init.c
===================================================================
--- linux-2.6.18-mm3.orig/arch/ia64/mm/init.c	2006-10-05 17:07:30.000000000 -0500
+++ linux-2.6.18-mm3/arch/ia64/mm/init.c	2006-10-05 22:38:08.166297149 -0500
@@ -45,11 +45,6 @@
 extern void ia64_tlb_init (void);
 
 unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;
 
-#ifdef CONFIG_VIRTUAL_MEM_MAP
-unsigned long vmalloc_end = VMALLOC_END_INIT;
-EXPORT_SYMBOL(vmalloc_end);
-#endif
-
 struct page *zero_page_memmap_ptr;	/* map entry for zero page */
 EXPORT_SYMBOL(zero_page_memmap_ptr);
Index: linux-2.6.18-mm3/include/asm-ia64/page.h
===================================================================
--- linux-2.6.18-mm3.orig/include/asm-ia64/page.h	2006-10-05 13:29:45.000000000 -0500
+++ linux-2.6.18-mm3/include/asm-ia64/page.h	2006-10-05 22:23:49.805556557 -0500
@@ -25,6 +25,8 @@
 #define RGN_GATE	5	/* Gate page, Kernel text, etc */
 #define RGN_HPAGE	4	/* For Huge TLB pages */
 
+#define IA64_MAX_PHYS_BITS	50
+
 /*
  * PAGE_SHIFT determines the actual kernel page size.
  */
@@ -208,5 +210,18 @@ get_order (unsigned long size)
 	 (((current->personality & READ_IMPLIES_EXEC) != 0)	\
 	  ? VM_EXEC : 0))
 
+/*
+ * STRUCT_PAGE_ORDER is needed to approximate the size of struct page
+ * that is unknown at this point. struct page must be smaller than
+ * 1 << STRUCT_PAGE_ORDER.
+ */
+#define STRUCT_PAGE_ORDER	6
+
+#define VIRTUAL_MEM_MAP		(RGN_BASE(RGN_GATE) + 0x200000000UL)
+#define VIRTUAL_MEM_MAP_SIZE	(1UL << (IA64_MAX_PHYS_BITS - PAGE_SHIFT + STRUCT_PAGE_ORDER))
+
+#define VMALLOC_START		(VIRTUAL_MEM_MAP + VIRTUAL_MEM_MAP_SIZE)
+#define VMALLOC_END		(RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 9)))
+
 # endif /* __KERNEL__ */
 
 #endif /* _ASM_IA64_PAGE_H */
Index: linux-2.6.18-mm3/include/linux/mmzone.h
===================================================================
--- linux-2.6.18-mm3.orig/include/linux/mmzone.h	2006-10-05 17:08:58.569556550 -0500
+++ linux-2.6.18-mm3/include/linux/mmzone.h	2006-10-05 22:38:08.211222774 -0500
@@ -339,26 +339,28 @@ struct node_active_region {
 #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
 
 #if defined(CONFIG_VIRTUAL_MEM_MAP) || defined(CONFIG_FLATMEM)
-extern struct page *mem_map;
-extern unsigned long max_mapnr;
 #ifndef ARCH_PFN_OFFSET
 #define ARCH_PFN_OFFSET		(0UL)
 #endif
 
-#define pfn_to_page(pfn)	(mem_map + ((pfn) - ARCH_PFN_OFFSET))
-#define page_to_pfn(page)	((unsigned long)((page) - mem_map) + \
-				 ARCH_PFN_OFFSET)
-
 #ifdef CONFIG_VIRTUAL_MEM_MAP
 #ifndef VMEMMAP_PAGE_SIZE
 #define VMEMMAP_PAGE_SIZE	PAGE_SIZE
+#define mem_map		((struct page *)VIRTUAL_MEM_MAP)
+#define max_mapnr	(VIRTUAL_MEM_MAP_SIZE / sizeof(struct page *) - ARCH_PFN_OFFSET)
 #endif
 extern int __pfn_valid(unsigned long pfn);
 #else
+extern unsigned long max_mapnr;
+extern struct page *mem_map;
 #define __pfn_valid(pfn) 1
 #endif
 
+#define pfn_to_page(pfn)	(mem_map + ((pfn) - ARCH_PFN_OFFSET))
+#define page_to_pfn(page)	((unsigned long)((page) - mem_map) + \
+				 ARCH_PFN_OFFSET)
+
 static inline int pfn_valid(unsigned long pfn)
 {
 	return pfn - ARCH_PFN_OFFSET < max_mapnr && __pfn_valid(pfn);
 }
@@ -428,7 +430,7 @@ int zone_watermark_ok(struct zone *z, in
 extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
 				     unsigned long size);
 
-#ifdef CONFIG_HAVE_MEMORY_PRESENT
+#if defined(CONFIG_HAVE_MEMORY_PRESENT) || defined(CONFIG_VIRTUAL_MEM_MAP)
 void memory_present(int nid, unsigned long start, unsigned long end);
 #else
 static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
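
For reference, and not part of the patch itself: the sketch below works through the address-space arithmetic implied by the new asm-ia64/page.h constants. It is a user-space C program; the 16KB kernel page size (PAGE_SHIFT = 14) and the usual ia64 region layout (RGN_BASE(r) = r << 61, RGN_GATE = 5) are assumptions here, since both depend on the kernel configuration.

/*
 * Minimal user-space sketch, not kernel code.  Assumes a 64-bit
 * unsigned long, PAGE_SHIFT = 14 (16KB pages) and RGN_BASE(r) = r << 61;
 * both are configuration-dependent assumptions.
 */
#include <stdio.h>

#define RGN_SHIFT		61
#define RGN_BASE(r)		((unsigned long)(r) << RGN_SHIFT)
#define RGN_GATE		5

#define PAGE_SHIFT		14	/* assumed: 16KB pages */
#define IA64_MAX_PHYS_BITS	50
#define STRUCT_PAGE_ORDER	6	/* sizeof(struct page) <= 64 bytes */

#define VIRTUAL_MEM_MAP		(RGN_BASE(RGN_GATE) + 0x200000000UL)
#define VIRTUAL_MEM_MAP_SIZE	(1UL << (IA64_MAX_PHYS_BITS - PAGE_SHIFT + STRUCT_PAGE_ORDER))
#define VMALLOC_START		(VIRTUAL_MEM_MAP + VIRTUAL_MEM_MAP_SIZE)
#define VMALLOC_END		(RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 9)))

int main(void)
{
	/* 2^(50-14) possible pfns, with 2^6 bytes reserved per struct page. */
	printf("virtual mem_map window: 0x%016lx - 0x%016lx (%lu GB)\n",
	       VIRTUAL_MEM_MAP, VIRTUAL_MEM_MAP + VIRTUAL_MEM_MAP_SIZE - 1,
	       VIRTUAL_MEM_MAP_SIZE >> 30);
	printf("vmalloc area:           0x%016lx - 0x%016lx\n",
	       VMALLOC_START, VMALLOC_END);
	return 0;
}

Under those assumptions the fixed mem_map window costs 4TB out of the roughly 128TB available between VMALLOC_START and VMALLOC_END in region 5, and VMALLOC_START simply begins above it; that fixed placement is what lets the patch drop vmalloc_end and the boot-time mem_map carving in paging_init().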