ZONE_DMA less operation for IA64 SGI platform Disable ZONE_DMA for SGI SN2. All memory is addressable by all devices and we do not need any special memory pools. Also get rid of one bad use of GFP_DMA in SGI SN2 platform support. Signed-off-by: Christoph Lameter Index: linux-2.6.18-rc1-mm1/arch/ia64/mm/discontig.c =================================================================== --- linux-2.6.18-rc1-mm1.orig/arch/ia64/mm/discontig.c 2006-07-10 10:33:41.681271230 -0700 +++ linux-2.6.18-rc1-mm1/arch/ia64/mm/discontig.c 2006-07-10 11:13:46.488496555 -0700 @@ -37,7 +37,9 @@ struct early_node_data { unsigned long pernode_size; struct bootmem_data bootmem_data; unsigned long num_physpages; +#ifdef CONFIG_ZONE_DMA unsigned long num_dma_physpages; +#endif unsigned long min_pfn; unsigned long max_pfn; }; @@ -714,9 +716,11 @@ static __init int count_node_pages(unsig unsigned long end = start + len; mem_data[node].num_physpages += len >> PAGE_SHIFT; +#ifdef CONFIG_ZONE_DMA if (start <= __pa(MAX_DMA_ADDRESS)) mem_data[node].num_dma_physpages += (min(end, __pa(MAX_DMA_ADDRESS)) - start) >>PAGE_SHIFT; +#endif start = GRANULEROUNDDOWN(start); start = ORDERROUNDDOWN(start); end = GRANULEROUNDUP(end); @@ -761,6 +765,13 @@ void __init paging_init(void) num_physpages += mem_data[node].num_physpages; +#ifndef CONFIG_ZONE_DMA + zones_size[ZONE_NORMAL] = mem_data[node].max_pfn - + mem_data[node].min_pfn; + zholes_size[ZONE_NORMAL] = mem_data[node].max_pfn - + mem_data[node].min_pfn - + mem_data[node].num_physpages; +#else if (mem_data[node].min_pfn >= max_dma) { /* All of this node's memory is above ZONE_DMA */ zones_size[ZONE_NORMAL] = mem_data[node].max_pfn - @@ -768,7 +779,8 @@ void __init paging_init(void) zholes_size[ZONE_NORMAL] = mem_data[node].max_pfn - mem_data[node].min_pfn - mem_data[node].num_physpages; - } else if (mem_data[node].max_pfn < max_dma) { + } + else if (mem_data[node].max_pfn < max_dma) { /* All of this node's memory is in ZONE_DMA */ zones_size[ZONE_DMA] = 
mem_data[node].max_pfn - mem_data[node].min_pfn; @@ -787,7 +799,7 @@ void __init paging_init(void) (mem_data[node].num_physpages - mem_data[node].num_dma_physpages); } - +#endif pfn_offset = mem_data[node].min_pfn; #ifdef CONFIG_VIRTUAL_MEM_MAP Index: linux-2.6.18-rc1-mm1/arch/ia64/mm/init.c =================================================================== --- linux-2.6.18-rc1-mm1.orig/arch/ia64/mm/init.c 2006-07-05 21:09:49.000000000 -0700 +++ linux-2.6.18-rc1-mm1/arch/ia64/mm/init.c 2006-07-10 11:13:46.506073594 -0700 @@ -43,7 +43,11 @@ DEFINE_PER_CPU(long, __pgtable_quicklist extern void ia64_tlb_init (void); +#ifdef CONFIG_ZONE_DMA unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL; +#else +unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET; +#endif #ifdef CONFIG_VIRTUAL_MEM_MAP unsigned long vmalloc_end = VMALLOC_END_INIT; Index: linux-2.6.18-rc1-mm1/arch/ia64/mm/contig.c =================================================================== --- linux-2.6.18-rc1-mm1.orig/arch/ia64/mm/contig.c 2006-07-10 10:33:41.681271230 -0700 +++ linux-2.6.18-rc1-mm1/arch/ia64/mm/contig.c 2006-07-10 11:13:46.506073594 -0700 @@ -250,6 +250,7 @@ paging_init (void) num_dma_physpages = 0; efi_memmap_walk(count_dma_pages, &num_dma_physpages); +#ifdef CONFIG_ZONE_DMA if (max_low_pfn < max_dma) { zones_size[ZONE_DMA] = max_low_pfn; zholes_size[ZONE_DMA] = max_low_pfn - num_dma_physpages; @@ -257,12 +258,15 @@ paging_init (void) zones_size[ZONE_DMA] = max_dma; zholes_size[ZONE_DMA] = max_dma - num_dma_physpages; if (num_physpages > num_dma_physpages) { +#endif zones_size[ZONE_NORMAL] = max_low_pfn - max_dma; zholes_size[ZONE_NORMAL] = ((max_low_pfn - max_dma) - (num_physpages - num_dma_physpages)); +#ifdef CONFIG_ZONE_DMA } } +#endif max_gap = 0; efi_memmap_walk(find_largest_hole, (u64 *)&max_gap); Index: linux-2.6.18-rc1-mm1/arch/ia64/sn/kernel/bte.c =================================================================== --- linux-2.6.18-rc1-mm1.orig/arch/ia64/sn/kernel/bte.c 
2006-07-05 21:09:49.000000000 -0700 +++ linux-2.6.18-rc1-mm1/arch/ia64/sn/kernel/bte.c 2006-07-10 11:13:46.563687222 -0700 @@ -277,8 +277,7 @@ bte_result_t bte_unaligned_copy(u64 src, } /* temporary buffer used during unaligned transfers */ - bteBlock_unaligned = kmalloc(len + 3 * L1_CACHE_BYTES, - GFP_KERNEL | GFP_DMA); + bteBlock_unaligned = kmalloc(len + 3 * L1_CACHE_BYTES, GFP_KERNEL); if (bteBlock_unaligned == NULL) { return BTEFAIL_NOTAVAIL; } Index: linux-2.6.18-rc1-mm1/arch/ia64/Kconfig =================================================================== --- linux-2.6.18-rc1-mm1.orig/arch/ia64/Kconfig 2006-07-10 11:11:30.488110736 -0700 +++ linux-2.6.18-rc1-mm1/arch/ia64/Kconfig 2006-07-10 11:13:46.572475741 -0700 @@ -23,8 +23,8 @@ config 64BIT default y config ZONE_DMA - bool - default y + def_bool y + depends on !IA64_SGI_SN2 config MMU bool Index: linux-2.6.18-rc1-mm1/arch/ia64/sn/kernel/setup.c =================================================================== --- linux-2.6.18-rc1-mm1.orig/arch/ia64/sn/kernel/setup.c 2006-07-10 10:33:41.684200736 -0700 +++ linux-2.6.18-rc1-mm1/arch/ia64/sn/kernel/setup.c 2006-07-10 11:13:46.596888295 -0700 @@ -429,8 +429,9 @@ void __init sn_setup(char **cmdline_p) #endif /* CONFIG_DUMMY_CONSOLE */ } #endif /* def(CONFIG_VT) && def(CONFIG_VGA_CONSOLE) */ - +#ifdef CONFIG_ZONE_DMA MAX_DMA_ADDRESS = PAGE_OFFSET + MAX_PHYS_MEMORY; +#endif /* * Build the tables for managing cnodes.