Index: linux-2.6.20/arch/x86_64/kernel/pci-dma.c
===================================================================
--- linux-2.6.20.orig/arch/x86_64/kernel/pci-dma.c	2007-02-12 21:50:39.000000000 -0800
+++ linux-2.6.20/arch/x86_64/kernel/pci-dma.c	2007-02-12 21:57:39.000000000 -0800
@@ -48,7 +48,8 @@
 
 /* Allocate DMA memory on node near device */
 noinline static void *
-dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
+dma_alloc_pages(unsigned long low, unsigned long high, struct device *dev,
+	gfp_t gfp, unsigned order)
 {
 	struct page *page;
 	int node;
@@ -62,7 +63,7 @@
 	if (node < first_node(node_online_map))
 		node = first_node(node_online_map);
 
-	page = alloc_pages_node(node, gfp, order);
+	page = alloc_pages_range_node(low, high, node, gfp, order);
 	return page ? page_address(page) : NULL;
 }
 
@@ -90,15 +91,7 @@
 	   uses the normal dma_mask for alloc_coherent. */
 	dma_mask &= *dev->dma_mask;
 
-	/* Why <=? Even when the mask is smaller than 4GB it is often
-	   larger than 16MB and in this case we have a chance of
-	   finding fitting memory in the next higher zone first. If
-	   not retry with true GFP_DMA. -AK */
-	if (dma_mask <= DMA_32BIT_MASK)
-		gfp |= GFP_DMA32;
-
- again:
-	memory = dma_alloc_pages(dev, gfp, get_order(size));
+	memory = dma_alloc_pages(0, dma_mask, dev, gfp, get_order(size));
 	if (memory == NULL)
 		return NULL;
 
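
Note: below is a minimal sketch of the calling convention after this patch. It assumes alloc_pages_range_node()/dma_alloc_pages() return pages whose physical addresses fall inside [low, high]; that semantics is inferred from the call sites above, not from a finalized in-tree API, and example_coherent_alloc() is a hypothetical helper, not code from this series.

#include <linux/device.h>
#include <linux/gfp.h>
#include <asm/page.h>	/* get_order() */

/*
 * Sketch only, written as if it lived inside pci-dma.c where the
 * static dma_alloc_pages() is visible.  The explicit [0, dma_mask]
 * window replaces the removed heuristic of setting GFP_DMA32 when
 * the mask is <= 4GB and retrying with true GFP_DMA on failure.
 */
static void *example_coherent_alloc(struct device *dev, size_t size,
				    gfp_t gfp)
{
	/* Highest physical address the device can reach */
	unsigned long dma_mask = *dev->dma_mask;

	/* Any page from 0 up to the mask satisfies the device */
	return dma_alloc_pages(0, dma_mask, dev, gfp, get_order(size));
}

With a range interface the 16MB (GFP_DMA) and 4GB (GFP_DMA32) zone boundaries stop being special cases: an unusual mask, say a 30-bit one, can be honored directly instead of being approximated by the nearest zone and a retry loop.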