From: Dean Nelson

>
> In this case I think adding the vmalloc call is overkill, I would simply
> make it call kmalloc_node() unconditionally for all sizes and let it
> fail if that situation occurs, given how unlikely it is.

I'm dropping the call to vmalloc_node().

> >>> Index: linux-2.6/arch/ia64/sn/kernel/sn2/cache.c
>
> I would expect this part of the patch to be able to go in as is,
> straight away so I don't think it should be a problem. It's not a big
> deal whether we do it one way or another to me.

For simplicity, I'm keeping this file's changes with the patch.

Cc: Andrey Volkov
Cc: Jes Sorensen
Cc: "Luck, Tony"
Signed-off-by: Andrew Morton
---

 arch/ia64/kernel/uncached.c |   61 ++++++++++++++++++----------------
 lib/genalloc.c              |   12 +-----
 2 files changed, 37 insertions(+), 36 deletions(-)

diff -puN arch/ia64/kernel/uncached.c~change-gen_pool-allocator-to-not-touch-managed-memory-update-2 arch/ia64/kernel/uncached.c
--- 25/arch/ia64/kernel/uncached.c~change-gen_pool-allocator-to-not-touch-managed-memory-update-2	Wed Apr 26 15:13:24 2006
+++ 25-akpm/arch/ia64/kernel/uncached.c	Wed Apr 26 15:13:24 2006
@@ -65,16 +65,15 @@ static void uncached_ipi_mc_drain(void *
  * Add a new chunk of uncached memory pages to the specified pool.
  *
  * @pool: pool to add new chunk of uncached memory to
- * @nid: node id of node to allocate memory from
+ * @nid: node id of node to allocate memory from, or -1
  *
  * This is accomplished by first allocating a granule of cached memory pages
  * and then converting them to uncached memory pages.
  */
-static int
-uncached_add_chunk(struct gen_pool *pool, int nid)
+static int uncached_add_chunk(struct gen_pool *pool, int nid)
 {
 	struct page *page;
-	int status, i, ret;
+	int status, i;
 	unsigned long c_addr, uc_addr;
 
 	if (allocated_granules >= MAX_UNCACHED_GRANULES)
@@ -103,8 +102,11 @@ uncached_add_chunk(struct gen_pool *pool
 	flush_tlb_kernel_range(uc_addr, uc_adddr + IA64_GRANULE_SIZE);
 
 	status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
-	if (!status)
-		(void) smp_call_function(uncached_ipi_visibility, NULL, 0, 1);
+	if (!status) {
+		status = smp_call_function(uncached_ipi_visibility, NULL, 0, 1);
+		if (status)
+			goto failed;
+	}
 
 	preempt_disable();
 
@@ -119,46 +121,53 @@ uncached_add_chunk(struct gen_pool *pool
 	preempt_enable();
 
 	ia64_pal_mc_drain();
-	(void) smp_call_function(uncached_ipi_mc_drain, NULL, 0, 1);
+	status = smp_call_function(uncached_ipi_mc_drain, NULL, 0, 1);
+	if (status)
+		goto failed;
 
 	/*
 	 * The chunk of memory pages has been converted to uncached so now we
 	 * can add it to the pool.
	 */
-	ret = gen_pool_add(pool, uc_addr, IA64_GRANULE_SIZE, nid);
-	if (ret != 0) {
-		/* failed to add the chunk so give it back to the kernel */
-		for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++)
-			ClearPageUncached(&page[i]);
-
-		free_pages(c_addr, IA64_GRANULE_SHIFT-PAGE_SHIFT);
-		return -1;
-	}
+	status = gen_pool_add(pool, uc_addr, IA64_GRANULE_SIZE, nid);
+	if (status)
+		goto failed;
 
 	allocated_granules++;
 	return 0;
+
+	/* failed to convert or add the chunk so give it back to the kernel */
+failed:
+	for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++)
+		ClearPageUncached(&page[i]);
+
+	free_pages(c_addr, IA64_GRANULE_SHIFT-PAGE_SHIFT);
+	return -1;
 }
 
 
 /*
  * uncached_alloc_page
  *
- * @starting_nid: node id of node to start with
+ * @starting_nid: node id of node to start with, or -1
  *
  * Allocate 1 uncached page.  Allocates on the requested node.  If no
  * uncached pages are available on the requested node, roundrobin starting
  * with the next higher node.
  */
-unsigned long
-uncached_alloc_page(int starting_nid)
+unsigned long uncached_alloc_page(int starting_nid)
 {
 	unsigned long uc_addr;
 	struct gen_pool *pool;
-	int nid = starting_nid;
+	int nid;
 
-	if (unlikely(starting_nid < 0 || starting_nid >= MAX_NUMNODES))
+	if (unlikely(starting_nid >= MAX_NUMNODES))
 		return 0;
 
+	if (starting_nid < 0)
+		starting_nid = numa_node_id();
+	nid = starting_nid;
+
 	do {
 		if (!node_online(nid))
 			continue;
@@ -185,8 +194,7 @@ EXPORT_SYMBOL(uncached_alloc_page);
  *
  * Free a single uncached page.
  */
-void
-uncached_free_page(unsigned long uc_addr)
+void uncached_free_page(unsigned long uc_addr)
 {
 	int nid = paddr_to_nid(uc_addr - __IA64_UNCACHED_OFFSET);
 	struct gen_pool *pool = uncached_pool[nid];
@@ -212,8 +220,8 @@ EXPORT_SYMBOL(uncached_free_page);
  * Called at boot time to build a map of pages that can be used for
  * memory special operations.
  */
-static int __init
-uncached_build_memmap(unsigned long uc_start, unsigned long uc_end, void *arg)
+static int __init uncached_build_memmap(unsigned long uc_start,
+					unsigned long uc_end, void *arg)
 {
 	int nid = paddr_to_nid(uc_start - __IA64_UNCACHED_OFFSET);
 	struct gen_pool *pool = uncached_pool[nid];
@@ -229,8 +237,7 @@ uncached_build_memmap(unsigned long uc_s
 }
 
 
-static int __init
-uncached_init(void)
+static int __init uncached_init(void)
 {
 	int nid;
 
diff -puN lib/genalloc.c~change-gen_pool-allocator-to-not-touch-managed-memory-update-2 lib/genalloc.c
--- 25/lib/genalloc.c~change-gen_pool-allocator-to-not-touch-managed-memory-update-2	Wed Apr 26 15:13:24 2006
+++ 25-akpm/lib/genalloc.c	Wed Apr 26 15:13:24 2006
@@ -11,7 +11,6 @@
  */
 
 #include <linux/module.h>
-#include <linux/vmalloc.h>
 #include <linux/genalloc.h>
 
 
@@ -19,7 +18,7 @@
  * Create a new special memory pool.
  *
  * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
- * @nid: node id of the node the pool structure should be allocated on
+ * @nid: node id of the node the pool structure should be allocated on, or -1
  */
 struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
 {
@@ -43,7 +42,7 @@ EXPORT_SYMBOL(gen_pool_create);
  * @addr: starting address of memory chunk to add to pool
  * @size: size in bytes of the memory chunk to add to pool
  * @nid: node id of the node the chunk structure and bitmap should be
- *       allocated on
+ *       allocated on, or -1
  */
 int gen_pool_add(struct gen_pool *pool, unsigned long addr, size_t size,
 		 int nid)
@@ -53,12 +52,7 @@ int gen_pool_add(struct gen_pool *pool,
 	int nbytes = sizeof(struct gen_pool_chunk) +
 		(nbits + BITS_PER_BYTE - 1) / BITS_PER_BYTE;
 
-	if (nbytes > PAGE_SIZE) {
-		chunk = vmalloc_node(nbytes, nid);
-	} else {
-		chunk = kmalloc_node(nbytes, GFP_KERNEL, nid);
-	}
-
+	chunk = kmalloc_node(nbytes, GFP_KERNEL, nid);
 	if (unlikely(chunk == NULL))
 		return -1;
 
_
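
For anyone picking this up: a minimal sketch of what a gen_pool client
looks like after this change, using the "-1 means any node" convention
the updated comments describe.  This is illustration only, not part of
the patch; my_pool, my_pool_init(), my_region_base and MY_REGION_SIZE
are made-up names.

#include <linux/errno.h>
#include <linux/genalloc.h>
#include <linux/init.h>

#define MY_REGION_SIZE	(4 * 1024 * 1024)	/* made-up 4MB region */

static struct gen_pool *my_pool;

static int __init my_pool_init(unsigned long my_region_base)
{
	/* min_alloc_order 12: each bitmap bit covers 4KB; nid -1: any node */
	my_pool = gen_pool_create(12, -1);
	if (my_pool == NULL)
		return -ENOMEM;

	/*
	 * The chunk structure and bitmap are now always allocated with
	 * kmalloc_node() (no vmalloc fallback), so this can fail for a
	 * very large chunk; gen_pool_add() returns nonzero on failure.
	 */
	if (gen_pool_add(my_pool, my_region_base, MY_REGION_SIZE, -1))
		return -ENOMEM;

	return 0;
}

With nid == -1, kmalloc_node() behaves like plain kmalloc(), so the
pool and chunk structures simply land on whatever node the caller runs
on, which is the behavior the uncached_alloc_page() change above relies
on when a caller passes starting_nid == -1.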