From: KAMEZAWA Hiroyuki

This patch allows hot-adding memory which is not aligned to a section.

Currently, hot-added memory has to be aligned to the section size.
Considering architectures with big section sizes, this is not useful.

When hot-added memory is registered as an iomem resource by the iomem
resource patch, we can make use of that information to detect the valid
memory range.

Note: with this, not-aligned memory can be registered.  To allow
hot-adding memory with holes, we have to do more work around
add_memory().  (It doesn't allow adding memory to an already existing
mem section.)

Signed-off-by: KAMEZAWA Hiroyuki
Signed-off-by: Andrew Morton
---

 include/linux/ioport.h |    3 +++
 kernel/resource.c      |   38 ++++++++++++++++++++++++++++++++++++++
 mm/memory_hotplug.c    |   28 ++++++++++++++++++++++++----
 3 files changed, 65 insertions(+), 4 deletions(-)

diff -puN include/linux/ioport.h~catch-valid-mem-range-at-onlining-memory include/linux/ioport.h
--- devel/include/linux/ioport.h~catch-valid-mem-range-at-onlining-memory	2006-06-09 15:21:48.000000000 -0700
+++ devel-akpm/include/linux/ioport.h	2006-06-09 15:21:48.000000000 -0700
@@ -105,6 +105,9 @@ extern int allocate_resource(struct reso
 int adjust_resource(struct resource *res, resource_size_t start,
 		    resource_size_t size);
 
+/* get registered SYSTEM_RAM resources in the specified area */
+extern int find_next_system_ram(struct resource *res);
+
 /* Convenience shorthand with allocation */
 #define request_region(start,n,name)	__request_region(&ioport_resource, (start), (n), (name))
 #define request_mem_region(start,n,name) __request_region(&iomem_resource, (start), (n), (name))
diff -puN kernel/resource.c~catch-valid-mem-range-at-onlining-memory kernel/resource.c
--- devel/kernel/resource.c~catch-valid-mem-range-at-onlining-memory	2006-06-09 15:21:48.000000000 -0700
+++ devel-akpm/kernel/resource.c	2006-06-09 15:21:48.000000000 -0700
@@ -241,6 +241,44 @@ int release_resource(struct resource *ol
 
 EXPORT_SYMBOL(release_resource);
 
+#ifdef CONFIG_MEMORY_HOTPLUG
+/*
+ * Finds the lowest memory resource that exists within [res->start, res->end).
+ * The caller must specify res->start, res->end and res->flags.
+ * If found, returns 0 and res is overwritten; if not found, returns -1.
+ */
+int find_next_system_ram(struct resource *res)
+{
+	resource_size_t start, end;
+	struct resource *p;
+
+	BUG_ON(!res);
+
+	start = res->start;
+	end = res->end;
+
+	read_lock(&resource_lock);
+	for (p = iomem_resource.child; p; p = p->sibling) {
+		/* system ram is just marked as IORESOURCE_MEM */
+		if (p->flags != res->flags)
+			continue;
+		if (p->start > end) {
+			p = NULL;
+			break;
+		}
+		if (p->start >= start)
+			break;
+	}
+	read_unlock(&resource_lock);
+	if (!p)
+		return -1;
+	/* copy data */
+	res->start = p->start;
+	res->end = p->end;
+	return 0;
+}
+#endif
+
 /*
  * Find empty slot in the resource tree given range and alignment.
  */
diff -puN mm/memory_hotplug.c~catch-valid-mem-range-at-onlining-memory mm/memory_hotplug.c
--- devel/mm/memory_hotplug.c~catch-valid-mem-range-at-onlining-memory	2006-06-09 15:21:48.000000000 -0700
+++ devel-akpm/mm/memory_hotplug.c	2006-06-09 15:21:48.000000000 -0700
@@ -127,6 +127,9 @@ int online_pages(unsigned long pfn, unsi
 	unsigned long i;
 	unsigned long flags;
 	unsigned long onlined_pages = 0;
+	struct resource res;
+	u64 section_end;
+	unsigned long start_pfn;
 	struct zone *zone;
 	int need_zonelists_rebuild = 0;
 
@@ -149,10 +152,27 @@ int online_pages(unsigned long pfn, unsi
 	if (!populated_zone(zone))
 		need_zonelists_rebuild = 1;
 
-	for (i = 0; i < nr_pages; i++) {
-		struct page *page = pfn_to_page(pfn + i);
-		online_page(page);
-		onlined_pages++;
+	res.start = (u64)pfn << PAGE_SHIFT;
+	res.end = res.start + ((u64)nr_pages << PAGE_SHIFT) - 1;
+	res.flags = IORESOURCE_MEM; /* we just need system ram */
+	section_end = res.end;
+
+	while (find_next_system_ram(&res) >= 0) {
+		start_pfn = (unsigned long)(res.start >> PAGE_SHIFT);
+		nr_pages = (unsigned long)
+			((res.end + 1 - res.start) >> PAGE_SHIFT);
+
+		if (PageReserved(pfn_to_page(start_pfn))) {
+			/* this region's pages are not onlined yet */
+			for (i = 0; i < nr_pages; i++) {
+				struct page *page = pfn_to_page(start_pfn + i);
+				online_page(page);
+				onlined_pages++;
+			}
+		}
+
+		res.start = res.end + 1;
+		res.end = section_end;
 	}
 	zone->present_pages += onlined_pages;
 	zone->zone_pgdat->node_present_pages += onlined_pages;
_
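For reference, the calling convention find_next_system_ram() expects is the
same loop online_pages() uses above: prime res with the range of interest,
let the function overwrite res with the first registered System RAM resource
it finds, then re-prime res and continue.  A minimal sketch follows; the
helper name report_system_ram() and the printk output are illustrative only,
not part of the patch:

	/* Sketch: report every registered System RAM range in [start, end]. */
	static void report_system_ram(u64 start, u64 end)
	{
		struct resource res;

		res.start = start;
		res.end = end;
		res.flags = IORESOURCE_MEM;	/* System RAM is plain IORESOURCE_MEM */

		while (find_next_system_ram(&res) >= 0) {
			/* res now describes one registered System RAM range */
			printk(KERN_DEBUG "System RAM: 0x%llx - 0x%llx\n",
			       (unsigned long long)res.start,
			       (unsigned long long)res.end);
			/* search again just past this range, restoring the end */
			res.start = res.end + 1;
			res.end = end;
		}
	}

Since find_next_system_ram() copies the matching resource's bounds verbatim,
the returned res.end may lie beyond the range originally asked for; that is
why both online_pages() and the sketch above re-set res.end after every
iteration.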