From: Yasunori Goto

In the current code the zonelists are built once and never modified, but
memory hotplug can add a new zone/pgdat, so the zonelists must be updated.

This patch modifies build_all_zonelists() so that it can rebuild each
pgdat's zonelists.  To update them safely, stop_machine_run() is used: it
guarantees that no other CPU touches the zonelists while they are being
rebuilt.

In an earlier version of this work (V2 of node hot-add), the kernel
rebuilt the zonelists right after zone initialization.  But at that point
present_pages of the new zone is still 0, because online_page() has not
been called yet, and build_zonelists() checks present_pages to find
populated zones.  That was too early, so the rebuild is now done after
online_pages().

Signed-off-by: Yasunori Goto
Signed-off-by: KAMEZAWA Hiroyuki
Signed-off-by: Andrew Morton
---

 mm/memory_hotplug.c |   12 ++++++++++++
 mm/page_alloc.c     |   26 +++++++++++++++++++++-----
 2 files changed, 33 insertions(+), 5 deletions(-)

diff -puN mm/memory_hotplug.c~wait_table-and-zonelist-initializing-for-memory-hotadd-update-zonelists mm/memory_hotplug.c
--- 25/mm/memory_hotplug.c~wait_table-and-zonelist-initializing-for-memory-hotadd-update-zonelists	Tue Apr 11 14:32:55 2006
+++ 25-akpm/mm/memory_hotplug.c	Tue Apr 11 14:32:55 2006
@@ -123,6 +123,7 @@ int online_pages(unsigned long pfn, unsi
 	unsigned long flags;
 	unsigned long onlined_pages = 0;
 	struct zone *zone;
+	int need_zonelists_rebuild = 0;
 
 	/*
 	 * This doesn't need a lock to do pfn_to_page().
@@ -135,6 +136,14 @@ int online_pages(unsigned long pfn, unsi
 	grow_pgdat_span(zone->zone_pgdat, pfn, pfn + nr_pages);
 	pgdat_resize_unlock(zone->zone_pgdat, &flags);
 
+	/*
+	 * If this zone is not populated, it is not in any zonelist, which
+	 * means the page allocator ignores it.  So the zonelists must be
+	 * rebuilt after the pages are onlined.
+	 */
+	if (!populated_zone(zone))
+		need_zonelists_rebuild = 1;
+
 	for (i = 0; i < nr_pages; i++) {
 		struct page *page = pfn_to_page(pfn + i);
 		online_page(page);
@@ -145,5 +154,8 @@ int online_pages(unsigned long pfn, unsi
 
 	setup_per_zone_pages_min();
 
+	if (need_zonelists_rebuild)
+		build_all_zonelists();
+
 	return 0;
 }
diff -puN mm/page_alloc.c~wait_table-and-zonelist-initializing-for-memory-hotadd-update-zonelists mm/page_alloc.c
--- 25/mm/page_alloc.c~wait_table-and-zonelist-initializing-for-memory-hotadd-update-zonelists	Tue Apr 11 14:32:55 2006
+++ 25-akpm/mm/page_alloc.c	Tue Apr 11 14:32:55 2006
@@ -37,6 +37,7 @@
 #include <linux/nodemask.h>
 #include <linux/vmalloc.h>
 #include <linux/mempolicy.h>
+#include <linux/stop_machine.h>
 
 #include <asm/tlbflush.h>
 #include "internal.h"
@@ -1694,14 +1695,29 @@ static void __meminit build_zonelists(pg
 
 #endif	/* CONFIG_NUMA */
 
-void __init build_all_zonelists(void)
+/* the return type is int just for stop_machine_run() */
+static int __meminit __build_all_zonelists(void *dummy)
 {
-	int i;
+	int nid;
+	for_each_online_node(nid)
+		build_zonelists(NODE_DATA(nid));
+	return 0;
+}
+
+void __meminit build_all_zonelists(void)
+{
+	if (system_state == SYSTEM_BOOTING) {
+		__build_all_zonelists(0);
+		cpuset_init_current_mems_allowed();
+	} else {
+		/* we have to stop all cpus to guarantee there is no user
+		   of zonelist */
+		stop_machine_run(__build_all_zonelists, NULL, NR_CPUS);
+		/* cpuset refresh routine should be here */
+	}
 
-	for_each_online_node(i)
-		build_zonelists(NODE_DATA(i));
 	printk("Built %i zonelists\n", num_online_nodes());
-	cpuset_init_current_mems_allowed();
+
 }
 
 /*
_
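
For readers unfamiliar with stop_machine_run(), here is a minimal sketch
(not part of the patch) of the calling convention the rebuild path relies
on, assuming the 2.6.16-era API; rebuild_fn() and trigger_rebuild() below
are illustrative names only:

	#include <linux/stop_machine.h>

	/*
	 * Callback invoked by stop_machine_run().  While it runs, every
	 * other CPU is parked with interrupts disabled, so shared data
	 * (such as the zonelists above) can be rewritten without any
	 * reader observing an intermediate state.  The int return value
	 * is handed back to the caller of stop_machine_run().
	 */
	static int rebuild_fn(void *data)
	{
		/* rewrite the shared structures here */
		return 0;
	}

	static void trigger_rebuild(void)
	{
		/* NR_CPUS means "run the callback on any CPU" */
		stop_machine_run(rebuild_fn, NULL, NR_CPUS);
	}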