This splits up sparse_index_alloc() into two pieces.  This is needed
because we'll allocate the memory for the second level in a different
place from where we actually consume it, to keep the allocation from
happening underneath a lock.

Signed-off-by: Dave Hansen
---

 memhotplug-dave/include/linux/mmzone.h |    1 
 memhotplug-dave/mm/sparse.c            |   57 +++++++++++++++++++++++----------
 2 files changed, 42 insertions(+), 16 deletions(-)

diff -puN mm/sparse.c~A6-extreme-hotplug-prepare mm/sparse.c
--- memhotplug/mm/sparse.c~A6-extreme-hotplug-prepare	2005-07-28 13:50:16.000000000 -0700
+++ memhotplug-dave/mm/sparse.c	2005-07-28 13:50:16.000000000 -0700
@@ -22,27 +22,52 @@ struct mem_section mem_section[NR_SECTIO
 #endif
 EXPORT_SYMBOL(mem_section);
 
-static void sparse_alloc_root(unsigned long root)
-{
 #ifdef CONFIG_ARCH_SPARSEMEM_EXTREME
-	mem_section[root] = alloc_bootmem_node(NODE_DATA(nid), PAGE_SIZE);
-#endif
+static struct mem_section *sparse_index_alloc(int nid)
+{
+	struct mem_section *section = NULL;
+	unsigned long array_size = SECTIONS_PER_ROOT *
+				   sizeof(struct mem_section);
+
+	section = alloc_bootmem_node(NODE_DATA(nid), array_size);
+
+	if (section)
+		memset(section, 0, array_size);
+
+	return section;
 }
 
-static void sparse_index_init(unsigned long section, int nid)
+static int sparse_index_init(unsigned long section_nr, int nid)
 {
-	unsigned long root = SECTION_NR_TO_ROOT(section);
+	static spinlock_t index_init_lock = SPIN_LOCK_UNLOCKED;
+	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
+	struct mem_section *section;
+	int ret = 0;
 
-	if (mem_section[root])
-		return;
+	section = sparse_index_alloc(nid);
+
+	/*
+	 * This lock keeps two different sections from
+	 * reallocating for the same index
+	 */
+	spin_lock(&index_init_lock);
 
-	sparse_alloc_root(root);
+	if (mem_section[root]) {
+		ret = -EEXIST;
+		goto out;
+	}
 
-	if (mem_section[root])
-		memset(mem_section[root], 0, PAGE_SIZE);
-	else
-		panic("memory_present: NO MEMORY\n");
+	mem_section[root] = section;
+out:
+	spin_unlock(&index_init_lock);
+	return ret;
+}
+#else /* !SPARSEMEM_EXTREME */
+static inline int sparse_index_init(unsigned long section_nr, int nid)
+{
+	return 0;
+}
+#endif
+
 /* Record a memory area against a node. */
 void memory_present(int nid, unsigned long start, unsigned long end)
 {
@@ -50,12 +75,12 @@ void memory_present(int nid, unsigned lo
 	start &= PAGE_SECTION_MASK;
 	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
-		unsigned long section = pfn_to_section_nr(pfn);
+		unsigned long section_nr = pfn_to_section_nr(pfn);
 		struct mem_section *ms;
 
-		sparse_index_init(section, nid);
+		sparse_index_init(section_nr, nid);
 
-		ms = __nr_to_section(section);
+		ms = __nr_to_section(section_nr);
 		if (!ms->section_mem_map)
 			ms->section_mem_map = SECTION_MARKED_PRESENT;
 	}
diff -puN include/linux/mmzone.h~A6-extreme-hotplug-prepare include/linux/mmzone.h
--- memhotplug/include/linux/mmzone.h~A6-extreme-hotplug-prepare	2005-07-28 13:50:16.000000000 -0700
+++ memhotplug-dave/include/linux/mmzone.h	2005-07-28 13:50:16.000000000 -0700
@@ -588,6 +588,7 @@ static inline int pfn_valid(unsigned lon
 void sparse_init(void);
 #else
 #define sparse_init()	do {} while (0)
+#define sparse_index_init(_sec, _nid)	do {} while (0)
 #endif /* CONFIG_SPARSEMEM */
 
 #ifdef CONFIG_NODES_SPAN_OTHER_NODES
_