Single zone per pgdat optimization

If we only have a single zone per node / pgdat then we do not need the
zone_pgdat link but can directly calculate the pgdat address from the
zone, since the pgdat embeds the zone struct. This avoids one
indirection on critical VM paths. (An illustrative sketch of the
address calculation follows the diff.)

Signed-off-by: Christoph Lameter

Index: linux-2.6.19-rc1-mm1/include/linux/mmzone.h
===================================================================
--- linux-2.6.19-rc1-mm1.orig/include/linux/mmzone.h	2006-10-13 15:35:21.525282128 -0500
+++ linux-2.6.19-rc1-mm1/include/linux/mmzone.h	2006-10-13 15:35:36.936783101 -0500
@@ -292,7 +292,12 @@ struct zone {
 	/*
 	 * Discontig memory support fields.
 	 */
+#ifdef MULTI_ZONE
 	struct pglist_data	*zone_pgdat;
+#define get_pgdat(__zone) ((__zone)->zone_pgdat)
+#else
+#define get_pgdat(__zone) container_of((__zone), struct pglist_data, node_zones[0])
+#endif
 	/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
 	unsigned long		zone_start_pfn;
 
Index: linux-2.6.19-rc1-mm1/mm/page_alloc.c
===================================================================
--- linux-2.6.19-rc1-mm1.orig/mm/page_alloc.c	2006-10-13 15:35:21.431523947 -0500
+++ linux-2.6.19-rc1-mm1/mm/page_alloc.c	2006-10-13 15:35:36.957292703 -0500
@@ -963,7 +963,7 @@ get_page_from_freelist(gfp_t gfp_mask, u
 	do {
 		zone = *z;
 		if (unlikely(NUMA_BUILD && (gfp_mask & __GFP_THISNODE) &&
-			zone->zone_pgdat != zonelist->zones[0]->zone_pgdat))
+			get_pgdat(zone) != get_pgdat(zonelist->zones[0])))
 				break;
 		if ((alloc_flags & ALLOC_CPUSET) &&
 			!cpuset_zone_allowed(zone, gfp_mask))
@@ -1992,7 +1992,7 @@ static __meminit
 int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
 {
 	int i;
-	struct pglist_data *pgdat = zone->zone_pgdat;
+	struct pglist_data *pgdat = get_pgdat(zone);
 	size_t alloc_size;
 
 	/*
@@ -2054,7 +2054,7 @@ __meminit int init_currently_empty_zone(
 					unsigned long zone_start_pfn,
 					unsigned long size)
 {
-	struct pglist_data *pgdat = zone->zone_pgdat;
+	struct pglist_data *pgdat = get_pgdat(zone);
 	int ret;
 	ret = zone_wait_table_init(zone, size);
 	if (ret)
@@ -2479,7 +2479,9 @@ static void __meminit free_area_init_cor
 		spin_lock_init(&zone->lock);
 		spin_lock_init(&zone->lru_lock);
 		zone_seqlock_init(zone);
+#ifdef MULTI_ZONE
 		zone->zone_pgdat = pgdat;
+#endif
 		zone->free_pages = 0;
 
 		zone->temp_priority = zone->prev_priority = DEF_PRIORITY;
Index: linux-2.6.19-rc1-mm1/mm/vmscan.c
===================================================================
--- linux-2.6.19-rc1-mm1.orig/mm/vmscan.c	2006-10-10 21:47:13.073597526 -0500
+++ linux-2.6.19-rc1-mm1/mm/vmscan.c	2006-10-13 15:35:36.989522077 -0500
@@ -1318,7 +1318,7 @@ void wakeup_kswapd(struct zone *zone, in
 	if (!populated_zone(zone))
 		return;
 
-	pgdat = zone->zone_pgdat;
+	pgdat = get_pgdat(zone);
 	if (zone_watermark_ok(zone, order, zone->pages_low, 0, 0))
 		return;
 	if (pgdat->kswapd_max_order < order)
Index: linux-2.6.19-rc1-mm1/mm/mmzone.c
===================================================================
--- linux-2.6.19-rc1-mm1.orig/mm/mmzone.c	2006-10-04 21:57:05.000000000 -0500
+++ linux-2.6.19-rc1-mm1/mm/mmzone.c	2006-10-13 15:35:37.008078384 -0500
@@ -32,11 +32,14 @@ EXPORT_UNUSED_SYMBOL(next_online_pgdat);
  */
 struct zone *next_zone(struct zone *zone)
 {
-	pg_data_t *pgdat = zone->zone_pgdat;
+	pg_data_t *pgdat = get_pgdat(zone);
 
+#ifdef MULTI_ZONE
 	if (zone < pgdat->node_zones + MAX_NR_ZONES - 1)
 		zone++;
-	else {
+	else
+#endif
+	{
 		pgdat = next_online_pgdat(pgdat);
 		if (pgdat)
 			zone = pgdat->node_zones;
Index: linux-2.6.19-rc1-mm1/mm/sparse.c
===================================================================
--- linux-2.6.19-rc1-mm1.orig/mm/sparse.c	2006-10-10 21:47:12.962259791 -0500
+++ linux-2.6.19-rc1-mm1/mm/sparse.c	2006-10-13 15:35:37.026634691 -0500
@@ -297,7 +297,7 @@ int sparse_add_one_section(struct zone *
 			   int nr_pages)
 {
 	unsigned long section_nr = pfn_to_section_nr(start_pfn);
-	struct pglist_data *pgdat = zone->zone_pgdat;
+	struct pglist_data *pgdat = get_pgdat(zone);
 	struct mem_section *ms;
 	struct page *memmap;
 	unsigned long flags;
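
For illustration only (not part of the patch): a minimal userspace
sketch of the pointer arithmetic behind get_pgdat() in the single-zone
case. The struct layouts and the offsetof()-based container_of() below
are simplified stand-ins, not the kernel's actual definitions.

	/*
	 * Illustrative sketch: simplified stand-ins for the kernel's
	 * struct zone / struct pglist_data and for container_of(),
	 * which the kernel defines with additional type checking.
	 */
	#include <stdio.h>
	#include <stddef.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct zone {
		unsigned long zone_start_pfn;	/* placeholder members */
		unsigned long free_pages;
	};

	struct pglist_data {
		struct zone node_zones[1];	/* the single embedded zone */
		int node_id;
	};

	/*
	 * With exactly one zone per pgdat, the pgdat address is computed
	 * by subtracting the member offset; no zone_pgdat pointer load.
	 */
	static struct pglist_data *get_pgdat(struct zone *zone)
	{
		return container_of(zone, struct pglist_data, node_zones[0]);
	}

	int main(void)
	{
		struct pglist_data pgdat = { .node_id = 3 };
		struct zone *zone = &pgdat.node_zones[0];

		printf("node_id via get_pgdat(): %d\n",
		       get_pgdat(zone)->node_id);
		return 0;
	}

Because node_zones[] is the first member of struct pglist_data, the
subtraction is by a compile-time constant (zero here), so the compiler
folds get_pgdat() into plain pointer reuse. In the MULTI_ZONE case the
same lookup requires a dependent load through zone->zone_pgdat, which
is the indirection the changelog refers to.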