Single zone per pgdat optimization

If we only have a single zone per node / pgdat, then we do not need the
zone_pgdat link but can calculate the pgdat address directly from the zone,
since the pgdat embeds the zone struct. This avoids one level of indirection
on critical VM paths.

Signed-off-by: Christoph Lameter

Index: linux-2.6.18-rc6-mm2/include/linux/mmzone.h
===================================================================
--- linux-2.6.18-rc6-mm2.orig/include/linux/mmzone.h	2006-09-18 21:27:55.000000000 -0500
+++ linux-2.6.18-rc6-mm2/include/linux/mmzone.h	2006-09-18 21:27:59.343896203 -0500
@@ -282,7 +282,12 @@ struct zone {
 	/*
 	 * Discontig memory support fields.
 	 */
+#ifdef MULTI_ZONE
 	struct pglist_data	*zone_pgdat;
+#define get_pgdat(__zone) ((__zone)->zone_pgdat)
+#else
+#define get_pgdat(__zone) container_of((__zone), struct pglist_data, node_zones[0])
+#endif
 	/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
 	unsigned long		zone_start_pfn;
Index: linux-2.6.18-rc6-mm2/mm/page_alloc.c
===================================================================
--- linux-2.6.18-rc6-mm2.orig/mm/page_alloc.c	2006-09-18 21:27:54.000000000 -0500
+++ linux-2.6.18-rc6-mm2/mm/page_alloc.c	2006-09-18 21:27:59.365382353 -0500
@@ -960,7 +960,7 @@ get_page_from_freelist(gfp_t gfp_mask, u
 	do {
 		zone = *z;
 		if (unlikely(NUMA_BUILD && (gfp_mask & __GFP_THISNODE) &&
-			zone->zone_pgdat != zonelist->zones[0]->zone_pgdat))
+			get_pgdat(zone) != get_pgdat(zonelist->zones[0])))
 				break;
 		if ((alloc_flags & ALLOC_CPUSET) &&
 			!cpuset_zone_allowed(zone, gfp_mask))
@@ -1996,7 +1996,7 @@ static __meminit
 int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
 {
 	int i;
-	struct pglist_data *pgdat = zone->zone_pgdat;
+	struct pglist_data *pgdat = get_pgdat(zone);
 	size_t alloc_size;

 	/*
@@ -2058,7 +2058,7 @@ __meminit int init_currently_empty_zone(
 					unsigned long zone_start_pfn,
 					unsigned long size)
 {
-	struct pglist_data *pgdat = zone->zone_pgdat;
+	struct pglist_data *pgdat = get_pgdat(zone);
 	int ret;
 	ret = zone_wait_table_init(zone, size);
 	if (ret)
@@ -2500,7 +2500,9 @@ static void __meminit free_area_init_cor
 		spin_lock_init(&zone->lock);
 		spin_lock_init(&zone->lru_lock);
 		zone_seqlock_init(zone);
+#ifdef MULTI_ZONE
 		zone->zone_pgdat = pgdat;
+#endif
 		zone->free_pages = 0;

 		zone->temp_priority = zone->prev_priority = DEF_PRIORITY;
Index: linux-2.6.18-rc6-mm2/mm/vmscan.c
===================================================================
--- linux-2.6.18-rc6-mm2.orig/mm/vmscan.c	2006-09-18 21:24:39.000000000 -0500
+++ linux-2.6.18-rc6-mm2/mm/vmscan.c	2006-09-18 21:27:59.380032001 -0500
@@ -1299,7 +1299,7 @@ void wakeup_kswapd(struct zone *zone, in
 	if (!populated_zone(zone))
 		return;

-	pgdat = zone->zone_pgdat;
+	pgdat = get_pgdat(zone);
 	if (zone_watermark_ok(zone, order, zone->pages_low, 0, 0))
 		return;
 	if (pgdat->kswapd_max_order < order)
Index: linux-2.6.18-rc6-mm2/mm/mmzone.c
===================================================================
--- linux-2.6.18-rc6-mm2.orig/mm/mmzone.c	2006-09-18 21:24:40.000000000 -0500
+++ linux-2.6.18-rc6-mm2/mm/mmzone.c	2006-09-18 21:27:59.406401367 -0500
@@ -32,11 +32,14 @@ EXPORT_UNUSED_SYMBOL(next_online_pgdat);
  */
 struct zone *next_zone(struct zone *zone)
 {
-	pg_data_t *pgdat = zone->zone_pgdat;
+	pg_data_t *pgdat = get_pgdat(zone);

+#ifdef MULTI_ZONE
 	if (zone < pgdat->node_zones + MAX_NR_ZONES - 1)
 		zone++;
-	else {
+	else
+#endif
+	{
 		pgdat = next_online_pgdat(pgdat);
 		if (pgdat)
 			zone = pgdat->node_zones;
Index: linux-2.6.18-rc6-mm2/mm/sparse.c
===================================================================
--- linux-2.6.18-rc6-mm2.orig/mm/sparse.c	2006-09-18 21:24:40.000000000 -0500
+++ linux-2.6.18-rc6-mm2/mm/sparse.c	2006-09-18 21:27:59.415191155 -0500
@@ -297,7 +297,7 @@ int sparse_add_one_section(struct zone *
 			   int nr_pages)
 {
 	unsigned long section_nr = pfn_to_section_nr(start_pfn);
-	struct pglist_data *pgdat = zone->zone_pgdat;
+	struct pglist_data *pgdat = get_pgdat(zone);
 	struct mem_section *ms;
 	struct page *memmap;
 	unsigned long flags;
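
For illustration only (not part of the patch): a minimal userspace sketch of
the container_of() calculation that the !MULTI_ZONE branch relies on. The
structs below are toy stand-ins for the kernel's struct zone and struct
pglist_data; only the embedding relationship matters.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Same pointer arithmetic as the kernel's container_of(). */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct zone {
	unsigned long free_pages;
};

struct pglist_data {
	struct zone node_zones[1];	/* single zone per node */
	int node_id;
};

#define get_pgdat(__zone) \
	container_of((__zone), struct pglist_data, node_zones[0])

int main(void)
{
	struct pglist_data pgdat = { .node_id = 3 };
	struct zone *zone = &pgdat.node_zones[0];

	/*
	 * get_pgdat() is compile-time pointer arithmetic, so the
	 * zone -> pgdat step needs no load of a back pointer.
	 */
	assert(get_pgdat(zone) == &pgdat);
	printf("node %d\n", get_pgdat(zone)->node_id);
	return 0;
}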