Index: linux-2.6.18-rc4/mm/page_alloc.c
===================================================================
--- linux-2.6.18-rc4.orig/mm/page_alloc.c	2006-08-18 13:00:14.705025492 -0700
+++ linux-2.6.18-rc4/mm/page_alloc.c	2006-08-18 13:00:54.727944479 -0700
@@ -401,6 +401,7 @@ static inline int free_pages_check(struc
 	return PageReserved(page);
 }
 
+#ifdef CONFIG_PCP
 /*
  * Frees a list of pages. 
  * Assumes all pages on list are in same zone, and of same order.
@@ -429,6 +430,7 @@ static void free_pages_bulk(struct zone
 	}
 	spin_unlock(&zone->lock);
 }
+#endif
 
 static void free_one_page(struct zone *zone, struct page *page, int order)
 {
@@ -593,6 +595,7 @@ static struct page *__rmqueue(struct zon
 	return NULL;
 }
 
+#ifdef CONFIG_PCP
 /*
  * Obtain a specified number of elements from the buddy allocator, all under
  * a single hold of the lock, for efficiency.  Add them to the supplied list.
@@ -613,6 +616,7 @@ static int rmqueue_bulk(struct zone *zon
 	spin_unlock(&zone->lock);
 	return i;
 }
+#endif
 
 #ifdef CONFIG_NUMA
 /*
@@ -623,6 +627,7 @@ static int rmqueue_bulk(struct zone *zon
  */
 void drain_node_pages(int nodeid)
 {
+#ifdef CONFIG_PCP
 	int i, z;
 	unsigned long flags;
 
@@ -646,12 +651,13 @@ void drain_node_pages(int nodeid)
 			}
 		}
 	}
-}
 #endif
+}
 
 #if defined(CONFIG_PM) || defined(CONFIG_HOTPLUG_CPU)
 static void __drain_pages(unsigned int cpu)
 {
+#ifdef CONFIG_PCP
 	unsigned long flags;
 	struct zone *zone;
 	int i;
@@ -670,9 +676,12 @@ static void __drain_pages(unsigned int c
 			local_irq_restore(flags);
 		}
 	}
+#endif
 }
 #endif /* CONFIG_PM || CONFIG_HOTPLUG_CPU */
 
+#endif
+
 #ifdef CONFIG_PM
 
 void mark_free_pages(struct zone *zone)
@@ -719,8 +728,10 @@ void drain_local_pages(void)
 static void fastcall free_hot_cold_page(struct page *page, int cold)
 {
 	struct zone *zone = page_zone(page);
+#ifdef CONFIG_PCP
 	struct per_cpu_pages *pcp;
 	unsigned long flags;
+#endif
 
 	arch_free_page(page, 0);
 
@@ -731,6 +742,7 @@ static void fastcall free_hot_cold_page(
 
 	kernel_map_pages(page, 1, 0);
 
+#ifdef CONFIG_PCP
 	pcp = &zone_pcp(zone, get_cpu())->pcp[cold];
 	local_irq_save(flags);
 	__count_vm_event(PGFREE);
@@ -742,6 +754,10 @@ static void fastcall free_hot_cold_page(
 	}
 	local_irq_restore(flags);
 	put_cpu();
+#else
+	count_vm_event(PGFREE);
+	free_one_page(zone, page, 0);
+#endif
 }
 
 void fastcall free_hot_page(struct page *page)
@@ -782,10 +798,13 @@ static struct page *buffered_rmqueue(str
 {
 	unsigned long flags;
 	struct page *page;
+#ifdef CONFIG_PCP
 	int cold = !!(gfp_flags & __GFP_COLD);
 	int cpu;
+#endif
 
 again:
+#ifdef CONFIG_PCP
 	cpu = get_cpu();
 	if (likely(order == 0)) {
 		struct per_cpu_pages *pcp;
@@ -801,7 +820,9 @@ again:
 		page = list_entry(pcp->list.next, struct page, lru);
 		list_del(&page->lru);
 		pcp->count--;
-	} else {
+	} else
+#endif
+	{
 		spin_lock_irqsave(&zone->lock, flags);
 		page = __rmqueue(zone, order);
 		spin_unlock(&zone->lock);
@@ -812,7 +833,9 @@ again:
 	__count_zone_vm_events(PGALLOC, zone, 1 << order);
 	zone_statistics(zonelist, zone);
 	local_irq_restore(flags);
+#ifdef CONFIG_PCP
 	put_cpu();
+#endif
 
 	BUG_ON(bad_range(zone, page));
 	if (prep_new_page(page, order, gfp_flags))
@@ -1254,7 +1277,9 @@ void si_meminfo_node(struct sysinfo *val
  */
 void show_free_areas(void)
 {
+#ifdef CONFIG_PCP
 	int cpu, temperature;
+#endif
 	unsigned long active;
 	unsigned long inactive;
 	unsigned long free;
@@ -1269,7 +1294,7 @@ void show_free_areas(void)
 			continue;
 		} else
 			printk("\n");
-
+#ifdef CONFIG_PCP
 		for_each_online_cpu(cpu) {
 			struct per_cpu_pageset *pageset;
 
@@ -1283,6 +1308,7 @@ void show_free_areas(void)
 			       pageset->pcp[temperature].batch,
 			       pageset->pcp[temperature].count);
 		}
+#endif
 	}
 
 	get_zone_counts(&active, &inactive, &free);
@@ -1755,6 +1781,7 @@ static int __cpuinit zone_batchsize(stru
 
 inline void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
 {
+#ifdef CONFIG_PCP
 	struct per_cpu_pages *pcp;
 
 	memset(p, 0, sizeof(*p));
@@ -1770,8 +1797,10 @@ inline void setup_pageset(struct per_cpu
 	pcp->high = 2 * batch;
 	pcp->batch = max(1UL, batch/2);
 	INIT_LIST_HEAD(&pcp->list);
+#endif
 }
 
+#ifdef CONFIG_PCP
 /*
  * setup_pagelist_highmark() sets the high water mark for hot per_cpu_pagelist
  * to the value high for the pageset p.
@@ -1788,7 +1817,7 @@ static void setup_pagelist_highmark(stru
 	if ((high/4) > (PAGE_SHIFT * 8))
 		pcp->batch = PAGE_SHIFT * 8;
 }
-
+#endif
 
 #ifdef CONFIG_NUMA
 /*
@@ -1808,6 +1837,7 @@ static void setup_pagelist_highmark(stru
  * not check if the processor is online before following the pageset pointer.
  * Other parts of the kernel may not check if the zone is available.
  */
+#ifdef CONFIG_PCP
 static struct per_cpu_pageset boot_pageset[NR_CPUS];
 
 /*
@@ -1831,7 +1861,6 @@ static int __cpuinit process_zones(int c
 			setup_pagelist_highmark(zone_pcp(zone, cpu),
 				(zone->present_pages / percpu_pagelist_fraction));
 	}
-
 	return 0;
 bad:
 	for_each_zone(dzone) {
@@ -1842,9 +1871,11 @@ bad:
 	}
 	return -ENOMEM;
 }
+#endif
 
 static inline void free_zone_pagesets(int cpu)
 {
+#ifdef CONFIG_PCP
 	struct zone *zone;
 
 	for_each_zone(zone) {
@@ -1853,8 +1884,10 @@ static inline void free_zone_pagesets(in
 		zone_pcp(zone, cpu) = NULL;
 		kfree(pset);
 	}
+#endif
 }
 
+#ifdef CONFIG_PCP
 static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb,
 		unsigned long action,
 		void *hcpu)
@@ -1892,7 +1925,7 @@ void __init setup_per_cpu_pageset(void)
 	BUG_ON(err);
 	register_cpu_notifier(&pageset_notifier);
 }
-
+#endif
 #endif
 
 static __meminit
@@ -1940,9 +1973,12 @@ int zone_wait_table_init(struct zone *zo
 
 static __meminit void zone_pcp_init(struct zone *zone)
 {
+#ifdef CONFIG_PCP
 	int cpu;
+#endif
 	unsigned long batch = zone_batchsize(zone);
 
+#ifdef CONFIG_PCP
 	for (cpu = 0; cpu < NR_CPUS; cpu++) {
 #ifdef CONFIG_NUMA
 		/* Early boot. Slab allocator not functional yet */
@@ -1952,6 +1988,7 @@ static __meminit void zone_pcp_init(stru
 		setup_pageset(zone_pcp(zone,cpu), batch);
 #endif
 	}
+#endif
 	if (zone->present_pages)
 		printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%lu\n",
 			zone->name, zone->present_pages, batch);
@@ -2342,6 +2379,7 @@ int lowmem_reserve_ratio_sysctl_handler(
 	return 0;
 }
 
+#ifdef CONFIG_PCP
 /*
  * percpu_pagelist_fraction - changes the pcp->high for each zone on each
  * cpu.  It is the fraction of total pages in each zone that a hot per cpu pagelist
@@ -2367,6 +2405,7 @@ int percpu_pagelist_fraction_sysctl_hand
 	}
 	return 0;
 }
+#endif
 
 __initdata int hashdist = HASHDIST_DEFAULT;
 
Index: linux-2.6.18-rc4/include/linux/mmzone.h
===================================================================
--- linux-2.6.18-rc4.orig/include/linux/mmzone.h	2006-08-18 11:57:34.083769865 -0700
+++ linux-2.6.18-rc4/include/linux/mmzone.h	2006-08-18 13:00:30.303671564 -0700
@@ -67,15 +67,19 @@ enum zone_stat_item {
 #endif
 	NR_VM_ZONE_STAT_ITEMS };
 
+#ifdef CONFIG_PCP
 struct per_cpu_pages {
 	int count;		/* number of pages in the list */
 	int high;		/* high watermark, emptying needed */
 	int batch;		/* chunk size for buddy add/remove */
 	struct list_head list;	/* the list of pages */
 };
+#endif
 
 struct per_cpu_pageset {
+#ifdef CONFIG_PCP
 	struct per_cpu_pages pcp[2];	/* 0: hot.  1: cold */
+#endif
 #ifdef CONFIG_SMP
 	s8 stat_threshold;	/* maximum diff before update */
 	s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
Index: linux-2.6.18-rc4/mm/vmstat.c
===================================================================
--- linux-2.6.18-rc4.orig/mm/vmstat.c	2006-08-18 12:05:32.365726846 -0700
+++ linux-2.6.18-rc4/mm/vmstat.c	2006-08-18 13:18:10.521390846 -0700
@@ -566,9 +566,11 @@ static int zoneinfo_show(struct seq_file
 			   "\n  pagesets");
 		for_each_online_cpu(i) {
 			struct per_cpu_pageset *pageset;
+#ifdef CONFIG_PCP
 			int j;
-
+#endif
 			pageset = zone_pcp(zone, i);
+#ifdef CONFIG_PCP
 			for (j = 0; j < ARRAY_SIZE(pageset->pcp); j++) {
 				if (pageset->pcp[j].count)
 					break;
@@ -586,6 +588,7 @@ static int zoneinfo_show(struct seq_file
 				   pageset->pcp[j].high,
 				   pageset->pcp[j].batch);
 		}
+#endif
 		seq_printf(m, "\n  vm stats threshold: %d",
 				pageset->stat_threshold);
 	}
Index: linux-2.6.18-rc4/init/main.c
===================================================================
--- linux-2.6.18-rc4.orig/init/main.c	2006-08-06 11:20:11.000000000 -0700
+++ linux-2.6.18-rc4/init/main.c	2006-08-18 13:47:13.969336349 -0700
@@ -548,7 +548,9 @@ asmlinkage void __init start_kernel(void
 	cpuset_init_early();
 	mem_init();
 	kmem_cache_init();
+#ifdef CONFIG_PCP
 	setup_per_cpu_pageset();
+#endif
 	numa_policy_init();
 	if (late_time_init)
 		late_time_init();
Index: linux-2.6.18-rc4/kernel/sysctl.c
===================================================================
--- linux-2.6.18-rc4.orig/kernel/sysctl.c	2006-08-06 11:20:11.000000000 -0700
+++ linux-2.6.18-rc4/kernel/sysctl.c	2006-08-18 13:46:21.738188423 -0700
@@ -71,7 +71,9 @@ extern int printk_ratelimit_jiffies;
 extern int printk_ratelimit_burst;
 extern int pid_max_min, pid_max_max;
 extern int sysctl_drop_caches;
+#ifdef CONFIG_PCP
 extern int percpu_pagelist_fraction;
+#endif
 extern int compat_log;
 
 #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86)
@@ -83,7 +85,9 @@ extern int proc_unknown_nmi_panic(ctl_ta
 /* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */
 static int maxolduid = 65535;
 static int minolduid;
+#ifdef CONFIG_PCP
 static int min_percpu_pagelist_fract = 8;
+#endif
 
 static int ngroups_max = NGROUPS_MAX;
 
@@ -849,6 +853,7 @@ static ctl_table vm_table[] = {
 		.strategy	= &sysctl_intvec,
 		.extra1		= &zero,
 	},
+#ifdef CONFIG_PCP
 	{
 		.ctl_name	= VM_PERCPU_PAGELIST_FRACTION,
 		.procname	= "percpu_pagelist_fraction",
@@ -859,6 +864,7 @@ static ctl_table vm_table[] = {
 		.strategy	= &sysctl_intvec,
 		.extra1		= &min_percpu_pagelist_fract,
 	},
+#endif
 #ifdef CONFIG_MMU
 	{
 		.ctl_name	= VM_MAX_MAP_COUNT,
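
None of the hunks above adds a Kconfig entry for the new CONFIG_PCP symbol, so as posted the option has no prompt and can only be set by editing .config by hand. A minimal sketch of what such an entry might look like follows; the placement in mm/Kconfig, the prompt text, and the default are assumptions, and only the symbol name itself comes from the patch:

# Hypothetical Kconfig entry; not part of the hunks above.  Placement
# (e.g. mm/Kconfig) and wording are assumed; only the CONFIG_PCP
# symbol name comes from the patch.
config PCP
	bool "Per-CPU page lists"
	default y
	help
	  Keep per-CPU lists of hot and cold order-0 pages in front of
	  the buddy allocator so that most single-page allocations and
	  frees avoid taking zone->lock.  Saying N shrinks struct
	  per_cpu_pageset and sends every page straight to the buddy
	  lists via free_one_page() and __rmqueue(), at the cost of more
	  zone->lock contention on SMP.

	  If unsure, say Y.

With the symbol unset, free_hot_cold_page() takes the new #else branch straight into free_one_page(), and buffered_rmqueue() always falls through to the spin_lock_irqsave(&zone->lock, ...) path around __rmqueue(), as the page_alloc.c hunks above show.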