diff -Naurp --exclude-from=/home/con/kernel/dontdiff linux-2.6.7-iso/include/linux/swap.h linux-2.6.7-ck1pre/include/linux/swap.h
--- linux-2.6.7-iso/include/linux/swap.h	2004-06-16 17:35:46.000000000 +1000
+++ linux-2.6.7-ck1pre/include/linux/swap.h	2004-06-16 20:45:17.735283986 +1000
@@ -175,6 +175,7 @@ extern void swap_setup(void);
 extern int try_to_free_pages(struct zone **, unsigned int, unsigned int);
 extern int shrink_all_memory(int);
 extern int vm_swappiness;
+extern int auto_swappiness;
 
 #ifdef CONFIG_MMU
 /* linux/mm/shmem.c */
diff -Naurp --exclude-from=/home/con/kernel/dontdiff linux-2.6.7-iso/include/linux/sysctl.h linux-2.6.7-ck1pre/include/linux/sysctl.h
--- linux-2.6.7-iso/include/linux/sysctl.h	2004-06-16 20:41:29.489328189 +1000
+++ linux-2.6.7-ck1pre/include/linux/sysctl.h	2004-06-16 20:45:17.736284240 +1000
@@ -166,6 +166,7 @@ enum
 	VM_LAPTOP_MODE=23,	/* vm laptop mode */
 	VM_BLOCK_DUMP=24,	/* block dump mode */
 	VM_HUGETLB_GROUP=25,	/* permitted hugetlb group */
+	VM_AUTO_SWAPPINESS=26,	/* Make vm_swappiness autoregulated */
 };
 
diff -Naurp --exclude-from=/home/con/kernel/dontdiff linux-2.6.7-iso/kernel/sysctl.c linux-2.6.7-ck1pre/kernel/sysctl.c
--- linux-2.6.7-iso/kernel/sysctl.c	2004-06-16 20:41:29.515334793 +1000
+++ linux-2.6.7-ck1pre/kernel/sysctl.c	2004-06-16 20:45:17.738284748 +1000
@@ -743,6 +743,14 @@ static ctl_table vm_table[] = {
 		.extra1		= &zero,
 		.extra2		= &one_hundred,
 	},
+	{
+		.ctl_name	= VM_AUTO_SWAPPINESS,
+		.procname	= "autoswappiness",
+		.data		= &auto_swappiness,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec,
+	},
 #ifdef CONFIG_HUGETLB_PAGE
 	{
 		.ctl_name	= VM_HUGETLB_PAGES,
diff -Naurp --exclude-from=/home/con/kernel/dontdiff linux-2.6.7-iso/mm/vmscan.c linux-2.6.7-ck1pre/mm/vmscan.c
--- linux-2.6.7-iso/mm/vmscan.c	2004-06-16 17:35:46.000000000 +1000
+++ linux-2.6.7-ck1pre/mm/vmscan.c	2004-06-16 20:47:00.381347687 +1000
@@ -43,6 +43,7 @@
  * From 0 .. 100. Higher means more swappy.
  */
 int vm_swappiness = 60;
+int auto_swappiness = 1;
 static long total_memory;
 
 #define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
@@ -690,6 +691,41 @@ refill_inactive_zone(struct zone *zone,
 	 */
	mapped_ratio = (sc->nr_mapped * 100) / total_memory;
 
+#ifdef CONFIG_SWAP
+	if (auto_swappiness) {
+		int app_percent;
+		struct sysinfo i;
+
+		si_swapinfo(&i);
+
+		if (likely(i.totalswap >= 100)) {
+			int swap_centile;
+
+			/*
+			 * app_percent is the percentage of physical ram used
+			 * by application pages.
+			 */
+			si_meminfo(&i);
+			app_percent = 100 - ((i.freeram + get_page_cache_size() -
+				swapper_space.nrpages) / (i.totalram / 100));
+
+			/*
+			 * swap_centile is the percentage of the last (sizeof physical
+			 * ram) of swap free.
+			 */
+			swap_centile = i.freeswap /
+				(min(i.totalswap, i.totalram) / 100);
+			/*
+			 * Autoregulate vm_swappiness to be equal to the lowest of
+			 * app_percent and swap_centile. Bias it downwards -ck
+			 */
+			vm_swappiness = min(app_percent, swap_centile);
+			vm_swappiness = vm_swappiness * vm_swappiness / 100;
+		} else
+			vm_swappiness = 0;
+	}
+#endif
+
 	/*
 	 * Now decide how much we really want to unmap some pages. The mapped
 	 * ratio is downgraded - just because there's a lot of mapped memory
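
To see what numbers the autoregulation actually produces, here is a minimal userspace sketch of the arithmetic the mm/vmscan.c hunk adds to refill_inactive_zone(). It is not kernel code: the memory figures are invented example values for a hypothetical 512MB machine with 1GB of swap, and min_ul() is a local stand-in for the kernel's min() macro. In the kernel the real figures come from si_meminfo(), si_swapinfo(), get_page_cache_size() and swapper_space.nrpages, and the block is re-run each time refill_inactive_zone() executes, so vm_swappiness tracks current memory pressure.

/*
 * Standalone userspace sketch of the autoregulation arithmetic above.
 * All memory figures are made-up example values, in pages of 4KB.
 */
#include <stdio.h>

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	/* Hypothetical figures for a 512MB box with 1GB of swap. */
	unsigned long totalram  = 131072;	/* 512MB of physical ram */
	unsigned long freeram   = 8192;		/* 32MB free */
	unsigned long pagecache = 65536;	/* 256MB of page cache ... */
	unsigned long swapcache = 2048;		/* ... of which 8MB is swap cache */
	unsigned long totalswap = 262144;	/* 1GB of swap */
	unsigned long freeswap  = 245760;	/* 960MB of swap still free */

	int vm_swappiness;

	if (totalswap >= 100) {
		/* Percentage of physical ram used by application pages. */
		int app_percent = 100 - ((freeram + pagecache - swapcache) /
					 (totalram / 100));

		/* Percentage of the last (sizeof physical ram) of swap free. */
		int swap_centile = freeswap /
				   (min_ul(totalswap, totalram) / 100);

		/* Take the lower of the two and bias it downwards by squaring. */
		vm_swappiness = app_percent < swap_centile ?
				app_percent : swap_centile;
		vm_swappiness = vm_swappiness * vm_swappiness / 100;
	} else {
		/* Effectively no swap configured: never swap. */
		vm_swappiness = 0;
	}

	printf("autoregulated vm_swappiness = %d\n", vm_swappiness);
	return 0;
}

With these figures app_percent works out to 46 and swap_centile to 187, so the code picks 46 and squares it down to 46 * 46 / 100 = 21, noticeably less swappy than the default of 60. The patch exposes the toggle as /proc/sys/vm/autoswappiness (mode 0644, handled by proc_dointvec); writing 0 there returns vm_swappiness to manual control.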