From: Paul Jackson

Change the page cache allocation calls to support cpuset memory spreading.

See the previous patch, cpuset_mem_spread, for an explanation of cpuset memory
spreading.

On systems without cpusets configured in the kernel, this is no change.

On systems with cpusets configured in the kernel, but the "memory_spread"
cpuset option not enabled for the current task's cpuset, this adds a call to a
cpuset routine and a failed bit test of the per-process flag PF_SPREAD_PAGE.

On tasks in cpusets with "memory_spread" enabled, this adds a call to a cpuset
routine that computes which of the task's mems_allowed nodes should be
preferred for this allocation.

If memory spreading applies to a particular allocation, then any other NUMA
mempolicy does not apply.

Signed-off-by: Paul Jackson
Signed-off-by: Andrew Morton
---

 include/linux/pagemap.h |    5 +++++
 mm/filemap.c            |   23 +++++++++++++++++++++++
 2 files changed, 28 insertions(+)

diff -puN include/linux/pagemap.h~cpuset-memory-spread-page-cache-implementation-and-hooks include/linux/pagemap.h
--- 25/include/linux/pagemap.h~cpuset-memory-spread-page-cache-implementation-and-hooks	Wed Feb 22 16:47:24 2006
+++ 25-akpm/include/linux/pagemap.h	Wed Feb 22 16:47:24 2006
@@ -51,6 +51,10 @@ static inline void mapping_set_gfp_mask(
 #define page_cache_release(page)	put_page(page)
 void release_pages(struct page **pages, int nr, int cold);
 
+#ifdef CONFIG_NUMA
+extern struct page *page_cache_alloc(struct address_space *x);
+extern struct page *page_cache_alloc_cold(struct address_space *x);
+#else
 static inline struct page *page_cache_alloc(struct address_space *x)
 {
 	return alloc_pages(mapping_gfp_mask(x), 0);
@@ -60,6 +64,7 @@ static inline struct page *page_cache_al
 {
 	return alloc_pages(mapping_gfp_mask(x)|__GFP_COLD, 0);
 }
+#endif
 
 typedef int filler_t(void *, struct page *);
 
diff -puN mm/filemap.c~cpuset-memory-spread-page-cache-implementation-and-hooks mm/filemap.c
--- 25/mm/filemap.c~cpuset-memory-spread-page-cache-implementation-and-hooks	Wed Feb 22 16:47:24 2006
+++ 25-akpm/mm/filemap.c	Wed Feb 22 16:47:24 2006
@@ -30,6 +30,7 @@
 #include <linux/blkdev.h>
 #include <linux/security.h>
 #include <linux/syscalls.h>
+#include <linux/cpuset.h>
 #include "filemap.h"
 #include "internal.h"
 
@@ -427,6 +428,28 @@ int add_to_page_cache_lru(struct page *p
 	return ret;
 }
 
+#ifdef CONFIG_NUMA
+struct page *page_cache_alloc(struct address_space *x)
+{
+	if (cpuset_do_page_mem_spread()) {
+		int n = cpuset_mem_spread_node();
+		return alloc_pages_node(n, mapping_gfp_mask(x), 0);
+	}
+	return alloc_pages(mapping_gfp_mask(x), 0);
+}
+EXPORT_SYMBOL(page_cache_alloc);
+
+struct page *page_cache_alloc_cold(struct address_space *x)
+{
+	if (cpuset_do_page_mem_spread()) {
+		int n = cpuset_mem_spread_node();
+		return alloc_pages_node(n, mapping_gfp_mask(x)|__GFP_COLD, 0);
+	}
+	return alloc_pages(mapping_gfp_mask(x)|__GFP_COLD, 0);
+}
+EXPORT_SYMBOL(page_cache_alloc_cold);
+#endif
+
 /*
  * In order to wait for pages to become available there must be
  * waitqueues associated with pages. By using a hash table of
_
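
For reference, and not part of this patch: the "cpuset routine and failed bit
test" mentioned above come from the previous cpuset_mem_spread patch.  A rough
sketch of what the new page_cache_alloc() ends up calling is below; the helper
bodies and the cpuset_mem_spread_rotor task field are assumptions based on that
patch, not code added here.

/*
 * Sketch (not part of this patch) of the helpers used above, as
 * introduced by the previous cpuset_mem_spread patch.
 */
static inline int cpuset_do_page_mem_spread(void)
{
	/* Fast path: a single bit test on the current task's flags. */
	return current->flags & PF_SPREAD_PAGE;
}

int cpuset_mem_spread_node(void)
{
	int node;

	/* Round-robin a per-task rotor over the task's mems_allowed. */
	node = next_node(current->cpuset_mem_spread_rotor, current->mems_allowed);
	if (node == MAX_NUMNODES)
		node = first_node(current->mems_allowed);
	current->cpuset_mem_spread_rotor = node;
	return node;
}

So, assuming the above, tasks outside a memory spreading cpuset pay one extra
function call and a failed bit test in the new out-of-line page_cache_alloc();
spreading tasks additionally pay the rotor advance before the chosen node is
handed to alloc_pages_node().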