===== include/linux/pagemap.h 1.43 vs edited =====
--- 1.43/include/linux/pagemap.h	2004-06-24 01:55:57 -07:00
+++ edited/include/linux/pagemap.h	2004-08-12 13:57:04 -07:00
@@ -49,15 +49,24 @@
 #define page_cache_get(page)		get_page(page)
 #define page_cache_release(page)	put_page(page)
 void release_pages(struct page **pages, int nr, int cold);
+/* Used for spreading out page cache allocations */
+extern unsigned int next_node;
 
 static inline struct page *page_cache_alloc(struct address_space *x)
 {
-	return alloc_pages(mapping_gfp_mask(x), 0);
+	/*
+	 * Round-robin the target node to spread page cache pages across
+	 * NUMA nodes.  next_node is unsigned so the increment wraps
+	 * safely and the modulo always yields a nid below numnodes.
+	 */
+	return alloc_pages_node(next_node++ % numnodes, mapping_gfp_mask(x),
+			0);
 }
 
 static inline struct page *page_cache_alloc_cold(struct address_space *x)
 {
-	return alloc_pages(mapping_gfp_mask(x)|__GFP_COLD, 0);
+	return alloc_pages_node(next_node++ % numnodes,
+			mapping_gfp_mask(x)|__GFP_COLD, 0);
 }
 
 typedef int filler_t(void *, struct page *);
===== mm/mmap.c 1.135 vs edited =====
--- 1.135/mm/mmap.c	2004-07-17 17:00:00 -07:00
+++ edited/mm/mmap.c	2004-08-12 13:57:39 -07:00
@@ -59,6 +59,8 @@
 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
 atomic_t vm_committed_space = ATOMIC_INIT(0);
 
+unsigned int next_node;	/* Used for NUMA page cache distribution */
+
 EXPORT_SYMBOL(sysctl_overcommit_memory);
 EXPORT_SYMBOL(sysctl_overcommit_ratio);
 EXPORT_SYMBOL(sysctl_max_map_count);