---
 fs/ext3/dir.c           |    2 +-
 fs/splice.c             |   10 ++++++----
 include/linux/pagemap.h |   16 ++++++++++------
 mm/filemap.c            |   46 ++++++++++++++++++++++++++++------------------
 mm/madvise.c            |    3 ++-
 mm/readahead.c          |   29 ++++++++++++++++++-----------
 6 files changed, 65 insertions(+), 41 deletions(-)

Index: slub/fs/ext3/dir.c
===================================================================
--- slub.orig/fs/ext3/dir.c	2007-06-04 15:26:15.000000000 -0700
+++ slub/fs/ext3/dir.c	2007-06-04 15:27:04.000000000 -0700
@@ -142,7 +142,7 @@ static int ext3_readdir(struct file * fi
 			page_cache_readahead_ondemand(
 				sb->s_bdev->bd_inode->i_mapping,
 				&filp->f_ra, filp,
-				NULL, index, 1);
+				NULL, index, 1, NULL, 0);
 			filp->f_ra.prev_index = index;
 			bh = ext3_bread(NULL, inode, blk, 0, &err);
 		}
Index: slub/fs/splice.c
===================================================================
--- slub.orig/fs/splice.c	2007-06-04 15:26:15.000000000 -0700
+++ slub/fs/splice.c	2007-06-04 15:27:04.000000000 -0700
@@ -313,12 +313,13 @@ __generic_file_splice_read(struct file *
 		page = find_get_page(mapping, index);
 		if (!page) {
 			page_cache_readahead_ondemand(mapping, &in->f_ra, in,
-					NULL, index, nr_pages - spd.nr_pages);
+					NULL, index, nr_pages - spd.nr_pages,
+					NULL, 0);
 
 			/*
 			 * page didn't exist, allocate one.
 			 */
-			page = page_cache_alloc_cold(mapping);
+			page = page_cache_alloc_cold(mapping, NULL, 0);
 			if (!page)
 				break;
 
@@ -362,7 +363,8 @@ __generic_file_splice_read(struct file *
 
 		if (PageReadahead(page))
 			page_cache_readahead_ondemand(mapping, &in->f_ra, in,
-					page, index, nr_pages - page_nr);
+					page, index, nr_pages - page_nr,
+					NULL, 0);
 
 		/*
 		 * If the page isn't uptodate, we may need to start io on it
@@ -576,7 +578,7 @@ find_page:
 	page = find_lock_page(mapping, index);
 	if (!page) {
 		ret = -ENOMEM;
-		page = page_cache_alloc_cold(mapping);
+		page = page_cache_alloc_cold(mapping, NULL, 0);
 		if (unlikely(!page))
 			goto out_ret;
 
Index: slub/include/linux/pagemap.h
===================================================================
--- slub.orig/include/linux/pagemap.h	2007-06-04 15:26:41.000000000 -0700
+++ slub/include/linux/pagemap.h	2007-06-04 15:26:55.000000000 -0700
@@ -63,22 +63,26 @@ static inline void mapping_set_gfp_mask(
 void release_pages(struct page **pages, int nr, int cold);
 
 #ifdef CONFIG_NUMA
-extern struct page *__page_cache_alloc(gfp_t gfp);
+extern struct page *__page_cache_alloc(gfp_t gfp,
+		struct vm_area_struct *, unsigned long);
 #else
-static inline struct page *__page_cache_alloc(gfp_t gfp)
+static inline struct page *__page_cache_alloc(gfp_t gfp,
+		struct vm_area_struct *vma, unsigned long addr)
 {
 	return alloc_pages(gfp, 0);
 }
 #endif
 
-static inline struct page *page_cache_alloc(struct address_space *x)
+static inline struct page *page_cache_alloc(struct address_space *x,
+		struct vm_area_struct *vma, unsigned long addr)
 {
-	return __page_cache_alloc(mapping_gfp_mask(x));
+	return __page_cache_alloc(mapping_gfp_mask(x), vma, addr);
 }
 
-static inline struct page *page_cache_alloc_cold(struct address_space *x)
+static inline struct page *page_cache_alloc_cold(struct address_space *x,
+		struct vm_area_struct *vma, unsigned long addr)
 {
-	return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
+	return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD, vma, addr);
 }
 
 typedef int filler_t(void *, struct page *);
Index: slub/mm/filemap.c
===================================================================
--- slub.orig/mm/filemap.c	2007-06-04 15:26:41.000000000 -0700
+++ slub/mm/filemap.c	2007-06-04 15:26:58.000000000 -0700
@@ -469,12 +469,16 @@ int add_to_page_cache_lru(struct page *p
 }
 
 #ifdef CONFIG_NUMA
-struct page *__page_cache_alloc(gfp_t gfp)
+struct page *__page_cache_alloc(gfp_t gfp,
+		struct vm_area_struct *vma, unsigned long addr)
 {
 	if (cpuset_do_page_mem_spread()) {
 		int n = cpuset_mem_spread_node();
 		return alloc_pages_node(n, gfp, 0);
 	}
+	if (vma)
+		return alloc_page_vma(gfp, vma, addr);
+
 	return alloc_pages(gfp, 0);
 }
 EXPORT_SYMBOL(__page_cache_alloc);
@@ -673,7 +677,7 @@ repeat:
 	if (!page) {
 		if (!cached_page) {
 			cached_page =
-				__page_cache_alloc(gfp_mask);
+				__page_cache_alloc(gfp_mask, NULL, 0);
 			if (!cached_page)
 				return NULL;
 		}
@@ -809,7 +813,8 @@ grab_cache_page_nowait(struct address_sp
 		page_cache_release(page);
 		return NULL;
 	}
-	page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~__GFP_FS);
+	page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~__GFP_FS,
+				NULL, 0);
 	if (page && add_to_page_cache_lru(page, mapping, index, GFP_KERNEL)) {
 		page_cache_release(page);
 		page = NULL;
@@ -922,7 +927,8 @@ find_page:
 		if (!page) {
 			page_cache_readahead_ondemand(mapping,
 					&ra, filp, page,
-					index, last_index - index);
+					index, last_index - index,
+					NULL, 0);
 			page = find_get_page(mapping, index);
 			if (unlikely(page == NULL))
 				goto no_cached_page;
@@ -930,7 +936,8 @@ find_page:
 		if (PageReadahead(page)) {
 			page_cache_readahead_ondemand(mapping,
 					&ra, filp, page,
-					index, last_index - index);
+					index, last_index - index,
+					NULL, 0);
 		}
 		if (!PageUptodate(page))
 			goto page_not_up_to_date;
@@ -1060,7 +1067,7 @@ no_cached_page:
 		 * page..
 		 */
 		if (!cached_page) {
-			cached_page = page_cache_alloc_cold(mapping);
+			cached_page = page_cache_alloc_cold(mapping, NULL, 0);
 			if (!cached_page) {
 				desc->error = -ENOMEM;
 				goto out;
@@ -1283,13 +1290,15 @@ EXPORT_SYMBOL(generic_file_sendfile);
 
 static ssize_t
 do_readahead(struct address_space *mapping, struct file *filp,
-	     unsigned long index, unsigned long nr)
+	     unsigned long index, unsigned long nr,
+	     struct vm_area_struct *vma, unsigned long addr)
 {
 	if (!mapping || !mapping->a_ops || !mapping->a_ops->readpage)
 		return -EINVAL;
 
 	force_page_cache_readahead(mapping, filp, index,
-					max_sane_readahead(nr));
+					max_sane_readahead(nr),
+					vma, addr);
 	return 0;
 }
 
@@ -1306,7 +1315,7 @@ asmlinkage ssize_t sys_readahead(int fd,
 			unsigned long start = offset >> PAGE_CACHE_SHIFT;
 			unsigned long end = (offset + count - 1) >> PAGE_CACHE_SHIFT;
 			unsigned long len = end - start + 1;
-			ret = do_readahead(mapping, file, start, len);
+			ret = do_readahead(mapping, file, start, len, NULL, 0);
 		}
 		fput(file);
 	}
@@ -1314,7 +1323,6 @@ asmlinkage ssize_t sys_readahead(int fd,
 }
 
 #ifdef CONFIG_MMU
-static int FASTCALL(page_cache_read(struct file * file, unsigned long offset));
 /**
  * page_cache_read - adds requested page to the page cache if not already there
  * @file:	file to read
@@ -1323,14 +1331,15 @@ static int FASTCALL(page_cache_read(stru
  * This adds the requested page to the page cache if it isn't already there,
  * and schedules an I/O to read in its contents from disk.
  */
-static int fastcall page_cache_read(struct file * file, unsigned long offset)
+static int fastcall page_cache_read(struct file * file, unsigned long offset,
+			struct vm_area_struct *vma, unsigned long addr)
 {
 	struct address_space *mapping = file->f_mapping;
 	struct page *page;
 	int ret;
 
 	do {
-		page = page_cache_alloc_cold(mapping);
+		page = page_cache_alloc_cold(mapping, vma, addr);
 		if (!page)
 			return -ENOMEM;
 
@@ -1395,14 +1404,14 @@ retry_find:
 	if (VM_SequentialReadHint(vma)) {
 		if (!page) {
 			page_cache_readahead_ondemand(mapping, ra, file, page,
-						fdata->pgoff, 1);
+						fdata->pgoff, 1, vma, fdata->address);
 			page = find_lock_page(mapping, fdata->pgoff);
 			if (!page)
 				goto no_cached_page;
 		}
 		if (PageReadahead(page)) {
 			page_cache_readahead_ondemand(mapping, ra, file, page,
-						fdata->pgoff, 1);
+						fdata->pgoff, 1, vma, fdata->address);
 		}
 	}
 
@@ -1433,7 +1442,8 @@ retry_find:
 
 		if (fdata->pgoff > ra_pages / 2)
 			start = fdata->pgoff - ra_pages / 2;
-		do_page_cache_readahead(mapping, file, start, ra_pages);
+		do_page_cache_readahead(mapping, file, start, ra_pages,
+						vma, fdata->address);
 	}
 	page = find_lock_page(mapping, fdata->pgoff);
 	if (!page)
@@ -1479,7 +1489,7 @@ no_cached_page:
 	 * We're only likely to ever get here if MADV_RANDOM is in
 	 * effect.
 	 */
-	error = page_cache_read(file, fdata->pgoff);
+	error = page_cache_read(file, fdata->pgoff, vma, fdata->address);
 
 	/*
 	 * The page we want has now been added to the page cache.
@@ -1579,7 +1589,7 @@ repeat:
 	page = find_get_page(mapping, index);
 	if (!page) {
 		if (!cached_page) {
-			cached_page = page_cache_alloc_cold(mapping);
+			cached_page = page_cache_alloc_cold(mapping, NULL, 0);
 			if (!cached_page)
 				return ERR_PTR(-ENOMEM);
 		}
@@ -1693,7 +1703,7 @@ repeat:
 	page = find_lock_page(mapping, index);
 	if (!page) {
 		if (!*cached_page) {
-			*cached_page = page_cache_alloc(mapping);
+			*cached_page = page_cache_alloc(mapping, NULL, 0);
 			if (!*cached_page)
 				return NULL;
 		}
Index: slub/mm/madvise.c
===================================================================
--- slub.orig/mm/madvise.c	2007-06-04 15:26:15.000000000 -0700
+++ slub/mm/madvise.c	2007-06-04 15:27:04.000000000 -0700
@@ -124,7 +124,8 @@ static long madvise_willneed(struct vm_a
 	end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
 
 	force_page_cache_readahead(file->f_mapping,
-			file, start, max_sane_readahead(end - start));
+			file, start, max_sane_readahead(end - start),
+			vma, start);
 	return 0;
 }
 
Index: slub/mm/readahead.c
===================================================================
--- slub.orig/mm/readahead.c	2007-06-04 15:26:15.000000000 -0700
+++ slub/mm/readahead.c	2007-06-04 15:27:04.000000000 -0700
@@ -138,7 +138,8 @@
 static int
 __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
 			pgoff_t offset, unsigned long nr_to_read,
-			unsigned long lookahead_size)
+			unsigned long lookahead_size,
+			struct vm_area_struct *vma, unsigned long addr)
 {
 	struct inode *inode = mapping->host;
 	struct page *page;
@@ -168,7 +169,7 @@ __do_page_cache_readahead(struct address
 			continue;
 
 		read_unlock_irq(&mapping->tree_lock);
-		page = page_cache_alloc_cold(mapping);
+		page = page_cache_alloc_cold(mapping, vma, addr);
 		read_lock_irq(&mapping->tree_lock);
 		if (!page)
 			break;
@@ -197,7 +198,8 @@ out:
  * memory at once.
 */
 int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
-		pgoff_t offset, unsigned long nr_to_read)
+		pgoff_t offset, unsigned long nr_to_read,
+		struct vm_area_struct *vma, unsigned long addr)
 {
 	int ret = 0;
 
@@ -212,7 +214,7 @@ int force_page_cache_readahead(struct ad
 		if (this_chunk > nr_to_read)
 			this_chunk = nr_to_read;
 		err = __do_page_cache_readahead(mapping, filp,
-						offset, this_chunk, 0);
+						offset, this_chunk, 0, vma, addr);
 		if (err < 0) {
 			ret = err;
 			break;
@@ -232,12 +234,14 @@ int force_page_cache_readahead(struct ad
 * request queues.
 */
 int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
-			pgoff_t offset, unsigned long nr_to_read)
+			pgoff_t offset, unsigned long nr_to_read,
+			struct vm_area_struct *vma, unsigned long addr)
 {
 	if (bdi_read_congested(mapping->backing_dev_info))
 		return -1;
 
-	return __do_page_cache_readahead(mapping, filp, offset, nr_to_read, 0);
+	return __do_page_cache_readahead(mapping, filp, offset, nr_to_read, 0,
+						vma, addr);
 }
 
 /*
@@ -263,7 +267,7 @@ unsigned long ra_submit(struct file_ra_s
 	ra_size = ra_readahead_size(ra);
 	la_size = ra_lookahead_size(ra);
 	actual = __do_page_cache_readahead(mapping, filp,
-					ra->ra_index, ra_size, la_size);
+					ra->ra_index, ra_size, la_size, NULL, 0);
 
 	return actual;
 }
@@ -360,7 +364,8 @@ static unsigned long
 ondemand_readahead(struct address_space *mapping,
 		   struct file_ra_state *ra, struct file *filp,
 		   struct page *page, pgoff_t offset,
-		   unsigned long req_size)
+		   unsigned long req_size,
+		   struct vm_area_struct *vma, unsigned long addr)
 {
 	unsigned long max;	/* max readahead pages */
 	pgoff_t ra_index;	/* readahead index */
@@ -389,7 +394,7 @@ ondemand_readahead(struct address_space
 	 */
 	if (!page && !sequential) {
 		return __do_page_cache_readahead(mapping, filp,
-						offset, req_size, 0);
+						offset, req_size, 0, vma, addr);
 	}
 
 	/*
@@ -441,7 +446,9 @@ unsigned long
 page_cache_readahead_ondemand(struct address_space *mapping,
 			struct file_ra_state *ra, struct file *filp,
 			struct page *page, pgoff_t offset,
-			unsigned long req_size)
+			unsigned long req_size,
+			struct vm_area_struct *vma,
+			unsigned long addr)
 {
 	/* no read-ahead */
 	if (!ra->ra_pages)
@@ -465,6 +472,6 @@ page_cache_readahead_ondemand(struct add
 
 	/* do read-ahead */
 	return ondemand_readahead(mapping, ra, filp, page,
-					offset, req_size);
+					offset, req_size, vma, addr);
 }
 EXPORT_SYMBOL_GPL(page_cache_readahead_ondemand);
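
As a review aid, the decision order that the patched __page_cache_alloc() ends
up with on CONFIG_NUMA is restated below with comments. This is only an
annotated copy of the mm/filemap.c hunk above, not an additional change, and
it is not meant to build outside the kernel tree.

#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/cpuset.h>
#include <linux/pagemap.h>

struct page *__page_cache_alloc(gfp_t gfp,
		struct vm_area_struct *vma, unsigned long addr)
{
	/* cpuset page spreading still takes precedence */
	if (cpuset_do_page_mem_spread()) {
		int n = cpuset_mem_spread_node();
		return alloc_pages_node(n, gfp, 0);
	}
	/* a known vma lets the vma/task mempolicy pick the node */
	if (vma)
		return alloc_page_vma(gfp, vma, addr);

	/* callers with no vma pass NULL, 0 and keep the old behaviour */
	return alloc_pages(gfp, 0);
}

Call sites that know the vma (the fault path, madvise_willneed()) hand it
down together with an address; the remaining callers (read, splice, readdir,
sys_readahead) pass NULL and 0, which preserves the pre-patch alloc_pages()
placement.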