Variable Order Page Cache: Fix up mm/filemap.c Fix up the functions in mm/filemap.c to use the variable page cache. Like many of the following patches, this is also pretty straightforward. 1. Convert the bit ops into calls of page_cache_xxx(mapping, ....) 2. Use the mapping flush function Doing this also cleans up the handling of page cache pages. Signed-off-by: Christoph Lameter --- mm/filemap.c | 62 +++++++++++++++++++++++++++++------------------------------ 1 file changed, 31 insertions(+), 31 deletions(-) Index: linux-2.6.21-rc7/mm/filemap.c =================================================================== --- linux-2.6.21-rc7.orig/mm/filemap.c 2007-04-22 21:59:15.000000000 -0700 +++ linux-2.6.21-rc7/mm/filemap.c 2007-04-22 22:03:09.000000000 -0700 @@ -304,8 +304,8 @@ int wait_on_page_writeback_range(struct int sync_page_range(struct inode *inode, struct address_space *mapping, loff_t pos, loff_t count) { - pgoff_t start = pos >> PAGE_CACHE_SHIFT; - pgoff_t end = (pos + count - 1) >> PAGE_CACHE_SHIFT; + pgoff_t start = page_cache_index(mapping, pos); + pgoff_t end = page_cache_index(mapping, pos + count - 1); int ret; if (!mapping_cap_writeback_dirty(mapping) || !count) @@ -336,8 +336,8 @@ EXPORT_SYMBOL(sync_page_range); int sync_page_range_nolock(struct inode *inode, struct address_space *mapping, loff_t pos, loff_t count) { - pgoff_t start = pos >> PAGE_CACHE_SHIFT; - pgoff_t end = (pos + count - 1) >> PAGE_CACHE_SHIFT; + pgoff_t start = page_cache_index(mapping, pos); + pgoff_t end = page_cache_index(mapping, pos + count - 1); int ret; if (!mapping_cap_writeback_dirty(mapping) || !count) @@ -366,7 +366,7 @@ int filemap_fdatawait(struct address_spa return 0; return wait_on_page_writeback_range(mapping, 0, - (i_size - 1) >> PAGE_CACHE_SHIFT); + page_cache_index(mapping, i_size - 1)); } EXPORT_SYMBOL(filemap_fdatawait); @@ -414,8 +414,8 @@ int filemap_write_and_wait_range(struct /* See comment of filemap_write_and_wait() */ if (err != -EIO) { int err2 = 
wait_on_page_writeback_range(mapping, - lstart >> PAGE_CACHE_SHIFT, - lend >> PAGE_CACHE_SHIFT); + page_cache_index(mapping, lstart), + page_cache_index(mapping, lend)); if (!err) err = err2; } @@ -888,27 +888,27 @@ void do_generic_mapping_read(struct addr struct file_ra_state ra = *_ra; cached_page = NULL; - index = *ppos >> PAGE_CACHE_SHIFT; + index = page_cache_index(mapping, *ppos); next_index = index; prev_index = ra.prev_page; - last_index = (*ppos + desc->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT; - offset = *ppos & ~PAGE_CACHE_MASK; + last_index = page_cache_next(mapping, *ppos + desc->count); + offset = page_cache_offset(mapping, *ppos); isize = i_size_read(inode); if (!isize) goto out; - end_index = (isize - 1) >> PAGE_CACHE_SHIFT; + end_index = page_cache_index(mapping, isize - 1); for (;;) { struct page *page; unsigned long nr, ret; /* nr is the maximum number of bytes to copy from this page */ - nr = PAGE_CACHE_SIZE; + nr = page_cache_size(mapping); if (index >= end_index) { if (index > end_index) goto out; - nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1; + nr = page_cache_offset(mapping, isize - 1) + 1; if (nr <= offset) { goto out; } @@ -935,7 +935,7 @@ page_ok: * before reading the page on the kernel side. */ if (mapping_writably_mapped(mapping)) - flush_dcache_page(page); + flush_mapping_page(page); /* * When (part of) the same page is read multiple times @@ -957,8 +957,8 @@ page_ok: */ ret = actor(desc, page, offset, nr); offset += ret; - index += offset >> PAGE_CACHE_SHIFT; - offset &= ~PAGE_CACHE_MASK; + index += page_cache_index(mapping, offset); + offset = page_cache_offset(mapping, offset); page_cache_release(page); if (ret == nr && desc->count) @@ -1022,16 +1022,16 @@ readpage: * another truncate extends the file - this is desired though). 
*/ isize = i_size_read(inode); - end_index = (isize - 1) >> PAGE_CACHE_SHIFT; + end_index = page_cache_index(mapping, isize - 1); if (unlikely(!isize || index > end_index)) { page_cache_release(page); goto out; } /* nr is the maximum number of bytes to copy from this page */ - nr = PAGE_CACHE_SIZE; + nr = page_cache_size(mapping); if (index == end_index) { - nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1; + nr = page_cache_offset(mapping, isize - 1) + 1; if (nr <= offset) { page_cache_release(page); goto out; @@ -1074,7 +1074,7 @@ no_cached_page: out: *_ra = ra; - *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset; + *ppos = page_cache_pos(mapping, index, offset); if (cached_page) page_cache_release(cached_page); if (filp) @@ -1270,8 +1270,8 @@ asmlinkage ssize_t sys_readahead(int fd, if (file) { if (file->f_mode & FMODE_READ) { struct address_space *mapping = file->f_mapping; - unsigned long start = offset >> PAGE_CACHE_SHIFT; - unsigned long end = (offset + count - 1) >> PAGE_CACHE_SHIFT; + unsigned long start = page_cache_index(mapping, offset); + unsigned long end = page_cache_index(mapping, offset + count - 1); unsigned long len = end - start + 1; ret = do_readahead(mapping, file, start, len); } @@ -2086,9 +2086,9 @@ generic_file_buffered_write(struct kiocb unsigned long offset; size_t copied; - offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */ - index = pos >> PAGE_CACHE_SHIFT; - bytes = PAGE_CACHE_SIZE - offset; + offset = page_cache_offset(mapping, pos); + index = page_cache_index(mapping, pos); + bytes = page_cache_size(mapping) - offset; /* Limit the size of the copy to the caller's write size */ bytes = min(bytes, count); @@ -2149,7 +2149,7 @@ generic_file_buffered_write(struct kiocb else copied = filemap_copy_from_user_iovec(page, offset, cur_iov, iov_base, bytes); - flush_dcache_page(page); + flush_mapping_page(page); status = a_ops->commit_write(file, page, offset, offset+bytes); if (status == AOP_TRUNCATED_PAGE) { page_cache_release(page); @@ 
-2315,8 +2315,8 @@ __generic_file_aio_write_nolock(struct k if (err == 0) { written = written_buffered; invalidate_mapping_pages(mapping, - pos >> PAGE_CACHE_SHIFT, - endbyte >> PAGE_CACHE_SHIFT); + page_cache_index(mapping, pos), + page_cache_index(mapping, endbyte)); } else { /* * We don't know how much we wrote, so just return @@ -2403,7 +2403,7 @@ generic_file_direct_IO(int rw, struct ki */ if (rw == WRITE) { write_len = iov_length(iov, nr_segs); - end = (offset + write_len - 1) >> PAGE_CACHE_SHIFT; + end = page_cache_index(mapping, offset + write_len - 1); if (mapping_mapped(mapping)) unmap_mapping_range(mapping, offset, write_len, 0); } @@ -2420,7 +2420,7 @@ generic_file_direct_IO(int rw, struct ki */ if (rw == WRITE && mapping->nrpages) { retval = invalidate_inode_pages2_range(mapping, - offset >> PAGE_CACHE_SHIFT, end); + page_cache_index(mapping, offset), end); if (retval) goto out; } @@ -2438,7 +2438,7 @@ generic_file_direct_IO(int rw, struct ki */ if (rw == WRITE && mapping->nrpages) { int err = invalidate_inode_pages2_range(mapping, - offset >> PAGE_CACHE_SHIFT, end); + page_cache_index(mapping, offset), end); if (err && retval >= 0) retval = err; }