From faac61da5899c99f3114fe7c33d131f484d6ef2a Mon Sep 17 00:00:00 2001
From: Christoph Lameter <clameter@sgi.com>
Date: Wed, 25 Jul 2007 20:39:08 -0700
Subject: [PATCH] Large blocksize: Compound page zeroing and flushing

We may now have to zero and flush higher order pages. Implement
clear_mapping_page and flush_mapping_page to do that job. Replace
the flushing and clearing at some key locations for the pagecache.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
---
 fs/libfs.c              |    4 ++--
 include/linux/highmem.h |   31 +++++++++++++++++++++++++++++--
 mm/filemap.c            |    4 ++--
 mm/filemap_xip.c        |    4 ++--
 4 files changed, 35 insertions(+), 8 deletions(-)

diff --git a/fs/libfs.c b/fs/libfs.c
index e90f894..5788495 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -330,8 +330,8 @@ int simple_rename(struct inode *old_dir, struct dentry *old_dentry,
 
 int simple_readpage(struct file *file, struct page *page)
 {
-	clear_highpage(page);
-	flush_dcache_page(page);
+	clear_mapping_page(page);
+	flush_mapping_page(page);
 	SetPageUptodate(page);
 	unlock_page(page);
 	return 0;
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index c79d7c1..7054b29 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -124,14 +124,41 @@ static inline void clear_highpage(struct page *page)
 	kunmap_atomic(kaddr, KM_USER0);
 }
 
+/*
+ * Clear a higher order page
+ */
+static inline void clear_mapping_page(struct page *page)
+{
+	int nr_pages = compound_pages(page);
+	int i;
+
+	for (i = 0; i < nr_pages; i++)
+		clear_highpage(page + i);
+}
+
+/*
+ * Primitive support for flushing higher order pages.
+ *
+ * A bit stupid: On many platforms flushing the first page
+ * will flush any TLB starting there
+ */
+static inline void flush_mapping_page(struct page *page)
+{
+	int nr_pages = compound_pages(page);
+	int i;
+
+	for (i = 0; i < nr_pages; i++)
+		flush_dcache_page(page + i);
+}
+
 static inline void zero_user_segments(struct page *page,
 	unsigned start1, unsigned end1,
 	unsigned start2, unsigned end2)
 {
 	void *kaddr = kmap_atomic(page, KM_USER0);
 
-	BUG_ON(end1 > PAGE_SIZE ||
-		end2 > PAGE_SIZE);
+	BUG_ON(end1 > compound_size(page) ||
+		end2 > compound_size(page));
 
 	if (end1 > start1)
 		memset(kaddr + start1, 0, end1 - start1);
diff --git a/mm/filemap.c b/mm/filemap.c
index c188512..5e8f898 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -941,7 +941,7 @@ page_ok:
 	 * before reading the page on the kernel side.
 	 */
 	if (mapping_writably_mapped(mapping))
-		flush_dcache_page(page);
+		flush_mapping_page(page);
 
 	/*
 	 * When a sequential read accesses a page several times,
@@ -1952,7 +1952,7 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
 		else
 			copied = filemap_copy_from_user_iovec(page, offset,
 						cur_iov, iov_base, bytes);
-		flush_dcache_page(page);
+		flush_mapping_page(page);
 		status = a_ops->commit_write(file, page, offset, offset+bytes);
 		if (status == AOP_TRUNCATED_PAGE) {
 			page_cache_release(page);
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index 5237e53..e1e7e77 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -104,7 +104,7 @@ do_xip_mapping_read(struct address_space *mapping,
 		 * before reading the page on the kernel side.
 		 */
 		if (mapping_writably_mapped(mapping))
-			flush_dcache_page(page);
+			flush_mapping_page(page);
 
 		/*
 		 * Ok, we have the page, so now we can copy it to user space...
@@ -320,7 +320,7 @@ __xip_file_write(struct file *filp, const char __user *buf,
 		}
 
 		copied = filemap_copy_from_user(page, offset, buf, bytes);
-		flush_dcache_page(page);
+		flush_mapping_page(page);
 
 		if (likely(copied > 0)) {
 			status = copied;
-- 
1.4.4.4
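
Note for reviewers: clear_mapping_page(), flush_mapping_page() and the
compound_size() checks above all rely on the compound_pages() and
compound_size() helpers introduced earlier in this series; they are not
defined in this patch. As a rough sketch of the behavior the code above
assumes (names taken from the series, bodies reconstructed here, not
authoritative), both degenerate to 1 and PAGE_SIZE for an ordinary
order-0 page:

/*
 * Sketch only -- the real definitions come from earlier patches in
 * the series.  compound_order() returns 0 for a non-compound page,
 * so a plain pagecache page behaves exactly as before.
 */
static inline int compound_pages(struct page *page)
{
	/* Number of base pages backing this (possibly compound) page */
	return 1 << compound_order(page);
}

static inline unsigned long compound_size(struct page *page)
{
	/* Total size in bytes of the (possibly compound) page */
	return PAGE_SIZE << compound_order(page);
}

With helpers along these lines, simple_readpage() above zeroes and
flushes every base page of a compound pagecache page rather than only
the head page.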