From 328f263a571c5561f933b89152984a263145f7e8 Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Wed, 25 Jul 2007 20:39:08 -0700
Subject: [PATCH] Compound page zeroing and flushing

We may now have to zero and flush higher order pages. Implement
clear_mapping_page() and flush_mapping_page() to do that job, and use
them to replace the clearing and flushing at key locations in the page
cache.

Signed-off-by: Christoph Lameter
---
 fs/libfs.c              |    4 +-
 include/linux/highmem.h |   38 +--------------------
 include/linux/pagemap.h |   83 ++++++++++++++++++++++++++++++++++++++++++++++++
 mm/filemap.c            |    4 +-
 mm/filemap_xip.c        |    4 +-
 5 files changed, 91 insertions(+), 42 deletions(-)

Index: linux-2.6/fs/libfs.c
===================================================================
--- linux-2.6.orig/fs/libfs.c	2007-09-05 01:55:09.000000000 -0700
+++ linux-2.6/fs/libfs.c	2007-09-06 07:32:01.000000000 -0700
@@ -330,8 +330,8 @@ int simple_rename(struct inode *old_dir,
 
 int simple_readpage(struct file *file, struct page *page)
 {
-	clear_highpage(page);
-	flush_dcache_page(page);
+	clear_mapping_page(page);
+	flush_mapping_page(page);
 	SetPageUptodate(page);
 	unlock_page(page);
 	return 0;
Index: linux-2.6/include/linux/highmem.h
===================================================================
--- linux-2.6.orig/include/linux/highmem.h	2007-09-05 01:55:09.000000000 -0700
+++ linux-2.6/include/linux/highmem.h	2007-09-05 01:56:09.000000000 -0700
@@ -124,42 +124,8 @@ static inline void clear_highpage(struct
 	kunmap_atomic(kaddr, KM_USER0);
 }
 
-static inline void zero_user_segments(struct page *page,
-	unsigned start1, unsigned end1,
-	unsigned start2, unsigned end2)
-{
-	void *kaddr = kmap_atomic(page, KM_USER0);
-
-	BUG_ON(end1 > PAGE_SIZE ||
-		end2 > PAGE_SIZE);
-
-	if (end1 > start1)
-		memset(kaddr + start1, 0, end1 - start1);
-
-	if (end2 > start2)
-		memset(kaddr + start2, 0, end2 - start2);
-
-	kunmap_atomic(kaddr, KM_USER0);
-	flush_dcache_page(page);
-}
-
-static inline void zero_user_segment(struct page *page,
-	unsigned start, unsigned end)
-{
-	zero_user_segments(page, start, end, 0, 0);
-}
-
-static inline void zero_user(struct page *page,
-	unsigned start, unsigned size)
-{
-	zero_user_segments(page, start, start + size, 0, 0);
-}
-
-static inline void __deprecated memclear_highpage_flush(struct page *page,
-			unsigned int offset, unsigned int size)
-{
-	zero_user(page, offset, size);
-}
+#define memclear_highpage_flush(page, offset, size) \
+	zero_user(page, offset, size)
 
 #ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE
 
Index: linux-2.6/mm/filemap.c
===================================================================
--- linux-2.6.orig/mm/filemap.c	2007-09-05 01:55:09.000000000 -0700
+++ linux-2.6/mm/filemap.c	2007-09-06 07:32:23.000000000 -0700
@@ -941,7 +941,7 @@ page_ok:
 		 * before reading the page on the kernel side.
 		 */
 		if (mapping_writably_mapped(mapping))
-			flush_dcache_page(page);
+			flush_mapping_page(page);
 
 		/*
 		 * When a sequential read accesses a page several times,
@@ -1932,7 +1932,7 @@ generic_file_buffered_write(struct kiocb
 		else
 			copied = filemap_copy_from_user_iovec(page, offset,
 						cur_iov, iov_base, bytes);
-		flush_dcache_page(page);
+		flush_mapping_page(page);
 		status = a_ops->commit_write(file, page, offset, offset+bytes);
 		if (status == AOP_TRUNCATED_PAGE) {
 			page_cache_release(page);
Index: linux-2.6/mm/filemap_xip.c
===================================================================
--- linux-2.6.orig/mm/filemap_xip.c	2007-09-05 01:55:09.000000000 -0700
+++ linux-2.6/mm/filemap_xip.c	2007-09-05 01:56:09.000000000 -0700
@@ -104,7 +104,7 @@ do_xip_mapping_read(struct address_space
 		 * before reading the page on the kernel side.
 		 */
 		if (mapping_writably_mapped(mapping))
-			flush_dcache_page(page);
+			flush_mapping_page(page);
 
 		/*
 		 * Ok, we have the page, so now we can copy it to user space...
@@ -320,7 +320,7 @@ __xip_file_write(struct file *filp, cons
 		}
 
 		copied = filemap_copy_from_user(page, offset, buf, bytes);
-		flush_dcache_page(page);
+		flush_mapping_page(page);
 
 		if (likely(copied > 0)) {
 			status = copied;
Index: linux-2.6/include/linux/pagemap.h
===================================================================
--- linux-2.6.orig/include/linux/pagemap.h	2007-09-05 01:55:09.000000000 -0700
+++ linux-2.6/include/linux/pagemap.h	2007-09-06 07:32:20.000000000 -0700
@@ -91,6 +91,31 @@ static inline unsigned int page_cache_of
 	return pos & ~PAGE_MASK;
 }
 
+static inline struct page *page_cache_base(struct page *page)
+{
+	return page;
+}
+
+static inline int page_cache_pages(struct page *page)
+{
+	return 1;
+}
+
+static inline unsigned long page_cache_page_order(struct page *page)
+{
+	return 0;
+}
+
+static inline unsigned long page_cache_page_size(struct page *page)
+{
+	return PAGE_SIZE;
+}
+
+static inline unsigned long page_cache_page_shift(struct page *page)
+{
+	return PAGE_SHIFT;
+}
+
 static inline pgoff_t page_cache_index(struct address_space *a,
 		loff_t pos)
 {
@@ -135,6 +160,64 @@ static inline struct page *page_cache_al
 	return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
 }
 
+/*
+ * Clear a higher order page
+ */
+static inline void clear_mapping_page(struct page *page)
+{
+	int nr_pages = page_cache_pages(page);
+	int i;
+
+	for (i = 0; i < nr_pages; i++)
+		clear_highpage(page + i);
+}
+
+/*
+ * Primitive support for flushing higher order pages.
+ *
+ * A bit wasteful: on many platforms flushing the dcache of the
+ * first page already flushes the pages that follow it.
+ */
+static inline void flush_mapping_page(struct page *page)
+{
+	int nr_pages = page_cache_pages(page);
+	int i;
+
+	for (i = 0; i < nr_pages; i++)
+		flush_dcache_page(page + i);
+}
+
+static inline void zero_user_segments(struct page *page,
+	unsigned start1, unsigned end1,
+	unsigned start2, unsigned end2)
+{
+	void *kaddr = kmap_atomic(page, KM_USER0);
+
+	BUG_ON(end1 > page_cache_page_size(page) ||
+		end2 > page_cache_page_size(page));
+
+	if (end1 > start1)
+		memset(kaddr + start1, 0, end1 - start1);
+
+	if (end2 > start2)
+		memset(kaddr + start2, 0, end2 - start2);
+
+	kunmap_atomic(kaddr, KM_USER0);
+	flush_dcache_page(page);
+}
+
+static inline void zero_user_segment(struct page *page,
+	unsigned start, unsigned end)
+{
+	zero_user_segments(page, start, end, 0, 0);
+}
+
+static inline void zero_user(struct page *page,
+	unsigned start, unsigned size)
+{
+	zero_user_segments(page, start, start + size, 0, 0);
+}
+
 typedef int filler_t(void *, struct page *);
 
 extern struct page * find_get_page(struct address_space *mapping,
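
For illustration only (not part of the patch): below is a minimal
userspace mock of the new helpers, sketching why callers such as
simple_readpage() need no further changes once page_cache_pages()
returns more than 1 for compound pages. The struct page stub, its
order field and the byte-array backing are invented for this sketch;
the kernel versions operate on real page structs and call
clear_highpage()/flush_dcache_page() on each subpage.

	/*
	 * Userspace mock of clear_mapping_page()/page_cache_pages().
	 * "struct page" and its backing store are stand-ins invented
	 * for this sketch, not kernel types.
	 */
	#include <stdio.h>
	#include <string.h>

	#define PAGE_SIZE 4096

	struct page {
		int order;		/* log2 of the number of base pages */
		unsigned char *data;	/* stand-in for the page's memory */
	};

	/*
	 * In the patch this still returns 1; a compound-page aware
	 * page cache would derive it from the page order, as mocked here.
	 */
	static int page_cache_pages(struct page *page)
	{
		return 1 << page->order;
	}

	/* Mirrors clear_mapping_page(): clear every base page of the unit. */
	static void clear_mapping_page(struct page *page)
	{
		int i, nr_pages = page_cache_pages(page);

		for (i = 0; i < nr_pages; i++)
			memset(page->data + i * PAGE_SIZE, 0, PAGE_SIZE);
	}

	int main(void)
	{
		unsigned char buf[4 * PAGE_SIZE];	/* an order-2 "page" */
		struct page page = { .order = 2, .data = buf };

		memset(buf, 0xff, sizeof(buf));
		clear_mapping_page(&page);

		/* Every subpage is zeroed, not just the head page. */
		printf("first byte %d, last byte %d\n",
			buf[0], buf[sizeof(buf) - 1]);
		return 0;
	}

Built with any C compiler, this prints 0 for both bytes: the whole
order-2 unit was cleared, which is exactly the behavior the loop in
clear_mapping_page() provides over a plain clear_highpage() call.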