From 46970241939d7c44bbb638fc70e89bad3d61a9a7 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Wed, 25 Jul 2007 20:39:08 -0700 Subject: [PATCH] Large page order operations, zeroing and flushing We may have to zero and flush higher order pages. Implement clear_mapping_page and flush_mapping_page to do that job. Replace the flushing and clearing at some key locations for the pagecache. In some places it is necessary to determine the page order in use from the page struct since no mapping is available. Add a series of page_cache_page_xx functions: page_cache_head(page) -> Determine head page from a tail page page_cache_base_pages(page) -> Number of base pages of a page page_cache_page_order(page) -> Determine page order of a page page_cache_page_size(page) -> Determine page size of a page page_cache_page_shift(page) -> Determine page shift of a page Signed-off-by: Christoph Lameter --- fs/libfs.c | 4 +- include/linux/highmem.h | 36 ------------------- include/linux/pagemap.h | 89 ++++++++++++++++++++++++++++++++++++++++++++++++ mm/filemap.c | 6 +-- mm/filemap_xip.c | 2 - 5 files changed, 95 insertions(+), 42 deletions(-) Index: linux-2.6/fs/libfs.c =================================================================== --- linux-2.6.orig/fs/libfs.c 2008-02-16 21:32:53.000000000 -0800 +++ linux-2.6/fs/libfs.c 2008-02-16 21:42:40.000000000 -0800 @@ -331,8 +331,8 @@ int simple_rename(struct inode *old_dir, int simple_readpage(struct file *file, struct page *page) { - clear_highpage(page); - flush_dcache_page(page); + clear_mapping_page(page); + flush_mapping_page(page); SetPageUptodate(page); unlock_page(page); return 0; Index: linux-2.6/include/linux/pagemap.h =================================================================== --- linux-2.6.orig/include/linux/pagemap.h 2008-02-16 21:32:53.000000000 -0800 +++ linux-2.6/include/linux/pagemap.h 2008-02-16 21:49:01.000000000 -0800 @@ -107,6 +107,31 @@ static inline loff_t page_cache_pos(stru return ((loff_t)index << 
page_cache_shift(mapping)) + offset; } +static inline struct page *page_cache_head(struct page *page) +{ + return page; +} + +static inline int page_cache_base_pages(struct page *page) +{ + return 1; +} + +static inline unsigned long page_cache_page_order(struct page *page) +{ + return 0; +} + +static inline unsigned long page_cache_page_size(struct page *page) +{ + return PAGE_SIZE; +} + +static inline unsigned long page_cache_page_shift(struct page *page) +{ + return PAGE_SHIFT; +} + #define page_cache_get(page) get_page(page) #define page_cache_release(page) put_page(page) void release_pages(struct page **pages, int nr, int cold); @@ -275,6 +300,70 @@ static inline void wait_on_page_writebac extern void end_page_writeback(struct page *page); /* + * Clear a higher order page + */ +static inline void clear_mapping_page(struct page *page) +{ + int nr_pages = page_cache_base_pages(page); + int i; + + for (i = 0; i < nr_pages; i++) + clear_highpage(page + i); +} + +/* + * Primitive support for flushing higher order pages. 
+ * + * A bit stupid: On many platforms flushing the first page + * will flush any TLB starting there + */ +static inline void flush_mapping_page(struct page *page) +{ + int nr_pages = page_cache_base_pages(page); + int i; + + for (i = 0; i < nr_pages; i++) + flush_dcache_page(page + i); +} + +static inline void zero_user_segments(struct page *page, + unsigned start1, unsigned end1, + unsigned start2, unsigned end2) +{ + void *kaddr = kmap_atomic(page, KM_USER0); + + BUG_ON(end1 > page_cache_page_size(page) || + end2 > page_cache_page_size(page)); + + if (end1 > start1) + memset(kaddr + start1, 0, end1 - start1); + + if (end2 > start2) + memset(kaddr + start2, 0, end2 - start2); + + kunmap_atomic(kaddr, KM_USER0); + flush_mapping_page(page); +} + +static inline void zero_user_segment(struct page *page, + unsigned start, unsigned end) +{ + zero_user_segments(page, start, end, 0, 0); +} + +static inline void zero_user(struct page *page, + unsigned start, unsigned size) +{ + zero_user_segments(page, start, start + size, 0, 0); +} + +static inline void __deprecated memclear_highpage_flush(struct page *page, + unsigned int offset, unsigned int size) +{ + zero_user(page, offset, size); +} + +/* * Fault a userspace page into pagetables. Return non-zero on a fault. * * This assumes that two userspace pages are always sufficient. That's Index: linux-2.6/mm/filemap.c =================================================================== --- linux-2.6.orig/mm/filemap.c 2008-02-16 21:32:53.000000000 -0800 +++ linux-2.6/mm/filemap.c 2008-02-16 21:42:40.000000000 -0800 @@ -963,7 +963,7 @@ page_ok: * before reading the page on the kernel side. 
*/ if (mapping_writably_mapped(mapping)) - flush_dcache_page(page); + flush_mapping_page(page); /* * When a sequential read accesses a page several times, @@ -1954,7 +1954,7 @@ int pagecache_write_end(struct file *fil unsigned offset = page_cache_offset(mapping, pos); struct inode *inode = mapping->host; - flush_dcache_page(page); + flush_mapping_page(page); ret = aops->commit_write(file, page, offset, offset+len); unlock_page(page); mark_page_accessed(page); @@ -2167,7 +2167,7 @@ static ssize_t generic_perform_write_2co kunmap_atomic(src, KM_USER0); copied = bytes; } - flush_dcache_page(page); + flush_mapping_page(page); status = a_ops->commit_write(file, page, offset, offset+bytes); if (unlikely(status < 0)) Index: linux-2.6/mm/filemap_xip.c =================================================================== --- linux-2.6.orig/mm/filemap_xip.c 2008-02-16 21:32:53.000000000 -0800 +++ linux-2.6/mm/filemap_xip.c 2008-02-16 21:42:40.000000000 -0800 @@ -105,7 +105,7 @@ do_xip_mapping_read(struct address_space * before reading the page on the kernel side. */ if (mapping_writably_mapped(mapping)) - flush_dcache_page(page); + flush_mapping_page(page); /* * Ok, we have the page, so now we can copy it to user space... 
Index: linux-2.6/include/linux/highmem.h =================================================================== --- linux-2.6.orig/include/linux/highmem.h 2008-02-16 21:32:53.000000000 -0800 +++ linux-2.6/include/linux/highmem.h 2008-02-16 21:48:49.000000000 -0800 @@ -122,42 +122,6 @@ static inline void clear_highpage(struct kunmap_atomic(kaddr, KM_USER0); } -static inline void zero_user_segments(struct page *page, - unsigned start1, unsigned end1, - unsigned start2, unsigned end2) -{ - void *kaddr = kmap_atomic(page, KM_USER0); - - BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE); - - if (end1 > start1) - memset(kaddr + start1, 0, end1 - start1); - - if (end2 > start2) - memset(kaddr + start2, 0, end2 - start2); - - kunmap_atomic(kaddr, KM_USER0); - flush_dcache_page(page); -} - -static inline void zero_user_segment(struct page *page, - unsigned start, unsigned end) -{ - zero_user_segments(page, start, end, 0, 0); -} - -static inline void zero_user(struct page *page, - unsigned start, unsigned size) -{ - zero_user_segments(page, start, start + size, 0, 0); -} - -static inline void __deprecated memclear_highpage_flush(struct page *page, - unsigned int offset, unsigned int size) -{ - zero_user(page, offset, size); -} - #ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE static inline void copy_user_highpage(struct page *to, struct page *from,