From: Andrew Morton

Make page->private a union of unsigned long and spinlock_t.

Kill nr_swapped obfuscation in shmem.c.

Will need lots more fixups.

Cc: Hugh Dickins
Signed-off-by: Andrew Morton
---

 arch/frv/mm/pgalloc.c       |    4 ++--
 arch/i386/mm/pgtable.c      |    8 ++++----
 fs/afs/file.c               |    4 ++--
 fs/buffer.c                 |    2 +-
 fs/jfs/jfs_metapage.c       |   12 ++++++------
 include/linux/buffer_head.h |    6 +++---
 include/linux/mm.h          |   26 ++++++++++++++++----------
 kernel/kexec.c              |    2 +-
 mm/filemap.c                |    2 +-
 mm/page_alloc.c             |   16 ++++++++--------
 mm/page_io.c                |    6 ++++--
 mm/rmap.c                   |    2 +-
 mm/shmem.c                  |   22 ++++++++++------------
 mm/swap.c                   |    2 +-
 mm/swap_state.c             |    8 ++++----
 mm/swapfile.c               |   12 ++++++------
 mm/vmscan.c                 |    2 +-
 17 files changed, 71 insertions(+), 65 deletions(-)

diff -puN arch/frv/mm/pgalloc.c~mm-split-page-table-lock-fixes arch/frv/mm/pgalloc.c
--- devel/arch/frv/mm/pgalloc.c~mm-split-page-table-lock-fixes	2005-10-23 22:17:21.000000000 -0700
+++ devel-akpm/arch/frv/mm/pgalloc.c	2005-10-23 22:17:21.000000000 -0700
@@ -87,14 +87,14 @@ static inline void pgd_list_add(pgd_t *p
 	if (pgd_list)
 		pgd_list->private = (unsigned long) &page->index;
 	pgd_list = page;
-	page->private = (unsigned long) &pgd_list;
+	set_page_private(page, (unsigned long)&pgd_list);
 }
 
 static inline void pgd_list_del(pgd_t *pgd)
 {
 	struct page *next, **pprev, *page = virt_to_page(pgd);
 	next = (struct page *) page->index;
-	pprev = (struct page **) page->private;
+	pprev = (struct page **)page_private(page);
 	*pprev = next;
 	if (next)
 		next->private = (unsigned long) pprev;
diff -puN arch/i386/mm/pgtable.c~mm-split-page-table-lock-fixes arch/i386/mm/pgtable.c
--- devel/arch/i386/mm/pgtable.c~mm-split-page-table-lock-fixes	2005-10-23 22:17:21.000000000 -0700
+++ devel-akpm/arch/i386/mm/pgtable.c	2005-10-23 22:17:21.000000000 -0700
@@ -188,19 +188,19 @@ static inline void pgd_list_add(pgd_t *p
 	struct page *page = virt_to_page(pgd);
 	page->index = (unsigned long)pgd_list;
 	if (pgd_list)
-		pgd_list->private = (unsigned long)&page->index;
+		set_page_private(pgd_list, (unsigned long)&page->index);
 	pgd_list = page;
-	page->private = (unsigned long)&pgd_list;
+	set_page_private(page, (unsigned long)&pgd_list);
 }
 
 static inline void pgd_list_del(pgd_t *pgd)
 {
 	struct page *next, **pprev, *page = virt_to_page(pgd);
 	next = (struct page *)page->index;
-	pprev = (struct page **)page->private;
+	pprev = (struct page **)page_private(page);
 	*pprev = next;
 	if (next)
-		next->private = (unsigned long)pprev;
+		set_page_private(next, (unsigned long)pprev);
 }
 
 void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
diff -puN fs/afs/file.c~mm-split-page-table-lock-fixes fs/afs/file.c
--- devel/fs/afs/file.c~mm-split-page-table-lock-fixes	2005-10-23 22:17:21.000000000 -0700
+++ devel-akpm/fs/afs/file.c	2005-10-23 22:17:21.000000000 -0700
@@ -291,8 +291,8 @@ static int afs_file_releasepage(struct p
 	cachefs_uncache_page(vnode->cache, page);
 #endif
 
-	pageio = (struct cachefs_page *) page->private;
-	page->private = 0;
+	pageio = (struct cachefs_page *) page_private(page);
+	set_page_private(page, 0);
 	ClearPagePrivate(page);
 
 	if (pageio)
diff -puN fs/buffer.c~mm-split-page-table-lock-fixes fs/buffer.c
--- devel/fs/buffer.c~mm-split-page-table-lock-fixes	2005-10-23 22:17:21.000000000 -0700
+++ devel-akpm/fs/buffer.c	2005-10-23 22:17:21.000000000 -0700
@@ -96,7 +96,7 @@
 static void
 __clear_page_buffers(struct page *page)
 {
 	ClearPagePrivate(page);
-	page->private = 0;
+	set_page_private(page, 0);
 	page_cache_release(page);
 }
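
Everything in this changeset is the same mechanical conversion: raw
page->private accesses become page_private()/set_page_private() calls, so
that the field can move into a union.  For readers who want the idiom in
isolation, a minimal userspace model follows; struct page_model and the
macros below are illustrative stand-ins written for this mail, not the
kernel definitions.

/* build: cc -std=c99 -o priv priv.c */
#include <assert.h>
#include <stdio.h>

struct page_model {
	union {
		unsigned long private;	/* the opaque per-page word */
	} u;
};

#define page_private(page)		((page)->u.private)
#define set_page_private(page, v)	((page)->u.private = (v))

int main(void)
{
	struct page_model page = { { 0 } };

	/* the old style was "page.u.private = 42;"; all callers now go
	 * through the macro, so the field is free to live in a union */
	set_page_private(&page, 42);
	assert(page_private(&page) == 42);
	printf("private = %lu\n", page_private(&page));
	return 0;
}
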
diff -puN fs/jfs/jfs_metapage.c~mm-split-page-table-lock-fixes fs/jfs/jfs_metapage.c
--- devel/fs/jfs/jfs_metapage.c~mm-split-page-table-lock-fixes	2005-10-23 22:17:21.000000000 -0700
+++ devel-akpm/fs/jfs/jfs_metapage.c	2005-10-23 22:17:21.000000000 -0700
@@ -86,7 +86,7 @@ struct meta_anchor {
 	atomic_t io_count;
 	struct metapage *mp[MPS_PER_PAGE];
 };
-#define mp_anchor(page) ((struct meta_anchor *)page->private)
+#define mp_anchor(page) ((struct meta_anchor *)page_private(page))
 
 static inline struct metapage *page_to_mp(struct page *page, uint offset)
 {
@@ -108,7 +108,7 @@ static inline int insert_metapage(struct
 		if (!a)
 			return -ENOMEM;
 		memset(a, 0, sizeof(struct meta_anchor));
-		page->private = (unsigned long)a;
+		set_page_private(page, (unsigned long)a);
 		SetPagePrivate(page);
 		kmap(page);
 	}
@@ -136,7 +136,7 @@ static inline void remove_metapage(struc
 		a->mp[index] = NULL;
 		if (--a->mp_count == 0) {
 			kfree(a);
-			page->private = 0;
+			set_page_private(page, 0);
 			ClearPagePrivate(page);
 			kunmap(page);
 		}
@@ -156,13 +156,13 @@ static inline void dec_io(struct page *p
 #else
 static inline struct metapage *page_to_mp(struct page *page, uint offset)
 {
-	return PagePrivate(page) ? (struct metapage *)page->private : NULL;
+	return PagePrivate(page) ? (struct metapage *)page_private(page) : NULL;
 }
 
 static inline int insert_metapage(struct page *page, struct metapage *mp)
 {
 	if (mp) {
-		page->private = (unsigned long)mp;
+		set_page_private(page, (unsigned long)mp);
 		SetPagePrivate(page);
 		kmap(page);
 	}
@@ -171,7 +171,7 @@ static inline int insert_metapage(struct
 
 static inline void remove_metapage(struct page *page, struct metapage *mp)
 {
-	page->private = 0;
+	set_page_private(page, 0);
 	ClearPagePrivate(page);
 	kunmap(page);
 }
diff -puN include/linux/buffer_head.h~mm-split-page-table-lock-fixes include/linux/buffer_head.h
--- devel/include/linux/buffer_head.h~mm-split-page-table-lock-fixes	2005-10-23 22:17:21.000000000 -0700
+++ devel-akpm/include/linux/buffer_head.h	2005-10-23 22:17:21.000000000 -0700
@@ -126,8 +126,8 @@ BUFFER_FNS(Eopnotsupp, eopnotsupp)
 /* If we *know* page->private refers to buffer_heads */
 #define page_buffers(page)					\
 	({							\
-		BUG_ON(!PagePrivate(page));			\
-		((struct buffer_head *)(page)->private);	\
+		BUG_ON(!PagePrivate(page));			\
+		((struct buffer_head *)page_private(page));	\
 	})
 #define page_has_buffers(page)	PagePrivate(page)
 
@@ -219,7 +219,7 @@ static inline void attach_page_buffers(s
 {
 	page_cache_get(page);
 	SetPagePrivate(page);
-	page->private = (unsigned long)head;
+	set_page_private(page, (unsigned long)head);
 }
 
 static inline void get_bh(struct buffer_head *bh)
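
jfs and buffer_head.h use the private word to park a pointer, with
PG_private recording whether one is there.  A userspace sketch of that
attach/lookup/detach protocol; buffer_like, attach() and lookup() are
invented names, and like the kernel it assumes a pointer fits in an
unsigned long.

#include <assert.h>
#include <stddef.h>

struct buffer_like { int data; };

struct page_model {
	unsigned long flags;			/* bit 0 models PG_private */
	union { unsigned long private; } u;
};

#define page_private(page)		((page)->u.private)
#define set_page_private(page, v)	((page)->u.private = (v))
#define SetPagePrivate(page)		((page)->flags |= 1UL)
#define ClearPagePrivate(page)		((page)->flags &= ~1UL)
#define PagePrivate(page)		((page)->flags & 1UL)

/* models attach_page_buffers()/insert_metapage() */
static void attach(struct page_model *page, struct buffer_like *head)
{
	SetPagePrivate(page);
	set_page_private(page, (unsigned long)head);
}

/* models page_to_mp(): the flag says whether a pointer is parked */
static struct buffer_like *lookup(struct page_model *page)
{
	return PagePrivate(page) ?
		(struct buffer_like *)page_private(page) : NULL;
}

int main(void)
{
	struct page_model page = { 0, { 0 } };
	struct buffer_like head = { 7 };

	attach(&page, &head);
	assert(lookup(&page)->data == 7);

	/* detach mirrors __clear_page_buffers()/remove_metapage() */
	ClearPagePrivate(&page);
	set_page_private(&page, 0);
	assert(lookup(&page) == NULL);
	return 0;
}
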
diff -puN include/linux/mm.h~mm-split-page-table-lock-fixes include/linux/mm.h
--- devel/include/linux/mm.h~mm-split-page-table-lock-fixes	2005-10-23 22:17:21.000000000 -0700
+++ devel-akpm/include/linux/mm.h	2005-10-23 22:17:21.000000000 -0700
@@ -220,13 +220,18 @@ struct page {
 					 * to show when page is mapped
 					 * & limit reverse map searches.
 					 */
-	unsigned long private;		/* Mapping-private opaque data:
+	union {
+		unsigned long private;	/* Mapping-private opaque data:
 					 * usually used for buffer_heads
 					 * if PagePrivate set; used for
 					 * swp_entry_t if PageSwapCache
 					 * When page is free, this indicates
 					 * order in the buddy system.
 					 */
+#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
+		spinlock_t ptl;
+#endif
+	} u;
 	struct address_space *mapping;	/* If low bit clear, points to
 					 * inode address_space, or NULL.
 					 * If page mapped as anonymous
@@ -254,6 +259,9 @@ struct page {
 #endif /* WANT_PAGE_VIRTUAL */
 };
 
+#define page_private(page)		((page)->u.private)
+#define set_page_private(page, v)	((page)->u.private = (v))
+
 /*
  * FIXME: take this include out, include page-flags.h in
  * files which need it (119 of them)
@@ -305,17 +313,17 @@ extern void FASTCALL(__page_cache_releas
 
 #ifdef CONFIG_HUGETLB_PAGE
 
-static inline int page_count(struct page *p)
+static inline int page_count(struct page *page)
 {
-	if (PageCompound(p))
-		p = (struct page *)p->private;
-	return atomic_read(&(p)->_count) + 1;
+	if (PageCompound(page))
+		page = (struct page *)page_private(page);
+	return atomic_read(&page->_count) + 1;
 }
 
 static inline void get_page(struct page *page)
 {
 	if (unlikely(PageCompound(page)))
-		page = (struct page *)page->private;
+		page = (struct page *)page_private(page);
 	atomic_inc(&page->_count);
 }
 
@@ -581,7 +589,7 @@ static inline int PageAnon(struct page *
 static inline pgoff_t page_index(struct page *page)
 {
 	if (unlikely(PageSwapCache(page)))
-		return page->private;
+		return page_private(page);
 	return page->index;
 }
 
@@ -780,10 +788,8 @@ static inline pmd_t *pmd_alloc(struct mm
  * overflow into the next struct page (as it might with DEBUG_SPINLOCK).
  * When freeing, reset page->mapping so free_pages_check won't complain.
  */
-#define __pte_lockptr(page)	((spinlock_t *)&((page)->private))
+#define __pte_lockptr(page)	&((page)->u.ptl)
 #define pte_lock_init(_page)	do {					\
-	BUILD_BUG_ON((size_t)(__pte_lockptr((struct page *)0) + 1) > \
-			sizeof(struct page));			\
 	spin_lock_init(__pte_lockptr(_page));				\
 } while (0)
 #define pte_lock_deinit(page)	((page)->mapping = NULL)
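
The mm.h change is the point of the exercise: private and the split
page-table lock now share storage via an explicit union, so the compiler
reserves room for the lock even with DEBUG_SPINLOCK and the old
BUILD_BUG_ON overflow check can go.  A userspace approximation, with
pthread_spinlock_t standing in for spinlock_t and struct page_model for
struct page.

/* build: cc -std=c99 -o ptl ptl.c -lpthread */
#include <pthread.h>
#include <stdio.h>

struct page_model {
	unsigned long flags;
	union {
		unsigned long private;		/* buddy order, swp_entry_t... */
		pthread_spinlock_t ptl;		/* page-table lock, when split */
	} u;
	void *mapping;
};

#define __pte_lockptr(page)	(&(page)->u.ptl)

int main(void)
{
	struct page_model page;

	/* the union member is a real lock, not a spinlock_t * punned onto
	 * an unsigned long, so it can never overflow past the struct end */
	pthread_spin_init(__pte_lockptr(&page), PTHREAD_PROCESS_PRIVATE);
	pthread_spin_lock(__pte_lockptr(&page));
	printf("lock shares %zu bytes with private\n", sizeof(page.u));
	pthread_spin_unlock(__pte_lockptr(&page));
	pthread_spin_destroy(__pte_lockptr(&page));
	return 0;
}
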
diff -puN kernel/kexec.c~mm-split-page-table-lock-fixes kernel/kexec.c
--- devel/kernel/kexec.c~mm-split-page-table-lock-fixes	2005-10-23 22:17:21.000000000 -0700
+++ devel-akpm/kernel/kexec.c	2005-10-23 22:17:21.000000000 -0700
@@ -348,7 +348,7 @@ static void kimage_free_pages(struct pag
 {
 	unsigned int order, count, i;
 
-	order = page->private;
+	order = page_private(page);
 	count = 1 << order;
 	for (i = 0; i < count; i++)
 		ClearPageReserved(page + i);
diff -puN mm/filemap.c~mm-split-page-table-lock-fixes mm/filemap.c
--- devel/mm/filemap.c~mm-split-page-table-lock-fixes	2005-10-23 22:17:21.000000000 -0700
+++ devel-akpm/mm/filemap.c	2005-10-23 22:17:21.000000000 -0700
@@ -152,7 +152,7 @@ static int sync_page(void *word)
 	 * in the ->sync_page() methods make essential use of the
 	 * page_mapping(), merely passing the page down to the backing
 	 * device's unplug functions when it's non-NULL, which in turn
-	 * ignore it for all cases but swap, where only page->private is
+	 * ignore it for all cases but swap, where only page_private(page) is
 	 * of interest. When page_mapping() does go NULL, the entire
 	 * call stack gracefully ignores the page and returns.
 	 * -- wli
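
kimage_free_pages() above recovers the allocation order from the private
word, and the page_alloc.c hunks below maintain the same convention for
free buddy blocks, guarded by PG_private.  A toy model of order-in-private
(the order value here is invented):

#include <assert.h>

struct page_model { union { unsigned long private; } u; };

#define page_private(page)		((page)->u.private)
#define set_page_private(page, v)	((page)->u.private = (v))

int main(void)
{
	struct page_model head = { { 0 } };

	set_page_private(&head, 3);			/* free order-3 block */
	assert((1UL << page_private(&head)) == 8);	/* spans 8 pages */
	set_page_private(&head, 0);			/* cf. rmv_page_order() */
	return 0;
}
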
diff -puN mm/page_alloc.c~mm-split-page-table-lock-fixes mm/page_alloc.c
--- devel/mm/page_alloc.c~mm-split-page-table-lock-fixes	2005-10-23 22:17:21.000000000 -0700
+++ devel-akpm/mm/page_alloc.c	2005-10-23 22:17:21.000000000 -0700
@@ -157,7 +157,7 @@ static void prep_compound_page(struct pa
 		struct page *p = page + i;
 
 		SetPageCompound(p);
-		p->private = (unsigned long)page;
+		set_page_private(p, (unsigned long)page);
 	}
 }
 
@@ -177,7 +177,7 @@ static void destroy_compound_page(struct
 
 		if (!PageCompound(p))
 			bad_page(__FUNCTION__, page);
-		if (p->private != (unsigned long)page)
+		if (page_private(p) != (unsigned long)page)
 			bad_page(__FUNCTION__, page);
 		ClearPageCompound(p);
 	}
@@ -190,18 +190,18 @@ static void destroy_compound_page(struct
  * So, we don't need atomic page->flags operations here.
  */
 static inline unsigned long page_order(struct page *page) {
-	return page->private;
+	return page_private(page);
 }
 
 static inline void set_page_order(struct page *page, int order) {
-	page->private = order;
+	set_page_private(page, order);
 	__SetPagePrivate(page);
 }
 
 static inline void rmv_page_order(struct page *page)
 {
 	__ClearPagePrivate(page);
-	page->private = 0;
+	set_page_private(page, 0);
 }
 
 /*
@@ -241,7 +241,7 @@ __find_combined_index(unsigned long page
  * (a) the buddy is free &&
  * (b) the buddy is on the buddy system &&
  * (c) a page and its buddy have the same order.
- * for recording page's order, we use page->private and PG_private.
+ * for recording page's order, we use page_private(page) and PG_private.
 *
  */
 static inline int page_is_buddy(struct page *page, int order)
@@ -267,7 +267,7 @@ static inline int page_is_buddy(struct p
  * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of continuous
 * free pages of length of (1 << order) and marked with PG_Private.Page's
- * order is recorded in page->private field.
+ * order is recorded in page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other. That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
@@ -466,7 +466,7 @@ static void prep_new_page(struct page *p
 	page->flags &= ~(1 << PG_uptodate | 1 << PG_error |
 			1 << PG_referenced | 1 << PG_arch_1 |
 			1 << PG_checked | 1 << PG_mappedtodisk);
-	page->private = 0;
+	set_page_private(page, 0);
 	set_page_refs(page, order);
 	kernel_map_pages(page, 1 << order, 1);
 }
diff -puN mm/page_io.c~mm-split-page-table-lock-fixes mm/page_io.c
--- devel/mm/page_io.c~mm-split-page-table-lock-fixes	2005-10-23 22:17:21.000000000 -0700
+++ devel-akpm/mm/page_io.c	2005-10-23 22:17:21.000000000 -0700
@@ -91,7 +91,8 @@ int swap_writepage(struct page *page, st
 		unlock_page(page);
 		goto out;
 	}
-	bio = get_swap_bio(GFP_NOIO, page->private, page, end_swap_bio_write);
+	bio = get_swap_bio(GFP_NOIO, page_private(page), page,
+				end_swap_bio_write);
 	if (bio == NULL) {
 		set_page_dirty(page);
 		unlock_page(page);
@@ -115,7 +116,8 @@ int swap_readpage(struct file *file, str
 	BUG_ON(!PageLocked(page));
 	ClearPageUptodate(page);
-	bio = get_swap_bio(GFP_KERNEL, page->private, page, end_swap_bio_read);
+	bio = get_swap_bio(GFP_KERNEL, page_private(page), page,
+				end_swap_bio_read);
 	if (bio == NULL) {
 		unlock_page(page);
 		ret = -ENOMEM;
diff -puN mm/rmap.c~mm-split-page-table-lock-fixes mm/rmap.c
--- devel/mm/rmap.c~mm-split-page-table-lock-fixes	2005-10-23 22:17:21.000000000 -0700
+++ devel-akpm/mm/rmap.c	2005-10-23 22:17:21.000000000 -0700
@@ -550,7 +550,7 @@ static int try_to_unmap_one(struct page
 	update_hiwater_rss(mm);
 
 	if (PageAnon(page)) {
-		swp_entry_t entry = { .val = page->private };
+		swp_entry_t entry = { .val = page_private(page) };
 		/*
 		 * Store the swap location in the pte.
 		 * See handle_pte_fault() ...
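
For an anonymous page in the swap cache, the same word is read back as a
swp_entry_t, which is what the rmap hunk above and the swapfile/swap_state
hunks below do.  A sketch of that reinterpretation; swp_entry_model_t is
illustrative and does not reproduce the kernel's swp_type/swp_offset
packing.

#include <assert.h>

struct page_model { union { unsigned long private; } u; };
typedef struct { unsigned long val; } swp_entry_model_t;

#define page_private(page)		((page)->u.private)
#define set_page_private(page, v)	((page)->u.private = (v))

int main(void)
{
	struct page_model page = { { 0 } };
	swp_entry_model_t entry;

	set_page_private(&page, 0xabcdUL);	/* cf. __add_to_swap_cache() */
	entry.val = page_private(&page);	/* cf. try_to_unmap_one() */
	assert(entry.val == 0xabcdUL);
	return 0;
}
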
diff -puN mm/shmem.c~mm-split-page-table-lock-fixes mm/shmem.c
--- devel/mm/shmem.c~mm-split-page-table-lock-fixes	2005-10-23 22:17:21.000000000 -0700
+++ devel-akpm/mm/shmem.c	2005-10-23 22:17:21.000000000 -0700
@@ -71,9 +71,6 @@
 /* Pretend that each entry is of this size in directory's i_size */
 #define BOGO_DIRENT_SIZE 20
 
-/* Keep swapped page count in private field of indirect struct page */
-#define nr_swapped		private
-
 /* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */
 enum sgp_type {
 	SGP_QUICK,	/* don't try more than file page cache lookup */
@@ -324,8 +321,10 @@ static void shmem_swp_set(struct shmem_i
 	entry->val = value;
 	info->swapped += incdec;
-	if ((unsigned long)(entry - info->i_direct) >= SHMEM_NR_DIRECT)
-		kmap_atomic_to_page(entry)->nr_swapped += incdec;
+	if ((unsigned long)(entry - info->i_direct) >= SHMEM_NR_DIRECT) {
+		struct page *page = kmap_atomic_to_page(entry);
+		set_page_private(page, page_private(page) + incdec);
+	}
 }
 
 /*
@@ -368,9 +367,8 @@ static swp_entry_t *shmem_swp_alloc(stru
 		spin_unlock(&info->lock);
 		page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping) | __GFP_ZERO);
-		if (page) {
-			page->nr_swapped = 0;
-		}
+		if (page)
+			set_page_private(page, 0);
 		spin_lock(&info->lock);
 
 		if (!page) {
@@ -561,7 +559,7 @@ static void shmem_truncate(struct inode
 			diroff = 0;
 		}
 		subdir = dir[diroff];
-		if (subdir && subdir->nr_swapped) {
+		if (subdir && page_private(subdir)) {
 			size = limit - idx;
 			if (size > ENTRIES_PER_PAGE)
 				size = ENTRIES_PER_PAGE;
@@ -572,10 +570,10 @@ static void shmem_truncate(struct inode
 				nr_swaps_freed += freed;
 				if (offset)
 					spin_lock(&info->lock);
-				subdir->nr_swapped -= freed;
+				set_page_private(subdir, page_private(subdir) - freed);
 				if (offset)
 					spin_unlock(&info->lock);
-				BUG_ON(subdir->nr_swapped > offset);
+				BUG_ON(page_private(subdir) > offset);
 			}
 			if (offset)
 				offset = 0;
@@ -743,7 +741,7 @@ static int shmem_unuse_inode(struct shme
 			dir = shmem_dir_map(subdir);
 		}
 		subdir = *dir;
-		if (subdir && subdir->nr_swapped) {
+		if (subdir && page_private(subdir)) {
 			ptr = shmem_swp_map(subdir);
 			size = limit - idx;
 			if (size > ENTRIES_PER_PAGE)
diff -puN mm/swap.c~mm-split-page-table-lock-fixes mm/swap.c
--- devel/mm/swap.c~mm-split-page-table-lock-fixes	2005-10-23 22:17:21.000000000 -0700
+++ devel-akpm/mm/swap.c	2005-10-23 22:17:21.000000000 -0700
@@ -39,7 +39,7 @@ int page_cluster;
 
 void put_page(struct page *page)
 {
 	if (unlikely(PageCompound(page))) {
-		page = (struct page *)page->private;
+		page = (struct page *)page_private(page);
 		if (put_page_testzero(page)) {
 			void (*dtor)(struct page *page);
diff -puN mm/swapfile.c~mm-split-page-table-lock-fixes mm/swapfile.c
--- devel/mm/swapfile.c~mm-split-page-table-lock-fixes	2005-10-23 22:17:21.000000000 -0700
+++ devel-akpm/mm/swapfile.c	2005-10-23 22:17:21.000000000 -0700
@@ -61,7 +61,7 @@ void swap_unplug_io_fn(struct backing_de
 	swp_entry_t entry;
 
 	down_read(&swap_unplug_sem);
-	entry.val = page->private;
+	entry.val = page_private(page);
 	if (PageSwapCache(page)) {
 		struct block_device *bdev = swap_info[swp_type(entry)].bdev;
 		struct backing_dev_info *bdi;
@@ -69,8 +69,8 @@ void swap_unplug_io_fn(struct backing_de
 		/*
 		 * If the page is removed from swapcache from under us (with a
 		 * racy try_to_unuse/swapoff) we need an additional reference
-		 * count to avoid reading garbage from page->private above. If
-		 * the WARN_ON triggers during a swapoff it maybe the race
+		 * count to avoid reading garbage from page_private(page) above.
+		 * If the WARN_ON triggers during a swapoff it maybe the race
		 * condition and it's harmless. However if it triggers without
		 * swapoff it signals a problem.
		 */
@@ -294,7 +294,7 @@ static inline int page_swapcount(struct
 	struct swap_info_struct *p;
 	swp_entry_t entry;
 
-	entry.val = page->private;
+	entry.val = page_private(page);
 	p = swap_info_get(entry);
 	if (p) {
 		/* Subtract the 1 for the swap cache itself */
@@ -339,7 +339,7 @@ int remove_exclusive_swap_page(struct pa
 	if (page_count(page) != 2) /* 2: us + cache */
 		return 0;
-	entry.val = page->private;
+	entry.val = page_private(page);
 	p = swap_info_get(entry);
 	if (!p)
 		return 0;
@@ -1042,7 +1042,7 @@ int page_queue_congested(struct page *pa
 	BUG_ON(!PageLocked(page));	/* It pins the swap_info_struct */
 
 	if (PageSwapCache(page)) {
-		swp_entry_t entry = { .val = page->private };
+		swp_entry_t entry = { .val = page_private(page) };
 		struct swap_info_struct *sis;
 
 		sis = get_swap_info_struct(swp_type(entry));
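
With the nr_swapped alias gone, the shmem hunks above keep the
per-indirect-page swapped count in the private word through an explicit
read-modify-write, done under info->lock in the real code.  A minimal
model; adjust_swapped() is a hypothetical helper mirroring what
shmem_swp_set() now does inline.

#include <assert.h>

struct page_model { union { unsigned long private; } u; };

#define page_private(page)		((page)->u.private)
#define set_page_private(page, v)	((page)->u.private = (v))

/* the read-modify-write that replaces "subdir->nr_swapped += incdec" */
static void adjust_swapped(struct page_model *subdir, long incdec)
{
	set_page_private(subdir, page_private(subdir) + incdec);
}

int main(void)
{
	struct page_model subdir = { { 0 } };

	adjust_swapped(&subdir, 1);
	adjust_swapped(&subdir, 1);
	adjust_swapped(&subdir, -1);
	assert(page_private(&subdir) == 1);
	return 0;
}
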
diff -puN mm/swap_state.c~mm-split-page-table-lock-fixes mm/swap_state.c
--- devel/mm/swap_state.c~mm-split-page-table-lock-fixes	2005-10-23 22:17:21.000000000 -0700
+++ devel-akpm/mm/swap_state.c	2005-10-23 22:17:21.000000000 -0700
@@ -83,7 +83,7 @@ static int __add_to_swap_cache(struct pa
 		page_cache_get(page);
 		SetPageLocked(page);
 		SetPageSwapCache(page);
-		page->private = entry.val;
+		set_page_private(page, entry.val);
 		total_swapcache_pages++;
 		pagecache_acct(1);
 	}
@@ -126,8 +126,8 @@ void __delete_from_swap_cache(struct pag
 	BUG_ON(PageWriteback(page));
 	BUG_ON(PagePrivate(page));
 
-	radix_tree_delete(&swapper_space.page_tree, page->private);
-	page->private = 0;
+	radix_tree_delete(&swapper_space.page_tree, page_private(page));
+	set_page_private(page, 0);
 	ClearPageSwapCache(page);
 	total_swapcache_pages--;
 	pagecache_acct(-1);
@@ -197,7 +197,7 @@ void delete_from_swap_cache(struct page
 {
 	swp_entry_t entry;
 
-	entry.val = page->private;
+	entry.val = page_private(page);
 
 	write_lock_irq(&swapper_space.tree_lock);
 	__delete_from_swap_cache(page);
diff -puN mm/vmscan.c~mm-split-page-table-lock-fixes mm/vmscan.c
--- devel/mm/vmscan.c~mm-split-page-table-lock-fixes	2005-10-23 22:17:21.000000000 -0700
+++ devel-akpm/mm/vmscan.c	2005-10-23 22:17:21.000000000 -0700
@@ -521,7 +521,7 @@ static int shrink_list(struct list_head
 
 #ifdef CONFIG_SWAP
 		if (PageSwapCache(page)) {
-			swp_entry_t swap = { .val = page->private };
+			swp_entry_t swap = { .val = page_private(page) };
 			__delete_from_swap_cache(page);
 			write_unlock_irq(&mapping->tree_lock);
 			swap_free(swap);
_