From: Nick Piggin

Now that compound page handling is properly fixed in the VM, move nommu
over to using compound pages rather than rolling their own refcounting.

nommu vm page refcounting is broken anyway, but there is no need to have
divergent code in the core VM now, nor when it gets fixed.

Signed-off-by: Nick Piggin
Cc: David Howells

(Needs testing, please).

Signed-off-by: Andrew Morton
---

 fs/ramfs/file-nommu.c |    3 +--
 include/linux/mm.h    |    4 ----
 mm/internal.h         |   12 ------------
 mm/nommu.c            |    4 ++--
 mm/page_alloc.c       |    7 -------
 mm/slab.c             |    9 ++++++++-
 6 files changed, 11 insertions(+), 28 deletions(-)

diff -puN fs/ramfs/file-nommu.c~mm-nommu-use-compound-pages fs/ramfs/file-nommu.c
--- devel/fs/ramfs/file-nommu.c~mm-nommu-use-compound-pages	2006-02-27 20:57:55.000000000 -0800
+++ devel-akpm/fs/ramfs/file-nommu.c	2006-02-27 20:57:55.000000000 -0800
@@ -87,8 +87,7 @@ static int ramfs_nommu_expand_for_mappin
 	xpages = 1UL << order;
 	npages = (newsize + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
-	for (loop = 0; loop < npages; loop++)
-		set_page_count(pages + loop, 1);
+	split_page(pages, order);
 
 	/* trim off any pages we don't actually require */
 	for (loop = npages; loop < xpages; loop++)
diff -puN include/linux/mm.h~mm-nommu-use-compound-pages include/linux/mm.h
--- devel/include/linux/mm.h~mm-nommu-use-compound-pages	2006-02-27 20:57:55.000000000 -0800
+++ devel-akpm/include/linux/mm.h	2006-02-27 20:57:55.000000000 -0800
@@ -327,11 +327,7 @@ static inline void get_page(struct page
 
 void put_page(struct page *page);
 
-#ifdef CONFIG_MMU
 void split_page(struct page *page, unsigned int order);
-#else
-static inline void split_page(struct page *page, unsigned int order) {}
-#endif
 
 /*
  * Multiple processes may "see" the same page.  E.g. for untouched
diff -puN mm/internal.h~mm-nommu-use-compound-pages mm/internal.h
--- devel/mm/internal.h~mm-nommu-use-compound-pages	2006-02-27 20:57:55.000000000 -0800
+++ devel-akpm/mm/internal.h	2006-02-27 20:57:55.000000000 -0800
@@ -15,19 +15,7 @@
 
 static inline void set_page_refs(struct page *page, int order)
 {
-#ifdef CONFIG_MMU
 	set_page_count(page, 1);
-#else
-	int i;
-
-	/*
-	 * We need to reference all the pages for this order, otherwise if
-	 * anyone accesses one of the pages with (get/put) it will be freed.
-	 * - eg: access_process_vm()
-	 */
-	for (i = 0; i < (1 << order); i++)
-		set_page_count(page + i, 1);
-#endif /* CONFIG_MMU */
 }
 
 static inline void __put_page(struct page *page)
diff -puN mm/nommu.c~mm-nommu-use-compound-pages mm/nommu.c
--- devel/mm/nommu.c~mm-nommu-use-compound-pages	2006-02-27 20:57:55.000000000 -0800
+++ devel-akpm/mm/nommu.c	2006-02-27 20:57:55.000000000 -0800
@@ -159,7 +159,7 @@ void *__vmalloc(unsigned long size, gfp_
 	/*
 	 * kmalloc doesn't like __GFP_HIGHMEM for some reason
 	 */
-	return kmalloc(size, gfp_mask & ~__GFP_HIGHMEM);
+	return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
 }
 
 struct page * vmalloc_to_page(void *addr)
@@ -623,7 +623,7 @@ static int do_mmap_private(struct vm_are
 	 * - note that this may not return a page-aligned address if the object
 	 *   we're allocating is smaller than a page
 	 */
-	base = kmalloc(len, GFP_KERNEL);
+	base = kmalloc(len, GFP_KERNEL|__GFP_COMP);
 	if (!base)
 		goto enomem;
 
diff -puN mm/page_alloc.c~mm-nommu-use-compound-pages mm/page_alloc.c
--- devel/mm/page_alloc.c~mm-nommu-use-compound-pages	2006-02-27 20:57:55.000000000 -0800
+++ devel-akpm/mm/page_alloc.c	2006-02-27 20:57:55.000000000 -0800
@@ -422,11 +422,6 @@ static void __free_pages_ok(struct page
 	mutex_debug_check_no_locks_freed(page_address(page),
 					 PAGE_SIZE<<order);
 
-#ifndef CONFIG_MMU
-	for (i = 1 ; i < (1 << order) ; ++i)
-		__put_page(page + i);
-#endif
-
 	for (i = 0 ; i < (1 << order) ; ++i)
 		reserved += free_pages_check(page + i);
 	if (reserved)
diff -puN mm/slab.c~mm-nommu-use-compound-pages mm/slab.c
--- devel/mm/slab.c~mm-nommu-use-compound-pages	2006-02-27 20:57:55.000000000 -0800
+++ devel-akpm/mm/slab.c	2006-02-27 20:57:55.000000000 -0800
@@ -590,6 +590,8 @@ static inline void page_set_cache(struct
 
 static inline struct kmem_cache *page_get_cache(struct page *page)
 {
+	if (unlikely(PageCompound(page)))
+		page = (struct page *)page_private(page);
 	return (struct kmem_cache *)page->lru.next;
 }
 
@@ -600,6 +602,8 @@ static inline void page_set_slab(struct
 
 static inline struct slab *page_get_slab(struct page *page)
 {
+	if (unlikely(PageCompound(page)))
+		page = (struct page *)page_private(page);
 	return (struct slab *)page->lru.prev;
 }
 
@@ -2409,8 +2413,11 @@ static void set_slab_attr(struct kmem_ca
 	struct page *page;
 
 	/* Nasty!!!!!! I hope this is OK. */
-	i = 1 << cachep->gfporder;
 	page = virt_to_page(objp);
+
+	i = 1;
+	if (likely(!PageCompound(page)))
+		i <<= cachep->gfporder;
 	do {
 		page_set_cache(page, cachep);
 		page_set_slab(page, slabp);
_