From: Christoph Lameter

After moving the lockless_freelist to kmem_cache_cpu we no longer need
page->lockless_freelist.  Restructure the use of the struct page fields in
such a way that we never touch the mapping field.

This in turn allows us to remove the special casing of SLUB when determining
the mapping of a page (needed for the corner case of machines with virtually
indexed caches, which must flush the caches of all processors mapping a
page).

Signed-off-by: Christoph Lameter
Signed-off-by: Andrew Morton
---

 include/linux/mm_types.h |    9 ++-------
 mm/slub.c                |    2 --
 2 files changed, 2 insertions(+), 9 deletions(-)

diff -puN include/linux/mm_types.h~slub-do-not-use-page-mapping include/linux/mm_types.h
--- a/include/linux/mm_types.h~slub-do-not-use-page-mapping
+++ a/include/linux/mm_types.h
@@ -62,13 +62,8 @@ struct page {
 #if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
 		spinlock_t ptl;
 #endif
-		struct {			/* SLUB uses */
-			void **lockless_freelist;
-			struct kmem_cache *slab;	/* Pointer to slab */
-		};
-		struct {
-			struct page *first_page;	/* Compound pages */
-		};
+		struct kmem_cache *slab;	/* SLUB: Pointer to slab */
+		struct page *first_page;	/* Compound tail pages */
 	};
 	union {
 		pgoff_t index;		/* Our offset within mapping. */
diff -puN mm/slub.c~slub-do-not-use-page-mapping mm/slub.c
--- a/mm/slub.c~slub-do-not-use-page-mapping
+++ a/mm/slub.c
@@ -1127,7 +1127,6 @@ static struct page *new_slab(struct kmem
 	set_freepointer(s, last, NULL);
 
 	page->freelist = start;
-	page->lockless_freelist = NULL;
 	page->inuse = 0;
 out:
 	if (flags & __GFP_WAIT)
@@ -1153,7 +1152,6 @@ static void __free_slab(struct kmem_cach
 		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
 		- pages);
 
-	page->mapping = NULL;
 	__free_pages(page, s->order);
 }
_
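For context, the special casing referred to above is the CONFIG_SLUB branch
in page_mapping(), which machines with virtually indexed caches reach via
flush_dcache_page() when deciding which processors' caches to flush.  A
rough sketch of its shape in this era's include/linux/mm.h, illustrative
rather than a verbatim copy:

	static inline struct address_space *page_mapping(struct page *page)
	{
		struct address_space *mapping = page->mapping;

		if (unlikely(PageSwapCache(page)))
			mapping = &swapper_space;
	#ifdef CONFIG_SLUB
		/*
		 * Old layout: page->slab shared its union slot with
		 * page->mapping, so a slab page carried a garbage
		 * "mapping" that had to be filtered out here.
		 */
		else if (unlikely(PageSlab(page)))
			mapping = NULL;
	#endif
		else if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON))
			mapping = NULL;
		return mapping;
	}

With the new layout, slab and first_page are single pointers in the union
and therefore alias only page->private; page->mapping is never written by
SLUB, which is also why __free_slab() can stop clearing it above.  That
makes the CONFIG_SLUB branch removable in a follow-up patch.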