Index: linux-2.6.18-rc4-mm3/include/linux/mm.h
===================================================================
--- linux-2.6.18-rc4-mm3.orig/include/linux/mm.h	2006-08-30 16:27:51.516179968 -0700
+++ linux-2.6.18-rc4-mm3/include/linux/mm.h	2006-08-30 16:31:16.761910567 -0700
@@ -226,10 +226,13 @@
 	unsigned long flags;		/* Atomic flags, some possibly
 					 * updated asynchronously */
 	atomic_t _count;		/* Usage count, see below. */
-	atomic_t _mapcount;		/* Count of ptes mapped in mms,
+	union {
+		atomic_t _mapcount;	/* Count of ptes mapped in mms,
 					 * to show when page is mapped
 					 * & limit reverse map searches.
 					 */
+		int inuse;		/* Slabifier: used objects */
+	};
 	union {
 	    struct {
 		unsigned long private;		/* Mapping-private opaque data:
@@ -250,8 +253,15 @@
 #if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
 	    spinlock_t ptl;
 #endif
+	    struct {			/* Slabifier */
+		struct page *first_page;	/* Compound pages */
+		struct slab_cache *slab;	/* Slab pointer */
+	    };
+	};
+	union {
+		pgoff_t index;		/* Our offset within mapping. */
+		void *freelist;		/* Slabifier: Pointer to first free */
 	};
-	pgoff_t index;			/* Our offset within mapping. */
 	struct list_head lru;		/* Pageout list, eg. active_list
 					 * protected by zone->lru_lock !
 					 */
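The unions added above let the slabifier reuse fields that are idle while a page is owned by the allocator, so struct page does not grow. For readers who have not seen this overlay trick, here is a minimal user-space sketch of the idea; struct toy_page is an invented stand-in for illustration only, not the real struct page:

#include <stdio.h>

/* Toy stand-in for the relevant part of struct page (illustration only). */
struct toy_page {
	union {
		int _mapcount;		/* pte map count for mapped pages */
		int inuse;		/* Slabifier: used objects */
	};
	union {
		unsigned long index;	/* offset within the mapping */
		void *freelist;		/* Slabifier: first free object */
	};
};

int main(void)
{
	struct toy_page page;
	char first_object[32];

	/* While the allocator owns the page it uses the overlaid names
	 * directly; the storage is shared, so no new fields are needed. */
	page.freelist = first_object;
	page.inuse = 1;

	printf("sizeof(struct toy_page) = %zu\n", sizeof(struct toy_page));
	printf("inuse=%d freelist=%p\n", page.inuse, page.freelist);
	return 0;
}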
Index: linux-2.6.18-rc4-mm3/mm/slabifier.c
===================================================================
--- linux-2.6.18-rc4-mm3.orig/mm/slabifier.c	2006-08-30 16:31:15.641922947 -0700
+++ linux-2.6.18-rc4-mm3/mm/slabifier.c	2006-08-30 16:32:23.325174675 -0700
@@ -40,13 +40,6 @@
  * The page struct is used to keep necessary information about a slab.
  * For a compound page the first page keeps the slab state.
  *
- * Overloaded fields in struct page:
- *
- * lru -> used to a slab on the lists
- * mapping -> pointer to struct slab
- * index -> pointer to next free object
- * _mapcount -> count number of elements in use
- *
  * Lock order:
  *   1. slab_lock(page)
  *   2. slab->list_lock
@@ -71,56 +64,6 @@
 #define lru_to_first_page(_head) (list_entry((_head)->next, struct page, lru))
 
 /*
- * Some definitions to overload fields in struct page
- */
-static __always_inline void *get_object_pointer(struct page *page)
-{
-	return (void *)page->index;
-}
-
-static __always_inline void set_object_pointer(struct page *page,
-						void *object)
-{
-	page->index = (unsigned long)object;
-}
-
-static __always_inline struct slab *get_slab(struct page *page)
-{
-	return (struct slab *)page->mapping;
-}
-
-static __always_inline void set_slab(struct page *page, struct slab *s)
-{
-	page->mapping = (void *)s;
-}
-
-static __always_inline int *object_counter(struct page *page)
-{
-	return (int *)&page->_mapcount;
-}
-
-static __always_inline void inc_object_counter(struct page *page)
-{
-	(*object_counter(page))++;
-}
-
-static __always_inline void dec_object_counter(struct page *page)
-{
-	(*object_counter(page))--;
-}
-
-static __always_inline void set_object_counter(struct page *page,
-						int counter)
-{
-	*object_counter(page) = counter;
-}
-
-static __always_inline int get_object_counter(struct page *page)
-{
-	return *object_counter(page);
-}
-
-/*
  * Locking for each individual slab using the pagelock
  */
 static __always_inline void slab_lock(struct page *page)
@@ -281,7 +224,7 @@
 static int on_freelist(struct slab *s, struct page *page, void *search)
 {
 	int nr = 0;
-	void **object = get_object_pointer(page);
+	void **object = page->freelist;
 	void *origin = &page->lru;
 
 	if (s->objects == 1)
@@ -299,16 +242,16 @@
 		nr++;
 	}
 
-	if (get_object_counter(page) != s->objects - nr) {
+	if (page->inuse != s->objects - nr) {
 		printk(KERN_CRIT "slab %s: page %p wrong object count."
 				" counter is %d but counted were %d\n",
-			s->sc.name, page, get_object_counter(page),
+			s->sc.name, page, page->inuse,
 			s->objects - nr);
 try_recover:
 		printk(KERN_CRIT "****** Trying to continue by marking "
 			"all objects used (memory leak!)\n");
-		set_object_counter(page, s->objects);
-		set_object_pointer(page, NULL);
+		page->inuse = s->objects;
+		page->freelist = NULL;
 	}
 	return 0;
 }
@@ -346,11 +289,11 @@
 	struct page *page;
 
 	page = s->sc.page_alloc->allocate(s->sc.page_alloc, s->sc.order,
-		flags, node < 0 ? s->sc.node : node);
+		flags, node == -1 ? s->sc.node : node);
 	if (!page)
 		return NULL;
 
-	set_slab(page, s);
+	page->slab = (struct slab_cache *)s;
 	__SetPageSlab(page);
 	atomic_long_inc(&s->nr_slabs);
 	return page;
@@ -364,12 +307,8 @@
  */
 static void __always_inline putback_slab(struct slab *s, struct page *page)
 {
-	int inuse;
-
-	inuse = get_object_counter(page);
-
-	if (inuse) {
-		if (inuse < s->objects)
+	if (page->inuse) {
+		if (page->inuse < s->objects)
 			add_partial(s, page);
 		slab_unlock(page);
 	} else {
@@ -568,7 +507,7 @@
 		return NULL;
 
 	start = page_address(page);
-	set_object_pointer(page, start);
+	page->freelist = start;
 	end = start + s->objects * s->size;
 
 	last = start;
@@ -577,7 +516,7 @@
 		last = p;
 	}
 	last[s->offset] = NULL;
-	set_object_counter(page, 0);
+	page->inuse = 0;
 
 	slab_lock(page);
 	check_free_chain(s, page);
@@ -637,8 +576,8 @@
 	if (unlikely(!page))
 		goto load;
 
-	while (unlikely(!get_object_pointer(page) ||
-			(node > 0 && page_to_nid(page) != node))) {
+	while (unlikely(!page->freelist ||
+			(node != -1 && page_to_nid(page) != node))) {
 
 		deactivate_slab(s, page, cpu);
 load:
@@ -649,10 +588,10 @@
 		}
 	}
 
-	inc_object_counter(page);
-	object = get_object_pointer(page);
+	page->inuse++;
+	object = page->freelist;
 	next_object = object[s->offset];
-	set_object_pointer(page, next_object);
+	page->freelist = next_object;
 	check_free_chain(s, page);
 	SetPageReferenced(page);
 	slab_unlock(page);
@@ -677,7 +616,7 @@
 	struct page * page = virt_to_page(x);
 
 	if (unlikely(PageCompound(page)))
-		page = (struct page *)page_private(page);
+		page = page->first_page;
 
 	if (!PageSlab(page))
 		return NULL;
@@ -704,21 +643,19 @@
 	}
 
 	if (!s) {
-		s = get_slab(page);
-
-		if (unlikely(!s)) {
+		if (unlikely(!page->slab)) {
 			printk(KERN_CRIT
 				"slab_free : no slab(NULL) for object %p.\n",
 				object);
 			goto dumpret;
 		}
 	} else
-		if (unlikely(s != get_slab(page))) {
+		if (unlikely(sc != page->slab)) {
 			printk(KERN_CRIT "slab_free %s: object at %p"
 				" belongs to slab %p\n",
-				s->sc.name, object, get_slab(page));
+				s->sc.name, object, page->slab);
 			dump_stack();
-			s = get_slab(page);
+			s = (struct slab *)page->slab;
 		}
 
 	if (unlikely(!check_valid_pointer(s, page, object, NULL))) {
@@ -746,16 +683,16 @@
 	}
 #endif
 
-	prior = get_object_pointer(page);
+	prior = page->freelist;
 	object[s->offset] = prior;
 
-	set_object_pointer(page, object);
-	dec_object_counter(page);
+	page->freelist = object;
+	page->inuse--;
 
 	if (unlikely(PageActive(page)))
 		goto out_unlock;
 
-	if (unlikely(get_object_counter(page) == 0)) {
+	if (unlikely(page->inuse == 0)) {
 		remove_partial(s, page);
 		check_free_chain(s, page);
 		slab_unlock(page);
@@ -787,7 +724,7 @@
 
 	page = get_object_page(object);
 
-	if (!page || s != get_slab(page))
+	if (!page || sc != page->slab)
 		return 0;
 
 	addr = page_address(page);
@@ -807,14 +744,15 @@
 					const void *object)
 {
 	struct page *page;
-	struct slab *s;
+	struct slab_cache *s;
+
 	page = get_object_page(object);
 
 	if (page) {
-		s = get_slab(page);
-		BUG_ON(sc && s != (void *)sc);
+		s = page->slab;
+		BUG_ON(sc && s != sc);
 		if (s)
-			return s->size;
+			return sc->size;
 	}
 	BUG();
 	return 0;	/* Satisfy compiler */
@@ -832,7 +770,7 @@
 	int unfreeable = 0;
 	void *addr = page_address(page);
 
-	while (get_object_counter(page) - unfreeable > 0) {
+	while (page->inuse - unfreeable > 0) {
 		void *p;
 
 		for (p = addr; p < addr + s->objects; p+= s->size) {
@@ -896,7 +834,7 @@
 		 */
 		SetPageActive(page);
 
-		if (get_object_counter(page) < s->objects && move_object)
+		if (page->inuse < s->objects && move_object)
 			if (move_slab_objects(s, page, move_object) == 0)
 				slabs_freed++;
 
@@ -960,7 +898,7 @@
 
 	spin_lock_irqsave(&s->list_lock, flags);
 	list_for_each_entry(page, list, lru)
-		count += get_object_counter(page);
+		count += page->inuse;
 	spin_unlock_irqrestore(&s->list_lock, flags);
 	return count;
 }
@@ -981,7 +919,7 @@
 
 		if (page) {
 			nr_active++;
-			active += get_object_counter(page);
+			active += page->inuse;
 		}
 	}
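A closing aside on the fast paths touched above: the freelist is threaded through the free objects themselves, each free object holding a pointer to the next one at s->offset, and the patch now manipulates page->freelist and page->inuse directly. Below is a rough user-space sketch of that list handling under simplified assumptions (invented toy_* names, a fixed object count, no locking, no per-CPU slabs); it is an illustration, not kernel code:

#include <stdio.h>

#define NR_OBJECTS	4
#define OBJ_SIZE	32
#define FREE_OFFSET	0	/* stand-in for s->offset, in pointer units */

/* Simplified per-slab state; in the patch this lives in struct page. */
struct toy_slab {
	char objects[NR_OBJECTS][OBJ_SIZE];	/* backing storage */
	void *freelist;				/* first free object */
	int inuse;				/* allocated objects */
};

/* Thread the freelist through the objects, as the slab setup hunk above
 * does (page->freelist = start; ...; last[s->offset] = NULL; page->inuse = 0). */
static void toy_init(struct toy_slab *slab)
{
	int i;

	slab->freelist = slab->objects[0];
	for (i = 0; i < NR_OBJECTS - 1; i++)
		((void **)slab->objects[i])[FREE_OFFSET] = slab->objects[i + 1];
	((void **)slab->objects[NR_OBJECTS - 1])[FREE_OFFSET] = NULL;
	slab->inuse = 0;
}

/* Pop the first free object, as the allocation fast path above does
 * (object = page->freelist; page->freelist = object[s->offset]; page->inuse++). */
static void *toy_alloc(struct toy_slab *slab)
{
	void **object = slab->freelist;

	if (!object)
		return NULL;
	slab->freelist = object[FREE_OFFSET];
	slab->inuse++;
	return object;
}

/* Push an object back, as slab_free() above does
 * (object[s->offset] = prior; page->freelist = object; page->inuse--). */
static void toy_free(struct toy_slab *slab, void *object)
{
	((void **)object)[FREE_OFFSET] = slab->freelist;
	slab->freelist = object;
	slab->inuse--;
}

int main(void)
{
	struct toy_slab slab;
	void *a, *b;

	toy_init(&slab);
	a = toy_alloc(&slab);
	b = toy_alloc(&slab);
	printf("a=%p b=%p inuse=%d\n", a, b, slab.inuse);
	toy_free(&slab, a);
	printf("freelist=%p inuse=%d\n", slab.freelist, slab.inuse);
	return 0;
}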