---
 mm/slub.c | 38 ++++++++++++++++++++++++++++++++------
 1 file changed, 32 insertions(+), 6 deletions(-)

Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c	2007-10-18 12:25:40.000000000 -0700
+++ linux-2.6/mm/slub.c	2007-10-18 12:25:43.000000000 -0700
@@ -100,6 +100,7 @@
  */
 
 #define FROZEN (1 << PG_active)
+#define SLABPARTIAL (1 << PG_writeback)
 
 #ifdef CONFIG_SLUB_DEBUG
 #define SLABDEBUG (1 << PG_error)
@@ -130,6 +131,24 @@ static inline void ClearSlabFrozen(struc
 }
 
 /*
+ * The partialbit is updated under list_lock
+ */
+static inline int SlabPartial(struct page *page)
+{
+	return page->flags & SLABPARTIAL;
+}
+
+static inline void SetSlabPartial(struct page *page)
+{
+	page->flags |= SLABPARTIAL;
+}
+
+static inline void ClearSlabPartial(struct page *page)
+{
+	page->flags &= ~SLABPARTIAL;
+}
+
+/*
  * Slabdebug is only set on slab creation and destruction
  */
 static inline int SlabDebug(struct page *page)
@@ -844,7 +863,8 @@ static void add_full(struct kmem_cache *
 	unsigned long flags;
 
 	spin_lock_irqsave(&n->list_lock, flags);
-	list_add(&page->lru, &n->full);
+	if (!SlabPartial(page))
+		list_add(&page->lru, &n->full);
 	spin_unlock_irqrestore(&n->list_lock, flags);
 }
 
@@ -1244,11 +1264,14 @@ static void add_partial(struct kmem_cach
 	unsigned long flags;
 
 	spin_lock_irqsave(&n->list_lock, flags);
-	n->nr_partial++;
-	if (tail)
-		list_add_tail(&page->lru, &n->partial);
-	else
-		list_add(&page->lru, &n->partial);
+	if (!SlabPartial(page)) {
+		SetSlabPartial(page);
+		n->nr_partial++;
+		if (tail)
+			list_add_tail(&page->lru, &n->partial);
+		else
+			list_add(&page->lru, &n->partial);
+	}
 	spin_unlock_irqrestore(&n->list_lock, flags);
 }
 
@@ -1259,10 +1282,12 @@ static void add_partial(struct kmem_cach
  */
 static inline int lock_and_freeze_slab(struct kmem_cache_node *n, struct page *page)
 {
+	BUG_ON(!SlabPartial(page));
 	if (slab_trylock(page)) {
 		list_del(&page->lru);
 		n->nr_partial--;
 		SetSlabFrozen(page);
+		ClearSlabPartial(page);
 		return 1;
 	}
 	return 0;
@@ -2680,6 +2705,7 @@ int kmem_cache_shrink(struct kmem_cache
 				 * may have freed the last object and be
 				 * waiting to release the slab.
 				 */
+				ClearSlabPartial(page);
 				list_del(&page->lru);
 				n->nr_partial--;
 				slab_unlock(page);