Index: linux-2.6.21-rc1/mm/slub.c
===================================================================
--- linux-2.6.21-rc1.orig/mm/slub.c	2007-02-25 20:11:47.000000000 -0800
+++ linux-2.6.21-rc1/mm/slub.c	2007-02-25 20:15:25.000000000 -0800
@@ -42,7 +42,7 @@
  * to push back per cpu slabs if they are unused
  * for a longer time period.
  *
- * PagePrivate		Only a single object exists per slab. Objects are not
+ * PageError		Only a single object exists per slab. Objects are not
  *			cached instead we use the page allocator for
  *			object allocation and freeing.
  */
@@ -404,7 +404,7 @@ static int on_freelist(struct kmem_cache
 	void **object = page->freelist;
 	void *origin = &page->lru;
 
-	if (PagePrivate(page))
+	if (PageError(page))
 		return 0;
 
 	check_slab(page);
@@ -451,9 +451,7 @@ static void discard_slab(struct kmem_cac
 
 	page->mapping = NULL;
 	reset_page_mapcount(page);
-	__ClearPageSlab(page);
-	__ClearPagePrivate(page);
-
+	page->flags &= ~(1 << PG_slab | 1 << PG_error);
 	free_slab(s, page);
 }
 
@@ -478,7 +476,7 @@ static struct page *new_slab(struct kmem
 	atomic_long_inc(&n->nr_slabs);
 	page->offset = s->offset;
 	page->slab = s;
-	__SetPageSlab(page);
+	page->flags |= 1 << PG_slab;
 
 	if (s->objects > 1) {
 		void *start = page_address(page);
@@ -496,7 +494,7 @@ static struct page *new_slab(struct kmem
 		page->inuse = 0;
 		check_free_chain(s, page);
 	} else
-		__SetPagePrivate(page);
+		page->flags |= 1 << PG_error;
 
 out:
 	if (flags & __GFP_WAIT)
@@ -735,7 +733,7 @@ void kmem_cache_free(struct kmem_cache *
 #endif
 	local_irq_save(flags);
 
-	if (unlikely(PagePrivate(page)))
+	if (unlikely(PageError(page)))
 		goto single_object_slab;
 
 	slab_lock(page);
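
For illustration only, not part of the patch: a minimal user-space sketch of the bit
arithmetic the hunks above rely on. PG_slab and PG_error are bit numbers within
page->flags; setting a flag is a single OR of its mask, and discard_slab() now clears
both flags with one AND of the combined mask instead of two separate __ClearPage*()
calls. The enum values below are assumed to match 2.6.21 include/linux/page-flags.h;
only the masking pattern itself is what the patch depends on.

	#include <stdio.h>

	/* Bit numbers as in 2.6.21 include/linux/page-flags.h (assumed here). */
	enum { PG_error = 1, PG_slab = 7 };

	int main(void)
	{
		unsigned long flags = 0;

		/* new_slab(): mark the page as a slab page ... */
		flags |= 1UL << PG_slab;
		/* ... and, for a single-object slab, set PG_error as the marker */
		flags |= 1UL << PG_error;
		printf("after set:   %#lx\n", flags);

		/* discard_slab(): clear both flags with one non-atomic store */
		flags &= ~(1UL << PG_slab | 1UL << PG_error);
		printf("after clear: %#lx\n", flags);
		return 0;
	}

The non-atomic forms are safe at these call sites because the page is not yet visible
(new_slab) or no longer visible (discard_slab) to any other CPU.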