Index: linux-2.6.19-rc2-mm1/mm/hugetlb.c
===================================================================
--- linux-2.6.19-rc2-mm1.orig/mm/hugetlb.c	2006-10-18 21:24:03.990964479 -0500
+++ linux-2.6.19-rc2-mm1/mm/hugetlb.c	2006-10-18 21:26:23.144479786 -0500
@@ -30,7 +30,7 @@ static unsigned int free_huge_pages_node
  * Number of pages taken from the pool that we have not processed yet.
  * This is used to avoid falsely reporting OOM conditions.
  */
-static unsigned int tentative_huge_pages; /* = 0 */
+static atomic_t tentative_huge_pages; /* = 0 */
 
 /*
  * Protects updates to hugepage_freelists, nr_huge_pages, free_huge_pages and
@@ -39,20 +39,6 @@ static unsigned int tentative_huge_pages
 
 static DEFINE_SPINLOCK(hugetlb_lock);
 
-static int page_tentative(struct page *page)
-{
-	BUG_ON(!PageCompound(page));
-	BUG_ON(page != (struct page *)page_private(page));
-	return atomic_read(&page[1]._count);
-}
-
-static void set_page_tentative(struct page *page, int value)
-{
-	BUG_ON(!PageCompound(page));
-	BUG_ON(page != (struct page *)page_private(page));
-	atomic_set(&page[1]._count, value);
-}
-
 static void clear_huge_page(struct page *page, unsigned long addr)
 {
 	int i;
@@ -116,14 +102,11 @@ static void free_huge_page(struct page *
 	INIT_LIST_HEAD(&page->lru);
 
 	spin_lock(&hugetlb_lock);
-	switch (page_tentative(page)) {
-	case 2:
-		/* tentative and from the reserved pool */
-		resv_huge_pages++;
-		/* FALLTHROUGH */
-	case 1:
-		/* tentative */
-		tentative_huge_pages--;
+	if (!TestSetPageUptodate(page)) {
+		if (PageReferenced(page))
+			resv_huge_pages++;
+		else
+			atomic_inc(&tentative_huge_pages);
 	}
 	enqueue_huge_page(page);
 	spin_unlock(&hugetlb_lock);
@@ -163,14 +146,14 @@ static struct page *alloc_huge_page(stru
 		 * attempting to instantiate the same page in a shared
 		 * mapping. */
 		if (! resv_huge_pages ) {
-			BUG_ON(! tentative_huge_pages);
+			BUG_ON(! atomic_read(&tentative_huge_pages));
 			spin_unlock(&hugetlb_lock);
 			return ERR_PTR(-EAGAIN);
 		}
 		resv_huge_pages--;
 	} else if (free_huge_pages <= resv_huge_pages) {
 		/* Draw from the non-reserved free pool */
-		if (tentative_huge_pages)
+		if (atomic_read(&tentative_huge_pages))
 			page = ERR_PTR(-EAGAIN);
 		else
 			page = ERR_PTR(-ENOMEM);
@@ -181,21 +164,21 @@ static struct page *alloc_huge_page(stru
 
 	page = dequeue_huge_page(vma, addr);
 	BUG_ON(! page);	/* We've already checked the counts */
-	tentative_huge_pages++;
-	set_page_tentative(page, (vma->vm_flags & VM_MAYSHARE) ? 2 : 1);
-	spin_unlock(&hugetlb_lock);
+	ClearPageUptodate(page);
+	atomic_inc(&tentative_huge_pages);
+	if (vma->vm_flags & VM_MAYSHARE)
+		SetPageReferenced(page);
+	else
+		ClearPageReferenced(page);
 	set_page_refcounted(page);
+	spin_unlock(&hugetlb_lock);
 	return page;
 }
 
 static void keep_huge_page(struct page *page)
 {
-	spin_lock(&hugetlb_lock);
-	if (page_tentative(page)) {
-		tentative_huge_pages--;
-		set_page_tentative(page, 0);
-	}
-	spin_unlock(&hugetlb_lock);
+	if (!TestSetPageUptodate(page))
+		atomic_dec(&tentative_huge_pages);
 }
 
 static int __init hugetlb_init(void)
Index: linux-2.6.19-rc2-mm1/include/linux/page-flags.h
===================================================================
--- linux-2.6.19-rc2-mm1.orig/include/linux/page-flags.h	2006-10-18 21:24:04.004637525 -0500
+++ linux-2.6.19-rc2-mm1/include/linux/page-flags.h	2006-10-18 21:25:06.078311240 -0500
@@ -139,6 +139,7 @@ static inline void SetPageUptodate(struc
 #define SetPageUptodate(page)	set_bit(PG_uptodate, &(page)->flags)
 #endif
 #define ClearPageUptodate(page)	clear_bit(PG_uptodate, &(page)->flags)
+#define TestSetPageUptodate(page)	test_and_set_bit(PG_uptodate, &(page)->flags)
 
 #define PageDirty(page)		test_bit(PG_dirty, &(page)->flags)
 #define SetPageDirty(page)	set_bit(PG_dirty, &(page)->flags)