From: David Gibson

While I was adjusting accordingly, I noticed a broken BUG_ON() which
could trigger if you reduced the size of the hugepage pool while pages
were reserved.

The revised patch below fixes both problems.

Signed-off-by: David Gibson
Signed-off-by: Andrew Morton
---

 mm/hugetlb.c |   20 +++++---------------
 1 files changed, 5 insertions(+), 15 deletions(-)

diff -puN mm/hugetlb.c~hugepage-strict-page-reservation-for-hugepage-inodes-fix mm/hugetlb.c
--- devel/mm/hugetlb.c~hugepage-strict-page-reservation-for-hugepage-inodes-fix	2006-02-28 17:03:55.000000000 -0800
+++ devel-akpm/mm/hugetlb.c	2006-02-28 17:03:55.000000000 -0800
@@ -181,10 +181,7 @@ int hugetlb_extend_reservation(struct hu
 					unsigned long atleast)
 {
 	struct inode *inode = &info->vfs_inode;
-	struct address_space *mapping = inode->i_mapping;
-	unsigned long idx;
 	unsigned long change_in_reserve = 0;
-	struct page *page;
 	int ret = 0;
 
 	spin_lock(&hugetlb_lock);
@@ -193,19 +190,12 @@ int hugetlb_extend_reservation(struct hu
 	if (info->prereserved_hpages >= atleast)
 		goto out;
 
-	/* prereserved_hpages stores the number of pages already
-	 * guaranteed (reserved or instantiated) for this inode.
-	 * Count how many extra pages we need to reserve. */
-	for (idx = info->prereserved_hpages; idx < atleast; idx++) {
-		page = radix_tree_lookup(&mapping->page_tree, idx);
-		if (!page)
-			/* Pages which are already instantiated don't
-			 * need to be reserved */
-			change_in_reserve++;
-	}
+	/* Because we always call this on shared mappings, none of the
+	 * pages beyond info->prereserved_hpages can have been
+	 * instantiated, so we need to reserve all of them now. */
+	change_in_reserve = atleast - info->prereserved_hpages;
 
-	BUG_ON(reserved_huge_pages > free_huge_pages);
-	if (change_in_reserve > (free_huge_pages-reserved_huge_pages)) {
+	if ((reserved_huge_pages + change_in_reserve) > free_huge_pages) {
 		ret = -ENOMEM;
 		goto out;
 	}
_
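
A note on why the old check was fragile: once the hugepage pool is
shrunk while pages are reserved, reserved_huge_pages can legitimately
exceed free_huge_pages, so BUG_ON(reserved_huge_pages >
free_huge_pages) would fire; and without the BUG_ON(), the unsigned
subtraction (free_huge_pages - reserved_huge_pages) would wrap to a
huge value and let the reservation wrongly succeed.  The rewritten
comparison avoids the subtraction entirely.  Below is a minimal
userspace sketch of that arithmetic -- not kernel code, and the pool
counts are made-up illustration values:

	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical state after shrinking the pool while 4
		 * pages were reserved: reserved > free is now a
		 * legitimate state, so the old BUG_ON() would have
		 * fired here. */
		unsigned long free_huge_pages = 2;
		unsigned long reserved_huge_pages = 4;
		unsigned long change_in_reserve = 1;

		/* Old check: the unsigned subtraction wraps to a huge
		 * value, the comparison is false, and the reservation
		 * is wrongly allowed. */
		if (change_in_reserve >
		    (free_huge_pages - reserved_huge_pages))
			printf("old check: -ENOMEM\n");
		else
			printf("old check: allowed (wrong!)\n");

		/* New check: no subtraction, no wraparound; it
		 * correctly returns -ENOMEM. */
		if ((reserved_huge_pages + change_in_reserve) >
		    free_huge_pages)
			printf("new check: -ENOMEM (correct)\n");
		else
			printf("new check: allowed\n");

		return 0;
	}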