---
 fs/bio.c                  |    7 ++++---
 include/linux/mm_inline.h |   15 +++++++++++++++
 mm/page_alloc.c           |    1 +
 mm/vmscan.c               |   43 ++++++++++++++++++++++++++++++++++++-------
 4 files changed, 56 insertions(+), 10 deletions(-)

Index: linux-2.6.21-rc7/mm/vmscan.c
===================================================================
--- linux-2.6.21-rc7.orig/mm/vmscan.c	2007-04-23 18:00:34.000000000 -0700
+++ linux-2.6.21-rc7/mm/vmscan.c	2007-04-23 20:10:27.000000000 -0700
@@ -627,6 +627,16 @@ static unsigned long isolate_lru_pages(u
 	struct page *page;
 	unsigned long scan;
 
+	if (list_empty(src))
+		printk(KERN_CRIT "isolate_lru_pages() list empty!\n");
+	else {
+		int count = 0;
+		struct list_head *cursor;
+
+		list_for_each(cursor, src)
+			count++;
+		printk(KERN_CRIT "%d entries in list\n", count);
+	}
 	for (scan = 0; scan < nr_to_scan && !list_empty(src); ) {
 		struct list_head *target;
 		page = lru_to_page(src);
@@ -645,7 +655,13 @@ static unsigned long isolate_lru_pages(u
 			ClearPageLRU(page);
 			target = dst;
 			nr_taken += compound_pages(page);
+			if (compound_order(page)) {
+				printk(KERN_CRIT "Took compound page off LRU %p flags=%lx mapping=%p order=%d\n",
+					page, page->flags, page->mapping, compound_order(page));
+			}
 		} /* else it is being freed elsewhere */
+		else
+			printk(KERN_CRIT "page being freed elsewhere\n");
 		list_add(&page->lru, target);
 
 		scan += compound_pages(page);
@@ -667,6 +683,7 @@ static unsigned long shrink_inactive_lis
 	unsigned long nr_scanned = 0;
 	unsigned long nr_reclaimed = 0;
 
+	printk(KERN_CRIT "shrink_inactive_list(%lu)\n", max_scan);
 	pagevec_init(&pvec, 1);
 
 	lru_add_drain();
@@ -682,6 +699,7 @@ static unsigned long shrink_inactive_lis
 					     &page_list, &nr_scan);
 		__mod_zone_page_state(zone, NR_INACTIVE, -nr_taken);
 		zone->pages_scanned += nr_scan;
+		printk(KERN_CRIT "Took %ld pages off the inactive list\n", nr_taken);
 		spin_unlock_irq(&zone->lru_lock);
 
 		nr_scanned += nr_scan;
@@ -775,6 +793,7 @@ static void shrink_active_list(unsigned
 	struct pagevec pvec;
 	int reclaim_mapped = 0;
 
+	printk(KERN_CRIT "shrink_active_list(%lu) priority=%d\n", nr_pages, priority);
 	if (sc->may_swap) {
 		long mapped_ratio;
 		long distress;
@@ -859,6 +878,7 @@ force_reclaim_mapped:
 		list_move(&page->lru, &zone->inactive_list);
 		pgmoved += compound_pages(page);
 		if (!pagevec_add(&pvec, page)) {
+			printk(KERN_ERR "Moved %ld off inactive\n", pgmoved);
 			__mod_zone_page_state(zone, NR_INACTIVE, pgmoved);
 			spin_unlock_irq(&zone->lru_lock);
 			pgdeactivate += pgmoved;
@@ -869,6 +889,7 @@ force_reclaim_mapped:
 			spin_lock_irq(&zone->lru_lock);
 		}
 	}
+	printk(KERN_CRIT "Moved %ld off inactive\n", pgmoved);
 	__mod_zone_page_state(zone, NR_INACTIVE, pgmoved);
 	pgdeactivate += pgmoved;
 	if (buffer_heads_over_limit) {
@@ -887,6 +908,7 @@ force_reclaim_mapped:
 		list_move(&page->lru, &zone->active_list);
 		pgmoved += compound_pages(page);
 		if (!pagevec_add(&pvec, page)) {
+			printk(KERN_ERR "Moved %ld to active\n", pgmoved);
 			__mod_zone_page_state(zone, NR_ACTIVE, pgmoved);
 			pgmoved = 0;
 			spin_unlock_irq(&zone->lru_lock);
@@ -894,6 +916,7 @@ force_reclaim_mapped:
 			spin_lock_irq(&zone->lru_lock);
 		}
 	}
+	printk(KERN_CRIT "Moved %ld to active\n", pgmoved);
 	__mod_zone_page_state(zone, NR_ACTIVE, pgmoved);
 
 	__count_zone_vm_events(PGREFILL, zone, pgscanned);
@@ -909,11 +932,14 @@ force_reclaim_mapped:
 static unsigned long shrink_zone(int priority, struct zone *zone,
 				struct scan_control *sc)
 {
-	unsigned long nr_active;
-	unsigned long nr_inactive;
+	long nr_active;
+	long nr_inactive;
 	unsigned long nr_to_scan;
 	unsigned long nr_reclaimed = 0;
 
+	printk(KERN_CRIT "entering shrink_zone(%d) zone_idx=%d z_active=%ld z_inactive=%ld\n",
+		priority, zone_idx(zone), zone->nr_scan_active, zone->nr_scan_inactive);
+
 	atomic_inc(&zone->reclaim_in_progress);
 
 	/*
@@ -936,17 +962,20 @@ static unsigned long shrink_zone(int pri
 	else
 		nr_inactive = 0;
 
-	while (nr_active || nr_inactive) {
-		if (nr_active) {
+	while (nr_active > 0 || nr_inactive > 0) {
+		printk(KERN_CRIT "Shrink zone: nr_active=%ld nr_inactive=%ld NR_ACTIVE=%ld NR_INACTIVE=%ld\n", nr_active, nr_inactive, zone_page_state(zone, NR_ACTIVE), zone_page_state(zone, NR_INACTIVE));
+		if (nr_active > 0) {
+			if (list_empty(&zone->active_list))
+				panic("Active counter mismatch.\n");
 			nr_to_scan = min(nr_active,
-				(unsigned long)sc->swap_cluster_max);
+				(long)sc->swap_cluster_max);
 			nr_active -= nr_to_scan;
 			shrink_active_list(nr_to_scan, zone, sc, priority);
 		}
 
-		if (nr_inactive) {
+		if (nr_inactive > 0) {
 			nr_to_scan = min(nr_inactive,
-				(unsigned long)sc->swap_cluster_max);
+				(long)sc->swap_cluster_max);
 			nr_inactive -= nr_to_scan;
 			nr_reclaimed += shrink_inactive_list(nr_to_scan,
 								zone, sc);

Index: linux-2.6.21-rc7/fs/bio.c
===================================================================
--- linux-2.6.21-rc7.orig/fs/bio.c	2007-04-23 18:21:05.000000000 -0700
+++ linux-2.6.21-rc7/fs/bio.c	2007-04-23 18:36:28.000000000 -0700
@@ -306,7 +306,8 @@ int bio_get_nr_vecs(struct block_device
 	request_queue_t *q = bdev_get_queue(bdev);
 	int nr_pages;
 
-	nr_pages = ((q->max_sectors << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	nr_pages = page_cache_next(bdev->bd_inode->i_mapping,
+					q->max_sectors << 9);
 	if (nr_pages > q->max_phys_segments)
 		nr_pages = q->max_phys_segments;
 	if (nr_pages > q->max_hw_segments)
@@ -911,7 +912,7 @@ void bio_set_pages_dirty(struct bio *bio
 	for (i = 0; i < bio->bi_vcnt; i++) {
 		struct page *page = bvec[i].bv_page;
 
-		if (page && !PageCompound(page))
+		if (page)
 			set_page_dirty_lock(page);
 	}
 }
@@ -978,7 +979,7 @@ void bio_check_pages_dirty(struct bio *b
 	for (i = 0; i < bio->bi_vcnt; i++) {
 		struct page *page = bvec[i].bv_page;
 
-		if (PageDirty(page) || PageCompound(page)) {
+		if (PageDirty(page)) {
 			page_cache_release(page);
 			bvec[i].bv_page = NULL;
 		} else {

Index: linux-2.6.21-rc7/include/linux/mm_inline.h
===================================================================
--- linux-2.6.21-rc7.orig/include/linux/mm_inline.h	2007-04-23 19:38:42.000000000 -0700
+++ linux-2.6.21-rc7/include/linux/mm_inline.h	2007-04-23 19:40:14.000000000 -0700
@@ -3,6 +3,9 @@ add_page_to_active_list(struct zone *zon
 {
 	list_add(&page->lru, &zone->active_list);
 	__inc_zone_page_state(page, NR_ACTIVE);
+	if (compound_order(page))
+		printk(KERN_CRIT "Add compound page to active %p flags=%lx mapping=%p order=%d\n",
+			page, page->flags, page->mapping, compound_order(page));
 }
 
 static inline void
@@ -10,6 +13,9 @@ add_page_to_inactive_list(struct zone *z
 {
 	list_add(&page->lru, &zone->inactive_list);
 	__inc_zone_page_state(page, NR_INACTIVE);
+	if (compound_order(page))
+		printk(KERN_CRIT "Add compound page to inactive %p flags=%lx mapping=%p order=%d\n",
+			page, page->flags, page->mapping, compound_order(page));
 }
 
 static inline void
@@ -17,6 +23,9 @@ del_page_from_active_list(struct zone *z
 {
 	list_del(&page->lru);
 	__dec_zone_page_state(page, NR_ACTIVE);
+	if (compound_order(page))
+		printk(KERN_CRIT "Del compound page from active %p flags=%lx mapping=%p order=%d\n",
+			page, page->flags, page->mapping, compound_order(page));
 }
 
 static inline void
@@ -24,6 +33,9 @@ del_page_from_inactive_list(struct zone
 {
 	list_del(&page->lru);
 	__dec_zone_page_state(page, NR_INACTIVE);
+	if (compound_order(page))
+		printk(KERN_CRIT "Del compound page from inactive %p flags=%lx mapping=%p order=%d\n",
+			page, page->flags, page->mapping, compound_order(page));
 }
 
 static inline void
@@ -36,5 +48,8 @@ del_page_from_lru(struct zone *zone, str
 	} else {
 		__dec_zone_page_state(page, NR_INACTIVE);
 	}
+	if (compound_order(page))
+		printk(KERN_CRIT "Del page from lru %p flags=%lx mapping=%p order=%d\n",
+			page, page->flags, page->mapping, compound_order(page));
 }
 

Index: linux-2.6.21-rc7/mm/page_alloc.c
===================================================================
--- linux-2.6.21-rc7.orig/mm/page_alloc.c	2007-04-23 18:55:33.000000000 -0700
+++ linux-2.6.21-rc7/mm/page_alloc.c	2007-04-23 18:56:32.000000000 -0700
@@ -267,6 +267,7 @@ static void destroy_compound_page(struct
 		__ClearPageTail(p);
 		__ClearPageCompound(p);
 	}
+	WARN_ON(order == 4);
 }
 
 static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
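
A note on the helpers this instrumentation leans on: compound_pages() and
compound_order() come from the larger variable-order page cache patchset, not
from mainline 2.6.21. The accounting rule they encode is that an order-N
compound page occupies 2^N base pages, so every LRU add/remove must adjust
NR_ACTIVE/NR_INACTIVE by compound_pages(page) rather than by 1. The userspace
sketch below models that invariant and the counter/list mismatch the panic()
in shrink_zone() is hunting; it is an illustration under those assumptions,
not kernel code, and fake_page/add_page/del_page are made-up names:

/* Userspace model (hypothetical, for illustration only) of the LRU
 * bookkeeping the patch instruments.  Assumption: an order-N page
 * accounts for 1 << N base pages, mirroring compound_pages(). */
#include <assert.h>
#include <stdio.h>

struct fake_page { int order; };	/* 0 = normal page, >0 = compound */

static struct fake_page lru[16];	/* stand-in for zone->inactive_list */
static int lru_entries;			/* entries actually on the list */
static long nr_inactive;		/* stand-in for NR_INACTIVE */

static long compound_pages(const struct fake_page *p)
{
	return 1L << p->order;		/* order-N covers 2^N base pages */
}

static void add_page(int order)
{
	lru[lru_entries].order = order;
	nr_inactive += compound_pages(&lru[lru_entries]);	/* not += 1 */
	lru_entries++;
}

static void del_page(void)
{
	lru_entries--;
	nr_inactive -= compound_pages(&lru[lru_entries]);	/* not -= 1 */
}

int main(void)
{
	add_page(0);		/* one base page */
	add_page(2);		/* order-2 compound page = 4 base pages */
	printf("entries=%d nr_inactive=%ld\n", lru_entries, nr_inactive);

	while (lru_entries)
		del_page();

	/* If any path had adjusted the counter by 1 for a compound page,
	 * nr_inactive would now be nonzero while the list is empty --
	 * the "counter says scan more, list already empty" state that
	 * the list_empty()/panic() check in shrink_zone() above catches. */
	assert(nr_inactive == 0);
	return 0;
}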