diff -Naurp --exclude-from=/home/con/kernel/dontdiff linux-2.6.7-ck5/drivers/block/cfq-iosched.c linux-2.6.7-ck6/drivers/block/cfq-iosched.c
--- linux-2.6.7-ck5/drivers/block/cfq-iosched.c  2004-07-30 15:18:13.576849634 +1000
+++ linux-2.6.7-ck6/drivers/block/cfq-iosched.c  2004-07-30 15:18:22.018607536 +1000
@@ -59,6 +59,14 @@ struct cfq_data {
         unsigned int max_queued;
 
         mempool_t *crq_pool;
+
+        request_queue_t *queue;
+
+        /*
+         * tunables
+         */
+        unsigned int cfq_quantum;
+        unsigned int cfq_queued;
 };
 
 struct cfq_queue {
@@ -89,7 +97,8 @@ struct cfq_rq {
 
 static void cfq_put_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq);
 static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *cfqd, int pid);
-static void cfq_dispatch_sort(struct list_head *head, struct cfq_rq *crq);
+static void cfq_dispatch_sort(struct cfq_data *cfqd, struct cfq_queue *cfqq,
+                              struct cfq_rq *crq);
 
 /*
  * lots of deadline iosched dupes, can be abstracted later...
@@ -206,8 +215,7 @@ retry:
                 return;
         }
 
-        cfq_del_crq_rb(cfqq, __alias);
-        cfq_dispatch_sort(cfqd->dispatch, __alias);
+        cfq_dispatch_sort(cfqd, cfqq, __alias);
 
         goto retry;
 }
@@ -321,11 +329,16 @@ cfq_merged_requests(request_queue_t *q,
         cfq_remove_request(q, next);
 }
 
-static void cfq_dispatch_sort(struct list_head *head, struct cfq_rq *crq)
+static void
+cfq_dispatch_sort(struct cfq_data *cfqd, struct cfq_queue *cfqq,
+                  struct cfq_rq *crq)
 {
-        struct list_head *entry = head;
+        struct list_head *head = cfqd->dispatch, *entry = head;
         struct request *__rq;
 
+        cfq_del_crq_rb(cfqq, crq);
+        cfq_remove_merge_hints(cfqd->queue, crq);
+
         if (!list_empty(head)) {
                 __rq = list_entry_rq(head->next);
 
@@ -352,9 +365,7 @@ __cfq_dispatch_requests(request_queue_t
 {
         struct cfq_rq *crq = rb_entry_crq(rb_first(&cfqq->sort_list));
 
-        cfq_del_crq_rb(cfqq, crq);
-        cfq_remove_merge_hints(q, crq);
-        cfq_dispatch_sort(cfqd->dispatch, crq);
+        cfq_dispatch_sort(cfqd, cfqq, crq);
 }
 
 static int cfq_dispatch_requests(request_queue_t *q, struct cfq_data *cfqd)
@@ -385,7 +396,7 @@ restart:
                 ret = 1;
         }
 
-        if ((queued < cfq_quantum) && good_queues)
+        if ((queued < cfqd->cfq_quantum) && good_queues)
                 goto restart;
 
         return ret;
@@ -445,32 +456,61 @@ static void cfq_put_queue(struct cfq_dat
         mempool_free(cfqq, cfq_mpool);
 }
 
-static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, int pid, int mask)
+static struct cfq_queue *__cfq_get_queue(struct cfq_data *cfqd, int pid,
+                                         int gfp_mask)
 {
         const int hashval = hash_long(current->tgid, CFQ_QHASH_SHIFT);
-        struct cfq_queue *cfqq = __cfq_find_cfq_hash(cfqd, pid, hashval);
+        struct cfq_queue *cfqq, *new_cfqq = NULL;
+        request_queue_t *q = cfqd->queue;
 
-        if (!cfqq) {
-                cfqq = mempool_alloc(cfq_mpool, mask);
+retry:
+        cfqq = __cfq_find_cfq_hash(cfqd, pid, hashval);
 
-                if (cfqq) {
-                        INIT_LIST_HEAD(&cfqq->cfq_hash);
-                        INIT_LIST_HEAD(&cfqq->cfq_list);
-                        RB_CLEAR_ROOT(&cfqq->sort_list);
-
-                        cfqq->pid = pid;
-                        cfqq->queued[0] = cfqq->queued[1] = 0;
-                        list_add(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]);
-                }
+        if (!cfqq) {
+                if (new_cfqq) {
+                        cfqq = new_cfqq;
+                        new_cfqq = NULL;
+                } else if (gfp_mask & __GFP_WAIT) {
+                        spin_unlock_irq(q->queue_lock);
+                        new_cfqq = mempool_alloc(cfq_mpool, gfp_mask);
+                        spin_lock_irq(q->queue_lock);
+                        goto retry;
+                } else
+                        return NULL;
+
+                INIT_LIST_HEAD(&cfqq->cfq_hash);
+                INIT_LIST_HEAD(&cfqq->cfq_list);
+                RB_CLEAR_ROOT(&cfqq->sort_list);
+
+                cfqq->pid = pid;
+                cfqq->queued[0] = cfqq->queued[1] = 0;
+                list_add(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]);
         }
 
+        if (new_cfqq)
+                mempool_free(new_cfqq, cfq_mpool);
+
+        return cfqq;
+}
+
+static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, int pid,
+                                       int gfp_mask)
+{
+        request_queue_t *q = cfqd->queue;
+        struct cfq_queue *cfqq;
+
+        spin_lock_irq(q->queue_lock);
+        cfqq = __cfq_get_queue(cfqd, pid, gfp_mask);
+        spin_unlock_irq(q->queue_lock);
+
         return cfqq;
 }
 
 static void cfq_enqueue(struct cfq_data *cfqd, struct cfq_rq *crq)
 {
-        struct cfq_queue *cfqq = cfq_get_queue(cfqd, current->tgid, GFP_ATOMIC);
+        struct cfq_queue *cfqq;
 
+        cfqq = __cfq_get_queue(cfqd, current->tgid, GFP_ATOMIC);
         if (cfqq) {
                 cfq_add_crq_rb(cfqd, cfqq, crq);
 
@@ -565,7 +605,7 @@ static int cfq_may_queue(request_queue_t
         cfqq = cfq_find_cfq_hash(cfqd, current->tgid);
 
         if (cfqq) {
-                int limit = (q->nr_requests - cfq_queued) / cfqd->busy_queues;
+                int limit = (q->nr_requests - cfqd->cfq_queued) / cfqd->busy_queues;
 
                 if (limit < 3)
                         limit = 3;
@@ -583,6 +623,8 @@ static void cfq_put_request(request_queu
 {
         struct cfq_data *cfqd = q->elevator.elevator_data;
         struct cfq_rq *crq = RQ_DATA(rq);
+        struct request_list *rl;
+        int other_rw;
 
         if (crq) {
                 BUG_ON(q->last_merge == rq);
@@ -591,6 +633,23 @@ static void cfq_put_request(request_queu
                 mempool_free(crq, cfqd->crq_pool);
                 rq->elevator_private = NULL;
         }
+
+        /*
+         * work-around for may_queue "bug": if a read gets issued and refused
+         * to queue because writes ate all the allowed slots and no other
+         * reads are pending for this queue, it could get stuck infinitely
+         * since freed_request() only checks the waitqueue for writes when
+         * freeing them. or vice versa for a single write vs many reads.
+         * so check here whether "the other" data direction might be able
+         * to queue and wake them
+         */
+        rl = &q->rq;
+        other_rw = rq_data_dir(rq) ^ 1;
+        if (rl->count[other_rw] <= q->nr_requests) {
+                smp_mb();
+                if (waitqueue_active(&rl->wait[other_rw]))
+                        wake_up(&rl->wait[other_rw]);
+        }
 }
 
 static int cfq_set_request(request_queue_t *q, struct request *rq, int gfp_mask)
@@ -608,6 +667,7 @@ static int cfq_set_request(request_queue
 
         crq = mempool_alloc(cfqd->crq_pool, gfp_mask);
         if (crq) {
+                memset(crq, 0, sizeof(*crq));
                 RB_CLEAR(&crq->rb_node);
                 crq->request = rq;
                 crq->cfq_queue = NULL;
@@ -661,6 +721,7 @@ static int cfq_init(request_queue_t *q,
 
         cfqd->dispatch = &q->queue_head;
         e->elevator_data = cfqd;
+        cfqd->queue = q;
 
         /*
          * just set it to some high value, we want anyone to be able to queue
@@ -669,6 +730,9 @@ static int cfq_init(request_queue_t *q,
         cfqd->max_queued = q->nr_requests;
         q->nr_requests = 8192;
 
+        cfqd->cfq_queued = cfq_queued;
+        cfqd->cfq_quantum = cfq_quantum;
+
         return 0;
 out_crqpool:
         kfree(cfqd->cfq_hash);
@@ -703,8 +767,110 @@ static int __init cfq_slab_setup(void)
 
 subsys_initcall(cfq_slab_setup);
 
+/*
+ * sysfs parts below -->
+ */
+struct cfq_fs_entry {
+        struct attribute attr;
+        ssize_t (*show)(struct cfq_data *, char *);
+        ssize_t (*store)(struct cfq_data *, const char *, size_t);
+};
+
+static ssize_t
+cfq_var_show(unsigned int var, char *page)
+{
+        return sprintf(page, "%d\n", var);
+}
+
+static ssize_t
+cfq_var_store(unsigned int *var, const char *page, size_t count)
+{
+        char *p = (char *) page;
+
+        *var = simple_strtoul(p, &p, 10);
+        return count;
+}
+
+#define SHOW_FUNCTION(__FUNC, __VAR) \
+static ssize_t __FUNC(struct cfq_data *cfqd, char *page) \
+{ \
+        return cfq_var_show(__VAR, (page)); \
+}
+SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum);
+SHOW_FUNCTION(cfq_queued_show, cfqd->cfq_queued);
+#undef SHOW_FUNCTION
+
+#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX) \
+static ssize_t __FUNC(struct cfq_data *cfqd, const char *page, size_t count) \
+{ \
+        int ret = cfq_var_store(__PTR, (page), count); \
+        if (*(__PTR) < (MIN)) \
+                *(__PTR) = (MIN); \
+        else if (*(__PTR) > (MAX)) \
+                *(__PTR) = (MAX); \
+        return ret; \
+}
+STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, INT_MAX);
+STORE_FUNCTION(cfq_queued_store, &cfqd->cfq_queued, 1, INT_MAX);
+#undef STORE_FUNCTION
+
+static struct cfq_fs_entry cfq_quantum_entry = {
+        .attr = {.name = "quantum", .mode = S_IRUGO | S_IWUSR },
+        .show = cfq_quantum_show,
+        .store = cfq_quantum_store,
+};
+static struct cfq_fs_entry cfq_queued_entry = {
+        .attr = {.name = "queued", .mode = S_IRUGO | S_IWUSR },
+        .show = cfq_queued_show,
+        .store = cfq_queued_store,
+};
+
+static struct attribute *default_attrs[] = {
+        &cfq_quantum_entry.attr,
+        &cfq_queued_entry.attr,
+        NULL,
+};
+
+#define to_cfq(atr) container_of((atr), struct cfq_fs_entry, attr)
+
+static ssize_t
+cfq_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
+{
+        elevator_t *e = container_of(kobj, elevator_t, kobj);
+        struct cfq_fs_entry *entry = to_cfq(attr);
+
+        if (!entry->show)
+                return 0;
+
+        return entry->show(e->elevator_data, page);
+}
+
+static ssize_t
+cfq_attr_store(struct kobject *kobj, struct attribute *attr,
+               const char *page, size_t length)
+{
+        elevator_t *e = container_of(kobj, elevator_t, kobj);
+        struct cfq_fs_entry *entry = to_cfq(attr);
+
+        if (!entry->store)
+                return -EINVAL;
+
+        return entry->store(e->elevator_data, page, length);
+}
+
+static struct sysfs_ops cfq_sysfs_ops = {
+        .show = cfq_attr_show,
+        .store = cfq_attr_store,
+};
+
+struct kobj_type cfq_ktype = {
+        .sysfs_ops = &cfq_sysfs_ops,
+        .default_attrs = default_attrs,
+};
+
 elevator_t iosched_cfq = {
         .elevator_name = "cfq",
+        .elevator_ktype = &cfq_ktype,
         .elevator_merge_fn = cfq_merge,
         .elevator_merged_fn = cfq_merged_request,
         .elevator_merge_req_fn = cfq_merged_requests,
diff -Naurp --exclude-from=/home/con/kernel/dontdiff linux-2.6.7-ck5/include/linux/swap.h linux-2.6.7-ck6/include/linux/swap.h
--- linux-2.6.7-ck5/include/linux/swap.h  2004-07-30 15:18:13.824813136 +1000
+++ linux-2.6.7-ck6/include/linux/swap.h  2004-06-16 17:35:46.000000000 +1000
@@ -175,7 +175,6 @@ extern void swap_setup(void);
 extern int try_to_free_pages(struct zone **, unsigned int, unsigned int);
 extern int shrink_all_memory(int);
 extern int vm_swappiness;
-extern int vm_autoregulate;
 
 #ifdef CONFIG_MMU
 /* linux/mm/shmem.c */
diff -Naurp --exclude-from=/home/con/kernel/dontdiff linux-2.6.7-ck5/include/linux/sysctl.h linux-2.6.7-ck6/include/linux/sysctl.h
--- linux-2.6.7-ck5/include/linux/sysctl.h  2004-07-30 15:18:13.825812988 +1000
+++ linux-2.6.7-ck6/include/linux/sysctl.h  2004-07-30 15:18:22.080598419 +1000
@@ -166,7 +166,6 @@ enum
         VM_LAPTOP_MODE=23,      /* vm laptop mode */
         VM_BLOCK_DUMP=24,       /* block dump mode */
         VM_HUGETLB_GROUP=25,    /* permitted hugetlb group */
-        VM_AUTOREGULATE=26,     /* swappiness and inactivation autoregulated */
 };
 
 
diff -Naurp --exclude-from=/home/con/kernel/dontdiff linux-2.6.7-ck5/init/main.c linux-2.6.7-ck6/init/main.c
--- linux-2.6.7-ck5/init/main.c  2004-07-30 15:18:13.834811664 +1000
+++ linux-2.6.7-ck6/init/main.c  2004-06-16 17:35:46.000000000 +1000
@@ -314,19 +314,8 @@ static void __init smp_init(void)
 #define smp_init()  do { } while (0)
 #endif
 
-unsigned long cache_decay_ticks;
 static inline void setup_per_cpu_areas(void) { }
-static void smp_prepare_cpus(unsigned int maxcpus)
-{
-        /*
-         * Generic 2 tick cache_decay for uniprocessor.
-         * FIXME - True cache decay ticks should be extracted from all the
-         * arch dependant SMP code to be set on UP as well.
-         */
-        cache_decay_ticks = 2;
-        printk("Generic cache decay timeout: %ld msecs.\n",
-                (cache_decay_ticks * 1000 / HZ));
-}
+static inline void smp_prepare_cpus(unsigned int maxcpus) { }
 
 #else
 
diff -Naurp --exclude-from=/home/con/kernel/dontdiff linux-2.6.7-ck5/kernel/sched.c linux-2.6.7-ck6/kernel/sched.c
--- linux-2.6.7-ck5/kernel/sched.c  2004-07-30 15:18:13.839810928 +1000
+++ linux-2.6.7-ck6/kernel/sched.c  2004-07-30 15:18:22.085597684 +1000
@@ -188,30 +188,6 @@ static inline void rq_unlock(runqueue_t
         spin_unlock_irq(&rq->lock);
 }
 
-static int rr_interval(task_t * p)
-{
-        int rr_interval = RR_INTERVAL();
-        if (batch_task(p))
-                rr_interval *= 10;
-        else if (iso_task(p))
-                rr_interval /= 2;
-        if (!rr_interval)
-                rr_interval = 1;
-        return rr_interval;
-}
-
-static int task_preempts_curr(struct task_struct *p, runqueue_t *rq)
-{
-        if (p->prio >= rq->curr->prio)
-                return 0;
-        if (!sched_compute || rq->cache_ticks >= cache_decay_ticks ||
-                rt_task(p) || !p->mm || rq->curr == rq->idle ||
-                (batch_task(rq->curr) && !batch_task(p)))
-                        return 1;
-        rq->preempted = 1;
-        return 0;
-}
-
 static inline int task_queued(task_t *task)
 {
         return !list_empty(&task->run_list);
@@ -268,16 +244,13 @@ static inline void __activate_idle_task(
  */
 static unsigned int burst(task_t *p)
 {
-        unsigned int task_user_prio;
-        if (rt_task(p))
-                return p->burst;
-        task_user_prio = TASK_USER_PRIO(p);
-        if (iso_task(p))
-                task_user_prio /= 2;
-        if (likely(task_user_prio < 40))
+        if (likely(!rt_task(p))) {
+                unsigned int task_user_prio = TASK_USER_PRIO(p);
+                if (iso_task(p))
+                        task_user_prio /= 2;
                 return 39 - task_user_prio;
-        else
-                return 0;
+        } else
+                return p->burst;
 }
 
 static void inc_burst(task_t *p)
@@ -301,10 +274,10 @@ static void dec_burst(task_t *p)
 static unsigned int slice(task_t *p)
 {
         unsigned int slice = RR_INTERVAL();
-        if (!rt_task(p))
+        if (likely(!rt_task(p) && !batch_task(p)))
                 slice += burst(p) * RR_INTERVAL();
-        if (batch_task(p))
-                slice *= 10;
+        else if (batch_task(p))
+                slice *= 40 - TASK_USER_PRIO(p);
         return slice;
 }
 
@@ -313,6 +286,18 @@ static unsigned int slice(task_t *p)
  */
 int sched_interactive = 1;
 
+static int rr_interval(task_t * p)
+{
+        int rr_interval = RR_INTERVAL();
+        if (batch_task(p))
+                rr_interval *= 10;
+        else if (iso_task(p))
+                rr_interval /= 2;
+        if (!rr_interval)
+                rr_interval = 1;
+        return rr_interval;
+}
+
 /*
  * effective_prio - dynamic priority dependent on burst.
  * The priority normally decreases by one each RR_INTERVAL.
@@ -406,7 +391,7 @@ static void activate_task(task_t *p, run
         p->flags &= ~PF_UISLEEP;
         p->time_slice = rr_interval(p);
         if (batch_task(p))
-                p->time_slice *= 10;
+                p->time_slice = p->slice;
         p->timestamp = now;
         __activate_task(p, rq);
 }
@@ -624,6 +609,26 @@ static inline int wake_idle(int cpu, tas
 }
 #endif
 
+/*
+ * cache_delay is the time preemption is delayed in sched_compute mode
+ * and is set to 5*cache_decay_ticks
+ */
+static int cache_delay;
+
+static int task_preempts_curr(struct task_struct *p, runqueue_t *rq)
+{
+        if (p->prio > rq->curr->prio)
+                return 0;
+        if (p->prio == rq->curr->prio && (p->slice < slice(p) ||
+                rt_task(rq->curr) || !sched_interactive))
+                        return 0;
+        if (!sched_compute || rq->cache_ticks >= cache_delay ||
+                !p->mm || rt_task(p))
+                        return 1;
+        rq->preempted = 1;
+        return 0;
+}
+
 /***
  * try_to_wake_up - wake up a thread
  * @p: the to-be-woken-up thread
@@ -1822,7 +1827,7 @@ void scheduler_tick(int user_ticks, int
                 enqueue_task(p, rq);
                 goto out_unlock;
         }
-        if (rq->preempted && rq->cache_ticks >= cache_decay_ticks)
+        if (rq->preempted && rq->cache_ticks >= cache_delay)
                 set_tsk_need_resched(p);
 out_unlock:
         spin_unlock(&rq->lock);
@@ -2696,7 +2701,7 @@ asmlinkage long sys_sched_yield(void)
         dequeue_task(current, rq);
         current->slice = slice(current);
         current->time_slice = RR_INTERVAL();
-        if (!rt_task(current) && !batch_task(current)) {
+        if (likely(!rt_task(current) && !batch_task(current))) {
                 current->flags |= PF_YIELDED;
                 current->prio = MAX_PRIO - 2;
         }
@@ -3620,7 +3625,8 @@ void __init sched_init(void)
         for (i = 0; i < NR_CPUS; i++) {
                 rq = cpu_rq(i);
                 spin_lock_init(&rq->lock);
-
+
+                cache_delay = 10 * HZ / 1000;
                 rq->cache_ticks = 0;
                 rq->preempted = 0;
 
@@ -3631,6 +3637,7 @@ void __init sched_init(void)
                 rq->push_cpu = 0;
                 rq->migration_thread = NULL;
                 INIT_LIST_HEAD(&rq->migration_queue);
+                cache_delay = cache_decay_ticks * 5;
 #endif
                 atomic_set(&rq->nr_iowait, 0);
                 for (j = 0; j <= MAX_PRIO; j++)
diff -Naurp --exclude-from=/home/con/kernel/dontdiff linux-2.6.7-ck5/kernel/sysctl.c linux-2.6.7-ck6/kernel/sysctl.c
--- linux-2.6.7-ck5/kernel/sysctl.c  2004-07-30 15:18:13.840810781 +1000
+++ linux-2.6.7-ck6/kernel/sysctl.c  2004-07-30 15:18:22.086597537 +1000
@@ -743,14 +743,6 @@ static ctl_table vm_table[] = {
                 .extra1 = &zero,
                 .extra2 = &one_hundred,
         },
-        {
-                .ctl_name = VM_AUTOREGULATE,
-                .procname = "autoregulate",
-                .data = &vm_autoregulate,
-                .maxlen = sizeof(int),
-                .mode = 0644,
-                .proc_handler = &proc_dointvec,
-        },
 #ifdef CONFIG_HUGETLB_PAGE
         {
                 .ctl_name = VM_HUGETLB_PAGES,
diff -Naurp --exclude-from=/home/con/kernel/dontdiff linux-2.6.7-ck5/Makefile linux-2.6.7-ck6/Makefile
--- linux-2.6.7-ck5/Makefile  2004-07-30 15:18:13.882804600 +1000
+++ linux-2.6.7-ck6/Makefile  2004-07-30 15:18:22.087597390 +1000
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 7
-EXTRAVERSION =-ck5
+EXTRAVERSION =-ck6
 NAME=Zonked Quokka
 
 # *DOCUMENTATION*
diff -Naurp --exclude-from=/home/con/kernel/dontdiff linux-2.6.7-ck5/mm/vmscan.c linux-2.6.7-ck6/mm/vmscan.c
--- linux-2.6.7-ck5/mm/vmscan.c  2004-07-30 15:18:13.917799449 +1000
+++ linux-2.6.7-ck6/mm/vmscan.c  2004-07-30 15:18:22.088597243 +1000
@@ -42,9 +42,7 @@
 /*
  * From 0 .. 100. Higher means more swappy.
  */
-int vm_swappiness = 60;
-int vm_autoregulate = 1;
-static int app_percent = 1;
+int vm_swappiness = 33;
 static long total_memory;
 
 #define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
@@ -648,11 +646,8 @@ refill_inactive_zone(struct zone *zone,
         struct pagevec pvec;
         int reclaim_mapped = 0;
         long mapped_ratio;
-        long distress;
         long swap_tendency;
-        struct sysinfo i;
 
-        si_meminfo(&i);
         lru_add_drain();
         pgmoved = 0;
         spin_lock_irq(&zone->lru_lock);
@@ -682,12 +677,6 @@ refill_inactive_zone(struct zone *zone,
         spin_unlock_irq(&zone->lru_lock);
 
         /*
-         * `distress' is a measure of how much trouble we're having reclaiming
-         * pages. 0 -> no problems. 100 -> great trouble.
-         */
-        distress = 100 >> zone->prev_priority;
-
-        /*
          * The point of this algorithm is to decide when to start reclaiming
          * mapped memory instead of just pagecache. Work out how much memory
          * is mapped.
@@ -695,51 +684,12 @@ refill_inactive_zone(struct zone *zone,
         mapped_ratio = (sc->nr_mapped * 100) / total_memory;
 
         /*
-         * app_percent is the percentage of physical ram used
-         * by application pages.
-         */
-        si_meminfo(&i);
-#ifdef CONFIG_SWAP
-        app_percent = 100 - ((i.freeram + get_page_cache_size() -
-                swapper_space.nrpages) / (i.totalram / 100));
-
-        if (vm_autoregulate) {
-                si_swapinfo(&i);
-
-                if (likely(i.totalswap >= 100)) {
-                        int swap_centile;
-
-
-                        /*
-                         * swap_centile is the percentage of the last (sizeof physical
-                         * ram) of swap free.
-                         */
-                        swap_centile = i.freeswap /
-                                (min(i.totalswap, i.totalram) / 100);
-                        /*
-                         * Autoregulate vm_swappiness to be equal to the lowest of
-                         * app_percent and swap_centile. Bias it downwards -ck
-                         */
-                        vm_swappiness = min(app_percent, swap_centile);
-                        vm_swappiness = vm_swappiness * vm_swappiness / 100;
-                } else
-                        vm_swappiness = 0;
-        }
-#else
-        app_percent = 100 - ((i.freeram + get_page_cache_size()) /
-                (i.totalram / 100));
-#endif
-
-        /*
          * Now decide how much we really want to unmap some pages. The mapped
          * ratio is downgraded - just because there's a lot of mapped memory
          * doesn't necessarily mean that page reclaim isn't succeeding.
-         *
-         * The distress ratio is important - we don't want to start going oom.
-         *
          * A 100% value of vm_swappiness overrides this algorithm altogether.
          */
-        swap_tendency = mapped_ratio / 2 + distress + vm_swappiness;
+        swap_tendency = mapped_ratio + vm_swappiness;
 
         /*
          * Now use this metric to decide whether to start moving mapped memory
@@ -838,16 +788,11 @@ refill_inactive_zone(struct zone *zone,
 static void
 shrink_zone(struct zone *zone, struct scan_control *sc)
 {
-        unsigned long scan_active, scan_inactive, biased_active;
-        int count, biased_ap;
+        unsigned long scan_active, scan_inactive;
+        int count;
 
         scan_inactive = (zone->nr_active + zone->nr_inactive) >> sc->priority;
 
-        if (vm_autoregulate) {
-                biased_ap = app_percent * app_percent / 100;
-                biased_active = zone->nr_active / (101 - biased_ap) * 100;
-        } else
-                biased_active = zone->nr_active;
         /*
          * Try to keep the active list 2/3 of the size of the cache. And
          * make sure that refill_inactive is given a decent number of pages.
@@ -858,7 +803,7 @@ shrink_zone(struct zone *zone, struct sc
          * `scan_active' just to make sure that the kernel will slowly sift
          * through the active list.
          */
-        if (biased_active >= 4*(zone->nr_inactive*2 + 1)) {
+        if (zone->nr_active >= 4*(zone->nr_inactive*2 + 1)) {
                 /* Don't scan more than 4 times the inactive list scan size */
                 scan_active = 4*scan_inactive;
         } else {
@@ -866,7 +811,7 @@ shrink_zone(struct zone *zone, struct sc
 
                 /* Cast to long long so the multiply doesn't overflow */
 
-                tmp = (unsigned long long)scan_inactive * biased_active;
+                tmp = (unsigned long long)scan_inactive * zone->nr_active;
                 do_div(tmp, zone->nr_inactive*2 + 1);
                 scan_active = (unsigned long)tmp;
         }
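
A rough standalone sketch, not part of the patch itself, of what the sched.c slice() change does to batch tasks: in -ck5 a batch task's slice was the normal slice multiplied by 10, in -ck6 it becomes RR_INTERVAL() * (40 - TASK_USER_PRIO(p)), and activate_task() now refills time_slice from p->slice. The numbers below assume RR_INTERVAL() is 10 ticks and ignore iso and rt tasks; both assumptions are mine, not the patch's, and burst() is modelled as 39 - TASK_USER_PRIO.

/*
 * Illustration only: compares the removed (-ck5) and added (-ck6)
 * timeslice formulas for batch tasks.  RR_INTERVAL is assumed to be
 * 10 ticks; the real RR_INTERVAL() macro is not visible in this diff.
 */
#include <stdio.h>

#define RR_INTERVAL 10                          /* assumed value */
#define TASK_USER_PRIO(nice) ((nice) + 20)      /* nice -20..19 -> 0..39 */

static unsigned int burst(int nice)
{
        return 39 - TASK_USER_PRIO(nice);
}

/* mirrors the removed slice(): batch always multiplied the slice by 10 */
static unsigned int slice_ck5(int nice, int batch)
{
        unsigned int slice = RR_INTERVAL;

        slice += burst(nice) * RR_INTERVAL;
        if (batch)
                slice *= 10;
        return slice;
}

/* mirrors the added slice(): batch scales with 40 - TASK_USER_PRIO */
static unsigned int slice_ck6(int nice, int batch)
{
        unsigned int slice = RR_INTERVAL;

        if (!batch)
                slice += burst(nice) * RR_INTERVAL;
        else
                slice *= 40 - TASK_USER_PRIO(nice);
        return slice;
}

int main(void)
{
        int nice;

        for (nice = -20; nice <= 19; nice += 10)
                printf("nice %3d: batch slice %4u -> %3u ticks\n",
                       nice, slice_ck5(nice, 1), slice_ck6(nice, 1));
        return 0;
}

Under those assumptions a nice 0 batch task drops from 2000 ticks to 200, so batch tasks now receive slices in the same range as normal tasks at the same nice level instead of a flat tenfold bonus.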
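
Similarly, a minimal sketch (again not from the patch) of the swap_tendency arithmetic the mm/vmscan.c hunks change: the distress term and the halving of mapped_ratio are gone, and the static vm_swappiness default becomes 33 now that autoregulation is removed. It assumes the unchanged surrounding code still sets reclaim_mapped once swap_tendency reaches 100.

/*
 * Illustration only: the old and new swap_tendency calculations from
 * refill_inactive_zone().  The >= 100 trigger for reclaim_mapped is
 * assumed from the unchanged surrounding code.
 */
#include <stdio.h>

/* removed form: mapped_ratio / 2 + distress + vm_swappiness */
static long swap_tendency_ck5(long mapped_ratio, int prev_priority,
                              int swappiness)
{
        long distress = 100 >> prev_priority;

        return mapped_ratio / 2 + distress + swappiness;
}

/* new form: mapped_ratio + vm_swappiness, no distress term */
static long swap_tendency_ck6(long mapped_ratio, int swappiness)
{
        return mapped_ratio + swappiness;
}

int main(void)
{
        long mapped_ratio;

        /* prev_priority 12 (no distress), ck5 default 60, ck6 default 33 */
        for (mapped_ratio = 20; mapped_ratio <= 80; mapped_ratio += 30) {
                long t_old = swap_tendency_ck5(mapped_ratio, 12, 60);
                long t_new = swap_tendency_ck6(mapped_ratio, 33);

                printf("mapped %2ld%%: %3ld -> %3ld (reclaim_mapped %d -> %d)\n",
                       mapped_ratio, t_old, t_new,
                       t_old >= 100, t_new >= 100);
        }
        return 0;
}

With no reclaim pressure the two end up in a similar range at their defaults; the new form mainly makes mapped memory count at full weight instead of half, while the lower 33 default pulls the overall tendency back down.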