diff -Naurp linux-2.6.11-ck1/drivers/block/cfq-iosched.c linux-2.6.11-ck2/drivers/block/cfq-iosched.c
--- linux-2.6.11-ck1/drivers/block/cfq-iosched.c	2005-03-09 23:04:13.000000000 +1100
+++ linux-2.6.11-ck2/drivers/block/cfq-iosched.c	2005-03-09 23:04:22.000000000 +1100
@@ -41,10 +41,12 @@ static int cfq_slice_idle = HZ / 50;
 #define CFQ_IDLE_GRACE		(HZ / 10)
 #define CFQ_SLICE_SCALE		(5)
 
+#define CFQ_KEY_ASYNC		(0)
+
 /*
  * disable queueing at the driver/hardware level
  */
-static int cfq_max_depth = 1;
+static int cfq_max_depth = 2;
 
 /*
  * for the hash of cfqq inside the cfqd
@@ -94,8 +96,7 @@ static kmem_cache_t *cfq_ioc_pool;
 #define cfq_class_be(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_BE)
 #define cfq_class_rt(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
 
-#define CFQ_ASYNC		(0)
-#define CFQ_SYNC		(1)
+#define cfq_cfqq_sync(cfqq)	((cfqq)->key != CFQ_KEY_ASYNC)
 
 /*
  * Per block device queue structure
@@ -168,7 +169,6 @@ struct cfq_data {
 	unsigned int cfq_slice_async_rq;
 	unsigned int cfq_slice_idle;
 	unsigned int cfq_max_depth;
-	unsigned int cfq_write_fairness;
 };
 
 /*
@@ -186,17 +186,15 @@ struct cfq_queue {
 	/* on either rr or empty list of cfqd */
 	struct list_head cfq_list;
 	/* sorted list of pending requests */
-	struct rb_root sort_list[2];
-	/* slice data direction */
-	int dir;
+	struct rb_root sort_list;
 	/* if fifo isn't expired, next request to serve */
-	struct cfq_rq *next_crq[2];
+	struct cfq_rq *next_crq;
 	/* requests queued in sort_list */
 	int queued[2];
 	/* currently allocated requests */
 	int allocated[2];
 	/* fifo list of requests in sort_list */
-	struct list_head fifo[2];
+	struct list_head fifo;
 
 	unsigned long slice_start;
 	unsigned long slice_end;
@@ -383,7 +381,7 @@ cfq_find_next_crq(struct cfq_data *cfqd,
 	if (ON_RB(&last->rb_node))
 		rbnext = rb_next(&last->rb_node);
 	else {
-		rbnext = rb_first(&cfqq->sort_list[cfqq->dir]);
+		rbnext = rb_first(&cfqq->sort_list);
 		if (rbnext == &last->rb_node)
 			rbnext = NULL;
 	}
@@ -401,10 +399,9 @@ cfq_find_next_crq(struct cfq_data *cfqd,
 static void cfq_update_next_crq(struct cfq_rq *crq)
 {
 	struct cfq_queue *cfqq = crq->cfq_queue;
-	const int rw = crq->is_sync;
 
-	if (cfqq->next_crq[rw] == crq)
-		cfqq->next_crq[rw] = cfq_find_next_crq(cfqq->cfqd, cfqq, crq);
+	if (cfqq->next_crq == crq)
+		cfqq->next_crq = cfq_find_next_crq(cfqq->cfqd, cfqq, crq);
 }
 
 static void cfq_resort_rr_list(struct cfq_queue *cfqq, int preempted)
@@ -500,10 +497,10 @@ static inline void cfq_del_crq_rb(struct
 
 		cfq_update_next_crq(crq);
 
-		rb_erase(&crq->rb_node, &cfqq->sort_list[sync]);
+		rb_erase(&crq->rb_node, &cfqq->sort_list);
 		RB_CLEAR_COLOR(&crq->rb_node);
 
-		if (cfqq->on_rr && !(cfqq->queued[0] + cfqq->queued[1]))
+		if (cfqq->on_rr && RB_EMPTY(&cfqq->sort_list))
 			cfq_del_cfqq_rr(cfqd, cfqq);
 	}
 }
@@ -511,7 +508,7 @@ static inline void cfq_del_crq_rb(struct
 
 static struct cfq_rq *
 __cfq_add_crq_rb(struct cfq_rq *crq)
 {
-	struct rb_node **p = &crq->cfq_queue->sort_list[crq->is_sync].rb_node;
+	struct rb_node **p = &crq->cfq_queue->sort_list.rb_node;
 	struct rb_node *parent = NULL;
 	struct cfq_rq *__crq;
@@ -537,7 +534,6 @@ static void cfq_add_crq_rb(struct cfq_rq
 	struct cfq_data *cfqd = cfqq->cfqd;
 	struct request *rq = crq->request;
 	struct cfq_rq *__alias;
-	int dir = crq->is_sync;
 
 	crq->rb_key = rq_rb_key(rq);
 	cfqq->queued[crq->is_sync]++;
@@ -549,7 +545,7 @@ static void cfq_add_crq_rb(struct cfq_rq
 	while ((__alias = __cfq_add_crq_rb(crq)) != NULL)
 		cfq_dispatch_sort(cfqd->queue, __alias);
 
-	rb_insert_color(&crq->rb_node, &cfqq->sort_list[dir]);
+	rb_insert_color(&crq->rb_node, &cfqq->sort_list);
 
 	if (!cfqq->on_rr)
 		cfq_add_cfqq_rr(cfqd, cfqq, crq->requeued);
@@ -557,22 +553,21 @@ static void cfq_add_crq_rb(struct cfq_rq
 	/*
 	 * check if this request is a better next-serve candidate
 	 */
-	cfqq->next_crq[dir] = cfq_choose_req(cfqd, cfqq->next_crq[dir], crq);
+	cfqq->next_crq = cfq_choose_req(cfqd, cfqq->next_crq, crq);
 }
 
 static inline void
 cfq_reposition_crq_rb(struct cfq_queue *cfqq, struct cfq_rq *crq)
 {
 	if (ON_RB(&crq->rb_node)) {
-		rb_erase(&crq->rb_node, &cfqq->sort_list[crq->is_sync]);
+		rb_erase(&crq->rb_node, &cfqq->sort_list);
 		cfqq->queued[crq->is_sync]--;
 	}
 
 	cfq_add_crq_rb(crq);
 }
 
-static struct request *
-cfq_find_rq_rb(struct cfq_data *cfqd, sector_t sector, int rw)
+static struct request *cfq_find_rq_rb(struct cfq_data *cfqd, sector_t sector)
 {
 	struct cfq_queue *cfqq = cfq_find_cfq_hash(cfqd, current->pid);
 
@@ -581,7 +576,7 @@ cfq_find_rq_rb(struct cfq_data *cfqd, se
 	if (!cfqq)
 		goto out;
 
-	n = cfqq->sort_list[rw].rb_node;
+	n = cfqq->sort_list.rb_node;
 	while (n) {
 		struct cfq_rq *crq = rb_entry_crq(n);
 
@@ -624,8 +619,7 @@ static void cfq_requeue_request(request_
 	if (blk_fs_request(rq)) {
 		struct cfq_queue *cfqq = crq->cfq_queue;
 
-		printk("crq requeued\n");
-		cfqq->next_crq[crq->is_sync] = crq;
+		cfqq->next_crq = crq;
 		crq->requeued = 1;
 		cfq_enqueue(cfqd, rq);
 	} else
@@ -648,7 +642,6 @@ static int
 cfq_merge(request_queue_t *q, struct request **req, struct bio *bio)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;
-	const int rw = bio_data_dir(bio);
 	struct request *__rq;
 	int ret;
 
@@ -664,7 +657,7 @@ cfq_merge(request_queue_t *q, struct req
 		goto out;
 	}
-	__rq = cfq_find_rq_rb(cfqd, bio->bi_sector + bio_sectors(bio), rw);
+	__rq = cfq_find_rq_rb(cfqd, bio->bi_sector + bio_sectors(bio));
 	if (__rq && elv_rq_merge_ok(__rq, bio)) {
 		ret = ELEVATOR_FRONT_MERGE;
 		goto out;
 	}
@@ -726,14 +719,6 @@ __cfq_set_active_queue(struct cfq_data *
 		cfqq->slice_left = 0;
 		cfqq->must_alloc_slice = 0;
 		cfqq->fifo_expire = 0;
-
-		/*
-		 * we reverse data direction on each slice. if the chosen
-		 * direction is empty, switch again
-		 */
-		cfqq->dir ^= 1;
-		if (RB_EMPTY(&cfqq->sort_list[cfqq->dir]))
-			cfqq->dir ^= 1;
 	}
 
 	cfqd->active_queue = cfqq;
@@ -872,7 +857,7 @@ static inline void cfq_slice_expired(str
 
 static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
-	WARN_ON(!RB_EMPTY(&cfqq->sort_list[cfqq->dir]));
+	WARN_ON(!RB_EMPTY(&cfqq->sort_list));
 	WARN_ON(cfqq != cfqd->active_queue);
 
 	/*
@@ -935,7 +920,7 @@ static void cfq_dispatch_sort(request_qu
 
 	cfqd->last_sector = last;
 
-	cfqq->next_crq[crq->is_sync] = cfq_find_next_crq(cfqd, cfqq, crq);
+	cfqq->next_crq = cfq_find_next_crq(cfqd, cfqq, crq);
 
 	cfq_del_crq_rb(crq);
 	cfq_remove_merge_hints(q, crq);
@@ -959,10 +944,12 @@ static inline struct cfq_rq *cfq_check_f
 	if (cfqq->fifo_expire)
 		return NULL;
 
-	if (!list_empty(&cfqq->fifo[cfqq->dir])) {
-		crq = RQ_DATA(list_entry_fifo(cfqq->fifo[cfqq->dir].next));
+	if (!list_empty(&cfqq->fifo)) {
+		int fifo = cfq_cfqq_sync(cfqq);
+
+		crq = RQ_DATA(list_entry_fifo(cfqq->fifo.next));
 		rq = crq->request;
-		if (time_after(jiffies, rq->start_time + cfqd->cfq_fifo_expire[cfqq->dir])) {
+		if (time_after(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo])) {
 			cfqq->fifo_expire = 1;
 			return crq;
 		}
@@ -974,26 +961,30 @@ static inline struct cfq_rq *cfq_check_f
 /*
  * Scale schedule slice based on io priority
  */
-static inline int cfq_prio_to_slice(struct cfq_data *cfqd, int prio, int sync)
+static inline int
+cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
-	int base_slice = cfqd->cfq_slice[sync];
-	int prio_index = IOPRIO_PRIO_DATA(prio);
+	const int base_slice = cfqd->cfq_slice[cfq_cfqq_sync(cfqq)];
 
-	return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio_index));
+	WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
+
+	return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - cfqq->ioprio));
 }
 
 static inline void
-cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq, int prio)
+cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
-	cfqq->slice_end = cfq_prio_to_slice(cfqd, prio, cfqq->dir) + jiffies;
+	cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies;
 }
 
-static inline int cfq_prio_to_maxrq(struct cfq_data *cfqd, int prio)
+static inline int
+cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
-	int base_rq = cfqd->cfq_slice_async_rq;
-	int prio_index = IOPRIO_PRIO_DATA(prio);
+	const int base_rq = cfqd->cfq_slice_async_rq;
 
-	return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - prio_index));
+	WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
+
+	return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio));
 }
 
 /*
@@ -1018,9 +1009,9 @@ static struct cfq_queue *cfq_select_queu
 	 * if queue has requests, dispatch one. if not, check if
 	 * enough slice is left to wait for one
 	 */
-	if (!RB_EMPTY(&cfqq->sort_list[cfqq->dir]))
+	if (!RB_EMPTY(&cfqq->sort_list))
 		goto keep_queue;
-	else if (cfqq->dir == CFQ_SYNC && time_before(now, cfqq->slice_end)) {
+	else if (cfq_cfqq_sync(cfqq) && time_before(now, cfqq->slice_end)) {
 		if (cfq_arm_slice_timer(cfqd, cfqq))
 			return NULL;
 	}
@@ -1036,11 +1027,10 @@ static int
 __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 			int max_dispatch)
 {
-	int dispatched = 0, prio;
+	int dispatched = 0;
 
-	BUG_ON(RB_EMPTY(&cfqq->sort_list[cfqq->dir]));
+	BUG_ON(RB_EMPTY(&cfqq->sort_list));
 
-	prio = cfqq->ioprio | (cfqq->ioprio_class << IOPRIO_CLASS_SHIFT);
 	do {
 		struct cfq_rq *crq;
 
@@ -1048,7 +1038,7 @@ __cfq_dispatch_requests(struct cfq_data
 		 * follow expired path, else get first next available
 		 */
 		if ((crq = cfq_check_fifo(cfqq)) == NULL)
-			crq = cfqq->next_crq[cfqq->dir];
+			crq = cfqq->next_crq;
 
 		/*
 		 * finally, insert request into driver dispatch list
@@ -1063,7 +1053,7 @@ __cfq_dispatch_requests(struct cfq_data
 			cfqd->active_cic = crq->io_context;
 		}
 
-		if (RB_EMPTY(&cfqq->sort_list[cfqq->dir]))
+		if (RB_EMPTY(&cfqq->sort_list))
 			break;
 	} while (dispatched < max_dispatch);
 
@@ -1073,14 +1063,14 @@ __cfq_dispatch_requests(struct cfq_data
 	 * sync, use the sync time slice value
 	 */
 	if (!cfqq->slice_end)
-		cfq_set_prio_slice(cfqd, cfqq, prio);
+		cfq_set_prio_slice(cfqd, cfqq);
 
 	/*
 	 * expire an async queue immediately if it has used up its slice. idle
 	 * queue always expire after 1 dispatch round.
 	 */
-	if ((cfqq->dir == CFQ_ASYNC &&
-	    cfqd->dispatch_slice >= cfq_prio_to_maxrq(cfqd, prio)) ||
+	if ((!cfq_cfqq_sync(cfqq) &&
+	    cfqd->dispatch_slice >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
 	    cfq_class_idle(cfqq))
 		cfq_slice_expired(cfqd, 0);
 
@@ -1202,8 +1192,7 @@ static void cfq_put_queue(struct cfq_que
 	if (!atomic_dec_and_test(&cfqq->ref))
 		return;
 
-	BUG_ON(rb_first(&cfqq->sort_list[READ]));
-	BUG_ON(rb_first(&cfqq->sort_list[WRITE]));
+	BUG_ON(rb_first(&cfqq->sort_list));
 	BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
 	BUG_ON(cfqq->on_rr);
 
@@ -1420,12 +1409,9 @@ retry:
 
 		INIT_HLIST_NODE(&cfqq->cfq_hash);
 		INIT_LIST_HEAD(&cfqq->cfq_list);
-		RB_CLEAR_ROOT(&cfqq->sort_list[0]);
-		RB_CLEAR_ROOT(&cfqq->sort_list[1]);
-		INIT_LIST_HEAD(&cfqq->fifo[0]);
-		INIT_LIST_HEAD(&cfqq->fifo[1]);
+		RB_CLEAR_ROOT(&cfqq->sort_list);
+		INIT_LIST_HEAD(&cfqq->fifo);
 
-		cfqq->dir = CFQ_SYNC;
 		cfqq->key = key;
 		hlist_add_head(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]);
 		atomic_set(&cfqq->ref, 0);
@@ -1610,7 +1596,7 @@ cfq_should_preempt(struct cfq_data *cfqd
 	 */
 	if (new_cfqq->slice_left < cfqd->cfq_slice_idle)
 		return 0;
-	if (crq->is_sync && cfqq->dir == CFQ_ASYNC)
+	if (crq->is_sync && !cfq_cfqq_sync(cfqq))
 		return 1;
 
 	return 0;
@@ -1628,7 +1614,7 @@ static void cfq_preempt_queue(struct cfq
 		cfq_resort_rr_list(__cfqq, 1);
 
 	if (!cfqq->slice_left)
-		cfqq->slice_left = cfq_prio_to_slice(cfqd, cfqq->ioprio, cfqq->dir) / 2;
+		cfqq->slice_left = cfq_prio_to_slice(cfqd, cfqq) / 2;
 
 	cfqq->slice_end = cfqq->slice_left + jiffies;
 	cfq_slice_expired(cfqd, 1);
@@ -1658,7 +1644,7 @@ cfq_crq_enqueued(struct cfq_data *cfqd,
 {
 	const int sync = crq->is_sync;
 
-	cfqq->next_crq[sync] = cfq_choose_req(cfqd, cfqq->next_crq[sync], crq);
+	cfqq->next_crq = cfq_choose_req(cfqd, cfqq->next_crq, crq);
 
 	if (sync) {
 		struct cfq_io_context *cic = crq->io_context;
@@ -1701,7 +1687,7 @@ static void cfq_enqueue(struct cfq_data
 
 	cfq_add_crq_rb(crq);
 
-	list_add_tail(&rq->queuelist, &cfqq->fifo[crq->is_sync]);
+	list_add_tail(&rq->queuelist, &cfqq->fifo);
 
 	if (rq_mergeable(rq)) {
 		cfq_add_crq_hash(cfqd, crq);
@@ -1822,23 +1808,34 @@ static void cfq_prio_boost(struct cfq_qu
 		cfq_resort_rr_list(cfqq, 0);
 }
 
-static inline pid_t cfq_queue_pid(struct task_struct *task)
+static inline pid_t cfq_queue_pid(struct task_struct *task, int rw)
 {
-	return task->pid;
+	if (rw == READ || process_sync(task))
+		return task->pid;
+
+	return CFQ_KEY_ASYNC;
 }
 
 static inline int
 __cfq_may_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		struct task_struct *task, int rw)
 {
+	if (cfqq->wait_request && cfqq->must_alloc)
+		return ELV_MQUEUE_MUST;
+
+	return ELV_MQUEUE_MAY;
+#if 0
 	if (!cfqq || task->flags & PF_MEMALLOC)
 		return ELV_MQUEUE_MAY;
 	if (!cfqq->allocated[rw] || cfqq->must_alloc) {
+		if (cfqq->wait_request)
+			return ELV_MQUEUE_MUST;
+
 		/*
 		 * only allow 1 ELV_MQUEUE_MUST per slice, otherwise we
 		 * can quickly flood the queue with writes from a single task
 		 */
-		if (cfqq->dir == CFQ_SYNC || !cfqq->must_alloc_slice) {
+		if (rw == READ || !cfqq->must_alloc_slice) {
 			cfqq->must_alloc_slice = 1;
 			return ELV_MQUEUE_MUST;
 		}
@@ -1859,6 +1856,7 @@ __cfq_may_queue(struct cfq_data *cfqd, s
 	}
 
 	return ELV_MQUEUE_MAY;
+#endif
 }
 
 static int cfq_may_queue(request_queue_t *q, int rw, struct bio *bio)
@@ -1873,7 +1871,7 @@ static int cfq_may_queue(request_queue_t
 	 * so just lookup a possibly existing queue, or return 'may queue'
 	 * if that fails
 	 */
-	cfqq = cfq_find_cfq_hash(cfqd, cfq_queue_pid(tsk));
+	cfqq = cfq_find_cfq_hash(cfqd, cfq_queue_pid(tsk, rw));
 	if (cfqq) {
 		cfq_init_prio_data(cfqq);
 		cfq_prio_boost(cfqq);
@@ -1943,7 +1941,7 @@ cfq_set_request(request_queue_t *q, stru
 
 	might_sleep_if(gfp_mask & __GFP_WAIT);
 
-	cic = cfq_get_io_context(cfqd, cfq_queue_pid(current), gfp_mask);
+	cic = cfq_get_io_context(cfqd, cfq_queue_pid(current, rw), gfp_mask);
 
 	spin_lock_irqsave(q->queue_lock, flags);
 
@@ -1951,7 +1949,7 @@ cfq_set_request(request_queue_t *q, stru
 		goto queue_fail;
 
 	if (!cic->cfqq) {
-		cfqq = cfq_get_queue(cfqd, cic->ioc->pid, gfp_mask);
+		cfqq = cfq_get_queue(cfqd, current->pid, gfp_mask);
 		if (!cfqq)
 			goto queue_fail;
 
@@ -2040,8 +2038,6 @@ static void cfq_idle_slice_timer(unsigne
 	if ((cfqq = cfqd->active_queue) != NULL) {
 		unsigned long now = jiffies;
 
-		WARN_ON(cfqq->dir == CFQ_ASYNC);
-
 		/*
 		 * expired
 		 */
@@ -2061,24 +2057,9 @@ static void cfq_idle_slice_timer(unsigne
 		/*
 		 * not expired and it has a request pending, let it dispatch
 		 */
-		if (!RB_EMPTY(&cfqq->sort_list[cfqq->dir])) {
+		if (!RB_EMPTY(&cfqq->sort_list)) {
 			cfqq->must_dispatch = 1;
 			goto out_kick;
-		} else if (!RB_EMPTY(&cfqq->sort_list[CFQ_ASYNC]) &&
-			   time_before(now + cfqd->cfq_slice_idle, cfqq->slice_end)) {
-			/*
-			 * see if we should switch to writes if we have some
-			 * slice left, since the read idled out. assign a slice
-			 * end value scale on how much read slice we had left
-			 */
-			unsigned long left = cfqq->slice_end - now;
-			int p, pn;
-
-			p = cfq_prio_to_slice(cfqd, cfqq->ioprio, CFQ_SYNC);
-			pn = cfq_prio_to_slice(cfqd, cfqq->ioprio, CFQ_ASYNC);
-			cfqq->slice_end = now + ((pn * left) / p);
-			cfqq->dir = CFQ_ASYNC;
-			goto out_kick;
 		}
 	}
 expire:
@@ -2207,11 +2188,6 @@ static int cfq_init_queue(request_queue_
 	cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
 	cfqd->cfq_slice_idle = cfq_slice_idle;
 	cfqd->cfq_max_depth = cfq_max_depth;
-#if defined(CONFIG_IOPRIO_WRITE)
-	cfqd->cfq_write_fairness = 1;
-#else
-	cfqd->cfq_write_fairness = 0;
-#endif
 	return 0;
 out_crqpool:
 	kfree(cfqd->cfq_hash);
@@ -2298,7 +2274,6 @@ SHOW_FUNCTION(cfq_slice_sync_show, cfqd-
 SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
 SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
 SHOW_FUNCTION(cfq_max_depth_show, cfqd->cfq_max_depth, 0);
-SHOW_FUNCTION(cfq_write_fairness_show, cfqd->cfq_write_fairness, 0);
 #undef SHOW_FUNCTION
 
 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
@@ -2327,9 +2302,6 @@ STORE_FUNCTION(cfq_slice_sync_store, &cf
 STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, UINT_MAX, 0);
 STORE_FUNCTION(cfq_max_depth_store, &cfqd->cfq_max_depth, 1, UINT_MAX, 0);
-#if defined(CONFIG_IOPRIO_WRITE)
-STORE_FUNCTION(cfq_write_fairness_store, &cfqd->cfq_write_fairness, 0, 1, 0);
-#endif
 #undef STORE_FUNCTION
 
 static struct cfq_fs_entry cfq_quantum_entry = {
@@ -2387,13 +2359,6 @@ static struct cfq_fs_entry cfq_max_depth
 	.show = cfq_max_depth_show,
 	.store = cfq_max_depth_store,
 };
-static struct cfq_fs_entry cfq_write_fairness_entry = {
-	.attr = {.name = "write_fairness", .mode = S_IRUGO | S_IWUSR },
-	.show = cfq_write_fairness_show,
-#if defined(CONFIG_IOPRIO_WRITE)
-	.store = cfq_write_fairness_store,
-#endif
-};
 static struct attribute *default_attrs[] = {
 	&cfq_quantum_entry.attr,
 	&cfq_queued_entry.attr,
@@ -2406,7 +2371,6 @@ static struct attribute *default_attrs[]
 	&cfq_slice_async_rq_entry.attr,
 	&cfq_slice_idle_entry.attr,
 	&cfq_max_depth_entry.attr,
-	&cfq_write_fairness_entry.attr,
 	NULL,
 };
 
@@ -2442,7 +2406,7 @@ static struct sysfs_ops cfq_sysfs_ops =
 	.store = cfq_attr_store,
 };
 
-struct kobj_type cfq_ktype = {
+static struct kobj_type cfq_ktype = {
 	.sysfs_ops = &cfq_sysfs_ops,
 	.default_attrs = default_attrs,
 };
diff -Naurp linux-2.6.11-ck1/drivers/input/serio/i8042-x86ia64io.h linux-2.6.11-ck2/drivers/input/serio/i8042-x86ia64io.h
--- linux-2.6.11-ck1/drivers/input/serio/i8042-x86ia64io.h	2005-03-02 19:30:26.000000000 +1100
+++ linux-2.6.11-ck2/drivers/input/serio/i8042-x86ia64io.h	2005-03-09 23:04:22.000000000 +1100
@@ -88,7 +88,7 @@ static struct dmi_system_id __initdata i
 };
 #endif
 
-#ifdef CONFIG_ACPI
+#if defined(__ia64__) && defined(CONFIG_ACPI)
 #include <linux/acpi.h>
 #include <acpi/acpi_bus.h>
 
@@ -281,7 +281,7 @@ static inline int i8042_platform_init(vo
 	i8042_kbd_irq = I8042_MAP_IRQ(1);
 	i8042_aux_irq = I8042_MAP_IRQ(12);
 
-#ifdef CONFIG_ACPI
+#if defined(__ia64__) && defined(CONFIG_ACPI)
 	if (i8042_acpi_init())
 		return -1;
 #endif
@@ -300,7 +300,7 @@ static inline int i8042_platform_init(vo
 
 static inline void i8042_platform_exit(void)
 {
-#ifdef CONFIG_ACPI
+#if defined(__ia64__) && defined(CONFIG_ACPI)
 	i8042_acpi_exit();
 #endif
 }
diff -Naurp linux-2.6.11-ck1/drivers/md/raid6altivec.uc linux-2.6.11-ck2/drivers/md/raid6altivec.uc
--- linux-2.6.11-ck1/drivers/md/raid6altivec.uc	2005-03-02 19:30:26.000000000 +1100
+++ linux-2.6.11-ck2/drivers/md/raid6altivec.uc	2005-03-09 23:04:22.000000000 +1100
@@ -108,7 +108,11 @@ int raid6_have_altivec(void);
 
 int raid6_have_altivec(void)
 {
 	/* This assumes either all CPUs have Altivec or none does */
+#ifdef CONFIG_PPC64
 	return cur_cpu_spec->cpu_features & CPU_FTR_ALTIVEC;
+#else
+	return cur_cpu_spec[0]->cpu_features & CPU_FTR_ALTIVEC;
+#endif
 }
 #endif
diff -Naurp linux-2.6.11-ck1/fs/eventpoll.c linux-2.6.11-ck2/fs/eventpoll.c
--- linux-2.6.11-ck1/fs/eventpoll.c	2004-12-25 10:14:50.000000000 +1100
+++ linux-2.6.11-ck2/fs/eventpoll.c	2005-03-09 23:04:22.000000000 +1100
@@ -619,6 +619,7 @@ eexit_1:
 	return error;
 }
 
+#define MAX_EVENTS (INT_MAX / sizeof(struct epoll_event))
 
 /*
  * Implement the event wait interface for the eventpoll file. It is the kernel
@@ -635,7 +636,7 @@ asmlinkage long sys_epoll_wait(int epfd,
 		     current, epfd, events, maxevents, timeout));
 
 	/* The maximum number of event must be greater than zero */
-	if (maxevents <= 0)
+	if (maxevents <= 0 || maxevents > MAX_EVENTS)
 		return -EINVAL;
 
 	/* Verify that the area passed by the user is writeable */
diff -Naurp linux-2.6.11-ck1/kernel/sched.c linux-2.6.11-ck2/kernel/sched.c
--- linux-2.6.11-ck1/kernel/sched.c	2005-03-09 23:04:14.000000000 +1100
+++ linux-2.6.11-ck2/kernel/sched.c	2005-03-09 23:04:23.000000000 +1100
@@ -16,8 +16,9 @@
  *		by Davide Libenzi, preemptible kernel bits by Robert Love.
  *  2003-09-03	Interactivity tuning by Con Kolivas.
  *  2004-04-02	Scheduler domains code by Nick Piggin
- *  2004-07-07	New staircase scheduling policy by Con Kolivas with help
+ *  2005-03-09	New staircase scheduling policy by Con Kolivas with help
  *		from William Lee Irwin III, Zwane Mwaikambo & Peter Williams.
+ *		Staircase v10.6
  */
 
 #include <linux/mm.h>
@@ -701,9 +702,8 @@ static inline void recalc_task_prio(task
 	}
 
 	if (sleep_time >= p->totalrun) {
-		if (!(p->flags & PF_UISLEEP) && (NS_TO_JIFFIES(sleep_time -
-		    p->totalrun) > p->burst * rr_interval(p)))
-			inc_burst(p);
+		if (!(p->flags & PF_UISLEEP))
+			inc_burst(p);
 		goto new_slice;
 	}
 
diff -Naurp linux-2.6.11-ck1/Makefile linux-2.6.11-ck2/Makefile
--- linux-2.6.11-ck1/Makefile	2005-03-09 23:04:14.000000000 +1100
+++ linux-2.6.11-ck2/Makefile	2005-03-09 23:04:23.000000000 +1100
@@ -1,8 +1,8 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 11
-EXTRAVERSION =-ck1
-NAME=Woozy Numbat
+EXTRAVERSION = -ck2
+NAME=Numbat Woozy from Cognac
 
 # *DOCUMENTATION*
 # To see a list of typical targets execute "make help"