GIT 11ab2bc407a58c0a5d082add7ed97a1a06b0cc53 master.kernel.org:/pub/scm/linux/kernel/git/axboe/linux-2.6-block.git#cfq

commit 11ab2bc407a58c0a5d082add7ed97a1a06b0cc53
Author: Jens Axboe
Date:   Tue Nov 15 14:37:12 2005 +0100

    [BLOCK] cfq-iosched: Fix oops with only idle queues

    Even if we enter cfq_get_next_cfqq() with cfqd->busy_queues != 0, we
    may still end up with an empty cur_rr at the end if there are only
    idle queues in the system. So check for that.

commit a861357ab18451f88b3068cb3b9897b8f1740ff4
Author: Jens Axboe
Date:   Tue Nov 15 14:02:45 2005 +0100

    [BLOCK] cfq-iosched: change from escalator to staircase type service

    Currently, the priority RR algorithm in CFQ behaves like a see-saw,
    where the swing extends one extra prio level per iteration until they
    are all covered (then it starts over). This works fine for bandwidth
    distribution, but not so well for latencies. Writing a test model for
    this algorithm gives the following computed latencies for one process
    running at each (of 8) priority levels:

    prio0: 30.01% disk time,  700msec max latency (tested  710msec)
    prio1: 23.34% disk time,  900msec max latency (tested  728msec)
    prio2: 17.50% disk time, 1260msec max latency (tested 1084msec)
    prio3: 12.50% disk time, 1760msec max latency (tested 1581msec)
    prio4:  8.33% disk time, 2380msec max latency (tested 2228msec)
    prio5:  5.00% disk time, 3100msec max latency (tested 2922msec)
    prio6:  2.50% disk time, 3900msec max latency (tested 3730msec)
    prio7:  0.83% disk time, 4760msec max latency (tested 4588msec)

    'tested' shows the actual latencies measured with 'fio'; reality
    matches the theory. So far, so good. If we simulate 3 processes at
    each prio level, the max latency for prio0 rises to 2460msec, and
    prio4 (which is the default for a process) rises to 7340msec!

    A more generic staircase model, where we climb down the stairs
    (priority levels) and let the dynamic priority of a process increase
    until it hits the top before sliding back to its original prio, could
    be more interesting from a latency POV (an illustrative sketch of this
    rotation is appended after the patch). Simulating that shows (for 1
    process at each level):

    prio0: 27.85% disk time,  400msec max latency (tested  405msec)
    prio1: 22.15% disk time,  420msec max latency (tested  429msec)
    prio2: 17.09% disk time,  620msec max latency (tested  662msec)
    prio3: 12.66% disk time, 1080msec max latency (tested 1126msec)
    prio4:  8.86% disk time, 1600msec max latency (tested 1641msec)
    prio5:  5.70% disk time, 2140msec max latency (tested 2182msec)
    prio6:  3.16% disk time, 2660msec max latency (tested 2669msec)
    prio7:  2.53% disk time, 2800msec max latency (tested 2803msec)

    Latency is almost halved, while the aggregate and individually
    measured throughput stays the same. Service distribution differs a
    little from the old algorithm, but not by much. For 3 processes at
    each level, prio0 has a max latency of 1440msec and prio4 a max
    latency of 6120msec.

    As a bonus, we drop one list from cfqd and make the code a lot more
    readable.

    Signed-off-by: Jens Axboe

commit 8241510c72dd2dd92415c721efcb5d5641e9ae77
Author: Jens Axboe
Date:   Tue Nov 15 14:01:40 2005 +0100

    [BLOCK] cfq-iosched: change cfq io context linking from list to tree

    On setups with many disks, we spend a considerable amount of time
    looking up the process-disk mapping for each queued io. Testing with a
    NULL-based block driver, this costs a 40-50% reduction in throughput
    with 1000 disks.
Signed-off-by: Jens Axboe --- diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 2b64f58..cde687b 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -118,8 +118,9 @@ struct cfq_data { * rr list of queues with requests and the count of them */ struct list_head rr_list[CFQ_PRIO_LISTS]; - struct list_head busy_rr; struct list_head cur_rr; + unsigned short cur_prio; + struct list_head idle_rr; unsigned int busy_queues; @@ -155,7 +156,6 @@ struct cfq_data { struct cfq_queue *active_queue; struct cfq_io_context *active_cic; - int cur_prio, cur_end_prio; unsigned int dispatch_slice; struct timer_list idle_class_timer; @@ -213,8 +213,13 @@ struct cfq_queue { int on_dispatch[2]; /* io prio of this group */ - unsigned short ioprio, org_ioprio; - unsigned short ioprio_class, org_ioprio_class; + unsigned short ioprio_class, ioprio; + + /* current dynamic stair priority */ + unsigned short dyn_ioprio; + + /* same as real ioprio, except if queue has been elevated */ + unsigned short org_ioprio_class, org_ioprio; /* various state flags, see below */ unsigned int flags; @@ -473,25 +478,13 @@ static void cfq_resort_rr_list(struct cf list = &cfqd->cur_rr; else if (cfq_class_idle(cfqq)) list = &cfqd->idle_rr; - else { - /* - * if cfqq has requests in flight, don't allow it to be - * found in cfq_set_active_queue before it has finished them. - * this is done to increase fairness between a process that - * has lots of io pending vs one that only generates one - * sporadically or synchronously - */ - if (cfq_cfqq_dispatched(cfqq)) - list = &cfqd->busy_rr; - else - list = &cfqd->rr_list[cfqq->ioprio]; - } + else + list = &cfqd->rr_list[cfqq->dyn_ioprio]; /* - * if queue was preempted, just add to front to be fair. busy_rr - * isn't sorted. + * if queue was preempted, just add to front to be fair. */ - if (preempted || list == &cfqd->busy_rr) { + if (preempted) { list_add(&cfqq->cfq_list, list); return; } @@ -503,6 +496,8 @@ static void cfq_resort_rr_list(struct cf while ((entry = entry->prev) != list) { struct cfq_queue *__cfqq = list_entry_cfqq(entry); + if (__cfqq->ioprio < cfqq->ioprio) + break; if (!__cfqq->service_last) break; if (time_before(__cfqq->service_last, cfqq->service_last)) @@ -724,81 +719,100 @@ cfq_merged_requests(request_queue_t *q, cfq_remove_request(next); } +/* + * Scale schedule slice based on io priority. Use the sync time slice only + * if a queue is marked sync and has sync io queued. A sync queue with async + * io only, should not get full sync slice length. 
+ */ +static inline int +cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) +{ + const int base_slice = cfqd->cfq_slice[cfq_cfqq_sync(cfqq)]; + unsigned short prio = cfqq->dyn_ioprio; + + WARN_ON(prio >= IOPRIO_BE_NR); + + if (cfq_class_rt(cfqq)) + prio = 0; + + return base_slice + (base_slice / CFQ_SLICE_SCALE * (4 - prio)); +} + static inline void -__cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq) +cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) { - if (cfqq) { - /* - * stop potential idle class queues waiting service - */ - del_timer(&cfqd->idle_class_timer); + cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies; +} - cfqq->slice_start = jiffies; - cfqq->slice_end = 0; - cfqq->slice_left = 0; - cfq_clear_cfqq_must_alloc_slice(cfqq); - cfq_clear_cfqq_fifo_expire(cfqq); - cfq_clear_cfqq_expired(cfqq); - } +static inline int +cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq) +{ + const int base_rq = cfqd->cfq_slice_async_rq; + unsigned short prio = cfqq->dyn_ioprio; - cfqd->active_queue = cfqq; + WARN_ON(cfqq->dyn_ioprio >= IOPRIO_BE_NR); + + if (cfq_class_rt(cfqq)) + prio = 0; + + return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - prio)); } -/* - * 0 - * 0,1 - * 0,1,2 - * 0,1,2,3 - * 0,1,2,3,4 - * 0,1,2,3,4,5 - * 0,1,2,3,4,5,6 - * 0,1,2,3,4,5,6,7 - */ -static int cfq_get_next_prio_level(struct cfq_data *cfqd) +static inline void cfq_prio_inc(unsigned short *p, unsigned int low_p) { - int prio, wrap; + if (++(*p) == CFQ_PRIO_LISTS) + *p = low_p; +} - prio = -1; - wrap = 0; - do { - int p; +static struct cfq_queue *cfq_get_next_cfqq(struct cfq_data *cfqd) +{ + if (!cfqd->busy_queues) + return NULL; + + if (list_empty(&cfqd->cur_rr)) { + unsigned short prio = cfqd->cur_prio; - for (p = cfqd->cur_prio; p <= cfqd->cur_end_prio; p++) { - if (!list_empty(&cfqd->rr_list[p])) { - prio = p; + do { + struct list_head *list = &cfqd->rr_list[prio]; + + if (!list_empty(list)) { + list_splice_init(list, &cfqd->cur_rr); break; } - } - if (prio != -1) - break; - cfqd->cur_prio = 0; - if (++cfqd->cur_end_prio == CFQ_PRIO_LISTS) { - cfqd->cur_end_prio = 0; - if (wrap) - break; - wrap = 1; - } - } while (1); + cfq_prio_inc(&prio, 0); - if (unlikely(prio == -1)) - return -1; + } while (prio != cfqd->cur_prio); - BUG_ON(prio >= CFQ_PRIO_LISTS); + cfq_prio_inc(&cfqd->cur_prio, 0); + } - list_splice_init(&cfqd->rr_list[prio], &cfqd->cur_rr); + if (!list_empty(&cfqd->cur_rr)); + return list_entry_cfqq(cfqd->cur_rr.next); - cfqd->cur_prio = prio + 1; - if (cfqd->cur_prio > cfqd->cur_end_prio) { - cfqd->cur_end_prio = cfqd->cur_prio; - cfqd->cur_prio = 0; - } - if (cfqd->cur_end_prio == CFQ_PRIO_LISTS) { - cfqd->cur_prio = 0; - cfqd->cur_end_prio = 0; + return NULL; +} + +static inline void +__cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq) +{ + if (cfqq) { + WARN_ON(RB_EMPTY(&cfqq->sort_list)); + + /* + * stop potential idle class queues waiting service + */ + del_timer(&cfqd->idle_class_timer); + + cfqq->slice_start = jiffies; + cfqq->slice_end = 0; + cfqq->slice_left = 0; + cfq_clear_cfqq_must_alloc_slice(cfqq); + cfq_clear_cfqq_fifo_expire(cfqq); + cfq_clear_cfqq_expired(cfqq); } - return prio; + cfqd->active_queue = cfqq; } static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd) @@ -811,15 +825,10 @@ static struct cfq_queue *cfq_set_active_ */ if ((cfqq = cfqd->active_queue) != NULL) { if (cfq_cfqq_expired(cfqq) && cfq_cfqq_dispatched(cfqq)) - return NULL; + return cfqq; } - /* - * if current list is 
non-empty, grab first entry. if it is empty, - * get next prio level and grab first entry then if any are spliced - */ - if (!list_empty(&cfqd->cur_rr) || cfq_get_next_prio_level(cfqd) != -1) - cfqq = list_entry_cfqq(cfqd->cur_rr.next); + cfqq = cfq_get_next_cfqq(cfqd); /* * if we have idle queues and no rt or be queues had pending @@ -844,7 +853,7 @@ static struct cfq_queue *cfq_set_active_ */ static void __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq, - int preempted) + int preempted, int force) { unsigned long now = jiffies; @@ -866,39 +875,43 @@ __cfq_slice_expired(struct cfq_data *cfq else cfqq->slice_left = 0; + cfq_prio_inc(&cfqq->dyn_ioprio, cfqq->ioprio); + if (cfq_cfqq_on_rr(cfqq)) cfq_resort_rr_list(cfqq, preempted); - if (cfqq == cfqd->active_queue) - cfqd->active_queue = NULL; + /* + * use deferred expiry, if there are requests in progress as + * not to disturb the slice of the next queue + */ + if (cfq_cfqq_dispatched(cfqq) && !force) + cfq_mark_cfqq_expired(cfqq); + else { + if (cfqq == cfqd->active_queue) + cfqd->active_queue = NULL; - if (cfqd->active_cic) { - put_io_context(cfqd->active_cic->ioc); - cfqd->active_cic = NULL; - } + if (cfqd->active_cic) { + put_io_context(cfqd->active_cic->ioc); + cfqd->active_cic = NULL; + } - cfqd->dispatch_slice = 0; + cfqd->dispatch_slice = 0; + } } static inline void cfq_slice_expired(struct cfq_data *cfqd, int preempted) { struct cfq_queue *cfqq = cfqd->active_queue; - if (cfqq) { - /* - * use deferred expiry, if there are requests in progress as - * not to disturb the slice of the next queue - */ - if (cfq_cfqq_dispatched(cfqq)) - cfq_mark_cfqq_expired(cfqq); - else - __cfq_slice_expired(cfqd, cfqq, preempted); - } + if (cfqq) + __cfq_slice_expired(cfqd, cfqq, preempted, 0); } static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq) { + unsigned long sl; + WARN_ON(!RB_EMPTY(&cfqq->sort_list)); WARN_ON(cfqq != cfqd->active_queue); @@ -915,16 +928,19 @@ static int cfq_arm_slice_timer(struct cf if (cfqd->active_cic && !cfqd->active_cic->ioc->task) return 0; + /* + * If timer is already running, continue waiting. If not, mark + * us as waiting for a request and arm the idle timer + */ + if (timer_pending(&cfqd->idle_slice_timer)) + return 1; + cfq_mark_cfqq_must_dispatch(cfqq); cfq_mark_cfqq_wait_request(cfqq); - if (!timer_pending(&cfqd->idle_slice_timer)) { - unsigned long slice_left = min(cfqq->slice_end - 1, (unsigned long) cfqd->cfq_slice_idle); - - cfqd->idle_slice_timer.expires = jiffies + slice_left; - add_timer(&cfqd->idle_slice_timer); - } - + sl = min(cfqq->slice_end - 1, (unsigned long) cfqd->cfq_slice_idle); + cfqd->idle_slice_timer.expires = jiffies + sl; + add_timer(&cfqd->idle_slice_timer); return 1; } @@ -966,37 +982,6 @@ static inline struct cfq_rq *cfq_check_f } /* - * Scale schedule slice based on io priority. Use the sync time slice only - * if a queue is marked sync and has sync io queued. A sync queue with async - * io only, should not get full sync slice length. 
- */ -static inline int -cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) -{ - const int base_slice = cfqd->cfq_slice[cfq_cfqq_sync(cfqq)]; - - WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR); - - return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - cfqq->ioprio)); -} - -static inline void -cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) -{ - cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies; -} - -static inline int -cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq) -{ - const int base_rq = cfqd->cfq_slice_async_rq; - - WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR); - - return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio)); -} - -/* * get next queue for service */ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd) @@ -1009,7 +994,7 @@ static struct cfq_queue *cfq_select_queu goto new_queue; if (cfq_cfqq_expired(cfqq)) - goto new_queue; + goto keep_queue; /* * slice has expired @@ -1116,7 +1101,6 @@ cfq_forced_dispatch(struct cfq_data *cfq for (i = 0; i < CFQ_PRIO_LISTS; i++) dispatched += cfq_forced_dispatch_cfqqs(&cfqd->rr_list[i]); - dispatched += cfq_forced_dispatch_cfqqs(&cfqd->busy_rr); dispatched += cfq_forced_dispatch_cfqqs(&cfqd->cur_rr); dispatched += cfq_forced_dispatch_cfqqs(&cfqd->idle_rr); @@ -1143,6 +1127,9 @@ cfq_dispatch_requests(request_queue_t *q if (cfqq) { int max_dispatch; + if (cfq_cfqq_expired(cfqq)) + return 0; + /* * if idle window is disabled, allow queue buildup */ @@ -1184,18 +1171,18 @@ static void cfq_put_queue(struct cfq_que BUG_ON(cfq_cfqq_on_rr(cfqq)); if (unlikely(cfqd->active_queue == cfqq)) { - __cfq_slice_expired(cfqd, cfqq, 0); + __cfq_slice_expired(cfqd, cfqq, 0, 1); cfq_schedule_dispatch(cfqd); } - cfq_put_cfqd(cfqq->cfqd); - /* * it's on the empty list and still hashed */ list_del(&cfqq->cfq_list); hlist_del(&cfqq->cfq_hash); kmem_cache_free(cfq_pool, cfqq); + + cfq_put_cfqd(cfqd); } static inline struct cfq_queue * @@ -1222,17 +1209,17 @@ cfq_find_cfq_hash(struct cfq_data *cfqd, return __cfq_find_cfq_hash(cfqd, key, prio, hash_long(key, CFQ_QHASH_SHIFT)); } -static void cfq_free_io_context(struct cfq_io_context *cic) +static void cfq_free_io_context(struct io_context *ioc) { struct cfq_io_context *__cic; - struct list_head *entry, *next; + struct rb_node *n; + + while ((n = rb_first(&ioc->cic_root)) != NULL) { + __cic = rb_entry(n, struct cfq_io_context, rb_node); - list_for_each_safe(entry, next, &cic->list) { - __cic = list_entry(entry, struct cfq_io_context, list); + rb_erase(&__cic->rb_node, &ioc->cic_root); kmem_cache_free(cfq_ioc_pool, __cic); } - - kmem_cache_free(cfq_ioc_pool, cic); } /* @@ -1248,7 +1235,7 @@ static void cfq_exit_single_io_context(s spin_lock(q->queue_lock); if (unlikely(cic->cfqq == cfqd->active_queue)) { - __cfq_slice_expired(cfqd, cic->cfqq, 0); + __cfq_slice_expired(cfqd, cic->cfqq, 0, 1); cfq_schedule_dispatch(cfqd); } @@ -1257,27 +1244,25 @@ static void cfq_exit_single_io_context(s spin_unlock(q->queue_lock); } -/* - * Another task may update the task cic list, if it is doing a queue lookup - * on its behalf. 
cfq_cic_lock excludes such concurrent updates - */ -static void cfq_exit_io_context(struct cfq_io_context *cic) +static void cfq_exit_io_context(struct io_context *ioc) { struct cfq_io_context *__cic; - struct list_head *entry; unsigned long flags; + struct rb_node *n; local_irq_save(flags); /* * put the reference this task is holding to the various queues */ - list_for_each(entry, &cic->list) { - __cic = list_entry(entry, struct cfq_io_context, list); + n = rb_first(&ioc->cic_root); + while (n != NULL) { + __cic = rb_entry(n, struct cfq_io_context, rb_node); + cfq_exit_single_io_context(__cic); + n = rb_next(n); } - cfq_exit_single_io_context(cic); local_irq_restore(flags); } @@ -1287,9 +1272,9 @@ cfq_alloc_io_context(struct cfq_data *cf struct cfq_io_context *cic = kmem_cache_alloc(cfq_ioc_pool, gfp_mask); if (cic) { - INIT_LIST_HEAD(&cic->list); - cic->cfqq = NULL; + RB_CLEAR(&cic->rb_node); cic->key = NULL; + cic->cfqq = NULL; cic->last_end_request = jiffies; cic->ttime_total = 0; cic->ttime_samples = 0; @@ -1342,6 +1327,11 @@ static void cfq_init_prio_data(struct cf cfqq->org_ioprio = cfqq->ioprio; cfqq->org_ioprio_class = cfqq->ioprio_class; + /* + * start priority + */ + cfqq->dyn_ioprio = cfqq->ioprio; + if (cfq_cfqq_on_rr(cfqq)) cfq_resort_rr_list(cfqq, 0); @@ -1365,12 +1355,16 @@ static inline void changed_ioprio(struct */ static int cfq_ioc_set_ioprio(struct io_context *ioc, unsigned int ioprio) { - struct cfq_io_context *cic = ioc->cic; + struct cfq_io_context *cic; + struct rb_node *n; - changed_ioprio(cic->cfqq); + n = rb_first(&ioc->cic_root); + while (n != NULL) { + cic = rb_entry(n, struct cfq_io_context, rb_node); - list_for_each_entry(cic, &cic->list, list) changed_ioprio(cic->cfqq); + n = rb_next(n); + } return 0; } @@ -1431,14 +1425,62 @@ out: return cfqq; } +static struct cfq_io_context * +cfq_cic_rb_lookup(struct cfq_data *cfqd, struct io_context *ioc) +{ + struct rb_node *n = ioc->cic_root.rb_node; + struct cfq_io_context *cic; + void *key = cfqd; + + while (n) { + cic = rb_entry(n, struct cfq_io_context, rb_node); + + if (key < cic->key) + n = n->rb_left; + else if (key > cic->key) + n = n->rb_right; + else + return cic; + } + + return NULL; +} + +static inline void +cfq_cic_rb_add(struct cfq_data *cfqd, struct io_context *ioc, + struct cfq_io_context *cic) +{ + struct rb_node **p = &ioc->cic_root.rb_node; + struct rb_node *parent = NULL; + struct cfq_io_context *__cic; + + cic->ioc = ioc; + cic->key = cfqd; + + while (*p) { + parent = *p; + __cic = rb_entry(parent, struct cfq_io_context, rb_node); + + if (cic->key < __cic->key) + p = &(*p)->rb_left; + else if (cic->key > __cic->key) + p = &(*p)->rb_right; + else + BUG(); + } + + rb_link_node(&cic->rb_node, parent, p); + rb_insert_color(&cic->rb_node, &ioc->cic_root); + atomic_inc(&cfqd->ref); +} + /* * Setup general io context and cfq io context. There can be several cfq * io contexts per general io context, if this process is doing io to more - * than one device managed by cfq. Note that caller is holding a reference to - * cfqq, so we don't need to worry about it disappearing + * than one device managed by cfq. 
*/ static struct cfq_io_context * -cfq_get_io_context(struct cfq_data *cfqd, pid_t pid, gfp_t gfp_mask) +cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask) { struct io_context *ioc = NULL; struct cfq_io_context *cic; @@ -1449,61 +1491,17 @@ cfq_get_io_context(struct cfq_data *cfqd if (!ioc) return NULL; - if ((cic = ioc->cic) == NULL) { - cic = cfq_alloc_io_context(cfqd, gfp_mask); - - if (cic == NULL) - goto err; - - /* - * manually increment generic io_context usage count, it - * cannot go away since we are already holding one ref to it - */ - ioc->cic = cic; - ioc->set_ioprio = cfq_ioc_set_ioprio; - cic->ioc = ioc; - cic->key = cfqd; - atomic_inc(&cfqd->ref); - } else { - struct cfq_io_context *__cic; - - /* - * the first cic on the list is actually the head itself - */ - if (cic->key == cfqd) - goto out; - - /* - * cic exists, check if we already are there. linear search - * should be ok here, the list will usually not be more than - * 1 or a few entries long - */ - list_for_each_entry(__cic, &cic->list, list) { - /* - * this process is already holding a reference to - * this queue, so no need to get one more - */ - if (__cic->key == cfqd) { - cic = __cic; - goto out; - } - } + ioc->set_ioprio = cfq_ioc_set_ioprio; - /* - * nope, process doesn't have a cic assoicated with this - * cfqq yet. get a new one and add to list - */ - __cic = cfq_alloc_io_context(cfqd, gfp_mask); - if (__cic == NULL) - goto err; + cic = cfq_cic_rb_lookup(cfqd, ioc); + if (cic) + goto out; - __cic->ioc = ioc; - __cic->key = cfqd; - atomic_inc(&cfqd->ref); - list_add(&__cic->list, &cic->list); - cic = __cic; - } + cic = cfq_alloc_io_context(cfqd, gfp_mask); + if (cic == NULL) + goto err; + cfq_cic_rb_add(cfqd, ioc, cic); out: return cic; err: @@ -1610,7 +1608,7 @@ static void cfq_preempt_queue(struct cfq cfqq->slice_left = cfq_prio_to_slice(cfqd, cfqq) / 2; cfqq->slice_end = cfqq->slice_left + jiffies; - __cfq_slice_expired(cfqd, cfqq, 1); + __cfq_slice_expired(cfqd, cfqq, 1, 1); __cfq_set_active_queue(cfqd, cfqq); } @@ -1718,7 +1716,7 @@ static void cfq_completed_request(reques cfq_resort_rr_list(cfqq, 0); } if (cfq_cfqq_expired(cfqq)) { - __cfq_slice_expired(cfqd, cfqq, 0); + __cfq_slice_expired(cfqd, cfqq, 0, 1); cfq_schedule_dispatch(cfqd); } } @@ -1926,21 +1924,22 @@ cfq_set_request(request_queue_t *q, stru might_sleep_if(gfp_mask & __GFP_WAIT); - cic = cfq_get_io_context(cfqd, key, gfp_mask); + cic = cfq_get_io_context(cfqd, gfp_mask); spin_lock_irqsave(q->queue_lock, flags); if (!cic) goto queue_fail; - if (!cic->cfqq) { + cfqq = cic->cfqq; + if (!cfqq || cfqq->key != key) { cfqq = cfq_get_queue(cfqd, key, tsk->ioprio, gfp_mask); if (!cfqq) goto queue_fail; - cic->cfqq = cfqq; - } else - cfqq = cic->cfqq; + if (!cic->cfqq) + cic->cfqq = cfqq; + } cfqq->allocated[rw]++; cfq_clear_cfqq_must_alloc(cfqq); @@ -2126,7 +2125,6 @@ static int cfq_init_queue(request_queue_ for (i = 0; i < CFQ_PRIO_LISTS; i++) INIT_LIST_HEAD(&cfqd->rr_list[i]); - INIT_LIST_HEAD(&cfqd->busy_rr); INIT_LIST_HEAD(&cfqd->cur_rr); INIT_LIST_HEAD(&cfqd->idle_rr); INIT_LIST_HEAD(&cfqd->empty_list); diff --git a/block/elevator.c b/block/elevator.c index e4c5882..5720409 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -650,11 +650,14 @@ void elv_unregister(struct elevator_type read_lock(&tasklist_lock); do_each_thread(g, p) { struct io_context *ioc = p->io_context; - if (ioc && ioc->cic) { - ioc->cic->exit(ioc->cic); - ioc->cic->dtor(ioc->cic); - ioc->cic = NULL; + struct cfq_io_context *cic; + + if (ioc->cic_root.rb_node != 
NULL) { + cic = rb_entry(rb_first(&ioc->cic_root), struct cfq_io_context, rb_node); + cic->exit(ioc); + cic->dtor(ioc); } + if (ioc && ioc->aic) { ioc->aic->exit(ioc->aic); ioc->aic->dtor(ioc->aic); diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c index 5f52e30..7ca5a54 100644 --- a/block/ll_rw_blk.c +++ b/block/ll_rw_blk.c @@ -3256,10 +3256,15 @@ void put_io_context(struct io_context *i BUG_ON(atomic_read(&ioc->refcount) == 0); if (atomic_dec_and_test(&ioc->refcount)) { + struct cfq_io_context *cic; + if (ioc->aic && ioc->aic->dtor) ioc->aic->dtor(ioc->aic); - if (ioc->cic && ioc->cic->dtor) - ioc->cic->dtor(ioc->cic); + + if (ioc->cic_root.rb_node != NULL) { + cic = rb_entry(rb_first(&ioc->cic_root), struct cfq_io_context, rb_node); + cic->dtor(ioc); + } kmem_cache_free(iocontext_cachep, ioc); } @@ -3271,6 +3276,7 @@ void exit_io_context(void) { unsigned long flags; struct io_context *ioc; + struct cfq_io_context *cic; local_irq_save(flags); task_lock(current); @@ -3282,8 +3288,11 @@ void exit_io_context(void) if (ioc->aic && ioc->aic->exit) ioc->aic->exit(ioc->aic); - if (ioc->cic && ioc->cic->exit) - ioc->cic->exit(ioc->cic); + + if (ioc->cic_root.rb_node != NULL) { + cic = rb_entry(rb_first(&ioc->cic_root), struct cfq_io_context, rb_node); + cic->exit(ioc); + } put_io_context(ioc); } @@ -3313,7 +3322,8 @@ struct io_context *current_io_context(gf ret->last_waited = jiffies; /* doesn't matter... */ ret->nr_batch_requests = 0; /* because this is 0 */ ret->aic = NULL; - ret->cic = NULL; + ret->cic_root.rb_node = NULL; + tsk->io_context = ret; } diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index a33a31e..866a914 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -54,13 +54,11 @@ struct as_io_context { struct cfq_queue; struct cfq_io_context { - /* - * circular list of cfq_io_contexts belonging to a process io context - */ - struct list_head list; - struct cfq_queue *cfqq; + struct rb_node rb_node; void *key; + struct cfq_queue *cfqq; + struct io_context *ioc; unsigned long last_end_request; @@ -69,8 +67,8 @@ struct cfq_io_context { unsigned long ttime_samples; unsigned long ttime_mean; - void (*dtor)(struct cfq_io_context *); - void (*exit)(struct cfq_io_context *); + void (*dtor)(struct io_context *); /* destructor */ + void (*exit)(struct io_context *); /* called on task exit */ }; /* @@ -91,7 +89,7 @@ struct io_context { int nr_batch_requests; /* Number of requests left in the batch */ struct as_io_context *aic; - struct cfq_io_context *cic; + struct rb_root cic_root; }; void put_io_context(struct io_context *ioc);
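
For readers who want to poke at the service distribution of the staircase
rotation, here is a minimal user-space sketch. It is not kernel code and not
the test model mentioned in the commit message above; it only borrows the wrap
rule of cfq_prio_inc() and the scaling of cfq_prio_to_slice() from the patch.
The dispatch loop is simplified (no idle class, no sync/async distinction, no
preemption), and the 100 msec base slice is an assumption, not the CFQ default.

#include <stdio.h>

#define CFQ_PRIO_LISTS  8
#define CFQ_SLICE_SCALE 5
#define BASE_SLICE      100     /* assumed slice length in msec */
#define NR_STEPS        1000000

struct queue {
        int ioprio;             /* static priority, 0 (highest) .. 7 */
        int dyn_ioprio;         /* current position on the staircase */
        long long service;      /* accumulated disk time, msec */
};

static struct queue q[CFQ_PRIO_LISTS];
static long long total;

/* same wrap rule as cfq_prio_inc() in the patch */
static void prio_inc(int *p, int low_p)
{
        if (++(*p) == CFQ_PRIO_LISTS)
                *p = low_p;
}

/* same scaling as cfq_prio_to_slice(), applied to the dynamic priority */
static int prio_to_slice(int prio)
{
        return BASE_SLICE + BASE_SLICE / CFQ_SLICE_SCALE * (4 - prio);
}

/* serve every queue currently sitting on stair 'level', bump each one */
static int serve_level(int level)
{
        int i, served = 0;

        for (i = 0; i < CFQ_PRIO_LISTS; i++) {
                if (q[i].dyn_ioprio != level)
                        continue;
                q[i].service += prio_to_slice(level);
                total += prio_to_slice(level);
                prio_inc(&q[i].dyn_ioprio, q[i].ioprio);
                served = 1;
        }
        return served;
}

int main(void)
{
        int cur_prio = 0;
        int i, step, level;

        for (i = 0; i < CFQ_PRIO_LISTS; i++)
                q[i].ioprio = q[i].dyn_ioprio = i;

        for (step = 0; step < NR_STEPS; step++) {
                /*
                 * Like cfq_get_next_cfqq(): scan from cur_prio for the first
                 * stair with queues on it, serve them, then advance cur_prio
                 * by a single level.
                 */
                level = cur_prio;
                do {
                        if (serve_level(level))
                                break;
                        prio_inc(&level, 0);
                } while (level != cur_prio);

                prio_inc(&cur_prio, 0);
        }

        for (i = 0; i < CFQ_PRIO_LISTS; i++)
                printf("prio%d: %6.2f%% disk time\n",
                       i, 100.0 * q[i].service / total);

        return 0;
}

Because a served queue climbs one stair while cur_prio also advances one
level, a prio0 queue is met again at every level of a sweep while a prio7
queue is met only once per sweep, which is roughly where the skew between the
prio0 and prio7 disk-time shares in the table above comes from.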
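
A quick stand-alone check of the per-priority scaling applied by
cfq_prio_to_slice() and cfq_prio_to_maxrq() in the patch. The base_slice and
base_rq values below are placeholders for illustration; the real values come
from cfqd->cfq_slice[] and cfqd->cfq_slice_async_rq, which are configured
elsewhere in cfq-iosched.c and are not visible in this diff.

#include <stdio.h>

#define CFQ_PRIO_LISTS  8
#define CFQ_SLICE_SCALE 5

int main(void)
{
        const int base_slice = 100;     /* assumed sync slice, msec */
        const int base_rq = 2;          /* assumed cfq_slice_async_rq */
        int prio, slice, maxrq;

        for (prio = 0; prio < CFQ_PRIO_LISTS; prio++) {
                slice = base_slice + base_slice / CFQ_SLICE_SCALE * (4 - prio);
                maxrq = 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - prio));

                printf("prio%d: slice %3d msec, max async rq %2d\n",
                       prio, slice, maxrq);
        }
        return 0;
}

With these assumed bases, prio 0 gets a 180 msec slice and up to 32 queued
async requests, while prio 7 gets 40 msec and 4 requests, matching the intent
of giving higher priorities both a longer slice and a deeper async queue.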
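
Finally, a toy illustration (again, not part of the patch) of the motivation
behind the io context list-to-tree change: with one cfq_io_context per
cfq-managed disk, the old per-process list had to be walked linearly on every
io, while the new rb-tree keyed by the cfqd pointer needs only a logarithmic
number of comparisons. A sorted-array binary search stands in for the rb-tree
here; the comparison counts are the point, not absolute speed.

#include <stdio.h>
#include <stdlib.h>

#define NR_DISKS        1000
#define NR_LOOKUPS      100000

int main(void)
{
        long list_cmps = 0, tree_cmps = 0;
        int target, pos, lo, hi, mid, i;

        srand(1);
        for (i = 0; i < NR_LOOKUPS; i++) {
                target = rand() % NR_DISKS;

                /* old scheme: linear walk of the per-process cic list */
                for (pos = 0; pos < NR_DISKS; pos++) {
                        list_cmps++;
                        if (pos == target)
                                break;
                }

                /* new scheme: ordered lookup, as cfq_cic_rb_lookup() does */
                lo = 0;
                hi = NR_DISKS - 1;
                while (lo <= hi) {
                        mid = (lo + hi) / 2;
                        tree_cmps++;
                        if (mid == target)
                                break;
                        if (mid < target)
                                lo = mid + 1;
                        else
                                hi = mid - 1;
                }
        }

        printf("avg comparisons per lookup: list %.1f, tree %.1f\n",
               (double)list_cmps / NR_LOOKUPS, (double)tree_cmps / NR_LOOKUPS);
        return 0;
}

With 1000 disks the list walk averages around 500 comparisons per io against
about ten for the ordered lookup, which gives a feel for the per-io overhead
behind the 40-50% throughput loss reported in the commit message.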