Search scope: /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/block/

Lines matching defs:cfqd — definitions and uses of cfqd (struct cfq_data *), the per-device state of the CFQ I/O scheduler. The leading number on each line is its line number in the source file.

104 	struct cfq_data *cfqd;
298 static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
359 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \
360 blk_add_trace_msg((cfqd)->queue, "cfq%d%c %s " fmt, (cfqq)->pid, \
364 #define cfq_log_cfqg(cfqd, cfqg, fmt, args...) \
365 blk_add_trace_msg((cfqd)->queue, "%s " fmt, \
369 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \
370 blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
371 #define cfq_log_cfqg(cfqd, cfqg, fmt, args...) do {} while (0);
373 #define cfq_log(cfqd, fmt, args...) \
374 blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
387 static inline bool iops_mode(struct cfq_data *cfqd)
396 if (!cfqd->cfq_slice_idle && cfqd->hw_tag)
422 struct cfq_data *cfqd,
433 static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
461 static inline void *cfqd_dead_key(struct cfq_data *cfqd)
463 return (void *)(cfqd->cic_index << CIC_DEAD_INDEX_SHIFT | CIC_DEAD_KEY);
468 struct cfq_data *cfqd = cic->key;
470 if (unlikely((unsigned long) cfqd & CIC_DEAD_KEY))
473 return cfqd;
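
The two excerpts above (cfqd_dead_key and cic_to_cfqd, lines 461-473) implement a small pointer-tagging scheme: while the scheduler instance is alive, cic->key holds the cfqd pointer itself; at teardown it is replaced by the cic_index shifted up with a low tag bit set, so later lookups can tell a dead key from a live pointer. A minimal standalone model of the idiom follows; the constant names and values are stand-ins for the kernel's CIC_DEAD_KEY and CIC_DEAD_INDEX_SHIFT, not quotes of them.

    #include <stdio.h>

    /* Stand-ins for CIC_DEAD_KEY / CIC_DEAD_INDEX_SHIFT; values are assumptions. */
    #define DEAD_KEY         1UL
    #define DEAD_INDEX_SHIFT 1

    struct sched_data { unsigned cic_index; };

    /* Encode a "dead" key: keep the small index, set the low tag bit. */
    static void *dead_key(struct sched_data *d)
    {
        return (void *)((unsigned long)d->cic_index << DEAD_INDEX_SHIFT | DEAD_KEY);
    }

    /* Decode: a tagged value means the owner is gone, otherwise it is a live pointer. */
    static struct sched_data *key_to_data(void *key)
    {
        if ((unsigned long)key & DEAD_KEY)
            return NULL;
        return key;
    }

    int main(void)
    {
        struct sched_data d = { .cic_index = 7 };

        printf("live key decodes to %p\n", (void *)key_to_data(&d));
        printf("dead key decodes to %p\n", (void *)key_to_data(dead_key(&d)));
        return 0;
    }
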
489 static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
491 if (cfqd->busy_queues) {
492 cfq_log(cfqd, "schedule dispatch");
493 kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
499 struct cfq_data *cfqd = q->elevator->elevator_data;
501 return !cfqd->rq_queued;
509 static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
512 const int base_slice = cfqd->cfq_slice[sync];
520 cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
522 return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
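
cfq_prio_slice() (lines 509-522) turns the per-device base slice and a queue's I/O priority into a time slice: lower ioprio numbers (higher priority) get proportionally longer slices. A runnable model of that arithmetic is below; SLICE_SCALE and PRIO_LEVELS mirror the stock CFQ_SLICE_SCALE (5) and IOPRIO_BE_NR (8), and the 100-jiffy base mirrors the usual default sync slice at HZ=1000 — treat those exact values as assumptions for this vendor kernel.

    #include <stdio.h>

    #define SLICE_SCALE 5   /* assumed, mirrors CFQ_SLICE_SCALE */
    #define PRIO_LEVELS 8   /* assumed, mirrors IOPRIO_BE_NR */

    /* Model of cfq_prio_slice(): each priority step above the lowest adds
     * base_slice/SLICE_SCALE to the slice. */
    static int prio_slice(int base_slice, int prio)
    {
        return base_slice + (base_slice / SLICE_SCALE) * (PRIO_LEVELS - 1 - prio);
    }

    int main(void)
    {
        int base = 100;     /* e.g. cfq_slice[1] = HZ/10 at HZ=1000 */

        for (int prio = 0; prio < PRIO_LEVELS; prio++)
            printf("ioprio %d -> %d jiffies\n", prio, prio_slice(base, prio));
        /* ioprio 0 -> 240, ioprio 4 (the default) -> 160, ioprio 7 -> 100 */
        return 0;
    }
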
576 static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd,
582 unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg);
592 cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
594 struct cfq_rb_root *st = &cfqd->grp_service_tree;
600 cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
602 unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
603 if (cfqd->cfq_latency) {
608 unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg,
610 unsigned sync_slice = cfqd->cfq_slice[1];
612 unsigned group_slice = cfq_group_slice(cfqd, cfqq->cfqg);
615 unsigned base_low_slice = 2 * cfqd->cfq_slice_idle;
629 cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
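
With cfq_latency enabled (cfq_set_prio_slice, lines 600-629), the priority-derived slice is further shrunk when many queues compete inside the group: if the expected latency (sync slice times the average number of busy queues) exceeds the group's slice, each queue's slice is scaled down proportionally, with a small floor derived from 2 * cfq_slice_idle. A sketch of that adjustment under assumed default values, not the kernel's exact code:

    /* Model of the low-latency adjustment in cfq_set_prio_slice(); all inputs
     * are in jiffies and the concrete defaults in the example are assumptions. */
    unsigned adjust_for_latency(unsigned slice, unsigned sync_slice,
                                unsigned avg_queues, unsigned group_slice,
                                unsigned slice_idle)
    {
        unsigned expect_latency = sync_slice * avg_queues;

        if (expect_latency > group_slice) {
            unsigned base_low = 2 * slice_idle;
            /* floor scales with the priority/sync ratio of the original slice */
            unsigned low = base_low * slice / sync_slice;

            if (low > slice)
                low = slice;
            slice = slice * group_slice / expect_latency;
            if (slice < low)
                slice = low;
        }
        return slice;
    }
    /* Example: slice=160, sync_slice=100, 4 busy queues, group_slice=300,
     * slice_idle=8 -> expected latency 400 > 300, so 160 is scaled to 120. */
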
653 cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last)
682 back_max = cfqd->cfq_back_max * 2;
692 d1 = (last - s1) * cfqd->cfq_back_penalty;
699 d2 = (last - s2) * cfqd->cfq_back_penalty;
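
cfq_choose_req() (lines 653-699) prefers the request that is cheaper to reach from the last dispatched position: forward distance counts at face value, a short backward seek (within cfq_back_max, doubled) is weighted by cfq_back_penalty, and anything farther behind is treated as out of range. A small model of that distance weighting; the defaults of 16384 sectors and a penalty of 2 are assumptions carried over from the stock scheduler.

    #include <stdio.h>

    typedef unsigned long long sector_t;

    /* Model of the per-request cost used by cfq_choose_req(). Returns ~0ULL
     * ("effectively infinite") for a backward seek beyond 2 * back_max. */
    static sector_t seek_cost(sector_t last, sector_t pos,
                              unsigned back_max, unsigned back_penalty)
    {
        if (pos >= last)
            return pos - last;
        if (last - pos <= (sector_t)back_max * 2)
            return (last - pos) * back_penalty;
        return ~0ULL;
    }

    int main(void)
    {
        sector_t head = 1000000;

        printf("8192 ahead : cost %llu\n", seek_cost(head, head + 8192, 16384, 2));
        printf("8192 behind: cost %llu\n", seek_cost(head, head - 8192, 16384, 2));
        printf("far behind : cost %llu\n", seek_cost(head, head - 100000, 16384, 2));
        return 0;
    }
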
788 cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
808 return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last));
811 static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
817 return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) -
818 cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
856 cfq_group_service_tree_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
858 struct cfq_rb_root *st = &cfqd->grp_service_tree;
884 cfq_group_service_tree_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
886 struct cfq_rb_root *st = &cfqd->grp_service_tree;
898 cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
933 static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
936 struct cfq_rb_root *st = &cfqd->grp_service_tree;
938 int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
944 if (iops_mode(cfqd))
955 if (time_after(cfqd->workload_expires, jiffies)) {
956 cfqg->saved_workload_slice = cfqd->workload_expires
958 cfqg->saved_workload = cfqd->serving_type;
959 cfqg->saved_serving_prio = cfqd->serving_prio;
963 cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
965 cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u disp=%u charge=%u iops=%u"
967 iops_mode(cfqd), cfqq->nr_sectors);
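
cfq_group_served() (lines 933-967) charges the group that just ran and advances its virtual disk time on the group service tree: the charge is the wall-clock slice used, except in iops_mode(), where the number of dispatched requests is charged instead, and the charge is then scaled inversely with the group's weight. A rough model of that idea; WEIGHT_DEFAULT is an assumed stand-in for the default blkio weight and the scaling below is the concept, not the kernel's fixed-point arithmetic.

    #define WEIGHT_DEFAULT 500      /* assumed default blkio weight */

    unsigned long long charge_group(unsigned long long vdisktime,
                                    unsigned used_slice, unsigned dispatched,
                                    int iops_mode, unsigned weight)
    {
        unsigned long long charge = iops_mode ? dispatched : used_slice;

        /* heavier-weighted groups accrue vdisktime more slowly, so they are
         * re-selected sooner for the same amount of service */
        return vdisktime + charge * WEIGHT_DEFAULT / weight;
    }
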
987 cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create)
991 void *key = cfqd;
994 struct backing_dev_info *bdi = &cfqd->queue->backing_dev_info;
1006 cfqg = kzalloc_node(sizeof(*cfqg), GFP_ATOMIC, cfqd->queue->node);
1030 cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
1033 cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
1038 /* Add group on cfqd list */
1039 hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list);
1049 static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create)
1056 cfqg = cfq_find_alloc_cfqg(cfqd, cgroup, create);
1058 cfqg = &cfqd->root_group;
1073 cfqg = &cfqq->cfqd->root_group;
1093 static void cfq_destroy_cfqg(struct cfq_data *cfqd, struct cfq_group *cfqg)
1107 static void cfq_release_cfq_groups(struct cfq_data *cfqd)
1112 hlist_for_each_entry_safe(cfqg, pos, n, &cfqd->cfqg_list, cfqd_node) {
1119 cfq_destroy_cfqg(cfqd, cfqg);
1140 struct cfq_data *cfqd = key;
1142 spin_lock_irqsave(cfqd->queue->queue_lock, flags);
1143 cfq_destroy_cfqg(cfqd, cfqg_of_blkg(blkg));
1144 spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
1148 static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create)
1150 return &cfqd->root_group;
1163 static void cfq_release_cfq_groups(struct cfq_data *cfqd) {}
1169 * The cfqd->service_trees holds all pending cfq_queue's that have
1173 static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1185 if (!cfqd->cfq_group_isolation
1187 && cfqq->cfqg && cfqq->cfqg != &cfqd->root_group) {
1189 cfq_log_cfqq(cfqd, cfqq, "moving to root group");
1191 cfq_group_service_tree_del(cfqd, cfqq->cfqg);
1193 cfqq->cfqg = &cfqd->root_group;
1194 atomic_inc(&cfqd->root_group.ref);
1196 } else if (!cfqd->cfq_group_isolation
1199 BUG_ON(cfqq->cfqg != &cfqd->root_group);
1201 cfq_group_service_tree_del(cfqd, cfqq->cfqg);
1206 cfq_log_cfqq(cfqd, cfqq, "moved to origin group");
1227 rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
1281 cfq_group_service_tree_add(cfqd, cfqq->cfqg);
1285 cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
1320 static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1335 cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
1336 __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
1348 static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1354 cfq_service_tree_add(cfqd, cfqq, 0);
1355 cfq_prio_tree_add(cfqd, cfqq);
1363 static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1365 cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
1368 cfqd->busy_queues++;
1370 cfq_resort_rr_list(cfqd, cfqq);
1377 static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1379 cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
1392 cfq_group_service_tree_del(cfqd, cfqq->cfqg);
1393 BUG_ON(!cfqd->busy_queues);
1394 cfqd->busy_queues--;
1426 struct cfq_data *cfqd = cfqq->cfqd;
1436 cfq_dispatch_insert(cfqd->queue, __alias);
1439 cfq_add_cfqq_rr(cfqd, cfqq);
1445 cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position);
1451 cfq_prio_tree_add(cfqd, cfqq);
1464 &cfqq->cfqd->serving_group->blkg, rq_data_dir(rq),
1469 cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
1475 cic = cfq_cic_lookup(cfqd, tsk->io_context);
1491 struct cfq_data *cfqd = q->elevator->elevator_data;
1493 cfqd->rq_in_driver++;
1494 cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
1495 cfqd->rq_in_driver);
1497 cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
1502 struct cfq_data *cfqd = q->elevator->elevator_data;
1504 WARN_ON(!cfqd->rq_in_driver);
1505 cfqd->rq_in_driver--;
1506 cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
1507 cfqd->rq_in_driver);
1515 cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);
1520 cfqq->cfqd->rq_queued--;
1532 struct cfq_data *cfqd = q->elevator->elevator_data;
1535 __rq = cfq_find_rq_fmerge(cfqd, bio);
1585 struct cfq_data *cfqd = q->elevator->elevator_data;
1599 cic = cfq_cic_lookup(cfqd, current->io_context);
1607 static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1609 del_timer(&cfqd->idle_slice_timer);
1613 static void __cfq_set_active_queue(struct cfq_data *cfqd,
1617 cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
1618 cfqd->serving_prio, cfqd->serving_type);
1633 cfq_del_timer(cfqd, cfqq);
1636 cfqd->active_queue = cfqq;
1643 __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1646 cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
1649 cfq_del_timer(cfqd, cfqq);
1668 cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
1671 cfq_group_served(cfqd, cfqq->cfqg, cfqq);
1674 cfq_del_cfqq_rr(cfqd, cfqq);
1676 cfq_resort_rr_list(cfqd, cfqq);
1678 if (cfqq == cfqd->active_queue)
1679 cfqd->active_queue = NULL;
1681 if (&cfqq->cfqg->rb_node == cfqd->grp_service_tree.active)
1682 cfqd->grp_service_tree.active = NULL;
1684 if (cfqd->active_cic) {
1685 put_io_context(cfqd->active_cic->ioc);
1686 cfqd->active_cic = NULL;
1690 static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
1692 struct cfq_queue *cfqq = cfqd->active_queue;
1695 __cfq_slice_expired(cfqd, cfqq, timed_out);
1702 static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
1705 service_tree_for(cfqd->serving_group, cfqd->serving_prio,
1706 cfqd->serving_type);
1708 if (!cfqd->rq_queued)
1719 static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd)
1726 if (!cfqd->rq_queued)
1729 cfqg = cfq_get_next_cfqg(cfqd);
1742 static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
1746 cfqq = cfq_get_next_queue(cfqd);
1748 __cfq_set_active_queue(cfqd, cfqq);
1752 static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
1755 if (blk_rq_pos(rq) >= cfqd->last_position)
1756 return blk_rq_pos(rq) - cfqd->last_position;
1758 return cfqd->last_position - blk_rq_pos(rq);
1761 static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1764 return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR;
1767 static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
1770 struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio];
1773 sector_t sector = cfqd->last_position;
1782 __cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL);
1791 if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
1802 if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
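
cfqq_close() (lines 1767-1802) searches the per-priority rbtree of queues, keyed by the sector of each queue's next request, for another queue whose next request lies close to the current head position, testing the entry found by the lookup and then its neighbour. A simplified model of the "is it close enough" test applied at both checkpoints (lines 1752-1764); CLOSE_THR is an assumed stand-in for CFQQ_CLOSE_THR.

    #define CLOSE_THR (8 * 1024)    /* sectors; assumption */

    /* Model of cfq_rq_close()/cfq_dist_from_last(): a candidate is a close
     * cooperator if its next request is within a fixed sector threshold of the
     * last dispatched position, in either direction. */
    int is_close(unsigned long long last_pos, unsigned long long rq_pos)
    {
        unsigned long long dist = rq_pos >= last_pos ? rq_pos - last_pos
                                                     : last_pos - rq_pos;
        return dist <= CLOSE_THR;
    }
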
1809 * cfqd - obvious
1814 * one request, and that cfqd->last_position reflects a position on the disk
1818 static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
1841 cfqq = cfqq_close(cfqd, cur_cfqq);
1870 static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1878 if (!cfqd->cfq_slice_idle)
1887 !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag))
1896 cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d",
1901 static void cfq_arm_slice_timer(struct cfq_data *cfqd)
1903 struct cfq_queue *cfqq = cfqd->active_queue;
1912 if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
1921 if (!cfq_should_idle(cfqd, cfqq)) {
1923 if (cfqd->cfq_group_idle)
1924 group_idle = cfqd->cfq_group_idle;
1938 cic = cfqd->active_cic;
1949 cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%d",
1961 sl = cfqd->cfq_group_idle;
1963 sl = cfqd->cfq_slice_idle;
1965 mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
1967 cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
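
cfq_arm_slice_timer() (lines 1901-1967) decides whether to keep the disk idle briefly in the hope that the active queue issues another nearby request: idling is skipped when slice_idle is 0 or when the device is non-rotational with its own command queueing, and the timer is armed for slice_idle, or for group_idle when waiting on behalf of the whole group. A compact model of that decision; the real function also weighs think time and pending dispatches, which are omitted here.

    /* Returns the idle period to arm (in jiffies), or 0 for "do not idle".
     * Parameter names are descriptive, not the kernel's. */
    unsigned pick_idle_window(unsigned slice_idle, unsigned group_idle,
                              int nonrot, int hw_tag, int group_wait)
    {
        if (!slice_idle)
            return 0;               /* idling disabled */
        if (nonrot && hw_tag)
            return 0;               /* SSD with its own queueing: don't idle */
        return group_wait ? group_idle : slice_idle;
    }
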
1976 struct cfq_data *cfqd = q->elevator->elevator_data;
1979 cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");
1981 cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
1987 cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
2012 cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
2017 cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2019 const int base_rq = cfqd->cfq_slice_async_rq;
2081 static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
2104 static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
2112 cfqd->serving_prio = IDLE_WORKLOAD;
2113 cfqd->workload_expires = jiffies + 1;
2118 if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
2119 cfqd->serving_prio = RT_WORKLOAD;
2120 else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg))
2121 cfqd->serving_prio = BE_WORKLOAD;
2123 cfqd->serving_prio = IDLE_WORKLOAD;
2124 cfqd->workload_expires = jiffies + 1;
2133 st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
2139 if (count && !time_after(jiffies, cfqd->workload_expires))
2143 cfqd->serving_type =
2144 cfq_choose_wl(cfqd, cfqg, cfqd->serving_prio);
2145 st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
2153 group_slice = cfq_group_slice(cfqd, cfqg);
2156 max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_prio],
2157 cfq_group_busy_queues_wl(cfqd->serving_prio, cfqd, cfqg));
2159 if (cfqd->serving_type == ASYNC_WORKLOAD) {
2169 tmp = cfq_target_latency * cfqg_busy_async_queues(cfqd, cfqg);
2170 tmp = tmp/cfqd->busy_queues;
2175 slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1];
2178 slice = max(slice, 2 * cfqd->cfq_slice_idle);
2181 cfq_log(cfqd, "workload slice:%d", slice);
2182 cfqd->workload_expires = jiffies + slice;
2183 cfqd->noidle_tree_requires_idle = false;
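
choose_service_tree() (lines 2104-2183) sizes the slice for the workload about to be served: the group's slice is split across its busy queues for that workload, an async workload is additionally capped by the target latency and scaled by the async/sync slice ratio, and a sync workload gets at least twice cfq_slice_idle. A model of that sizing; the concrete defaults below (target latency 300, async slice 40, sync slice 100, slice_idle 8, all jiffies at HZ=1000) are assumptions mirroring the stock tunables.

    unsigned size_workload_slice(unsigned group_slice, unsigned count,
                                 unsigned busy_in_workload, int is_async,
                                 unsigned busy_async, unsigned busy_total)
    {
        unsigned slice = group_slice * count /
                         (busy_in_workload ? busy_in_workload : 1);

        if (is_async) {
            unsigned cap = 300 * busy_async / busy_total;

            if (slice > cap)
                slice = cap;            /* keep async under target latency */
            slice = slice * 40 / 100;   /* async/sync slice ratio */
        } else if (slice < 2 * 8) {
            slice = 2 * 8;              /* sync gets at least 2 * slice_idle */
        }
        return slice;
    }
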
2186 static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
2188 struct cfq_rb_root *st = &cfqd->grp_service_tree;
2199 static void cfq_choose_cfqg(struct cfq_data *cfqd)
2201 struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd);
2203 cfqd->serving_group = cfqg;
2207 cfqd->workload_expires = jiffies + cfqg->saved_workload_slice;
2208 cfqd->serving_type = cfqg->saved_workload;
2209 cfqd->serving_prio = cfqg->saved_serving_prio;
2211 cfqd->workload_expires = jiffies - 1;
2213 choose_service_tree(cfqd, cfqg);
2220 static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
2224 cfqq = cfqd->active_queue;
2228 if (!cfqd->rq_queued)
2251 && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
2271 new_cfqq = cfq_close_cooperator(cfqd, cfqq);
2283 if (timer_pending(&cfqd->idle_slice_timer)) {
2288 if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
2298 if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1
2305 cfq_slice_expired(cfqd, 0);
2312 cfq_choose_cfqg(cfqd);
2314 cfqq = cfq_set_active_queue(cfqd, new_cfqq);
2324 cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
2331 __cfq_slice_expired(cfqq->cfqd, cfqq, 0);
2339 static int cfq_forced_dispatch(struct cfq_data *cfqd)
2345 cfq_slice_expired(cfqd, 0);
2346 while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) {
2347 __cfq_set_active_queue(cfqd, cfqq);
2351 BUG_ON(cfqd->busy_queues);
2353 cfq_log(cfqd, "forced_dispatch=%d", dispatched);
2357 static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
2363 if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched,
2370 static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2377 if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_flight[BLK_RW_ASYNC])
2383 if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq))
2386 max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1);
2403 if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq))
2409 if (cfqd->busy_queues == 1)
2418 max_dispatch = cfqd->cfq_quantum;
2426 if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
2427 unsigned long last_sync = jiffies - cfqd->last_delayed_sync;
2430 depth = last_sync / cfqd->cfq_slice[1];
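
When low_latency is set, cfq_may_dispatch() (lines 2370-2430) ramps the async dispatch depth up only gradually after sync I/O: the longer it has been since the last delayed sync request, the more async requests may sit in the driver at once, measured in units of the sync slice. A worked model; the 100-jiffy sync slice is an assumed default.

    #include <stdio.h>

    /* Model of the async depth throttle: depth grows by one for every
     * sync-slice worth of time since the last delayed sync I/O. */
    static unsigned async_depth(unsigned long now, unsigned long last_delayed_sync,
                                unsigned sync_slice, unsigned max_dispatch,
                                unsigned already_dispatched)
    {
        unsigned depth = (now - last_delayed_sync) / sync_slice;

        if (!depth && !already_dispatched)
            depth = 1;                  /* always allow some progress */
        return depth < max_dispatch ? depth : max_dispatch;
    }

    int main(void)
    {
        /* 250 jiffies after the last delayed sync request: depth 2 of max 4. */
        printf("depth = %u\n", async_depth(1250, 1000, 100, 4, 0));
        return 0;
    }
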
2447 static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2453 if (!cfq_may_dispatch(cfqd, cfqq))
2466 cfq_dispatch_insert(cfqd->queue, rq);
2468 if (!cfqd->active_cic) {
2472 cfqd->active_cic = cic;
2484 struct cfq_data *cfqd = q->elevator->elevator_data;
2487 if (!cfqd->busy_queues)
2491 return cfq_forced_dispatch(cfqd);
2493 cfqq = cfq_select_queue(cfqd);
2500 if (!cfq_dispatch_request(cfqd, cfqq))
2510 if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
2511 cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
2514 cfq_slice_expired(cfqd, 0);
2517 cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
2530 struct cfq_data *cfqd = cfqq->cfqd;
2538 cfq_log_cfqq(cfqd, cfqq, "put_queue");
2544 if (unlikely(cfqd->active_queue == cfqq)) {
2545 __cfq_slice_expired(cfqd, cfqq, 0);
2546 cfq_schedule_dispatch(cfqd);
2663 static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2665 if (unlikely(cfqq == cfqd->active_queue)) {
2666 __cfq_slice_expired(cfqd, cfqq, 0);
2667 cfq_schedule_dispatch(cfqd);
2675 static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
2686 cic->key = cfqd_dead_key(cfqd);
2692 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
2697 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]);
2705 struct cfq_data *cfqd = cic_to_cfqd(cic);
2707 if (cfqd) {
2708 struct request_queue *q = cfqd->queue;
2718 if (cic->key == cfqd)
2719 __cfq_exit_single_io_context(cfqd, cic);
2735 cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
2740 cfqd->queue->node);
2798 struct cfq_data *cfqd = cic_to_cfqd(cic);
2802 if (unlikely(!cfqd))
2805 spin_lock_irqsave(cfqd->queue->queue_lock, flags);
2810 new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic->ioc,
2822 spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
2831 static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2839 cfqq->cfqd = cfqd;
2855 struct cfq_data *cfqd = cic_to_cfqd(cic);
2859 if (unlikely(!cfqd))
2862 q = cfqd->queue;
2871 cfq_log_cfqq(cfqd, sync_cfqq, "changed cgroup");
2887 cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
2895 cfqg = cfq_get_cfqg(cfqd, 1);
2896 cic = cfq_cic_lookup(cfqd, ioc);
2904 if (!cfqq || cfqq == &cfqd->oom_cfqq) {
2910 spin_unlock_irq(cfqd->queue->queue_lock);
2913 cfqd->queue->node);
2914 spin_lock_irq(cfqd->queue->queue_lock);
2920 cfqd->queue->node);
2924 cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
2927 cfq_log_cfqq(cfqd, cfqq, "alloced");
2929 cfqq = &cfqd->oom_cfqq;
2939 cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
2943 return &cfqd->async_cfqq[0][ioprio];
2945 return &cfqd->async_cfqq[1][ioprio];
2947 return &cfqd->async_idle_cfqq;
2954 cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
2963 async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
2968 cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, gfp_mask);
2986 cfq_drop_dead_cic(struct cfq_data *cfqd, struct io_context *ioc,
2992 BUG_ON(cic->key != cfqd_dead_key(cfqd));
2998 radix_tree_delete(&ioc->radix_root, cfqd->cic_index);
3006 cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc)
3020 if (cic && cic->key == cfqd) {
3026 cic = radix_tree_lookup(&ioc->radix_root, cfqd->cic_index);
3030 if (unlikely(cic->key != cfqd)) {
3031 cfq_drop_dead_cic(cfqd, ioc, cic);
3046 * Add cic into ioc, using cfqd as the search key. This enables us to lookup
3048 * Also adds the cic to a per-cfqd list, used when this queue is removed.
3050 static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
3059 cic->key = cfqd;
3063 cfqd->cic_index, cic);
3071 spin_lock_irqsave(cfqd->queue->queue_lock, flags);
3072 list_add(&cic->queue_list, &cfqd->cic_list);
3073 spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
3089 cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
3096 ioc = get_io_context(gfp_mask, cfqd->queue->node);
3100 cic = cfq_cic_lookup(cfqd, ioc);
3104 cic = cfq_alloc_io_context(cfqd, gfp_mask);
3108 if (cfq_cic_link(cfqd, ioc, cic, gfp_mask))
3129 cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
3132 unsigned long ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle);
3140 cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3153 if (blk_queue_nonrot(cfqd->queue))
3164 cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3180 if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
3184 if (cic->ttime_mean > cfqd->cfq_slice_idle)
3191 cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
3204 cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
3209 cfqq = cfqd->active_queue;
3239 if (cfqd->serving_type == SYNC_NOIDLE_WORKLOAD &&
3258 if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
3265 if (cfq_rq_close(cfqd, cfqq, rq))
3275 static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3277 cfq_log_cfqq(cfqd, cfqq, "preempt");
3278 cfq_slice_expired(cfqd, 1);
3286 cfq_service_tree_add(cfqd, cfqq, 1);
3297 cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3302 cfqd->rq_queued++;
3306 cfq_update_io_thinktime(cfqd, cic);
3307 cfq_update_io_seektime(cfqd, cfqq, rq);
3308 cfq_update_idle_window(cfqd, cfqq, cic);
3312 if (cfqq == cfqd->active_queue) {
3325 cfqd->busy_queues > 1) {
3326 cfq_del_timer(cfqd, cfqq);
3328 __blk_run_queue(cfqd->queue);
3335 } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
3342 cfq_preempt_queue(cfqd, cfqq);
3343 __blk_run_queue(cfqd->queue);
3349 struct cfq_data *cfqd = q->elevator->elevator_data;
3352 cfq_log_cfqq(cfqd, cfqq, "insert_request");
3355 rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
3359 &cfqd->serving_group->blkg, rq_data_dir(rq),
3361 cfq_rq_enqueued(cfqd, cfqq, rq);
3368 static void cfq_update_hw_tag(struct cfq_data *cfqd)
3370 struct cfq_queue *cfqq = cfqd->active_queue;
3372 if (cfqd->rq_in_driver > cfqd->hw_tag_est_depth)
3373 cfqd->hw_tag_est_depth = cfqd->rq_in_driver;
3375 if (cfqd->hw_tag == 1)
3378 if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
3379 cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
3389 CFQ_HW_QUEUE_MIN && cfqd->rq_in_driver < CFQ_HW_QUEUE_MIN)
3392 if (cfqd->hw_tag_samples++ < 50)
3395 if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN)
3396 cfqd->hw_tag = 1;
3398 cfqd->hw_tag = 0;
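
cfq_update_hw_tag() (lines 3368-3398) infers whether the device does its own command queueing: it tracks the largest number of requests ever seen in the driver and, after roughly 50 samples, sets hw_tag if that estimate ever reached the CFQ_HW_QUEUE_MIN threshold. A condensed model of the sampling loop; the kernel's extra guards against penalizing idling queues are left out.

    #define HW_QUEUE_MIN 5

    /* hw_tag starts at -1 ("undecided"); once it flips to 1 it stays there. */
    struct hwtag_est { int hw_tag; unsigned samples; unsigned est_depth; };

    void hwtag_sample(struct hwtag_est *e, unsigned rq_in_driver)
    {
        if (rq_in_driver > e->est_depth)
            e->est_depth = rq_in_driver;
        if (e->hw_tag == 1)
            return;                     /* once detected, stick with it */
        if (e->samples++ < 50)
            return;                     /* keep collecting evidence */
        e->hw_tag = (e->est_depth >= HW_QUEUE_MIN);
    }
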
3401 static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3403 struct cfq_io_context *cic = cfqd->active_cic;
3437 struct cfq_data *cfqd = cfqq->cfqd;
3442 cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d",
3445 cfq_update_hw_tag(cfqd);
3447 WARN_ON(!cfqd->rq_in_driver);
3449 cfqd->rq_in_driver--;
3456 cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
3460 if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
3461 cfqd->last_delayed_sync = now;
3468 if (cfqd->active_queue == cfqq) {
3472 cfq_set_prio_slice(cfqd, cfqq);
3480 if (cfq_should_wait_busy(cfqd, cfqq)) {
3481 unsigned long extend_sl = cfqd->cfq_slice_idle;
3482 if (!cfqd->cfq_slice_idle)
3483 extend_sl = cfqd->cfq_group_idle;
3486 cfq_log_cfqq(cfqd, cfqq, "will busy wait");
3498 cfq_slice_expired(cfqd, 1);
3500 !cfq_close_cooperator(cfqd, cfqq)) {
3501 cfqd->noidle_tree_requires_idle |=
3508 if (cfqd->serving_type == SYNC_WORKLOAD
3509 || cfqd->noidle_tree_requires_idle
3511 cfq_arm_slice_timer(cfqd);
3515 if (!cfqd->rq_in_driver)
3516 cfq_schedule_dispatch(cfqd);
3555 struct cfq_data *cfqd = q->elevator->elevator_data;
3566 cic = cfq_cic_lookup(cfqd, tsk->io_context);
3608 cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_context *cic,
3611 cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
3645 struct cfq_data *cfqd = q->elevator->elevator_data;
3654 cic = cfq_get_io_context(cfqd, gfp_mask);
3663 if (!cfqq || cfqq == &cfqd->oom_cfqq) {
3664 cfqq = cfq_get_queue(cfqd, is_sync, cic->ioc, gfp_mask);
3671 cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
3684 cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
3701 cfq_schedule_dispatch(cfqd);
3703 cfq_log(cfqd, "set_request fail");
3709 struct cfq_data *cfqd =
3711 struct request_queue *q = cfqd->queue;
3714 __blk_run_queue(cfqd->queue);
3723 struct cfq_data *cfqd = (struct cfq_data *) data;
3728 cfq_log(cfqd, "idle timer fired");
3730 spin_lock_irqsave(cfqd->queue->queue_lock, flags);
3732 cfqq = cfqd->active_queue;
3752 if (!cfqd->busy_queues)
3767 cfq_slice_expired(cfqd, timed_out);
3769 cfq_schedule_dispatch(cfqd);
3771 spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
3774 static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
3776 del_timer_sync(&cfqd->idle_slice_timer);
3777 cancel_work_sync(&cfqd->unplug_work);
3780 static void cfq_put_async_queues(struct cfq_data *cfqd)
3785 if (cfqd->async_cfqq[0][i])
3786 cfq_put_queue(cfqd->async_cfqq[0][i]);
3787 if (cfqd->async_cfqq[1][i])
3788 cfq_put_queue(cfqd->async_cfqq[1][i]);
3791 if (cfqd->async_idle_cfqq)
3792 cfq_put_queue(cfqd->async_idle_cfqq);
3802 struct cfq_data *cfqd = e->elevator_data;
3803 struct request_queue *q = cfqd->queue;
3805 cfq_shutdown_timer_wq(cfqd);
3809 if (cfqd->active_queue)
3810 __cfq_slice_expired(cfqd, cfqd->active_queue, 0);
3812 while (!list_empty(&cfqd->cic_list)) {
3813 struct cfq_io_context *cic = list_entry(cfqd->cic_list.next,
3817 __cfq_exit_single_io_context(cfqd, cic);
3820 cfq_put_async_queues(cfqd);
3821 cfq_release_cfq_groups(cfqd);
3822 cfq_blkiocg_del_blkio_group(&cfqd->root_group.blkg);
3826 cfq_shutdown_timer_wq(cfqd);
3829 ida_remove(&cic_index_ida, cfqd->cic_index);
3833 call_rcu(&cfqd->rcu, cfq_cfqd_free);
3856 struct cfq_data *cfqd;
3865 cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
3866 if (!cfqd)
3869 cfqd->cic_index = i;
3872 cfqd->grp_service_tree = CFQ_RB_ROOT;
3875 cfqg = &cfqd->root_group;
3891 (void *)cfqd, 0);
3896 * zeroed cfqd on alloc), but better be safe in case someone decides
3900 cfqd->prio_trees[i] = RB_ROOT;
3907 cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
3908 atomic_inc(&cfqd->oom_cfqq.ref);
3909 cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, &cfqd->root_group);
3911 INIT_LIST_HEAD(&cfqd->cic_list);
3913 cfqd->queue = q;
3915 init_timer(&cfqd->idle_slice_timer);
3916 cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
3917 cfqd->idle_slice_timer.data = (unsigned long) cfqd;
3919 INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
3921 cfqd->cfq_quantum = cfq_quantum;
3922 cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
3923 cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
3924 cfqd->cfq_back_max = cfq_back_max;
3925 cfqd->cfq_back_penalty = cfq_back_penalty;
3926 cfqd->cfq_slice[0] = cfq_slice_async;
3927 cfqd->cfq_slice[1] = cfq_slice_sync;
3928 cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
3929 cfqd->cfq_slice_idle = cfq_slice_idle;
3930 cfqd->cfq_group_idle = cfq_group_idle;
3931 cfqd->cfq_latency = 1;
3932 cfqd->cfq_group_isolation = 0;
3933 cfqd->hw_tag = -1;
3938 cfqd->last_delayed_sync = jiffies - HZ;
3939 return cfqd;
3991 struct cfq_data *cfqd = e->elevator_data; \
3997 SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
3998 SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
3999 SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
4000 SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
4001 SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
4002 SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
4003 SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
4004 SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
4005 SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
4006 SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
4007 SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
4008 SHOW_FUNCTION(cfq_group_isolation_show, cfqd->cfq_group_isolation, 0);
4014 struct cfq_data *cfqd = e->elevator_data; \
4027 STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
4028 STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
4030 STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
4032 STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
4033 STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
4035 STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
4036 STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
4037 STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
4038 STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
4039 STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
4041 STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
4042 STORE_FUNCTION(cfq_group_isolation_store, &cfqd->cfq_group_isolation, 0, 1, 0);
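
The SHOW_FUNCTION / STORE_FUNCTION lines above (3991-4042) are macro instantiations: one macro stamps out a sysfs getter per tunable, optionally converting jiffies to milliseconds, and another stamps out a range-clamped setter. A compilable model of that stamping pattern; the struct, conversion factor and limits here are simplified stand-ins, not the elevator sysfs API.

    #include <stdio.h>

    struct tunables { unsigned quantum, slice_idle; };

    #define MS_PER_JIFFY 1      /* stand-in for jiffies_to_msecs() at HZ=1000 */

    #define SHOW_FUNCTION(name, field, conv)                \
    static unsigned name(const struct tunables *t)          \
    {                                                       \
        unsigned v = t->field;                              \
        return (conv) ? v * MS_PER_JIFFY : v;               \
    }

    #define STORE_FUNCTION(name, field, lo, hi)             \
    static void name(struct tunables *t, unsigned v)        \
    {                                                       \
        if (v < (lo)) v = (lo);                             \
        if (v > (hi)) v = (hi);                             \
        t->field = v;                                       \
    }

    SHOW_FUNCTION(quantum_show, quantum, 0)
    SHOW_FUNCTION(slice_idle_show, slice_idle, 1)
    STORE_FUNCTION(quantum_store, quantum, 1, 64)

    int main(void)
    {
        struct tunables t = { .quantum = 8, .slice_idle = 8 };

        quantum_store(&t, 0);   /* clamped up to the minimum of 1 */
        printf("quantum=%u slice_idle_ms=%u\n", quantum_show(&t), slice_idle_show(&t));
        return 0;
    }
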