Results restricted to /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/block/

Lines matching refs:cfqg

149 	struct cfq_group *cfqg;
300 static struct cfq_rb_root *service_tree_for(struct cfq_group *cfqg,
304 if (!cfqg)
308 return &cfqg->service_tree_idle;
310 return &cfqg->service_trees[prio][type];
362 blkg_path(&(cfqq)->cfqg->blkg), ##args);
364 #define cfq_log_cfqg(cfqd, cfqg, fmt, args...) \
366 blkg_path(&(cfqg)->blkg), ##args); \
371 #define cfq_log_cfqg(cfqd, cfqg, fmt, args...) do {} while (0);
377 #define for_each_cfqg_st(cfqg, i, j, st) \
379 for (j = 0, st = i < IDLE_WORKLOAD ? &cfqg->service_trees[i][j]\
380 : &cfqg->service_tree_idle; \
384 &cfqg->service_trees[i][j]: NULL) \
423 struct cfq_group *cfqg)
426 return cfqg->service_tree_idle.count;
428 return cfqg->service_trees[wl][ASYNC_WORKLOAD].count
429 + cfqg->service_trees[wl][SYNC_NOIDLE_WORKLOAD].count
430 + cfqg->service_trees[wl][SYNC_WORKLOAD].count;
434 struct cfq_group *cfqg)
436 return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count
437 + cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
525 static inline u64 cfq_scale_slice(unsigned long delta, struct cfq_group *cfqg)
530 do_div(d, cfqg->weight);
555 struct cfq_group *cfqg;
558 cfqg = rb_entry_cfqg(st->active);
559 vdisktime = cfqg->vdisktime;
563 cfqg = rb_entry_cfqg(st->left);
564 vdisktime = min_vdisktime(vdisktime, cfqg->vdisktime);
577 struct cfq_group *cfqg, bool rt)
582 unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg);
584 min_q = min(cfqg->busy_queues_avg[rt], busy);
585 max_q = max(cfqg->busy_queues_avg[rt], busy);
586 cfqg->busy_queues_avg[rt] = (mult * max_q + min_q + round) /
588 return cfqg->busy_queues_avg[rt];
592 cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
596 return cfq_target_latency * cfqg->weight / st->total_weight;
608 unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg,
612 unsigned group_slice = cfq_group_slice(cfqd, cfqq->cfqg);
817 return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) -
822 cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg)
824 return cfqg->vdisktime - st->min_vdisktime;
828 __cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
833 s64 key = cfqg_key(st, cfqg);
849 st->left = &cfqg->rb_node;
851 rb_link_node(&cfqg->rb_node, parent, node);
852 rb_insert_color(&cfqg->rb_node, &st->rb);
856 cfq_group_service_tree_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
862 cfqg->nr_cfqq++;
863 if (cfqg->on_st)
874 cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
876 cfqg->vdisktime = st->min_vdisktime;
878 __cfq_group_service_tree_add(st, cfqg);
879 cfqg->on_st = true;
880 st->total_weight += cfqg->weight;
884 cfq_group_service_tree_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
888 if (st->active == &cfqg->rb_node)
891 BUG_ON(cfqg->nr_cfqq < 1);
892 cfqg->nr_cfqq--;
895 if (cfqg->nr_cfqq)
898 cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
899 cfqg->on_st = false;
900 st->total_weight -= cfqg->weight;
901 if (!RB_EMPTY_NODE(&cfqg->rb_node))
902 cfq_rb_erase(&cfqg->rb_node, st);
903 cfqg->saved_workload_slice = 0;
904 cfq_blkiocg_update_dequeue_stats(&cfqg->blkg, 1);
933 static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
938 int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
939 - cfqg->service_tree_idle.count;
950 cfq_rb_erase(&cfqg->rb_node, st);
951 cfqg->vdisktime += cfq_scale_slice(charge, cfqg);
952 __cfq_group_service_tree_add(st, cfqg);
956 cfqg->saved_workload_slice = cfqd->workload_expires
958 cfqg->saved_workload = cfqd->serving_type;
959 cfqg->saved_serving_prio = cfqd->serving_prio;
961 cfqg->saved_workload_slice = 0;
963 cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
968 cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl);
969 cfq_blkiocg_set_start_empty_time(&cfqg->blkg);
990 struct cfq_group *cfqg = NULL;
997 cfqg = cfqg_of_blkg(blkiocg_lookup_group(blkcg, key));
998 if (cfqg && !cfqg->blkg.dev && bdi->dev && dev_name(bdi->dev)) {
1000 cfqg->blkg.dev = MKDEV(major, minor);
1003 if (cfqg || !create)
1006 cfqg = kzalloc_node(sizeof(*cfqg), GFP_ATOMIC, cfqd->queue->node);
1007 if (!cfqg)
1010 for_each_cfqg_st(cfqg, i, j, st)
1012 RB_CLEAR_NODE(&cfqg->rb_node);
1020 atomic_set(&cfqg->ref, 1);
1030 cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
1033 cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
1036 cfqg->weight = blkcg_get_weight(blkcg, cfqg->blkg.dev);
1039 hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list);
1042 return cfqg;
1052 struct cfq_group *cfqg = NULL;
1056 cfqg = cfq_find_alloc_cfqg(cfqd, cgroup, create);
1057 if (!cfqg && create)
1058 cfqg = &cfqd->root_group;
1060 return cfqg;
1063 static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
1065 atomic_inc(&cfqg->ref);
1066 return cfqg;
1069 static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
1073 cfqg = &cfqq->cfqd->root_group;
1075 cfqq->cfqg = cfqg;
1076 /* cfqq reference on cfqg */
1077 atomic_inc(&cfqq->cfqg->ref);
1080 static void cfq_put_cfqg(struct cfq_group *cfqg)
1085 BUG_ON(atomic_read(&cfqg->ref) <= 0);
1086 if (!atomic_dec_and_test(&cfqg->ref))
1088 for_each_cfqg_st(cfqg, i, j, st)
1090 kfree(cfqg);
1093 static void cfq_destroy_cfqg(struct cfq_data *cfqd, struct cfq_group *cfqg)
1096 BUG_ON(hlist_unhashed(&cfqg->cfqd_node));
1098 hlist_del_init(&cfqg->cfqd_node);
1104 cfq_put_cfqg(cfqg);
1110 struct cfq_group *cfqg;
1112 hlist_for_each_entry_safe(cfqg, pos, n, &cfqd->cfqg_list, cfqd_node) {
1116 * cfqg also.
1118 if (!cfq_blkiocg_del_blkio_group(&cfqg->blkg))
1119 cfq_destroy_cfqg(cfqd, cfqg);
1153 static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
1155 return cfqg;
1159 cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) {
1160 cfqq->cfqg = cfqg;
1164 static inline void cfq_put_cfqg(struct cfq_group *cfqg) {}
1187 && cfqq->cfqg && cfqq->cfqg != &cfqd->root_group) {
1191 cfq_group_service_tree_del(cfqd, cfqq->cfqg);
1192 cfqq->orig_cfqg = cfqq->cfqg;
1193 cfqq->cfqg = &cfqd->root_group;
1199 BUG_ON(cfqq->cfqg != &cfqd->root_group);
1201 cfq_group_service_tree_del(cfqd, cfqq->cfqg);
1202 cfq_put_cfqg(cfqq->cfqg);
1203 cfqq->cfqg = cfqq->orig_cfqg;
1210 service_tree = service_tree_for(cfqq->cfqg, cfqq_prio(cfqq),
1281 cfq_group_service_tree_add(cfqd, cfqq->cfqg);
1392 cfq_group_service_tree_del(cfqd, cfqq->cfqg);
1610 cfq_blkiocg_update_idle_time_stats(&cfqq->cfqg->blkg);
1619 cfq_blkiocg_update_avg_queue_size_stats(&cfqq->cfqg->blkg);
1671 cfq_group_served(cfqd, cfqq->cfqg, cfqq);
1681 if (&cfqq->cfqg->rb_node == cfqd->grp_service_tree.active)
1721 struct cfq_group *cfqg;
1729 cfqg = cfq_get_next_cfqg(cfqd);
1730 if (!cfqg)
1733 for_each_cfqg_st(cfqg, i, j, st)
1833 if (cur_cfqq->cfqg->nr_cfqq == 1)
1846 if (cur_cfqq->cfqg != cfqq->cfqg)
1955 if (group_idle && cfqq->cfqg->nr_cfqq > 1)
1966 cfq_blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg);
1989 cfq_blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq),
2082 struct cfq_group *cfqg, enum wl_prio_t prio)
2092 queue = cfq_rb_first(service_tree_for(cfqg, prio, i));
2104 static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
2111 if (!cfqg) {
2118 if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
2120 else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg))
2133 st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
2144 cfq_choose_wl(cfqd, cfqg, cfqd->serving_prio);
2145 st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
2153 group_slice = cfq_group_slice(cfqd, cfqg);
2156 max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_prio],
2157 cfq_group_busy_queues_wl(cfqd->serving_prio, cfqd, cfqg));
2169 tmp = cfq_target_latency * cfqg_busy_async_queues(cfqd, cfqg);
2189 struct cfq_group *cfqg;
2193 cfqg = cfq_rb_first_group(st);
2194 st->active = &cfqg->rb_node;
2196 return cfqg;
2201 struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd);
2203 cfqd->serving_group = cfqg;
2206 if (cfqg->saved_workload_slice) {
2207 cfqd->workload_expires = jiffies + cfqg->saved_workload_slice;
2208 cfqd->serving_type = cfqg->saved_workload;
2209 cfqd->serving_prio = cfqg->saved_serving_prio;
2213 choose_service_tree(cfqd, cfqg);
2250 if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list)
2298 if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1
2299 && cfqq->cfqg->dispatched) {
2531 struct cfq_group *cfqg, *orig_cfqg;
2541 cfqg = cfqq->cfqg;
2551 cfq_put_cfqg(cfqg);
2892 struct cfq_group *cfqg;
2895 cfqg = cfq_get_cfqg(cfqd, 1);
2926 cfq_link_cfqq_cfqg(cfqq, cfqg);
3232 if (new_cfqq->cfqg != cfqq->cfqg)
3331 &cfqq->cfqg->blkg);
3410 if (cfqq->cfqg->nr_cfqq > 1)
3452 cfq_blkiocg_update_completion_stats(&cfqq->cfqg->blkg,
3510 || cfqq->cfqg->nr_cfqq == 1)
3599 /* Put down rq reference on cfqg */
3694 rq->elevator_private3 = cfq_ref_get_cfqg(cfqq->cfqg);
3832 /* Wait for cfqg->blkg->key accessors to exit their grace periods. */
3858 struct cfq_group *cfqg;
3875 cfqg = &cfqd->root_group;
3876 for_each_cfqg_st(cfqg, i, j, st)
3878 RB_CLEAR_NODE(&cfqg->rb_node);
3881 cfqg->weight = 2*BLKIO_WEIGHT_DEFAULT;
3888 atomic_set(&cfqg->ref, 1);
3890 cfq_blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg,