Lines matching references to rw in block/blk-throttle.c (Linux block-layer I/O throttling). Throughout, rw selects the data direction, READ = 0 or WRITE = 1, indexing the per-direction limit, slice, and queue state.

147 static uint64_t tg_bps_limit(struct throtl_grp *tg, int rw)
157 ret = tg->bps[rw][td->limit_index];
161 tg->iops[rw][td->limit_index])
167 if (td->limit_index == LIMIT_MAX && tg->bps[rw][LIMIT_LOW] &&
168 tg->bps[rw][LIMIT_LOW] != tg->bps[rw][LIMIT_MAX]) {
171 adjusted = throtl_adjusted_limit(tg->bps[rw][LIMIT_LOW], td);
172 ret = min(tg->bps[rw][LIMIT_MAX], adjusted);
177 static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw)
187 ret = tg->iops[rw][td->limit_index];
191 tg->bps[rw][td->limit_index])
197 if (td->limit_index == LIMIT_MAX && tg->iops[rw][LIMIT_LOW] &&
198 tg->iops[rw][LIMIT_LOW] != tg->iops[rw][LIMIT_MAX]) {
201 adjusted = throtl_adjusted_limit(tg->iops[rw][LIMIT_LOW], td);
204 ret = min_t(unsigned int, tg->iops[rw][LIMIT_MAX], adjusted);
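
Note: tg_bps_limit() (147) and tg_iops_limit() (177) share one selection pattern: read the limit for the current limit_index and, when the group runs at LIMIT_MAX with a distinct LIMIT_LOW configured, scale the low limit up and clamp it by the max. A compilable userspace sketch of that pattern follows; adjusted_limit() and its percent scale are stand-ins, not the kernel's throtl_adjusted_limit() arithmetic:

#include <stdint.h>

enum { LIMIT_LOW, LIMIT_MAX, LIMIT_CNT };

/* hypothetical stand-in for throtl_adjusted_limit(): scale the low
 * limit up by scale_pct percent as the group earns headroom */
static uint64_t adjusted_limit(uint64_t low, unsigned int scale_pct)
{
    return low + low * scale_pct / 100;
}

static uint64_t effective_limit(const uint64_t limits[LIMIT_CNT],
                                int limit_index, unsigned int scale_pct)
{
    uint64_t ret = limits[limit_index];

    /* at LIMIT_MAX, a configured low limit that differs from the
     * max is scaled up, but clamped so it never passes the max */
    if (limit_index == LIMIT_MAX && limits[LIMIT_LOW] &&
        limits[LIMIT_LOW] != limits[LIMIT_MAX]) {
        uint64_t adj = adjusted_limit(limits[LIMIT_LOW], scale_pct);

        ret = adj < limits[LIMIT_MAX] ? adj : limits[LIMIT_MAX];
    }
    return ret;
}

The clamp (the min()/min_t() at 172 and 204) is why an upgraded group can never exceed its configured maximum.
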
342 int rw;
356 for (rw = READ; rw <= WRITE; rw++) {
357 throtl_qnode_init(&tg->qnode_on_self[rw], tg);
358 throtl_qnode_init(&tg->qnode_on_parent[rw], tg);
422 int rw;
424 for (rw = READ; rw <= WRITE; rw++) {
425 tg->has_rules_iops[rw] =
426 (parent_tg && parent_tg->has_rules_iops[rw]) ||
428 tg_iops_limit(tg, rw) != UINT_MAX);
429 tg->has_rules_bps[rw] =
430 (parent_tg && parent_tg->has_rules_bps[rw]) ||
432 (tg_bps_limit(tg, rw) != U64_MAX));
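
Note: tg_update_has_rules() (424) propagates "is any limit configured" down the hierarchy per direction, so the hot path can skip groups that have no rules at all. A sketch of the flag computation (the kernel additionally gates on td->limit_valid, omitted here):

#include <stdbool.h>
#include <stdint.h>
#include <limits.h>

enum { READ, WRITE };           /* 0 and 1, as in the kernel */

struct grp {
    struct grp *parent;         /* NULL at the root */
    uint64_t bps[2];            /* UINT64_MAX == no bps rule */
    unsigned int iops[2];       /* UINT_MAX == no iops rule */
    bool has_rules_bps[2];
    bool has_rules_iops[2];
};

/* a group has rules if an ancestor has rules or its own effective
 * limit is not the "unlimited" sentinel */
static void update_has_rules(struct grp *g)
{
    for (int rw = READ; rw <= WRITE; rw++) {
        g->has_rules_iops[rw] =
            (g->parent && g->parent->has_rules_iops[rw]) ||
            g->iops[rw] != UINT_MAX;
        g->has_rules_bps[rw] =
            (g->parent && g->parent->has_rules_bps[rw]) ||
            g->bps[rw] != UINT64_MAX;
    }
}
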
634 bool rw, unsigned long start)
636 tg->bytes_disp[rw] = 0;
637 tg->io_disp[rw] = 0;
638 tg->carryover_bytes[rw] = 0;
639 tg->carryover_ios[rw] = 0;
647 if (time_after(start, tg->slice_start[rw]))
648 tg->slice_start[rw] = start;
650 tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
653 rw == READ ? 'R' : 'W', tg->slice_start[rw],
654 tg->slice_end[rw], jiffies);
657 static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw,
660 tg->bytes_disp[rw] = 0;
661 tg->io_disp[rw] = 0;
662 tg->slice_start[rw] = jiffies;
663 tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
665 tg->carryover_bytes[rw] = 0;
666 tg->carryover_ios[rw] = 0;
671 rw == READ ? 'R' : 'W', tg->slice_start[rw],
672 tg->slice_end[rw], jiffies);
675 static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
678 tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice);
681 static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
684 throtl_set_slice_end(tg, rw, jiffy_end);
687 rw == READ ? 'R' : 'W', tg->slice_start[rw],
688 tg->slice_end[rw], jiffies);
692 static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
694 if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
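
Note: lines 634-694 are the slice lifecycle: throtl_start_new_slice*() zero the dispatch and carryover counters and open a [slice_start, slice_end] window, throtl_set_slice_end()/throtl_extend_slice() round the end up to a multiple of throtl_slice, and throtl_slice_used() reports whether jiffies has left the window. A compilable sketch of the three operations (the kernel's wraparound-safe time comparisons are elided):

#include <stdbool.h>

/* round x up to the next multiple of y, as roundup() does in
 * throtl_set_slice_end() */
#define ROUNDUP(x, y)   ((((x) + (y) - 1) / (y)) * (y))

struct slice {
    unsigned long start, end;           /* jiffies window */
    unsigned long long bytes_disp;      /* dispatched this slice */
    unsigned int io_disp;
    long long carryover_bytes, carryover_ios;
};

static void slice_start(struct slice *s, unsigned long now,
                        unsigned long throtl_slice)
{
    s->bytes_disp = 0;
    s->io_disp = 0;
    s->carryover_bytes = 0;
    s->carryover_ios = 0;
    s->start = now;
    s->end = now + throtl_slice;
}

static void slice_extend(struct slice *s, unsigned long jiffy_end,
                         unsigned long throtl_slice)
{
    s->end = ROUNDUP(jiffy_end, throtl_slice);
}

/* "used" == the clock has left the [start, end] window */
static bool slice_used(const struct slice *s, unsigned long now)
{
    return now < s->start || now > s->end;
}
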
736 static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
742 BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));
749 if (throtl_slice_used(tg, rw))
760 throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice);
762 time_elapsed = rounddown(jiffies - tg->slice_start[rw],
767 bytes_trim = calculate_bytes_allowed(tg_bps_limit(tg, rw),
769 tg->carryover_bytes[rw];
770 io_trim = calculate_io_allowed(tg_iops_limit(tg, rw), time_elapsed) +
771 tg->carryover_ios[rw];
775 tg->carryover_bytes[rw] = 0;
776 if ((long long)tg->bytes_disp[rw] >= bytes_trim)
777 tg->bytes_disp[rw] -= bytes_trim;
779 tg->bytes_disp[rw] = 0;
781 tg->carryover_ios[rw] = 0;
782 if ((int)tg->io_disp[rw] >= io_trim)
783 tg->io_disp[rw] -= io_trim;
785 tg->io_disp[rw] = 0;
787 tg->slice_start[rw] += time_elapsed;
791 rw == READ ? 'R' : 'W', time_elapsed / tg->td->throtl_slice,
792 bytes_trim, io_trim, tg->slice_start[rw], tg->slice_end[rw],
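
Note: throtl_trim_slice() (736) stops a long-lived slice from banking unbounded budget: elapsed time is rounded down to whole throtl_slice units, the bytes/IOs the limits would have allowed over that span (plus any carryover, which is then zeroed) are subtracted from the dispatch counters, and slice_start advances by the same span. A worked example, assuming calculate_bytes_allowed() is limit * elapsed_jiffies / HZ:

#include <stdio.h>
#include <stdint.h>

#define HZ 1000UL   /* assumption for the example */

/* sketch of calculate_bytes_allowed() */
static uint64_t bytes_allowed(uint64_t bps, unsigned long jiffy_elapsed)
{
    return bps * jiffy_elapsed / HZ;
}

int main(void)
{
    unsigned long throtl_slice = 100;   /* 100 ms slices */
    unsigned long elapsed = 250;        /* jiffies since slice_start */
    uint64_t bps = 10 << 20;            /* 10 MiB/s limit */
    long long bytes_disp = 3 << 20;     /* dispatched so far */

    /* trim whole slices only: 250 rounds down to 200 jiffies */
    unsigned long trim_span = elapsed - elapsed % throtl_slice;
    long long bytes_trim = (long long)bytes_allowed(bps, trim_span);

    if (bytes_disp >= bytes_trim)
        bytes_disp -= bytes_trim;       /* 1 MiB stays charged */
    else
        bytes_disp = 0;

    printf("trimmed %lld bytes, %lld still counted\n",
           bytes_trim, bytes_disp);
    return 0;
}

io_trim is computed the same way from the iops limit; slice_start then advances by trim_span so the remaining window stays consistent.
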
796 static void __tg_update_carryover(struct throtl_grp *tg, bool rw)
798 unsigned long jiffy_elapsed = jiffies - tg->slice_start[rw];
799 u64 bps_limit = tg_bps_limit(tg, rw);
800 u32 iops_limit = tg_iops_limit(tg, rw);
809 tg->carryover_bytes[rw] +=
811 tg->bytes_disp[rw];
813 tg->carryover_ios[rw] +=
815 tg->io_disp[rw];
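
Note: __tg_update_carryover() (796) runs when limits change mid-slice: the budget earned so far minus the budget spent is folded into carryover_bytes/carryover_ios, so the new limit neither forgives nor double-charges what already happened. The delta goes negative when the group over-dispatched. A sketch of the bytes side, under the same calculate_bytes_allowed() assumption as above:

#include <stdint.h>

#define HZ 1000     /* assumption for the sketch */

/* budget earned over the elapsed part of the slice minus budget spent;
 * this is what gets added into carryover_bytes[rw] */
static long long carryover_delta(uint64_t bps, unsigned long jiffy_elapsed,
                                 unsigned long long bytes_disp)
{
    long long allowed = (long long)(bps * jiffy_elapsed / HZ);

    return allowed - (long long)bytes_disp;
}
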
834 bool rw = bio_data_dir(bio);
842 jiffy_elapsed = jiffies - tg->slice_start[rw];
847 tg->carryover_ios[rw];
848 if (io_allowed > 0 && tg->io_disp[rw] + 1 <= io_allowed)
859 bool rw = bio_data_dir(bio);
870 jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
878 tg->carryover_bytes[rw];
879 if (bytes_allowed > 0 && tg->bytes_disp[rw] + bio_size <= bytes_allowed)
883 extra_bytes = tg->bytes_disp[rw] + bio_size - bytes_allowed;
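
Note: tg_within_iops_limit() (834) and tg_within_bps_limit() (859) answer "may this bio go now, and if not, how long to wait": compute the budget allowed since slice_start plus carryover, admit the bio if it fits, otherwise derive the wait from the overrun divided by the rate. A sketch of the bps side; the kernel's rounding differs slightly (it adds a jiffy of slack):

#include <stdint.h>

#define HZ 1000     /* assumption for the sketch */

/* 0 if the bio fits in the current budget, else jiffies to wait until
 * the overrun has been earned back; assumes bps > 0 */
static unsigned long bps_wait(uint64_t bps, unsigned long jiffy_elapsed,
                              long long carryover_bytes,
                              unsigned long long bytes_disp,
                              unsigned int bio_size)
{
    long long allowed = (long long)(bps * jiffy_elapsed / HZ) +
                        carryover_bytes;
    long long extra;

    if (allowed > 0 && (long long)(bytes_disp + bio_size) <= allowed)
        return 0;                       /* dispatch now */

    extra = (long long)(bytes_disp + bio_size) - allowed;
    return (unsigned long)((extra * HZ + (long long)bps - 1) /
                           (long long)bps);     /* round up */
}
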
904 bool rw = bio_data_dir(bio);
906 u64 bps_limit = tg_bps_limit(tg, rw);
907 u32 iops_limit = tg_iops_limit(tg, rw);
915 BUG_ON(tg->service_queue.nr_queued[rw] &&
916 bio != throtl_peek_queued(&tg->service_queue.queued[rw]));
933 if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw]))
934 throtl_start_new_slice(tg, rw, true);
936 if (time_before(tg->slice_end[rw],
938 throtl_extend_slice(tg, rw,
955 if (time_before(tg->slice_end[rw], jiffies + max_wait))
956 throtl_extend_slice(tg, rw, jiffies + max_wait);
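
Note: tg_may_dispatch() (904) wraps both checks: restart the slice if it is used up and nothing is queued, otherwise make sure slice_end reaches at least one throtl_slice ahead; if either limit says wait, report max(bps_wait, iops_wait) and extend the slice to cover the wait. A compilable skeleton with the two waits passed in; the helpers here are simplified stand-ins, not the kernel functions:

#include <stdbool.h>

struct grp {
    unsigned long slice_start, slice_end, throtl_slice;
    unsigned int nr_queued;
};

static unsigned long now;   /* stands in for jiffies */

static bool slice_used(const struct grp *g)
{
    return now < g->slice_start || now > g->slice_end;
}

static void start_new_slice(struct grp *g)
{
    g->slice_start = now;
    g->slice_end = now + g->throtl_slice;
}

static void extend_slice(struct grp *g, unsigned long end)
{
    if (g->slice_end < end)
        g->slice_end = end;     /* kernel also rounds up to throtl_slice */
}

static bool may_dispatch(struct grp *g, unsigned long bps_wait,
                         unsigned long iops_wait, unsigned long *wait)
{
    /* a used-up slice with an empty queue can simply be restarted */
    if (slice_used(g) && !g->nr_queued)
        start_new_slice(g);
    else
        extend_slice(g, now + g->throtl_slice);

    if (!bps_wait && !iops_wait)
        return true;            /* both limits admit the bio now */

    *wait = bps_wait > iops_wait ? bps_wait : iops_wait;
    extend_slice(g, now + *wait);   /* the slice must cover the wait */
    return false;
}
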
963 bool rw = bio_data_dir(bio);
968 tg->bytes_disp[rw] += bio_size;
969 tg->last_bytes_disp[rw] += bio_size;
972 tg->io_disp[rw]++;
973 tg->last_io_disp[rw]++;
989 bool rw = bio_data_dir(bio);
992 qn = &tg->qnode_on_self[rw];
1000 if (!sq->nr_queued[rw])
1003 throtl_qnode_add_bio(bio, qn, &sq->queued[rw]);
1005 sq->nr_queued[rw]++;
1036 struct throtl_grp *parent_tg, bool rw)
1038 if (throtl_slice_used(parent_tg, rw)) {
1039 throtl_start_new_slice_with_credit(parent_tg, rw,
1040 child_tg->slice_start[rw]);
1045 static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
1059 bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put);
1060 sq->nr_queued[rw]--;
1072 throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg);
1073 start_parent_slice_with_credit(tg, parent_tg, rw);
1076 throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw],
1077 &parent_sq->queued[rw]);
1078 BUG_ON(tg->td->nr_queued[rw] <= 0);
1079 tg->td->nr_queued[rw]--;
1082 throtl_trim_slice(tg, rw);
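
Note: tg_dispatch_one_bio() (1045) moves the head bio one level up the tree: if the parent group is throttled too, the bio is re-queued there through this child's qnode_on_parent and the parent's slice is started with credit from the child's slice_start; at the top of the subtree, td->nr_queued drops and the bio leaves throttling; either way the child's slice is trimmed afterward. A toy model of that single step, with qnodes and slice credit collapsed into a plain list:

#include <stddef.h>

struct bio { struct bio *next; };

struct grp {
    struct grp *parent;     /* NULL at the top of the throttled subtree */
    struct bio *queued;     /* one direction's queue, head first */
    unsigned int nr_queued;
};

/* caller guarantees the queue is non-empty (see the BUG_ON at 915) */
static struct bio *dispatch_one(struct grp *g, unsigned int *td_nr_queued)
{
    struct bio *bio = g->queued;

    g->queued = bio->next;
    g->nr_queued--;

    if (g->parent) {
        /* parent is throttled too: queue there and let it recheck */
        bio->next = g->parent->queued;
        g->parent->queued = bio;
        g->parent->nr_queued++;
        return NULL;
    }
    /* top of the subtree: hand the bio back for real dispatch */
    (*td_nr_queued)--;
    return bio;
}
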
1258 int rw;
1263 for (rw = READ; rw <= WRITE; rw++)
1264 while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL)))
1829 static bool throtl_low_limit_reached(struct throtl_grp *tg, int rw)
1832 bool limit = tg->bps[rw][LIMIT_LOW] || tg->iops[rw][LIMIT_LOW];
1840 return !limit || sq->nr_queued[rw];
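
Note: throtl_low_limit_reached() (1829) treats an unconfigured low limit as trivially reached; otherwise the limit counts as reached once bios are queued, i.e. the group is actually being held back at its low limit. The predicate, same shape as the listing:

#include <stdbool.h>
#include <stdint.h>

/* reached when there is nothing to enforce, or when bios are already
 * waiting behind the low limit */
static bool low_limit_reached(uint64_t bps_low, unsigned int iops_low,
                              unsigned int nr_queued)
{
    bool limit = bps_low || iops_low;

    return !limit || nr_queued;
}
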
2075 int i, cpu, rw;
2086 for (rw = READ; rw <= WRITE; rw++) {
2088 struct latency_bucket *tmp = &td->tmp_buckets[rw][i];
2094 bucket = per_cpu_ptr(td->latency_buckets[rw],
2105 latency[rw] = tmp->total_latency;
2109 latency[rw] /= samples;
2110 if (latency[rw] == 0)
2112 avg_latency[rw][i].latency = latency[rw];
2117 for (rw = READ; rw <= WRITE; rw++) {
2119 if (!avg_latency[rw][i].latency) {
2120 if (td->avg_buckets[rw][i].latency < last_latency[rw])
2121 td->avg_buckets[rw][i].latency =
2122 last_latency[rw];
2126 if (!td->avg_buckets[rw][i].valid)
2127 latency[rw] = avg_latency[rw][i].latency;
2129 latency[rw] = (td->avg_buckets[rw][i].latency * 7 +
2130 avg_latency[rw][i].latency) >> 3;
2132 td->avg_buckets[rw][i].latency = max(latency[rw],
2133 last_latency[rw]);
2134 td->avg_buckets[rw][i].valid = true;
2135 last_latency[rw] = td->avg_buckets[rw][i].latency;
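
Note: throtl_update_latency_buckets() (2075 ff.) folds per-cpu latency samples into per-size-bucket estimates, smooths each with a 7/8 exponential moving average (2129-2130), then takes max() against the previous bucket's value (2132-2133) so larger requests are never expected to complete faster than smaller ones. The EWMA step, worked:

#include <stdio.h>

/* the 7/8 EWMA from lines 2129-2130: 7 parts history, 1 part sample */
static unsigned long ewma(unsigned long old, unsigned long sample)
{
    return (old * 7 + sample) >> 3;
}

int main(void)
{
    unsigned long avg = 1000;   /* current bucket estimate, usec */

    /* a run of 2000 usec samples pulls the estimate up gradually:
     * 1125, 1234, 1329, 1412, 1485 */
    for (int i = 0; i < 5; i++) {
        avg = ewma(avg, 2000);
        printf("after sample %d: %lu\n", i + 1, avg);
    }
    return 0;
}
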
2183 bool rw = bio_data_dir(bio);
2199 if (tg->last_low_overflow_time[rw] == 0)
2200 tg->last_low_overflow_time[rw] = jiffies;
2204 if (sq->nr_queued[rw])
2209 tg->last_low_overflow_time[rw] = jiffies;
2231 throtl_trim_slice(tg, rw);
2238 qn = &tg->qnode_on_parent[rw];
2249 rw == READ ? 'R' : 'W',
2250 tg->bytes_disp[rw], bio->bi_iter.bi_size,
2251 tg_bps_limit(tg, rw),
2252 tg->io_disp[rw], tg_iops_limit(tg, rw),
2255 tg->last_low_overflow_time[rw] = jiffies;
2257 td->nr_queued[rw]++;
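
Note: the blk_throtl_bio() matches (2183 ff.) show the main loop: climb the hierarchy, and at each level either charge the bio and trim the slice (tg_may_dispatch() said yes) or record last_low_overflow_time, bump the queue counts, and park the bio on the blocking group, where the pending timer dispatches it later. A self-contained skeleton of the climb, with the budget check reduced to a toy counter:

#include <stdbool.h>
#include <stddef.h>

struct grp {
    struct grp *parent;     /* NULL above the throttled subtree */
    unsigned long budget;   /* toy stand-in for the slice budget */
    unsigned int nr_queued;
};

/* toy tg_may_dispatch(): one unit of budget admits one bio */
static bool may_dispatch(struct grp *g)
{
    return g->budget > 0;
}

/* charge and trim at every level that admits the bio; park it at the
 * first level that does not */
static bool throttle_bio(struct grp *g)
{
    while (g) {
        if (!may_dispatch(g)) {
            g->nr_queued++; /* parked; a timer dispatches it later */
            return true;    /* bio was throttled */
        }
        g->budget--;        /* throtl_charge_bio() + trim */
        g = g->parent;
    }
    return false;           /* bio proceeds immediately */
}
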
2287 const bool rw = op_is_write(op);
2298 latency = get_cpu_ptr(td->latency_buckets[rw]);
2301 put_cpu_ptr(td->latency_buckets[rw]);
2321 int rw = bio_data_dir(bio);
2349 threshold = tg->td->avg_buckets[rw][bucket].latency +
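
Note: the last matches are the completion side: a bio's measured latency is compared against threshold = smoothed bucket latency + the group's configured latency target (2349), and sustained misses keep the group pinned at its low limit. A sketch of the check; parameter names here are illustrative:

#include <stdbool.h>

static bool latency_ok(unsigned long bucket_avg_usec,
                       unsigned long latency_target_usec,
                       unsigned long bio_latency_usec)
{
    unsigned long threshold = bucket_avg_usec + latency_target_usec;

    return bio_latency_usec <= threshold;
}
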