Lines Matching defs:sched_domain (all hits are in block/kyber-iosched.c, the Kyber I/O scheduler)

212 				  unsigned int sched_domain, unsigned int type)
214 unsigned int *buckets = kqd->latency_buckets[sched_domain][type];
215 atomic_t *cpu_buckets = cpu_latency->buckets[sched_domain][type];
227 unsigned int sched_domain, unsigned int type,
230 unsigned int *buckets = kqd->latency_buckets[sched_domain][type];
243 if (!kqd->latency_timeout[sched_domain])
244 kqd->latency_timeout[sched_domain] = max(jiffies + HZ, 1UL);
246 time_is_after_jiffies(kqd->latency_timeout[sched_domain])) {
249 kqd->latency_timeout[sched_domain] = 0;
257 memset(buckets, 0, sizeof(kqd->latency_buckets[sched_domain][type]));
259 trace_kyber_latency(kqd->dev, kyber_domain_names[sched_domain],
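
The hits above (lines 212-259) are from flush_latency_buckets() and calculate_percentile(). Request latencies are binned into per-CPU histograms; the timer callback folds each CPU's counts into the global kqd->latency_buckets[sched_domain][type] with atomic_xchg(), then walks the buckets until it has covered the requested percentile rank, deferring the answer (lines 243-249) until 500 samples or one second have accumulated. A minimal userspace sketch of the fold-and-scan, not the kernel code itself; NR_BUCKETS is a stand-in for KYBER_LATENCY_BUCKETS (8 in current kernels):

	#include <stdatomic.h>

	#define NR_BUCKETS 8	/* stand-in for KYBER_LATENCY_BUCKETS */

	/* Fold one CPU's histogram into the global one, zeroing the source. */
	static void flush_buckets(unsigned int *global, atomic_uint *percpu)
	{
		unsigned int b;

		for (b = 0; b < NR_BUCKETS; b++)
			global[b] += atomic_exchange(&percpu[b], 0);
	}

	/*
	 * Index of the bucket holding the given percentile, or -1 if there are
	 * no samples. The kernel also zeroes the histogram (line 257) and emits
	 * a trace event (line 259) once it has produced an answer.
	 */
	static int percentile_bucket(const unsigned int *global, unsigned int pct)
	{
		unsigned int samples = 0, rank, b;

		for (b = 0; b < NR_BUCKETS; b++)
			samples += global[b];
		if (samples == 0)
			return -1;

		rank = (samples * pct + 99) / 100;	/* DIV_ROUND_UP(samples * pct, 100) */
		for (b = 0; b < NR_BUCKETS - 1; b++) {
			if (global[b] >= rank)
				break;
			rank -= global[b];
		}
		return b;
	}
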
267 unsigned int sched_domain, unsigned int depth)
269 depth = clamp(depth, 1U, kyber_depth[sched_domain]);
270 if (depth != kqd->domain_tokens[sched_domain].sb.depth) {
271 sbitmap_queue_resize(&kqd->domain_tokens[sched_domain], depth);
272 trace_kyber_adjust(kqd->dev, kyber_domain_names[sched_domain],
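
Lines 267-272 are kyber_resize_domain(): the requested depth is clamped to [1, kyber_depth[sched_domain]], and sbitmap_queue_resize() plus the trace event only run when the depth actually changes. A sketch of just that guard, with a hypothetical struct in place of the kernel's sbitmap_queue:

	struct domain_tokens {
		unsigned int depth;	/* current depth (sb.depth in the kernel) */
		unsigned int max;	/* kyber_depth[sched_domain] */
	};

	static void resize_domain(struct domain_tokens *dt, unsigned int depth)
	{
		/* clamp(depth, 1U, kyber_depth[sched_domain]) in the kernel */
		if (depth == 0)
			depth = 1;
		else if (depth > dt->max)
			depth = dt->max;

		if (depth != dt->depth)
			dt->depth = depth;	/* sbitmap_queue_resize() + trace here */
	}
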
280 unsigned int sched_domain;
289 for (sched_domain = 0; sched_domain < KYBER_OTHER; sched_domain++) {
290 flush_latency_buckets(kqd, cpu_latency, sched_domain,
292 flush_latency_buckets(kqd, cpu_latency, sched_domain,
302 for (sched_domain = 0; sched_domain < KYBER_OTHER; sched_domain++) {
305 p90 = calculate_percentile(kqd, sched_domain, KYBER_IO_LATENCY,
316 for (sched_domain = 0; sched_domain < KYBER_OTHER; sched_domain++) {
320 p99 = calculate_percentile(kqd, sched_domain,
332 p99 = kqd->domain_p99[sched_domain];
333 kqd->domain_p99[sched_domain] = -1;
335 kqd->domain_p99[sched_domain] = p99;
350 orig_depth = kqd->domain_tokens[sched_domain].sb.depth;
352 kyber_resize_domain(kqd, sched_domain, depth);
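
Lines 280-352 are from kyber_timer_fn(), the heartbeat of the scheduler. It flushes every CPU's buckets for both latency types (lines 289-292), flags congestion if any domain's p90 I/O latency lands in the "bad" half of the histogram (lines 302-305), and then rescales each domain's token depth from its p99 total latency; lines 332-335 remember the last observed p99 so that a congested window without enough fresh samples can still throttle, then reset it to -1 so stale data is not used twice. The new depth is linear in the p99 bucket, depth = orig_depth * (p99 + 1) >> KYBER_LATENCY_SHIFT, and is only applied when congestion was seen or the domain's own p99 is in the bad half. A hypothetical standalone helper showing the arithmetic under the in-tree constant KYBER_LATENCY_SHIFT = 2:

	/*
	 * With shift = 2 the bucket width is target/4, so a p99 in bucket 2
	 * (latency <= 3/4 of target) gives depth * 3/4, and a p99 in bucket 7
	 * (latency up to 2x target) gives depth * 2: throttle domains that
	 * meet their target, ease up on domains that miss it.
	 */
	static unsigned int scale_depth(unsigned int orig_depth, int p99_bucket)
	{
		const unsigned int shift = 2;	/* KYBER_LATENCY_SHIFT */

		return (orig_depth * (p99_bucket + 1)) >> shift;
	}
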
543 unsigned int sched_domain;
548 sched_domain = kyber_sched_domain(rq->cmd_flags);
549 sbitmap_queue_clear(&kqd->domain_tokens[sched_domain], nr,
574 unsigned int sched_domain = kyber_sched_domain(bio->bi_opf);
575 struct list_head *rq_list = &kcq->rq_list[sched_domain];
598 unsigned int sched_domain = kyber_sched_domain(rq->cmd_flags);
600 struct list_head *head = &kcq->rq_list[sched_domain];
608 sbitmap_set_bit(&khd->kcq_map[sched_domain],
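
Lines 543-608 are the per-request fast paths. kyber_finish_request() gives the domain token back with sbitmap_queue_clear(), which also wakes anyone waiting for one; the merge and insert hooks pick the per-domain list inside the software-queue context (kcq->rq_list[sched_domain]), and the insert path sets this queue's bit in khd->kcq_map[sched_domain] so dispatch knows where pending work lives. The classifier, kyber_sched_domain(), switches on the operation bits; a userspace model with hypothetical enums (the kernel switches on op & REQ_OP_MASK):

	enum domain { DOM_READ, DOM_WRITE, DOM_DISCARD, DOM_OTHER };
	enum op { OP_READ, OP_WRITE, OP_DISCARD, OP_FLUSH /* ... */ };

	/* Model of kyber_sched_domain(): everything unclassified is "other". */
	static enum domain sched_domain(enum op op)
	{
		switch (op) {
		case OP_READ:
			return DOM_READ;
		case OP_WRITE:
			return DOM_WRITE;
		case OP_DISCARD:
			return DOM_DISCARD;
		default:
			return DOM_OTHER;
		}
	}
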
622 unsigned int sched_domain, unsigned int type,
636 atomic_inc(&cpu_latency->buckets[sched_domain][type][bucket]);
643 unsigned int sched_domain;
646 sched_domain = kyber_sched_domain(rq->cmd_flags);
647 if (sched_domain == KYBER_OTHER)
651 target = kqd->latency_targets[sched_domain];
652 add_latency_sample(cpu_latency, sched_domain, KYBER_TOTAL_LATENCY,
654 add_latency_sample(cpu_latency, sched_domain, KYBER_IO_LATENCY, target,
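
Lines 622-654 record completions. kyber_completed_request() bails out for KYBER_OTHER (line 647), which has no latency target, and otherwise files two samples per request: one against total (queue + service) latency and one against I/O-only latency. add_latency_sample() maps a latency to a bucket by dividing by a quarter of the target (target >> KYBER_LATENCY_SHIFT) and clamping to the last bucket, then does a single atomic_inc on the per-CPU counter (line 636). A userspace model of the bucketing, constants hedged as before:

	#include <stdint.h>

	#define LATENCY_SHIFT	2			/* KYBER_LATENCY_SHIFT */
	#define NR_BUCKETS	(2 << LATENCY_SHIFT)	/* KYBER_LATENCY_BUCKETS */

	/* Map a latency to a histogram bucket; bucket width is target / 4. */
	static unsigned int latency_bucket(uint64_t target, uint64_t latency)
	{
		uint64_t divisor, bucket;

		if (latency == 0)
			return 0;

		divisor = target >> LATENCY_SHIFT;
		if (divisor == 0)
			divisor = 1;
		bucket = (latency - 1) / divisor;
		return bucket < NR_BUCKETS - 1 ? (unsigned int)bucket
					       : NR_BUCKETS - 1;
	}
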
663 unsigned int sched_domain;
673 list_splice_tail_init(&kcq->rq_list[flush_data->sched_domain],
682 unsigned int sched_domain,
687 .sched_domain = sched_domain,
691 sbitmap_for_each_set(&khd->kcq_map[sched_domain],
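
Lines 663-691 collect pending requests at dispatch time. kyber_flush_busy_kcqs() bundles the domain and a destination list into a flush_data struct, then visits only the software queues whose bit is set in khd->kcq_map[sched_domain], splicing each kcq's per-domain list onto the local one. The shape is the standard sbitmap_for_each_set() visitor: a bool-returning callback plus a void * cursor, where returning false stops the walk. A simplified sketch with the bitmap reduced to one word:

	#include <stdbool.h>

	typedef bool (*visit_fn)(unsigned int bit, void *data);

	/* Simplified stand-in for sbitmap_for_each_set(). */
	static void for_each_set(unsigned long map, visit_fn fn, void *data)
	{
		unsigned int bit;

		for (bit = 0; bit < 8 * sizeof(map); bit++) {
			if ((map & (1UL << bit)) && !fn(bit, data))
				break;	/* callback asked to stop early */
		}
	}
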
710 unsigned int sched_domain = khd->cur_domain;
711 struct sbitmap_queue *domain_tokens = &kqd->domain_tokens[sched_domain];
712 struct sbq_wait *wait = &khd->domain_wait[sched_domain];
725 &khd->wait_index[sched_domain]);
726 khd->domain_ws[sched_domain] = ws;
744 ws = khd->domain_ws[sched_domain];
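
Lines 710-744 are kyber_get_domain_token(), the counterpart of the sbitmap_queue_clear() at line 549. It closes a classic lost-wakeup race: try __sbitmap_queue_get(); on failure, add this hctx's sbq_wait entry to a wait queue and immediately try again, in case a token was freed before the entry went on the list; and if a token was obtained while the entry was queued, take the wait-queue lock and remove it so no stale waiter is left behind. A compilable model of the control flow, with a trivial CAS pool standing in for the sbitmap (all names here are hypothetical):

	#include <stdatomic.h>
	#include <stdbool.h>

	struct token_pool { atomic_int free; };
	struct waiter { bool queued; };

	/* Models __sbitmap_queue_get(): take a token or return -1. */
	static int try_get(struct token_pool *p)
	{
		int n = atomic_load(&p->free);

		while (n > 0) {
			if (atomic_compare_exchange_weak(&p->free, &n, n - 1))
				return n - 1;
		}
		return -1;
	}

	static int get_token(struct token_pool *pool, struct waiter *w)
	{
		int nr = try_get(pool);

		if (nr < 0 && !w->queued) {
			w->queued = true;	/* sbitmap_add_wait_queue() */
			/* Retry: a token may have been freed before we queued. */
			nr = try_get(pool);
		}
		if (nr >= 0 && w->queued)
			w->queued = false;	/* list_del_init() under ws lock */

		return nr;
	}
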