Lines matching refs:rq (references to the identifier `rq`) in kernel/sched/pelt.c
251 * and the cfs rq, to which they are attached, have the same position in the
346 int update_rt_rq_load_avg(u64 now, struct rq *rq, int running)
348 if (___update_load_sum(now, &rq->avg_rt,
353 ___update_load_avg(&rq->avg_rt, 1);
354 trace_pelt_rt_tp(rq);
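The matches above elide the middle of update_rt_rq_load_avg(). A plausible reconstruction, assuming the PELT helper signature ___update_load_sum(now, sa, load, runnable, running): the single `running` flag drives all three input signals, because an rt rq as a whole is either running on the CPU or it is not. The elided argument lines below are inferred from that signature, not quoted from the source.

        int update_rt_rq_load_avg(u64 now, struct rq *rq, int running)
        {
                if (___update_load_sum(now, &rq->avg_rt,
                                       running,     /* load */
                                       running,     /* runnable */
                                       running)) {  /* running */
                        /*
                         * The sum crossed a 1024us period boundary:
                         * fold it into the average and fire the
                         * tracepoint.
                         */
                        ___update_load_avg(&rq->avg_rt, 1);
                        trace_pelt_rt_tp(rq);
                        return 1;
                }

                return 0;
        }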
372 int update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
374 if (___update_load_sum(now, &rq->avg_dl,
379 ___update_load_avg(&rq->avg_dl, 1);
380 trace_pelt_dl_tp(rq);
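update_dl_rq_load_avg() mirrors the rt version exactly, operating on rq->avg_dl and firing trace_pelt_dl_tp(). What both maintain through ___update_load_sum()/___update_load_avg() is PELT's geometric average: time is chopped into 1024us segments and each older segment's contribution is decayed by y per segment, with y chosen so that y^32 = 0.5 (a 32ms half-life). The snippet below is a standalone userspace model of that arithmetic, useful for building intuition; the constants mirror the kernel's, but the code itself is a toy, not kernel source.

        #include <stdio.h>

        /*
         * Toy userspace model of PELT decay -- not kernel code.
         * Y is chosen so that Y^32 = 0.5: a segment's weight halves
         * every 32 segments (32ms at 1024us per segment).
         */
        #define SEG_US  1024.0
        #define Y       0.97857206      /* 0.5^(1/32) */

        int main(void)
        {
                double sum = 0.0;

                /* 200 fully busy segments, then 200 idle ones. */
                for (int s = 0; s < 400; s++) {
                        double busy = (s < 200) ? SEG_US : 0.0;

                        sum = sum * Y + busy;
                        if ((s + 1) % 100 == 0)
                                printf("segment %3d: sum=%6.0f (max ~%.0f)\n",
                                       s + 1, sum, SEG_US / (1.0 - Y));
                }
                return 0;
        }

Under sustained load the sum converges to SEG_US / (1 - Y), about 47788 here; the kernel's fixed-point equivalent is LOAD_AVG_MAX (47742), the small gap coming from integer rounding.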
403 int update_hw_load_avg(u64 now, struct rq *rq, u64 capacity)
405 if (___update_load_sum(now, &rq->avg_hw,
409 ___update_load_avg(&rq->avg_hw, 1);
410 trace_pelt_hw_tp(rq);
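update_hw_load_avg() breaks the rt/dl symmetry in one respect: its third parameter is a u64 capacity rather than a 0/1 running flag, so rq->avg_hw tracks a decayed average of how much compute capacity the hardware is withholding (e.g. thermal throttling), not merely whether throttling is active. Assuming the same helper signature as above, a sketch:

        int update_hw_load_avg(u64 now, struct rq *rq, u64 capacity)
        {
                /*
                 * 'capacity' is on the 0..1024 SCHED_CAPACITY_SCALE
                 * axis, so avg_hw becomes a decayed average of lost
                 * capacity rather than of a boolean state.
                 */
                if (___update_load_sum(now, &rq->avg_hw,
                                       capacity,
                                       capacity,
                                       capacity)) {
                        ___update_load_avg(&rq->avg_hw, 1);
                        trace_pelt_hw_tp(rq);
                        return 1;
                }

                return 0;
        }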
430 int update_irq_load_avg(struct rq *rq, u64 running)
439 running = cap_scale(running, arch_scale_freq_capacity(cpu_of(rq)));
440 running = cap_scale(running, arch_scale_cpu_capacity(cpu_of(rq)));
447 * of rq clock during which the metric is updated.
450 * We can safely remove running from rq->clock because
451 * rq->clock += delta with delta >= running
453 ret = ___update_load_sum(rq->clock - running, &rq->avg_irq,
457 ret += ___update_load_sum(rq->clock, &rq->avg_irq,
463 ___update_load_avg(&rq->avg_irq, 1);
464 trace_pelt_irq_tp(rq);
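Taken together, the irq matches show a two-phase update. irq time is not accounted in clock_task, so clock_pelt cannot be used; instead the raw running time is scaled twice with cap_scale() (in the kernel, (v * s) >> SCHED_CAPACITY_SHIFT) to put it on the same frequency- and CPU-invariant scale as every other PELT signal. For example, 100us of irq time on a CPU at half frequency (freq capacity 512) with cpu capacity 800 becomes roughly 100 * 512/1024 * 800/1024 ~= 39us of invariant time. The window since the last update is then split at rq->clock - running: the signal decays as idle up to that point and accrues as fully busy for the final running nanoseconds. The subtraction is safe because, as the matched comment notes, the clock advanced by delta >= running over that interval. A sketch of the tail, with the argument lists inferred from the ___update_load_sum(now, sa, load, runnable, running) signature:

        /* Phase 1: pure decay over the span where no irq was running. */
        ret  = ___update_load_sum(rq->clock - running, &rq->avg_irq,
                                  0, 0, 0);
        /* Phase 2: the last 'running' nanoseconds, 100% busy. */
        ret += ___update_load_sum(rq->clock, &rq->avg_irq,
                                  1, 1, 1);

        if (ret) {
                ___update_load_avg(&rq->avg_irq, 1);
                trace_pelt_irq_tp(rq);
        }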