Lines Matching defs:dl

64 	return container_of(dl_se, struct task_struct, dl);
69 return container_of(dl_rq, struct rq, dl);
84 return &rq_of_dl_se(dl_se)->dl;
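These first matches (lines 64, 69 and 84) are the accessor helpers that walk from an embedded sched_dl_entity or dl_rq back to the structure that contains it via container_of(). A minimal userspace sketch of the same pattern, with a hypothetical cut-down struct task standing in for the kernel's task_struct:

#include <stdio.h>
#include <stddef.h>

/* Recover a pointer to the enclosing struct from a pointer to one of its members. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct sched_dl_entity { unsigned long long deadline; };

/* Hypothetical stand-in for the kernel's task_struct. */
struct task { int pid; struct sched_dl_entity dl; };

static struct task *dl_task_of(struct sched_dl_entity *dl_se)
{
	return container_of(dl_se, struct task, dl);
}

int main(void)
{
	struct task t = { .pid = 42, .dl = { .deadline = 1000 } };

	/* Walking back from the embedded entity yields the owning task. */
	printf("pid=%d\n", dl_task_of(&t.dl)->pid);
	return 0;
}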
191 rq->dl.extra_bw += bw;
197 return &cpu_rq(i)->dl.dl_bw;
218 struct dl_rq *dl = container_of(dl_b, struct dl_rq, dl_bw);
220 dl->extra_bw += bw;
327 WARN_ON_ONCE(p->dl.flags & SCHED_FLAG_SUGOV);
333 if (p->dl.dl_non_contending) {
334 sub_running_bw(&p->dl, &rq->dl);
335 p->dl.dl_non_contending = 0;
343 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
346 __sub_rq_bw(p->dl.dl_bw, &rq->dl);
347 __add_rq_bw(new_bw, &rq->dl);
410 struct dl_rq *dl_rq = &rq->dl;
452 sub_rq_bw(dl_se, &rq->dl);
582 return dl_entity_preempt(&__node_2_pdl(a)->dl, &__node_2_pdl(b)->dl);
587 return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root.rb_root);
601 &rq->dl.pushable_dl_tasks_root,
604 rq->dl.earliest_dl.next = p->dl.deadline;
606 if (!rq->dl.overloaded) {
608 rq->dl.overloaded = 1;
614 struct dl_rq *dl_rq = &rq->dl;
623 dl_rq->earliest_dl.next = __node_2_pdl(leftmost)->dl.deadline;
627 if (!has_pushable_dl_tasks(rq) && rq->dl.overloaded) {
629 rq->dl.overloaded = 0;
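The pushable-task matches (lines 582-629) keep migratable deadline tasks in a cached rbtree ordered by absolute deadline, so the leftmost node is the earliest-deadline push candidate; the ordering ultimately reduces to the wrap-safe 64-bit comparison that dl_time_before() performs. A small sketch of that comparison, outside the kernel:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/*
 * Wrap-safe "deadline a is earlier than deadline b": interpreting the
 * difference as signed keeps the result correct across clock wrap-around,
 * which a plain "a < b" would not.
 */
static bool dl_time_before(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) < 0;
}

int main(void)
{
	uint64_t near_wrap = UINT64_MAX - 10;	/* deadline just before wrap */
	uint64_t after_wrap = 5;		/* deadline just after wrap  */

	printf("%d\n", dl_time_before(near_wrap, after_wrap));	/* 1: earlier */
	printf("%d\n", dl_time_before(after_wrap, near_wrap));	/* 0: later   */
	return 0;
}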
693 if (p->dl.dl_non_contending || p->dl.dl_throttled) {
700 sub_running_bw(&p->dl, &rq->dl);
701 sub_rq_bw(&p->dl, &rq->dl);
703 add_rq_bw(&p->dl, &later_rq->dl);
704 add_running_bw(&p->dl, &later_rq->dl);
706 sub_rq_bw(&p->dl, &rq->dl);
707 add_rq_bw(&p->dl, &later_rq->dl);
717 __dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
722 __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span));
838 * This could be the case for a !-dl task that is boosted.
1294 * Since rq->dl.running_bw and rq->dl.this_bw contain utilizations multiplied
1296 * Since rq->dl.bw_ratio contains 1 / Umax multiplied by 2^RATIO_SHIFT, dl_bw
1297 is multiplied by rq->dl.bw_ratio and shifted right by RATIO_SHIFT.
1305 u64 u_inact = rq->dl.this_bw - rq->dl.running_bw; /* Utot - Uact */
1313 if (u_inact + rq->dl.extra_bw > rq->dl.max_bw - dl_se->dl_bw)
1316 u_act = rq->dl.max_bw - u_inact - rq->dl.extra_bw;
1318 u_act = (u_act * rq->dl.bw_ratio) >> RATIO_SHIFT;
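Lines 1294-1318 belong to the GRUB reclaiming path: running_bw, this_bw, extra_bw and max_bw are utilizations scaled by 2^BW_SHIFT, bw_ratio is 1/Umax scaled by 2^RATIO_SHIFT, and the consumed runtime is scaled with multiplications and shifts instead of divisions. A self-contained sketch of that arithmetic, assuming BW_SHIFT = 20 and RATIO_SHIFT = 8 and using a hypothetical grub_scale() helper in place of the kernel's grub_reclaim():

#include <stdio.h>
#include <stdint.h>

#define BW_SHIFT	20
#define RATIO_SHIFT	8

/*
 * Charge for "delta" ns of execution under GRUB:
 * dq = (max{u, Umax - Uinact - Uextra} / Umax) * dt,
 * with all utilizations in 2^BW_SHIFT fixed point and
 * ratio = (1 / Umax) in 2^RATIO_SHIFT fixed point.
 */
static uint64_t grub_scale(uint64_t delta, uint64_t u, uint64_t u_inact,
			   uint64_t u_extra, uint64_t u_max, uint64_t ratio)
{
	uint64_t u_act;

	/* Compare this way round so the subtraction below cannot underflow. */
	if (u_inact + u_extra > u_max - u)
		u_act = u;
	else
		u_act = u_max - u_inact - u_extra;

	u_act = (u_act * ratio) >> RATIO_SHIFT;	/* divide by Umax */
	return (delta * u_act) >> BW_SHIFT;	/* drop the fixed-point scaling */
}

int main(void)
{
	uint64_t u_max = 1ULL << BW_SHIFT;	/* Umax = 1.0 */
	uint64_t ratio = 1ULL << RATIO_SHIFT;	/* 1/Umax = 1.0 */

	/* 25% task, 25% inactive, 25% extra: only half the delta is charged. */
	printf("%llu\n", (unsigned long long)
	       grub_scale(1000000, u_max / 4, u_max / 4, u_max / 4, u_max, ratio));
	return 0;
}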
1369 update_stats_dequeue_dl(&rq->dl, dl_se, 0);
1380 if (!is_leftmost(dl_se, &rq->dl))
1445 struct sched_dl_entity *dl_se = &curr->dl;
1490 sub_running_bw(&p->dl, dl_rq_of_se(&p->dl));
1491 sub_rq_bw(&p->dl, dl_rq_of_se(&p->dl));
1496 __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
1507 sub_running_bw(dl_se, &rq->dl);
1789 if (is_dl_boosted(&p->dl)) {
1802 if (p->dl.dl_throttled) {
1808 hrtimer_try_to_cancel(&p->dl.dl_timer);
1809 p->dl.dl_throttled = 0;
1821 p->dl.dl_throttled = 0;
1830 update_stats_wait_start_dl(dl_rq_of_se(&p->dl), &p->dl);
1835 enqueue_dl_entity(&p->dl, flags);
1837 if (dl_server(&p->dl))
1840 if (!task_current(rq, p) && !p->dl.dl_throttled && p->nr_cpus_allowed > 1)
1851 dequeue_dl_entity(&p->dl, flags);
1852 if (!p->dl.dl_throttled && !dl_server(&p->dl))
1874 rq->curr->dl.dl_yielded = 1;
1891 return (!rq->dl.dl_nr_running ||
1892 dl_time_before(p->dl.deadline,
1893 rq->dl.earliest_dl.curr));
1924 !dl_entity_preempt(&p->dl, &curr->dl)) &&
1962 if (p->dl.dl_non_contending) {
1964 sub_running_bw(&p->dl, &rq->dl);
1965 p->dl.dl_non_contending = 0;
1973 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
1976 sub_rq_bw(&p->dl, &rq->dl);
2003 if (!on_dl_rq(&p->dl) && need_pull_dl_task(rq, p)) {
2026 if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
2036 if ((p->dl.deadline == rq->curr->dl.deadline) &&
2055 struct sched_dl_entity *dl_se = &p->dl;
2056 struct dl_rq *dl_rq = &rq->dl;
2059 if (on_dl_rq(&p->dl))
2087 struct dl_rq *dl_rq = &rq->dl;
2125 start_hrtick_dl(rq, &p->dl);
2132 struct sched_dl_entity *dl_se = &p->dl;
2133 struct dl_rq *dl_rq = &rq->dl;
2135 if (on_dl_rq(&p->dl))
2141 if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
2163 if (hrtick_enabled_dl(rq) && queued && p->dl.runtime > 0 &&
2164 is_leftmost(&p->dl, &rq->dl))
2165 start_hrtick_dl(rq, &p->dl);
2201 next_node = rb_first_cached(&rq->dl.pushable_dl_tasks_root);
2368 p = __node_2_pdl(rb_first_cached(&rq->dl.pushable_dl_tasks_root));
2402 dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
2494 if (this_rq->dl.dl_nr_running &&
2495 dl_time_before(this_rq->dl.earliest_dl.curr,
2496 src_rq->dl.earliest_dl.next))
2507 if (src_rq->dl.dl_nr_running <= 1)
2517 if (p && dl_time_before(p->dl.deadline, dmin) &&
2526 if (dl_time_before(p->dl.deadline,
2527 src_rq->curr->dl.deadline))
2536 dmin = p->dl.deadline;
2570 !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
2601 __dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
2611 if (rq->dl.overloaded)
2615 if (rq->dl.dl_nr_running > 0)
2616 cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr);
2622 if (rq->dl.overloaded)
2655 __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
2677 * time is in the future). If the task switches back to dl before
2683 if (task_on_rq_queued(p) && p->dl.dl_runtime)
2684 task_non_contending(&p->dl);
2699 if (p->dl.dl_non_contending)
2700 sub_running_bw(&p->dl, &rq->dl);
2701 sub_rq_bw(&p->dl, &rq->dl);
2709 if (p->dl.dl_non_contending)
2710 p->dl.dl_non_contending = 0;
2717 if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
2729 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
2740 add_rq_bw(&p->dl, &rq->dl);
2747 if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
2776 if (!rq->dl.overloaded)
2785 if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
2795 dl_time_before(p->dl.deadline, rq->curr->dl.deadline))
2810 return p->dl.dl_throttled;
2814 DEFINE_SCHED_CLASS(dl) = {
2932 init_dl_rq_bw_ratio(&cpu_rq(cpu)->dl);
2958 if (new_bw == p->dl.dl_bw && task_has_dl_policy(p))
2972 if (hrtimer_active(&p->dl.inactive_timer))
2973 __dl_sub(dl_b, p->dl.dl_bw, cpus);
2977 !__dl_overflow(dl_b, cap, p->dl.dl_bw, new_bw)) {
2985 __dl_sub(dl_b, p->dl.dl_bw, cpus);
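Lines 2958-2985 are part of the admission-control path: each deadline task's bandwidth is its runtime/period ratio in 2^BW_SHIFT fixed point, and a parameter change is accepted only if replacing the old bandwidth with the new one keeps the summed bandwidth under the allowed limit. A simplified, hedged sketch of that test; the kernel's __dl_overflow() additionally scales the limit by CPU capacity and root-domain CPU count:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define BW_SHIFT	20
#define BW_UNIT		(1ULL << BW_SHIFT)

/* runtime/period as a 2^BW_SHIFT fixed-point ratio, like the kernel's to_ratio(). */
static uint64_t to_ratio(uint64_t period, uint64_t runtime)
{
	return (runtime << BW_SHIFT) / period;
}

/* Reject if swapping old_bw for new_bw would push the total over the limit. */
static bool dl_overflow(uint64_t limit, uint64_t total,
			uint64_t old_bw, uint64_t new_bw)
{
	return limit < total - old_bw + new_bw;
}

int main(void)
{
	uint64_t limit = 95 * BW_UNIT / 100;		/* e.g. 95% of one CPU  */
	uint64_t total = to_ratio(100000, 60000);	/* an admitted 60% task */

	printf("admit a 30%% task: %s\n",
	       dl_overflow(limit, total, 0, to_ratio(100000, 30000)) ? "no" : "yes");
	printf("admit a 50%% task: %s\n",
	       dl_overflow(limit, total, 0, to_ratio(100000, 50000)) ? "no" : "yes");
	return 0;
}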
3012 struct sched_dl_entity *dl_se = &p->dl;
3024 struct sched_dl_entity *dl_se = &p->dl;
3048 /* special dl tasks don't actually use any parameter */
3122 struct sched_dl_entity *dl_se = &p->dl;
3212 print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);