Lines Matching refs:rt

16 * period over which we measure -rt task CPU usage in us.
22 * part of the period that we allow rt tasks to run in us.
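
The two comments above describe the RT bandwidth throttling knobs, exposed via
/proc/sys/kernel/sched_rt_period_us and sched_rt_runtime_us. As an illustration
only, with the usual stock defaults rather than values read from this listing:

    /* Illustrative defaults, not quoted from the file being listed. */
    unsigned int sysctl_sched_rt_period  = 1000000;  /* us: 1 s measurement window    */
    int          sysctl_sched_rt_runtime =  950000;  /* us: 0.95 s of RT time allowed */
    /*
     * RT tasks may consume at most runtime out of every period (95% of CPU
     * time by default); writing -1 to sched_rt_runtime_us disables throttling.
     */
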
174 return container_of(rt_se, struct task_struct, rt);
234 rt_se->rt_rq = &rq->rt;
289 return container_of(rt_se, struct task_struct, rt);
294 return container_of(rt_rq, struct rq, rt);
308 return &rq->rt;
326 return rq->online && rq->rt.highest_prio.curr > prev->prio;
365 return !plist_head_empty(&rq->rt.pushable_tasks);
389 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
391 plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
394 if (p->prio < rq->rt.highest_prio.next)
395 rq->rt.highest_prio.next = p->prio;
397 if (!rq->rt.overloaded) {
399 rq->rt.overloaded = 1;
405 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
409 p = plist_first_entry(&rq->rt.pushable_tasks,
411 rq->rt.highest_prio.next = p->prio;
413 rq->rt.highest_prio.next = MAX_RT_PRIO-1;
415 if (rq->rt.overloaded) {
417 rq->rt.overloaded = 0;
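
The fragments above come from the enqueue/dequeue paths for "pushable" RT tasks,
i.e. runnable RT tasks allowed on more than one CPU. A paraphrased sketch of the
enqueue side, reconstructed here for illustration rather than quoted from the
listed file:

    /* Sketch only: keep pushable tasks on a priority-sorted plist and flag
     * the runqueue as RT-overloaded so other CPUs may try to pull from it. */
    static void enqueue_pushable_task_sketch(struct rq *rq, struct task_struct *p)
    {
            plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
            plist_node_init(&p->pushable_tasks, p->prio);
            plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);

            /* cache the best (numerically lowest) pushable priority */
            if (p->prio < rq->rt.highest_prio.next)
                    rq->rt.highest_prio.next = p->prio;

            if (!rq->rt.overloaded) {
                    rt_set_overload(rq);
                    rq->rt.overloaded = 1;
            }
    }
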
622 for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
661 return &cpu_rq(cpu)->rt;
910 * When we're idle and a woken (rt) task is
1003 struct sched_rt_entity *rt_se = &curr->rt;
1038 BUG_ON(&rq->rt != rt_rq);
1055 BUG_ON(&rq->rt != rt_rq);
1083 if (&rq->rt != rt_rq)
1099 if (&rq->rt != rt_rq)
1272 /* schedstats is not supported for rt group. */
1455 enqueue_top_rt_rq(&rq->rt);
1472 enqueue_top_rt_rq(&rq->rt);
1481 struct sched_rt_entity *rt_se = &p->rt;
1497 struct sched_rt_entity *rt_se = &p->rt;
1525 struct sched_rt_entity *rt_se = &p->rt;
1603 p->prio < cpu_rq(target)->rt.highest_prio.curr)
1643 if (!on_rt_rq(&p->rt) && need_pull_rt_task(rq, p)) {
1689 struct sched_rt_entity *rt_se = &p->rt;
1690 struct rt_rq *rt_rq = &rq->rt;
1693 if (on_rt_rq(&p->rt))
1703 * If prev task was rt, put_prev_task() has already updated the
1705 * rt task
1734 struct rt_rq *rt_rq = &rq->rt;
1770 struct sched_rt_entity *rt_se = &p->rt;
1771 struct rt_rq *rt_rq = &rq->rt;
1773 if (on_rt_rq(&p->rt))
1784 if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
1808 struct plist_head *head = &rq->rt.pushable_tasks;
1930 if (lowest_rq->rt.highest_prio.curr <= task->prio) {
1965 if (lowest_rq->rt.highest_prio.curr > task->prio)
1983 p = plist_first_entry(&rq->rt.pushable_tasks,
2007 if (!rq->rt.overloaded)
2284 /* Pass the IPI to the next rt overloaded queue */
2341 if (src_rq->rt.highest_prio.next >=
2342 this_rq->rt.highest_prio.curr)
2363 if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
2430 if (rq->rt.overloaded)
2435 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
2441 if (rq->rt.overloaded)
2450 * When switch from the rt queue, we bring ourselves to a position
2462 if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
2502 if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
2533 if (p->prio > rq->rt.highest_prio.curr)
2563 if (p->rt.watchdog_stamp != jiffies) {
2564 p->rt.timeout++;
2565 p->rt.watchdog_stamp = jiffies;
2569 if (p->rt.timeout > next) {
2589 struct sched_rt_entity *rt_se = &p->rt;
2603 if (--p->rt.time_slice)
2606 p->rt.time_slice = sched_rr_timeslice;
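
The fragments above are from the per-tick bookkeeping for RT tasks: the watchdog
counts runtime against the task's RLIMIT_RTTIME soft limit, and SCHED_RR tasks
have their timeslice decremented and refilled. A rough, paraphrased sketch of
that shape, assuming the usual helpers and omitting the requeue/resched details:

    /* Sketch only, not verbatim from the listed file. */
    static void task_tick_rt_sketch(struct rq *rq, struct task_struct *p)
    {
            /* watchdog: bump the RLIMIT_RTTIME counter at most once per jiffy */
            if (p->rt.watchdog_stamp != jiffies) {
                    p->rt.timeout++;
                    p->rt.watchdog_stamp = jiffies;
            }
            /* ... if p->rt.timeout now exceeds the soft limit (converted to
             * jiffies), the posix CPU timer code delivers SIGXCPU ... */

            /* round-robin timeslice handling; SCHED_FIFO slices never expire */
            if (p->policy != SCHED_RR)
                    return;

            if (--p->rt.time_slice)
                    return;

            p->rt.time_slice = sched_rr_timeslice;
            /* requeue p to the tail of its priority list and reschedule if
             * other tasks share that priority */
    }
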
2640 rt_rq = &cpu_rq(cpu)->rt;
2647 DEFINE_SCHED_CLASS(rt) = {
2920 struct rt_rq *rt_rq = &cpu_rq(i)->rt;