Lines Matching defs:rt_b (uses of struct rt_bandwidth *rt_b; line numbers refer to kernel/sched/rt.c)

11 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
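
Everywhere in this listing, rt_b points at a per-task-group RT bandwidth control block. For orientation, its definition from kernel/sched/sched.h in the same tree (field comments added here):

  struct rt_bandwidth {
          /* nests inside the rq lock: */
          raw_spinlock_t          rt_runtime_lock;  /* protects the fields below */
          ktime_t                 rt_period;        /* replenishment period */
          u64                     rt_runtime;       /* CPU-time budget per period */
          struct hrtimer          rt_period_timer;  /* fires once per rt_period */
          unsigned int            rt_period_active; /* is the timer armed? */
  };

The forward declaration at line 11 exists because sched_rt_period_timer() (line 71 onward) calls do_sched_rt_period_timer(), which is defined much later (line 856).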
71 struct rt_bandwidth *rt_b =
76 raw_spin_lock(&rt_b->rt_runtime_lock);
78 overrun = hrtimer_forward_now(timer, rt_b->rt_period);
82 raw_spin_unlock(&rt_b->rt_runtime_lock);
83 idle = do_sched_rt_period_timer(rt_b, overrun);
84 raw_spin_lock(&rt_b->rt_runtime_lock);
87 rt_b->rt_period_active = 0;
88 raw_spin_unlock(&rt_b->rt_runtime_lock);
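
Lines 71-88 are fragments of the hrtimer callback sched_rt_period_timer(). Reassembled with the elided lines filled in to match the upstream function (a sketch; verify against your tree), the callback forwards the timer one period at a time and replenishes once per overrun, dropping rt_runtime_lock around the call because do_sched_rt_period_timer() takes per-rq locks:

  static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
  {
          struct rt_bandwidth *rt_b =
                  container_of(timer, struct rt_bandwidth, rt_period_timer);
          int idle = 0;
          int overrun;

          raw_spin_lock(&rt_b->rt_runtime_lock);
          for (;;) {
                  overrun = hrtimer_forward_now(timer, rt_b->rt_period);
                  if (!overrun)
                          break;

                  /* Replenish without holding our own lock. */
                  raw_spin_unlock(&rt_b->rt_runtime_lock);
                  idle = do_sched_rt_period_timer(rt_b, overrun);
                  raw_spin_lock(&rt_b->rt_runtime_lock);
          }
          if (idle)
                  rt_b->rt_period_active = 0;
          raw_spin_unlock(&rt_b->rt_runtime_lock);

          return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
  }

When nothing is throttled and nothing is runnable, the callback reports idle, clears rt_period_active (line 87), and lets the timer die; start_rt_bandwidth() re-arms it on the next RT activity.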
93 void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
95 rt_b->rt_period = ns_to_ktime(period);
96 rt_b->rt_runtime = runtime;
98 raw_spin_lock_init(&rt_b->rt_runtime_lock);
100 hrtimer_init(&rt_b->rt_period_timer, CLOCK_MONOTONIC,
102 rt_b->rt_period_timer.function = sched_rt_period_timer;
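
init_rt_bandwidth() (lines 93-102) records the period and budget, initializes the lock, and binds the timer callback; the hrtimer_init() mode argument elided after line 100 is HRTIMER_MODE_REL_HARD in recent trees. A typical call site, from sched_init() in kernel/sched/core.c (quoted from memory; verify against your tree):

  init_rt_bandwidth(&def_rt_bandwidth,
                    global_rt_period(), global_rt_runtime());

where global_rt_period()/global_rt_runtime() scale the sysctl_sched_rt_period/sysctl_sched_rt_runtime sysctls from microseconds to nanoseconds.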
105 static inline void do_start_rt_bandwidth(struct rt_bandwidth *rt_b)
107 raw_spin_lock(&rt_b->rt_runtime_lock);
108 if (!rt_b->rt_period_active) {
109 rt_b->rt_period_active = 1;
118 hrtimer_forward_now(&rt_b->rt_period_timer, ns_to_ktime(0));
119 hrtimer_start_expires(&rt_b->rt_period_timer,
122 raw_spin_unlock(&rt_b->rt_runtime_lock);
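
do_start_rt_bandwidth() (lines 105-122) arms the period timer at most once: rt_period_active gates re-arming. The hrtimer_forward_now(..., ns_to_ktime(0)) at line 118 looks odd but is deliberate: it advances the expiry to now so the next period is measured from this moment (the in-tree comment explains that SCHED_DEADLINE may have consumed bandwidth without resetting the period). The elided mode argument for the start call at line 119 is HRTIMER_MODE_ABS_PINNED_HARD. The whole function, reassembled as a sketch:

  static inline void do_start_rt_bandwidth(struct rt_bandwidth *rt_b)
  {
          raw_spin_lock(&rt_b->rt_runtime_lock);
          if (!rt_b->rt_period_active) {
                  rt_b->rt_period_active = 1;
                  /* Re-sync the period to now, then arm the timer. */
                  hrtimer_forward_now(&rt_b->rt_period_timer, ns_to_ktime(0));
                  hrtimer_start_expires(&rt_b->rt_period_timer,
                                        HRTIMER_MODE_ABS_PINNED_HARD);
          }
          raw_spin_unlock(&rt_b->rt_runtime_lock);
  }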
125 static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
127 if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
130 do_start_rt_bandwidth(rt_b);
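
The guard at line 127 skips arming the timer when RT throttling is globally off or this group's budget is unlimited, in which case there is nothing to replenish. Both predicates are one-liners elsewhere in the tree (quoted for context):

  /* kernel/sched/sched.h */
  #define RUNTIME_INF             ((u64)~0ULL)

  /* kernel/sched/rt.c */
  static inline int rt_bandwidth_enabled(void)
  {
          return sysctl_sched_rt_runtime >= 0;
  }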
162 static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
164 hrtimer_cancel(&rt_b->rt_period_timer);
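
Teardown is symmetric and synchronous: hrtimer_cancel() at line 164 waits for a concurrently running callback to finish, so destroy_rt_bandwidth() must not be called with rt_runtime_lock held (the callback takes that lock). The complete function:

  static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
  {
          hrtimer_cancel(&rt_b->rt_period_timer);
  }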
597 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
599 return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
659 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
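
There are two definitions of sched_rt_period_rt_rq() because the helper is compiled per configuration. The CONFIG_RT_GROUP_SCHED version (line 597) maps rt_b back to its owning task_group via container_of() and indexes that group's per-CPU rt_rq array. The !CONFIG_RT_GROUP_SCHED version (line 659) can ignore rt_b, since only def_rt_bandwidth exists, and returns the CPU's root rt_rq; its body, elided from the listing:

  static inline
  struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
  {
          return &cpu_rq(cpu)->rt;
  }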
673 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
675 return (hrtimer_active(&rt_b->rt_period_timer) ||
676 rt_rq->rt_time < rt_b->rt_runtime);
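
Lines 673-676 form the body of sched_rt_bandwidth_account(): RT runtime keeps being accounted while either a replenishment is still pending (the period timer is active) or the rt_rq is under budget. Reassembled:

  static inline bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
  {
          struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

          return (hrtimer_active(&rt_b->rt_period_timer) ||
                  rt_rq->rt_time < rt_b->rt_runtime);
  }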
685 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
692 raw_spin_lock(&rt_b->rt_runtime_lock);
693 rt_period = ktime_to_ns(rt_b->rt_period);
695 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
729 raw_spin_unlock(&rt_b->rt_runtime_lock);
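
Lines 685-729 come from do_balance_runtime(), which lets a starved rt_rq borrow unused budget from sibling CPUs in the same root domain: under rt_b->rt_runtime_lock it walks the other CPUs' rt_rqs, takes an even share of each sibling's spare runtime (spare divided by the number of CPUs in the domain), and stops once the local budget reaches a full period, since more than rt_period of runtime per period is meaningless. A condensed sketch of the loop at line 695 (simplified, not verbatim):

  for_each_cpu(i, rd->span) {
          struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
          s64 diff;

          if (iter == rt_rq)              /* can't borrow from ourselves */
                  continue;

          raw_spin_lock(&iter->rt_runtime_lock);
          diff = iter->rt_runtime - iter->rt_time;        /* sibling's spare */
          if (iter->rt_runtime != RUNTIME_INF && diff > 0) {
                  diff = div_u64((u64)diff, weight);      /* take a fair share */
                  if (rt_rq->rt_runtime + diff > rt_period)
                          diff = rt_period - rt_rq->rt_runtime;
                  iter->rt_runtime -= diff;
                  rt_rq->rt_runtime += diff;
          }
          raw_spin_unlock(&iter->rt_runtime_lock);

          if (rt_rq->rt_runtime == rt_period)
                  break;                  /* local budget is full */
  }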
745 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
749 raw_spin_lock(&rt_b->rt_runtime_lock);
757 rt_rq->rt_runtime == rt_b->rt_runtime)
766 want = rt_b->rt_runtime - rt_rq->rt_runtime;
772 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
810 raw_spin_unlock(&rt_b->rt_runtime_lock);
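
Lines 745-810 come from __disable_runtime(), run when a rq is detached from its root domain. The early-out at line 757 is half of a two-part test; the full condition, with its elided first half (per the upstream source), is:

  if (rt_rq->rt_runtime == RUNTIME_INF ||
      rt_rq->rt_runtime == rt_b->rt_runtime)
          goto balanced;  /* never lent or borrowed; nothing to settle */

want at line 766 is the difference between the default budget and the current one: positive if this rt_rq lent runtime out (the loop at line 772 greedily reclaims it from siblings), negative if it borrowed (the loop hands the excess back). Afterwards the function sets rt_rq->rt_runtime = RUNTIME_INF so the borrowing logic treats this rq as disabled.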
829 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
831 raw_spin_lock(&rt_b->rt_runtime_lock);
833 rt_rq->rt_runtime = rt_b->rt_runtime;
837 raw_spin_unlock(&rt_b->rt_runtime_lock);
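
__enable_runtime() (lines 829-837) undoes the above when the rq rejoins a root domain: every rt_rq gets its budget reset to the group default and its accounting cleared. The full locked section (lines 831-837, elided lines filled in from the upstream source) reads:

  raw_spin_lock(&rt_b->rt_runtime_lock);
  raw_spin_lock(&rt_rq->rt_runtime_lock);
  rt_rq->rt_runtime = rt_b->rt_runtime;   /* back to the default budget */
  rt_rq->rt_time = 0;                     /* forget consumed runtime */
  rt_rq->rt_throttled = 0;                /* and any throttle state */
  raw_spin_unlock(&rt_rq->rt_runtime_lock);
  raw_spin_unlock(&rt_b->rt_runtime_lock);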
856 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
872 if (rt_b == &root_task_group.rt_bandwidth)
877 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
888 rt_rq->rt_runtime = rt_b->rt_runtime;
935 if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
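
Lines 856-935 are from do_sched_rt_period_timer(), the replenishment routine itself. For each elapsed period it walks every CPU in the period mask; the special case at line 872 widens the span to every online CPU for the root task group, so rqs outside the current root domain still get replenished. Line 888 refreshes rt_runtime from the group default (done when the RT_RUNTIME_SHARE scheduler feature is off, undoing per-CPU borrowing each period). The per-CPU decay step, condensed (sketch, not verbatim):

  if (rt_rq->rt_time) {
          u64 runtime;

          raw_spin_lock(&rt_rq->rt_runtime_lock);
          if (rt_rq->rt_throttled)
                  balance_runtime(rt_rq);         /* try borrowing first */
          runtime = rt_rq->rt_runtime;
          /* One budget's worth of rt_time is forgiven per elapsed period. */
          rt_rq->rt_time -= min(rt_rq->rt_time, overrun * runtime);
          if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
                  rt_rq->rt_throttled = 0;
                  enqueue = 1;                    /* re-enqueue this rt_rq */
          }
          raw_spin_unlock(&rt_rq->rt_runtime_lock);
  }

The test at line 935 returns 1 ("idle", letting the timer stop) when nothing is throttled and throttling is globally off or the budget is unlimited; otherwise the function returns whether every rt_rq was idle.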
969 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
975 if (likely(rt_b->rt_runtime)) {
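
Lines 969-975 are from sched_rt_runtime_exceeded(), the throttle decision called from update_curr_rt() as RT runtime is consumed. After a last attempt to borrow via balance_runtime(), an rt_rq whose rt_time exceeds its budget is throttled and dequeued; the likely(rt_b->rt_runtime) test at line 975 avoids throttling groups that were never assigned any runtime, for which replenishment would never help. Condensed from the upstream body (sketch):

  if (rt_rq->rt_time > runtime) {
          struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

          if (likely(rt_b->rt_runtime)) {
                  rt_rq->rt_throttled = 1;
                  printk_deferred_once("sched: RT throttling activated\n");
          } else {
                  /* Zero budget: replenishing 0 ns is pointless, just reset. */
                  rt_rq->rt_time = 0;
          }

          if (rt_rq_throttled(rt_rq)) {
                  sched_rt_rq_dequeue(rt_rq);     /* throttle: off the rq */
                  return 1;
          }
  }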