Matches restricted to /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/kernel/

Lines Matching defs:rt_rq

18 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
20 return rt_rq->rq;
23 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
25 return rt_se->rt_rq;
37 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
39 return container_of(rt_rq, struct rq, rt);
42 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
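
The two rq_of_rt_rq()/rt_rq_of_se() definitions above are the group-scheduling and plain variants: with CONFIG_RT_GROUP_SCHED each rt_rq and rt_se carries an explicit back-pointer (lines 18-25), while without it the single per-CPU rt_rq is embedded in struct rq and container_of() recovers its owner (lines 37-42). A minimal userspace sketch of the embedded case, with made-up model types rather than the kernel structs:

    #include <stddef.h>
    #include <stdio.h>

    struct rt_rq_model { int rt_nr_running; };
    struct rq_model    { int cpu; struct rt_rq_model rt; };

    /* Recover the owning rq from the address of its embedded rt member,
     * mirroring container_of(rt_rq, struct rq, rt). */
    static struct rq_model *rq_of_rt_rq_model(struct rt_rq_model *rt_rq)
    {
        return (struct rq_model *)((char *)rt_rq - offsetof(struct rq_model, rt));
    }

    int main(void)
    {
        struct rq_model rq = { .cpu = 3 };
        printf("cpu %d\n", rq_of_rt_rq_model(&rq.rt)->cpu);  /* prints "cpu 3" */
        return 0;
    }
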
86 static void update_rt_migration(struct rt_rq *rt_rq)
88 if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
89 if (!rt_rq->overloaded) {
90 rt_set_overload(rq_of_rt_rq(rt_rq));
91 rt_rq->overloaded = 1;
93 } else if (rt_rq->overloaded) {
94 rt_clear_overload(rq_of_rt_rq(rt_rq));
95 rt_rq->overloaded = 0;
99 static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
104 rt_rq = &rq_of_rt_rq(rt_rq)->rt;
106 rt_rq->rt_nr_total++;
108 rt_rq->rt_nr_migratory++;
110 update_rt_migration(rt_rq);
113 static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
118 rt_rq = &rq_of_rt_rq(rt_rq)->rt;
120 rt_rq->rt_nr_total--;
122 rt_rq->rt_nr_migratory--;
124 update_rt_migration(rt_rq);
155 void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
160 void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
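
update_rt_migration() and its inc/dec helpers keep per-rt_rq counters of queued RT tasks and of those allowed on more than one CPU; the runqueue is flagged overloaded only while more than one RT task is queued and at least one is migratory. A rough standalone model of that hysteresis, with rt_set_overload()/rt_clear_overload() reduced to flag updates (in the kernel they also maintain root-domain state):

    struct rt_rq_m {
        unsigned long rt_nr_total;      /* all RT tasks queued here */
        unsigned long rt_nr_migratory;  /* those allowed on more than one CPU */
        int overloaded;
    };

    void update_rt_migration_m(struct rt_rq_m *rt_rq)
    {
        if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
            if (!rt_rq->overloaded)
                rt_rq->overloaded = 1;   /* kernel: rt_set_overload(rq) */
        } else if (rt_rq->overloaded) {
            rt_rq->overloaded = 0;       /* kernel: rt_clear_overload(rq) */
        }
    }
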
173 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
175 if (!rt_rq->tg)
178 return rt_rq->rt_runtime;
181 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
183 return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
186 #define for_each_leaf_rt_rq(rt_rq, rq) \
187 list_for_each_entry_rcu(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)
192 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
200 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
203 struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
206 rt_se = rt_rq->tg->rt_se[this_cpu];
208 if (rt_rq->rt_nr_running) {
211 if (rt_rq->highest_prio.curr < curr->prio)
216 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
221 rt_se = rt_rq->tg->rt_se[this_cpu];
227 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
229 return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
234 struct rt_rq *rt_rq = group_rt_rq(rt_se);
237 if (rt_rq)
238 return !!rt_rq->rt_nr_boosted;
257 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
259 return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
262 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
264 return &rt_rq->tg->rt_bandwidth;
269 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
271 return rt_rq->rt_runtime;
274 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
279 #define for_each_leaf_rt_rq(rt_rq, rq) \
280 for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
285 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
290 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
292 if (rt_rq->rt_nr_running)
293 resched_task(rq_of_rt_rq(rt_rq)->curr);
296 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
300 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
302 return rt_rq->rt_throttled;
311 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
316 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
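
From here the listing shows the two parallel sets of bandwidth helpers. Under CONFIG_RT_GROUP_SCHED the runtime and period come from the owning task group's rt_bandwidth and for_each_leaf_rt_rq() walks rq->leaf_rt_rq_list (lines 186-187); without group scheduling there is exactly one rt_rq per runqueue, so the same iterator degenerates to a single pass (lines 279-280). A small compilable demo of that one-shot iterator shape, using model types rather than the kernel's:

    #include <stdio.h>

    struct rt_rq_m { int rt_nr_running; };
    struct rq_m    { struct rt_rq_m rt; };

    /* Same shape as the !RT_GROUP_SCHED macro at source lines 279-280:
     * visit the single embedded rt_rq once, then stop. */
    #define for_each_leaf_rt_rq_m(rt_rq, rq) \
        for ((rt_rq) = &(rq)->rt; (rt_rq); (rt_rq) = NULL)

    int main(void)
    {
        struct rq_m rq = { .rt = { .rt_nr_running = 2 } };
        struct rt_rq_m *it;

        for_each_leaf_rt_rq_m(it, &rq)
            printf("%d RT tasks queued\n", it->rt_nr_running);
        return 0;
    }
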
327 static int do_balance_runtime(struct rt_rq *rt_rq)
329 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
339 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
342 if (iter == rt_rq)
361 if (rt_rq->rt_runtime + diff > rt_period)
362 diff = rt_period - rt_rq->rt_runtime;
364 rt_rq->rt_runtime += diff;
366 if (rt_rq->rt_runtime == rt_period) {
385 struct rt_rq *rt_rq;
390 for_each_leaf_rt_rq(rt_rq, rq) {
391 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
396 raw_spin_lock(&rt_rq->rt_runtime_lock);
402 if (rt_rq->rt_runtime == RUNTIME_INF ||
403 rt_rq->rt_runtime == rt_b->rt_runtime)
405 raw_spin_unlock(&rt_rq->rt_runtime_lock);
412 want = rt_b->rt_runtime - rt_rq->rt_runtime;
418 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
424 if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
442 raw_spin_lock(&rt_rq->rt_runtime_lock);
453 rt_rq->rt_runtime = RUNTIME_INF;
454 raw_spin_unlock(&rt_rq->rt_runtime_lock);
470 struct rt_rq *rt_rq;
478 for_each_leaf_rt_rq(rt_rq, rq) {
479 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
482 raw_spin_lock(&rt_rq->rt_runtime_lock);
483 rt_rq->rt_runtime = rt_b->rt_runtime;
484 rt_rq->rt_time = 0;
485 rt_rq->rt_throttled = 0;
486 raw_spin_unlock(&rt_rq->rt_runtime_lock);
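
do_balance_runtime() lets an rt_rq that has exhausted its budget borrow spare runtime from the other CPUs' rt_rqs in the same bandwidth group, never accumulating more than a full period; the blocks around source lines 385-454 and 470-486 are the runtime disable/enable paths, which hand borrowed time back when a runqueue goes offline and reset runtime, accumulated time and the throttle flag when it comes back online. A standalone sketch of the clamping applied to each borrowed slice (hypothetical helper name, not a kernel function):

    #include <stdint.h>

    /* Cap a proposed transfer so the borrower never holds more than a full
     * period of runtime, mirroring the diff clamping at source lines 361-364. */
    uint64_t clamp_borrowed_slice(uint64_t rt_runtime, uint64_t rt_period,
                                  uint64_t diff)
    {
        if (rt_runtime + diff > rt_period)
            diff = rt_period - rt_runtime;
        return diff;
    }
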
500 static int balance_runtime(struct rt_rq *rt_rq)
504 if (rt_rq->rt_time > rt_rq->rt_runtime) {
505 raw_spin_unlock(&rt_rq->rt_runtime_lock);
506 more = do_balance_runtime(rt_rq);
507 raw_spin_lock(&rt_rq->rt_runtime_lock);
513 static inline int balance_runtime(struct rt_rq *rt_rq)
530 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
531 struct rq *rq = rq_of_rt_rq(rt_rq);
534 if (rt_rq->rt_time) {
537 raw_spin_lock(&rt_rq->rt_runtime_lock);
538 if (rt_rq->rt_throttled)
539 balance_runtime(rt_rq);
540 runtime = rt_rq->rt_runtime;
541 rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
542 if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
543 rt_rq->rt_throttled = 0;
546 if (rt_rq->rt_time || rt_rq->rt_nr_running)
548 raw_spin_unlock(&rt_rq->rt_runtime_lock);
549 } else if (rt_rq->rt_nr_running)
553 sched_rt_rq_enqueue(rt_rq);
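
The periodic bandwidth timer path (source lines 530-553) visits each CPU's rt_rq in the group, rebalances if the queue is throttled, pays down the accumulated rt_time by overrun*runtime, and lifts the throttle once the remaining debt fits under the runtime again, at which point the queue is re-enqueued (line 553). A minimal model of that arithmetic, with a simplified state struct and made-up names:

    #include <stdint.h>

    struct rt_bw_state {
        uint64_t rt_time;      /* time consumed in the current window */
        uint64_t rt_runtime;   /* budget per period */
        int      rt_throttled;
    };

    /* Returns nonzero when the throttle was just lifted, mirroring the
     * replenishment around source lines 540-543. */
    int replenish(struct rt_bw_state *s, uint64_t overrun)
    {
        uint64_t pay = overrun * s->rt_runtime;

        s->rt_time -= (s->rt_time < pay) ? s->rt_time : pay;
        if (s->rt_throttled && s->rt_time < s->rt_runtime) {
            s->rt_throttled = 0;
            return 1;
        }
        return 0;
    }
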
563 struct rt_rq *rt_rq = group_rt_rq(rt_se);
565 if (rt_rq)
566 return rt_rq->highest_prio.curr;
572 static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
574 u64 runtime = sched_rt_runtime(rt_rq);
576 if (rt_rq->rt_throttled)
577 return rt_rq_throttled(rt_rq);
579 if (sched_rt_runtime(rt_rq) >= sched_rt_period(rt_rq))
582 balance_runtime(rt_rq);
583 runtime = sched_rt_runtime(rt_rq);
587 if (rt_rq->rt_time > runtime) {
588 rt_rq->rt_throttled = 1;
589 if (rt_rq_throttled(rt_rq)) {
590 sched_rt_rq_dequeue(rt_rq);
606 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
630 rt_rq = rt_rq_of_se(rt_se);
632 if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
633 raw_spin_lock(&rt_rq->rt_runtime_lock);
634 rt_rq->rt_time += delta_exec;
635 if (sched_rt_runtime_exceeded(rt_rq))
637 raw_spin_unlock(&rt_rq->rt_runtime_lock);
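
update_curr_rt() (around source lines 606-637) charges the elapsed execution time to every rt_rq on the running entity's path and calls sched_rt_runtime_exceeded(), which throttles and dequeues a queue once its rt_time exceeds its runtime; a queue whose runtime covers the whole period is never throttled, and an already-throttled queue only reports whether the throttle is still in effect. A simplified model of the exceed test, ignoring PI boosting and runtime borrowing:

    #include <stdint.h>

    /* Nonzero means "throttle this queue now". */
    int rt_runtime_exceeded(uint64_t rt_time, uint64_t runtime, uint64_t period)
    {
        if (runtime >= period)      /* full-period budget: never throttle */
            return 0;
        return rt_time > runtime;
    }
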
657 inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
659 struct rq *rq = rq_of_rt_rq(rt_rq);
668 rt_rq->highest_prio.next = prev_prio;
673 } else if (prio == rt_rq->highest_prio.curr)
679 rt_rq->highest_prio.next = prio;
680 else if (prio < rt_rq->highest_prio.next)
684 rt_rq->highest_prio.next = next_prio(rq);
688 dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
690 struct rq *rq = rq_of_rt_rq(rt_rq);
692 if (rt_rq->rt_nr_running && (prio <= rt_rq->highest_prio.next))
693 rt_rq->highest_prio.next = next_prio(rq);
695 if (rq->online && rt_rq->highest_prio.curr != prev_prio)
696 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
702 void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
704 void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
710 inc_rt_prio(struct rt_rq *rt_rq, int prio)
712 int prev_prio = rt_rq->highest_prio.curr;
715 rt_rq->highest_prio.curr = prio;
717 inc_rt_prio_smp(rt_rq, prio, prev_prio);
721 dec_rt_prio(struct rt_rq *rt_rq, int prio)
723 int prev_prio = rt_rq->highest_prio.curr;
725 if (rt_rq->rt_nr_running) {
734 struct rt_prio_array *array = &rt_rq->active;
736 rt_rq->highest_prio.curr =
741 rt_rq->highest_prio.curr = MAX_RT_PRIO;
743 dec_rt_prio_smp(rt_rq, prio, prev_prio);
748 static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
749 static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
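
inc_rt_prio()/dec_rt_prio() track the highest priority queued on each rt_rq (lower number means higher priority); when the task defining the current top leaves, the new top is recomputed from the rt_prio_array bitmap (the continuation of line 736), falling back to MAX_RT_PRIO when the queue is empty, and the SMP helpers push the result into cpupri (line 696) for push/pull decisions. A small model of that recomputation, using a plain 32-bit mask instead of the kernel bitmap:

    #include <strings.h>   /* ffs() */

    #define MAX_RT_PRIO_M 100

    int recompute_top_prio(unsigned int prio_bitmap, int removed_prio, int curr_top)
    {
        if (!prio_bitmap)
            return MAX_RT_PRIO_M;           /* queue is now empty */
        if (removed_prio == curr_top)
            return ffs(prio_bitmap) - 1;    /* lowest set bit = highest prio */
        return curr_top;
    }
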
756 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
759 rt_rq->rt_nr_boosted++;
761 if (rt_rq->tg)
762 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
766 dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
769 rt_rq->rt_nr_boosted--;
771 WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
777 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
783 void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
788 void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
793 rt_rq->rt_nr_running++;
795 inc_rt_prio(rt_rq, prio);
796 inc_rt_migration(rt_se, rt_rq);
797 inc_rt_group(rt_se, rt_rq);
801 void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
804 WARN_ON(!rt_rq->rt_nr_running);
805 rt_rq->rt_nr_running--;
807 dec_rt_prio(rt_rq, rt_se_prio(rt_se));
808 dec_rt_migration(rt_se, rt_rq);
809 dec_rt_group(rt_se, rt_rq);
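
inc_rt_tasks()/dec_rt_tasks() are the single bookkeeping entry points when an entity is queued on or removed from an rt_rq: they adjust rt_nr_running and then fan out to the priority, migration and group counters, with WARN_ONs guarding against underflow (source lines 804 and 771). A toy model of the dec-side ordering and invariants, with invented names:

    #include <assert.h>

    struct rt_counts { unsigned long nr_running, nr_boosted; };

    void dec_rt_tasks_m(struct rt_counts *c, int entity_is_boosted)
    {
        assert(c->nr_running);                        /* WARN_ON at line 804 */
        c->nr_running--;
        if (entity_is_boosted)
            c->nr_boosted--;
        assert(c->nr_running || !c->nr_boosted);      /* WARN_ON at line 771 */
    }
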
814 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
815 struct rt_prio_array *array = &rt_rq->active;
816 struct rt_rq *group_rq = group_rt_rq(rt_se);
834 inc_rt_tasks(rt_se, rt_rq);
839 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
840 struct rt_prio_array *array = &rt_rq->active;
846 dec_rt_tasks(rt_se, rt_rq);
880 struct rt_rq *rt_rq = group_rt_rq(rt_se);
882 if (rt_rq && rt_rq->rt_nr_running)
919 requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
922 struct rt_prio_array *array = &rt_rq->active;
935 struct rt_rq *rt_rq;
938 rt_rq = rt_rq_of_se(rt_se);
939 requeue_rt_entity(rt_rq, rt_se, head);
1040 struct rt_rq *rt_rq)
1042 struct rt_prio_array *array = &rt_rq->active;
1060 struct rt_rq *rt_rq;
1062 rt_rq = &rq->rt;
1064 if (unlikely(!rt_rq->rt_nr_running))
1067 if (rt_rq_throttled(rt_rq))
1071 rt_se = pick_next_rt_entity(rq, rt_rq);
1073 rt_rq = group_rt_rq(rt_se);
1074 } while (rt_rq);
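
The pick-next path around source lines 1060-1074 starts at rq->rt, bails out if nothing is queued or the queue is throttled, and then descends the group hierarchy: pick_next_rt_entity() selects the highest-priority entity at each level, and when that entity is itself a group (group_rt_rq() returns non-NULL) the loop repeats inside that group's rt_rq until a task is reached. A toy standalone model of that descent, with an invented table purely for illustration:

    #include <stdio.h>

    int main(void)
    {
        /* child_group[i]: group that level i's top entity represents,
         * or -1 when the top entity is a plain task. */
        int child_group[4] = { 1, 3, -1, -1 };
        int level = 0;

        while (child_group[level] != -1)   /* mirrors: do { ... } while (rt_rq) */
            level = child_group[level];

        printf("picked a task at level %d\n", level);
        return 0;
    }
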
1136 struct rt_rq *rt_rq;
1139 for_each_leaf_rt_rq(rt_rq, rq) {
1140 array = &rt_rq->active;
1762 extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
1766 struct rt_rq *rt_rq;
1769 for_each_leaf_rt_rq(rt_rq, cpu_rq(cpu))
1770 print_rt_rq(m, cpu, rt_rq);