Searched refs:cfs_rq (Results 1 - 8 of 8) sorted by path

/linux-master/include/linux/
sched.h
57 struct cfs_rq;
445 * For cfs_rq, they are the aggregated values of all runnable and blocked
460 * with the highest load (=88761), always runnable on a single cfs_rq,
559 struct cfs_rq *cfs_rq; member in struct:sched_entity
561 struct cfs_rq *my_q;
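
The two sched.h hits at 559 and 561 are the pair of per-entity pointers that group scheduling relies on: cfs_rq is the queue the entity is (to be) queued on, and my_q is the queue the entity itself owns when it represents a task group. A minimal stand-alone model of that relationship (user-space sketch with placeholder struct contents, not kernel code):

#include <stdio.h>

struct cfs_rq;

struct sched_entity {
	struct cfs_rq *cfs_rq;	/* queue this entity is (to be) queued on */
	struct cfs_rq *my_q;	/* queue owned by this entity; NULL for plain tasks */
};

struct cfs_rq {
	const char *name;	/* stand-in for the real per-queue bookkeeping */
};

int main(void)
{
	struct cfs_rq root = { "root cfs_rq" }, group = { "group cfs_rq" };
	/* a task's entity sits on some cfs_rq and owns none */
	struct sched_entity task_se  = { .cfs_rq = &group, .my_q = NULL };
	/* a group's entity is queued on the parent queue and owns its own */
	struct sched_entity group_se = { .cfs_rq = &root,  .my_q = &group };

	printf("task entity queued on: %s\n", task_se.cfs_rq->name);
	printf("group entity queued on: %s, owns: %s\n",
	       group_se.cfs_rq->name, group_se.my_q->name);
	return 0;
}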
/linux-master/include/trace/events/
sched.h
779 TP_PROTO(struct cfs_rq *cfs_rq),
780 TP_ARGS(cfs_rq));
811 TP_PROTO(struct cfs_rq *cfs_rq),
812 TP_ARGS(cfs_rq));
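
These TP_PROTO/TP_ARGS pairs belong to the "bare" tracepoints that hand a cfs_rq to attached probes (the PELT and util_est tracepoints). As a sketch of the shape of such a declaration in trace/events/sched.h (pelt_cfs_tp is one existing example; treat the exact name as illustrative):

DECLARE_TRACE(pelt_cfs_tp,
	TP_PROTO(struct cfs_rq *cfs_rq),
	TP_ARGS(cfs_rq));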
/linux-master/kernel/sched/
core.c
4540 p->se.cfs_rq = NULL;
5560 struct sched_entity *curr = (&p->se)->cfs_rq->curr;
9949 root_task_group.cfs_rq = (struct cfs_rq **)ptr;
10436 * that tg_unthrottle_up() won't add decayed cfs_rq's to it.
10480 * now. This function just updates tsk->se.cfs_rq and tsk->se.parent to reflect
10845 * Prevent race between setting of cfs_rq->runtime_enabled and
10880 struct cfs_rq *cfs_rq = tg->cfs_rq[ local
[all...]
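
The core.c hit at 9949 is the boot-time wiring of root_task_group: its cfs_rq member (a per-CPU array of pointers, see the task_group hit in kernel/sched/sched.h further down) is pointed into one large allocation. A stand-alone sketch of that layout, with nr_cpu_ids and the heap allocator as stand-ins for the kernel's boot-time allocation:

#include <stdio.h>
#include <stdlib.h>

struct cfs_rq { int cpu; };
struct task_group { struct cfs_rq **cfs_rq; };	/* per-CPU queues of this group */

int main(void)
{
	const int nr_cpu_ids = 4;	/* assumed CPU count for the sketch */
	struct task_group root_task_group;

	root_task_group.cfs_rq = calloc(nr_cpu_ids, sizeof(struct cfs_rq *));
	for (int cpu = 0; cpu < nr_cpu_ids; cpu++) {
		root_task_group.cfs_rq[cpu] = calloc(1, sizeof(struct cfs_rq));
		root_task_group.cfs_rq[cpu]->cpu = cpu;
	}
	printf("cfs_rq for cpu 2 lives at %p\n", (void *)root_task_group.cfs_rq[2]);
	return 0;
}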
debug.c
629 void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) argument
638 SEQ_printf_task_group_path(m, cfs_rq->tg, "cfs_rq[%d]:%s\n", cpu);
641 SEQ_printf(m, "cfs_rq[%d]:\n", cpu);
644 SPLIT_NS(cfs_rq->exec_clock));
647 root = __pick_root_entity(cfs_rq);
650 first = __pick_first_entity(cfs_rq);
653 last = __pick_last_entity(cfs_rq);
656 min_vruntime = cfs_rq
[all...]
fair.c
115 * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
116 * each time a cfs_rq requests quota.
310 static inline bool list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq) argument
312 struct rq *rq = rq_of(cfs_rq);
315 if (cfs_rq->on_list)
318 cfs_rq->on_list = 1;
325 * cfs_rq. Furthermore, it also means that we will always reset
329 if (cfs_rq->tg->parent &&
330 cfs_rq
378 list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq) argument
460 cfs_rq_is_idle(struct cfs_rq *cfs_rq) argument
477 list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq) argument
482 list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq) argument
508 cfs_rq_is_idle(struct cfs_rq *cfs_rq) argument
555 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se) argument
622 avg_vruntime_add(struct cfs_rq *cfs_rq, struct sched_entity *se) argument
632 avg_vruntime_sub(struct cfs_rq *cfs_rq, struct sched_entity *se) argument
642 avg_vruntime_update(struct cfs_rq *cfs_rq, s64 delta) argument
654 avg_vruntime(struct cfs_rq *cfs_rq) argument
703 update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se) argument
727 vruntime_eligible(struct cfs_rq *cfs_rq, u64 vruntime) argument
743 entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se) argument
748 __update_min_vruntime(struct cfs_rq *cfs_rq, u64 vruntime) argument
762 update_min_vruntime(struct cfs_rq *cfs_rq) argument
824 __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) argument
832 __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) argument
839 __pick_root_entity(struct cfs_rq *cfs_rq) argument
849 __pick_first_entity(struct cfs_rq *cfs_rq) argument
878 pick_eevdf(struct cfs_rq *cfs_rq) argument
944 __pick_last_entity(struct cfs_rq *cfs_rq) argument
978 update_deadline(struct cfs_rq *cfs_rq, struct sched_entity *se) argument
1059 struct cfs_rq *cfs_rq = cfs_rq_of(se); local
1101 update_tg_load_avg(struct cfs_rq *cfs_rq) argument
1156 update_curr(struct cfs_rq *cfs_rq) argument
1184 update_stats_wait_start_fair(struct cfs_rq *cfs_rq, struct sched_entity *se) argument
1201 update_stats_wait_end_fair(struct cfs_rq *cfs_rq, struct sched_entity *se) argument
1227 update_stats_enqueue_sleeper_fair(struct cfs_rq *cfs_rq, struct sched_entity *se) argument
1247 update_stats_enqueue_fair(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) argument
1264 update_stats_dequeue_fair(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) argument
1296 update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se) argument
3576 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se) argument
3593 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se) argument
3657 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) argument
3664 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) argument
3674 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) argument
3676 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) argument
3785 reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, unsigned long weight) argument
3841 struct cfs_rq *cfs_rq = cfs_rq_of(se); local
3926 calc_group_shares(struct cfs_rq *cfs_rq) argument
3991 cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags) argument
4038 cfs_rq_last_update_time(struct cfs_rq *cfs_rq) argument
4052 child_cfs_rq_on_list(struct cfs_rq *cfs_rq) argument
4070 cfs_rq_is_decayed(struct cfs_rq *cfs_rq) argument
4098 update_tg_load_avg(struct cfs_rq *cfs_rq) argument
4129 clear_tg_load_avg(struct cfs_rq *cfs_rq) argument
4163 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; local
4271 update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) argument
4303 update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) argument
4333 update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) argument
4400 add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum) argument
4409 struct cfs_rq *cfs_rq, *gcfs_rq; local
4466 update_tg_load_avg(struct cfs_rq *cfs_rq) argument
4475 add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum) argument
4483 struct cfs_rq *cfs_rq; local
4578 update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq) argument
4649 attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) argument
4704 detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) argument
4735 update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) argument
4783 struct cfs_rq *cfs_rq = cfs_rq_of(se); local
4796 struct cfs_rq *cfs_rq = cfs_rq_of(se); local
4815 cfs_rq_runnable_avg(struct cfs_rq *cfs_rq) argument
4820 cfs_rq_load_avg(struct cfs_rq *cfs_rq) argument
4847 util_est_enqueue(struct cfs_rq *cfs_rq, struct task_struct *p) argument
4863 util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p) argument
4881 util_est_update(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep) argument
5136 cfs_rq_is_decayed(struct cfs_rq *cfs_rq) argument
5146 update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1) argument
5154 attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) argument
5156 detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) argument
5164 util_est_enqueue(struct cfs_rq *cfs_rq, struct task_struct *p) argument
5167 util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p) argument
5170 util_est_update(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep) argument
5177 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) argument
5283 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) argument
5353 struct cfs_rq *cfs_rq = cfs_rq_of(se); local
5361 clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se) argument
5370 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) argument
5423 set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) argument
5473 pick_next_entity(struct cfs_rq *cfs_rq) argument
5487 put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev) argument
5510 entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) argument
5620 __assign_cfs_rq_runtime(struct cfs_bandwidth *cfs_b, struct cfs_rq *cfs_rq, u64 target_runtime) argument
5648 assign_cfs_rq_runtime(struct cfs_rq *cfs_rq) argument
5660 __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) argument
5679 account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) argument
5687 cfs_rq_throttled(struct cfs_rq *cfs_rq) argument
5693 throttled_hierarchy(struct cfs_rq *cfs_rq) argument
5718 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; local
5747 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; local
5763 throttle_cfs_rq(struct cfs_rq *cfs_rq) argument
5852 unthrottle_cfs_rq(struct cfs_rq *cfs_rq) argument
5978 __unthrottle_cfs_rq_async(struct cfs_rq *cfs_rq) argument
5998 __unthrottle_cfs_rq_async(struct cfs_rq *cfs_rq) argument
6004 unthrottle_cfs_rq_async(struct cfs_rq *cfs_rq) argument
6020 struct cfs_rq *cfs_rq, *tmp; local
6207 __return_cfs_rq_runtime(struct cfs_rq *cfs_rq) argument
6230 return_cfs_rq_runtime(struct cfs_rq *cfs_rq) argument
6275 check_enqueue_throttle(struct cfs_rq *cfs_rq) argument
6296 struct cfs_rq *pcfs_rq, *cfs_rq; local
6312 check_cfs_rq_runtime(struct cfs_rq *cfs_rq) argument
6418 init_cfs_rq_runtime(struct cfs_rq *cfs_rq) argument
6490 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; local
6515 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; local
6541 struct cfs_rq *cfs_rq = task_cfs_rq(p); local
6586 account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) argument
6587 check_cfs_rq_runtime(struct cfs_rq *cfs_rq) argument
6588 check_enqueue_throttle(struct cfs_rq *cfs_rq) argument
6590 return_cfs_rq_runtime(struct cfs_rq *cfs_rq) argument
6592 cfs_rq_throttled(struct cfs_rq *cfs_rq) argument
6597 throttled_hierarchy(struct cfs_rq *cfs_rq) argument
6610 init_cfs_rq_runtime(struct cfs_rq *cfs_rq) argument
6750 struct cfs_rq *cfs_rq; local
6843 struct cfs_rq *cfs_rq; local
6951 struct cfs_rq *cfs_rq; local
6974 struct cfs_rq *cfs_rq; local
7712 struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs; local
8353 struct cfs_rq *cfs_rq = task_cfs_rq(curr); local
8431 struct cfs_rq *cfs_rq; local
8463 struct cfs_rq *cfs_rq = &rq->cfs; local
8616 struct cfs_rq *cfs_rq; local
8630 struct cfs_rq *cfs_rq = task_cfs_rq(curr); local
9308 cfs_rq_has_blocked(struct cfs_rq *cfs_rq) argument
9347 cfs_rq_has_blocked(struct cfs_rq *cfs_rq) argument
9383 struct cfs_rq *cfs_rq, *pos; local
9429 update_cfs_rq_h_load(struct cfs_rq *cfs_rq) argument
9464 struct cfs_rq *cfs_rq = task_cfs_rq(p); local
9473 struct cfs_rq *cfs_rq = &rq->cfs; local
12586 struct cfs_rq *cfs_rq = cfs_rq_of(se); local
12658 struct cfs_rq *cfs_rq; local
12681 struct cfs_rq *cfs_rq; local
12706 struct cfs_rq *cfs_rq; local
12755 struct cfs_rq *cfs_rq = cfs_rq_of(se); local
12784 struct cfs_rq *cfs_rq = cfs_rq_of(se); local
12806 struct cfs_rq *cfs_rq = cfs_rq_of(se); local
12873 struct cfs_rq *cfs_rq = cfs_rq_of(se); local
12881 init_cfs_rq(struct cfs_rq *cfs_rq) argument
12928 struct cfs_rq *cfs_rq; local
13011 init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, struct sched_entity *se, int cpu, struct sched_entity *parent) argument
13141 struct cfs_rq *cfs_rq = cfs_rq_of(se); local
13241 struct cfs_rq *cfs_rq, *pos; local
13244 for_each_leaf_cfs_rq_safe(cpu_rq(cpu), cfs_rq, pos) local
[all...]
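
Many of the fair.c hits around 622..743 implement EEVDF's weighted-average vruntime and the eligibility test built on it: avg_vruntime accumulates the load-weighted sum of (v_i - min_vruntime) and avg_load accumulates the weights, so vruntime_eligible() can compare an entity against the average without dividing. A simplified stand-alone sketch of that comparison (the real code also folds the currently running entity into the sums and uses scale_load_down()):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct cfs_rq_model {
	int64_t  avg_vruntime;	/* sum of w_i * (v_i - min_vruntime) */
	int64_t  avg_load;	/* sum of w_i */
	uint64_t min_vruntime;
};

static bool vruntime_eligible_sketch(const struct cfs_rq_model *q, uint64_t vruntime)
{
	/* eligible when vruntime is at or before the weighted average */
	return q->avg_vruntime >= (int64_t)(vruntime - q->min_vruntime) * q->avg_load;
}

int main(void)
{
	struct cfs_rq_model q = { .avg_vruntime = 3000, .avg_load = 3, .min_vruntime = 100 };

	/* weighted average vruntime is 100 + 3000/3 = 1100 */
	printf("v=600:  %d\n", vruntime_eligible_sketch(&q, 600));	/* 1: eligible   */
	printf("v=2000: %d\n", vruntime_eligible_sketch(&q, 2000));	/* 0: ineligible */
	return 0;
}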
pelt.c
208 * se has been already dequeued but cfs_rq->curr still points to it.
210 * but also for a cfs_rq if the latter becomes idle. As an example,
250 * removed from a cfs_rq and we need to update sched_avg. Scheduler entities
253 * the period_contrib of cfs_rq when updating the sched_avg of a sched_entity
306 int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se) argument
309 cfs_rq->curr == se)) {
320 int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq) argument
322 if (___update_load_sum(now, &cfs_rq
[all...]
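
The pelt.c hits at 306 and 320 are the per-entity and per-cfs_rq entry points into the same PELT accumulation: elapsed time is split into 1024us periods and the running sums decay geometrically, with the decay factor y chosen so that y^32 == 1/2 (a roughly 32ms half-life). A floating-point toy of that decay (the kernel uses fixed-point lookup tables instead):

#include <math.h>
#include <stdio.h>

int main(void)
{
	const double y = pow(0.5, 1.0 / 32.0);	/* y^32 == 1/2 */
	const double start = 1000.0;		/* arbitrary starting *_sum value */

	for (int periods = 8; periods <= 64; periods *= 2)
		printf("after %2d periods of 1024us: %6.1f\n",
		       periods, start * pow(y, periods));
	return 0;	/* link with -lm */
}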
pelt.h
5 int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se);
6 int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq);
156 static inline void update_idle_cfs_rq_clock_pelt(struct cfs_rq *cfs_rq) argument
160 if (unlikely(cfs_rq->throttle_count))
163 throttled = cfs_rq->throttled_clock_pelt_time;
165 u64_u32_store(cfs_rq->throttled_pelt_idle, throttled);
168 /* rq->task_clock normalized against any time this cfs_rq ha
169 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq) argument
177 update_idle_cfs_rq_clock_pelt(struct cfs_rq *cfs_rq) argument
178 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq) argument
187 update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq) argument
232 update_idle_cfs_rq_clock_pelt(struct cfs_rq *cfs_rq) argument
[all...]
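
The pelt.h hits at 156..178 are about keeping throttled time out of the PELT clock: cfs_rq_clock_pelt() returns the rq's PELT clock minus whatever time this cfs_rq has spent throttled, and stands still while a throttle is in progress. A simplified stand-alone model of that arithmetic:

#include <stdint.h>
#include <stdio.h>

struct cfs_rq_model {
	uint64_t rq_clock_pelt;			/* per-rq PELT clock */
	uint64_t throttled_clock_pelt_time;	/* total time spent throttled */
	int      throttle_count;
	uint64_t throttled_clock_pelt;		/* when the current throttle began */
};

static uint64_t cfs_rq_clock_pelt_sketch(const struct cfs_rq_model *q)
{
	if (q->throttle_count)	/* clock is frozen while throttled */
		return q->throttled_clock_pelt - q->throttled_clock_pelt_time;
	return q->rq_clock_pelt - q->throttled_clock_pelt_time;
}

int main(void)
{
	struct cfs_rq_model q = {
		.rq_clock_pelt = 10000, .throttled_clock_pelt_time = 2500,
	};
	printf("pelt clock: %llu\n",
	       (unsigned long long)cfs_rq_clock_pelt_sketch(&q));
	return 0;
}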
sched.h
361 struct cfs_rq;
400 struct cfs_rq **cfs_rq; member in struct:task_group
452 * A weight of a cfs_rq is the sum of weights of which entities
453 * are queued on this cfs_rq, so a weight of a entity should not be
495 extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
502 extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);
529 struct cfs_rq *pre
587 struct cfs_rq { struct
1208 rq_of(struct cfs_rq *cfs_rq) argument
1215 rq_of(struct cfs_rq *cfs_rq) argument
[all...]
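
The final two sched.h hits (1208 and 1215) are the two flavours of rq_of(): with CONFIG_FAIR_GROUP_SCHED every cfs_rq carries an explicit back-pointer to its rq, while without it the lone cfs_rq is embedded in struct rq and container_of() recovers the owner. A stand-alone model of the container_of() flavour:

#include <stddef.h>
#include <stdio.h>

struct rq;
struct cfs_rq { struct rq *rq; };	/* group-sched flavour keeps a back-pointer */
struct rq     { struct cfs_rq cfs; };	/* otherwise the root cfs_rq is embedded */

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct rq *rq_of_embedded(struct cfs_rq *cfs_rq)
{
	return container_of(cfs_rq, struct rq, cfs);
}

int main(void)
{
	struct rq r;
	printf("recovered rq: %p (rq itself: %p)\n",
	       (void *)rq_of_embedded(&r.cfs), (void *)&r);
	return 0;
}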

Completed in 505 milliseconds