Searched refs:cfs_rq (Results 1 - 8 of 8) sorted by relevance

/linux-master/kernel/sched/
pelt.h
5 int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se);
6 int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq);
156 static inline void update_idle_cfs_rq_clock_pelt(struct cfs_rq *cfs_rq) argument
160 if (unlikely(cfs_rq->throttle_count))
163 throttled = cfs_rq->throttled_clock_pelt_time;
165 u64_u32_store(cfs_rq->throttled_pelt_idle, throttled);
168 /* rq->task_clock normalized against any time this cfs_rq ha
169 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq) argument
177 update_idle_cfs_rq_clock_pelt(struct cfs_rq *cfs_rq) argument
178 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq) argument
187 update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq) argument
232 update_idle_cfs_rq_clock_pelt(struct cfs_rq *cfs_rq) argument
[all...]
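The pelt.h hits around lines 156-169 all revolve around one idea: normalizing a cfs_rq's PELT clock for time spent throttled, so its load signal does not decay while the group is not allowed to run. A minimal sketch of that idea, written from memory of the kernel's implementation and assuming the kernel-internal helpers rq_of() and rq_clock_pelt(); treat it as illustrative, not verbatim:

    /*
     * Sketch: a cfs_rq's PELT clock is the rq's PELT clock minus the
     * total time this cfs_rq has spent throttled.
     */
    static inline u64 cfs_rq_clock_pelt_sketch(struct cfs_rq *cfs_rq)
    {
        /* While throttled, the clock stays frozen at the throttle timestamp. */
        if (unlikely(cfs_rq->throttle_count))
            return cfs_rq->throttled_clock_pelt - cfs_rq->throttled_clock_pelt_time;

        return rq_clock_pelt(rq_of(cfs_rq)) - cfs_rq->throttled_clock_pelt_time;
    }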
fair.c
121 * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
122 * each time a cfs_rq requests quota.
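The comment at fair.c:121-122 describes CFS bandwidth control's two-level quota pool; __assign_cfs_rq_runtime() (line 5613 below) is where the transfer happens. A hedged sketch of the slice mechanism, with RUNTIME_INF handling and locking omitted and the function name invented for illustration:

    /*
     * Sketch: quota moves from the task group's global pool to the
     * per-cfs_rq local pool in slices (sched_cfs_bandwidth_slice_us,
     * 5ms by default), so the global lock is taken once per slice
     * rather than on every accounting update.
     */
    static int assign_runtime_sketch(struct cfs_bandwidth *cfs_b, struct cfs_rq *cfs_rq)
    {
        u64 amount = min(sched_cfs_bandwidth_slice(), cfs_b->runtime);

        cfs_b->runtime -= amount;            /* drain the global pool... */
        cfs_rq->runtime_remaining += amount; /* ...into the local pool */

        return cfs_rq->runtime_remaining > 0;
    }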
316 static inline bool list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq) argument
318 struct rq *rq = rq_of(cfs_rq);
321 if (cfs_rq->on_list)
324 cfs_rq->on_list = 1;
331 * cfs_rq. Furthermore, it also means that we will always reset
335 if (cfs_rq->tg->parent &&
336 cfs_rq
384 list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq) argument
466 cfs_rq_is_idle(struct cfs_rq *cfs_rq) argument
483 list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq) argument
488 list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq) argument
514 cfs_rq_is_idle(struct cfs_rq *cfs_rq) argument
561 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se) argument
628 avg_vruntime_add(struct cfs_rq *cfs_rq, struct sched_entity *se) argument
638 avg_vruntime_sub(struct cfs_rq *cfs_rq, struct sched_entity *se) argument
648 avg_vruntime_update(struct cfs_rq *cfs_rq, s64 delta) argument
660 avg_vruntime(struct cfs_rq *cfs_rq) argument
709 update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se) argument
733 vruntime_eligible(struct cfs_rq *cfs_rq, u64 vruntime) argument
749 entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se) argument
754 __update_min_vruntime(struct cfs_rq *cfs_rq, u64 vruntime) argument
768 update_min_vruntime(struct cfs_rq *cfs_rq) argument
830 __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) argument
838 __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) argument
845 __pick_root_entity(struct cfs_rq *cfs_rq) argument
855 __pick_first_entity(struct cfs_rq *cfs_rq) argument
884 pick_eevdf(struct cfs_rq *cfs_rq) argument
950 __pick_last_entity(struct cfs_rq *cfs_rq) argument
984 update_deadline(struct cfs_rq *cfs_rq, struct sched_entity *se) argument
1065 struct cfs_rq *cfs_rq = cfs_rq_of(se); local
1107 update_tg_load_avg(struct cfs_rq *cfs_rq) argument
1162 update_curr(struct cfs_rq *cfs_rq) argument
1190 update_stats_wait_start_fair(struct cfs_rq *cfs_rq, struct sched_entity *se) argument
1207 update_stats_wait_end_fair(struct cfs_rq *cfs_rq, struct sched_entity *se) argument
1233 update_stats_enqueue_sleeper_fair(struct cfs_rq *cfs_rq, struct sched_entity *se) argument
1253 update_stats_enqueue_fair(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) argument
1270 update_stats_dequeue_fair(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) argument
1302 update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se) argument
3582 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se) argument
3599 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se) argument
3663 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) argument
3670 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) argument
3680 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) argument
3682 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) argument
3791 reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, unsigned long weight) argument
3847 struct cfs_rq *cfs_rq = cfs_rq_of(se); local
3932 calc_group_shares(struct cfs_rq *cfs_rq) argument
3997 cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags) argument
4044 cfs_rq_last_update_time(struct cfs_rq *cfs_rq) argument
4058 child_cfs_rq_on_list(struct cfs_rq *cfs_rq) argument
4076 cfs_rq_is_decayed(struct cfs_rq *cfs_rq) argument
4104 update_tg_load_avg(struct cfs_rq *cfs_rq) argument
4135 clear_tg_load_avg(struct cfs_rq *cfs_rq) argument
4169 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; local
4277 update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) argument
4309 update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) argument
4339 update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) argument
4406 add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum) argument
4415 struct cfs_rq *cfs_rq, *gcfs_rq; local
4472 update_tg_load_avg(struct cfs_rq *cfs_rq) argument
4481 add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum) argument
4489 struct cfs_rq *cfs_rq; local
4584 update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq) argument
4655 attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) argument
4710 detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) argument
4741 update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) argument
4789 struct cfs_rq *cfs_rq = cfs_rq_of(se); local
4802 struct cfs_rq *cfs_rq = cfs_rq_of(se); local
4821 cfs_rq_runnable_avg(struct cfs_rq *cfs_rq) argument
4826 cfs_rq_load_avg(struct cfs_rq *cfs_rq) argument
4853 util_est_enqueue(struct cfs_rq *cfs_rq, struct task_struct *p) argument
4869 util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p) argument
4887 util_est_update(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep) argument
5129 cfs_rq_is_decayed(struct cfs_rq *cfs_rq) argument
5139 update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1) argument
5147 attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) argument
5149 detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) argument
5157 util_est_enqueue(struct cfs_rq *cfs_rq, struct task_struct *p) argument
5160 util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p) argument
5163 util_est_update(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep) argument
5170 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) argument
5276 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) argument
5346 struct cfs_rq *cfs_rq = cfs_rq_of(se); local
5354 clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se) argument
5363 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) argument
5416 set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) argument
5466 pick_next_entity(struct cfs_rq *cfs_rq) argument
5480 put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev) argument
5503 entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) argument
5613 __assign_cfs_rq_runtime(struct cfs_bandwidth *cfs_b, struct cfs_rq *cfs_rq, u64 target_runtime) argument
5641 assign_cfs_rq_runtime(struct cfs_rq *cfs_rq) argument
5653 __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) argument
5672 account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) argument
5680 cfs_rq_throttled(struct cfs_rq *cfs_rq) argument
5686 throttled_hierarchy(struct cfs_rq *cfs_rq) argument
5711 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; local
5740 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; local
5756 throttle_cfs_rq(struct cfs_rq *cfs_rq) argument
5845 unthrottle_cfs_rq(struct cfs_rq *cfs_rq) argument
5971 __unthrottle_cfs_rq_async(struct cfs_rq *cfs_rq) argument
5991 __unthrottle_cfs_rq_async(struct cfs_rq *cfs_rq) argument
5997 unthrottle_cfs_rq_async(struct cfs_rq *cfs_rq) argument
6013 struct cfs_rq *cfs_rq, *tmp; local
6200 __return_cfs_rq_runtime(struct cfs_rq *cfs_rq) argument
6223 return_cfs_rq_runtime(struct cfs_rq *cfs_rq) argument
6268 check_enqueue_throttle(struct cfs_rq *cfs_rq) argument
6289 struct cfs_rq *pcfs_rq, *cfs_rq; local
6305 check_cfs_rq_runtime(struct cfs_rq *cfs_rq) argument
6411 init_cfs_rq_runtime(struct cfs_rq *cfs_rq) argument
6483 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; local
6508 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; local
6534 struct cfs_rq *cfs_rq = task_cfs_rq(p); local
6579 account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) argument
6580 check_cfs_rq_runtime(struct cfs_rq *cfs_rq) argument
6581 check_enqueue_throttle(struct cfs_rq *cfs_rq) argument
6583 return_cfs_rq_runtime(struct cfs_rq *cfs_rq) argument
6585 cfs_rq_throttled(struct cfs_rq *cfs_rq) argument
6590 throttled_hierarchy(struct cfs_rq *cfs_rq) argument
6603 init_cfs_rq_runtime(struct cfs_rq *cfs_rq) argument
6718 struct cfs_rq *cfs_rq; local
6811 struct cfs_rq *cfs_rq; local
6919 struct cfs_rq *cfs_rq; local
6942 struct cfs_rq *cfs_rq; local
7680 struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs; local
8290 struct cfs_rq *cfs_rq = task_cfs_rq(curr); local
8368 struct cfs_rq *cfs_rq; local
8400 struct cfs_rq *cfs_rq = &rq->cfs; local
8553 struct cfs_rq *cfs_rq; local
8567 struct cfs_rq *cfs_rq = task_cfs_rq(curr); local
9245 cfs_rq_has_blocked(struct cfs_rq *cfs_rq) argument
9284 cfs_rq_has_blocked(struct cfs_rq *cfs_rq) argument
9320 struct cfs_rq *cfs_rq, *pos; local
9366 update_cfs_rq_h_load(struct cfs_rq *cfs_rq) argument
9401 struct cfs_rq *cfs_rq = task_cfs_rq(p); local
9410 struct cfs_rq *cfs_rq = &rq->cfs; local
12517 struct cfs_rq *cfs_rq = cfs_rq_of(se); local
12589 struct cfs_rq *cfs_rq; local
12612 struct cfs_rq *cfs_rq; local
12637 struct cfs_rq *cfs_rq; local
12684 struct cfs_rq *cfs_rq = cfs_rq_of(se); local
12713 struct cfs_rq *cfs_rq = cfs_rq_of(se); local
12735 struct cfs_rq *cfs_rq = cfs_rq_of(se); local
12800 struct cfs_rq *cfs_rq = cfs_rq_of(se); local
12808 init_cfs_rq(struct cfs_rq *cfs_rq) argument
12855 struct cfs_rq *cfs_rq; local
12938 init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, struct sched_entity *se, int cpu, struct sched_entity *parent) argument
13068 struct cfs_rq *cfs_rq = cfs_rq_of(se); local
13168 struct cfs_rq *cfs_rq, *pos; local
13171 for_each_leaf_cfs_rq_safe(cpu_rq(cpu), cfs_rq, pos) local
[all...]
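The cluster of fair.c hits at lines 628-884 (avg_vruntime_add/sub, avg_vruntime, vruntime_eligible, entity_eligible, pick_eevdf) is the EEVDF machinery. A condensed sketch of the eligibility test, from memory of the kernel's scheme and ignoring the adjustment the real code makes for the currently running entity:

    /*
     * Sketch: cfs_rq->avg_vruntime tracks \Sum w_i*(v_i - v0) and
     * cfs_rq->avg_load tracks \Sum w_i, with v0 = cfs_rq->min_vruntime
     * as a moving zero point. An entity is "eligible" under EEVDF when
     * its vruntime is at or behind the weighted average V, i.e.
     * (v - v0) * \Sum w_i <= \Sum w_i*(v_i - v0).
     */
    static bool eligible_sketch(struct cfs_rq *cfs_rq, u64 vruntime)
    {
        s64 key = (s64)(vruntime - cfs_rq->min_vruntime);

        return cfs_rq->avg_vruntime >= key * (s64)cfs_rq->avg_load;
    }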
pelt.c
208 * se has already been dequeued but cfs_rq->curr still points to it.
210 * but also for a cfs_rq if the latter becomes idle. As an example,
250 * removed from a cfs_rq and we need to update sched_avg. Scheduler entities
253 * the period_contrib of cfs_rq when updating the sched_avg of a sched_entity
306 int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se) argument
309 cfs_rq->curr == se)) {
320 int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq) argument
322 if (___update_load_sum(now, &cfs_rq
[all...]
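The comments at pelt.c:250-253 concern keeping a detached sched_entity's averages in sync with its cfs_rq's period_contrib. Underlying every update function in this file is the same geometric decay: runnable time is accumulated in 1024us periods, and each older period is scaled by y^i with y chosen so that y^32 = 1/2 (a half-life of ~32ms). A standalone floating-point illustration; the kernel instead uses precomputed fixed-point tables:

    #include <math.h>

    /*
     * Illustration of the PELT sum: index 0 is the most recent 1024us
     * window; each older window's contribution decays by y^i, with
     * y^32 == 0.5 so 32 periods of history carry half the weight.
     */
    static double pelt_sum(const unsigned int *contrib_us, int periods)
    {
        const double y = pow(0.5, 1.0 / 32.0);
        double sum = 0.0;

        for (int i = 0; i < periods; i++)
            sum += contrib_us[i] * pow(y, i);

        return sum;
    }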
debug.c
629 void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) argument
638 SEQ_printf_task_group_path(m, cfs_rq->tg, "cfs_rq[%d]:%s\n", cpu);
641 SEQ_printf(m, "cfs_rq[%d]:\n", cpu);
644 SPLIT_NS(cfs_rq->exec_clock));
647 root = __pick_root_entity(cfs_rq);
650 first = __pick_first_entity(cfs_rq);
653 last = __pick_last_entity(cfs_rq);
656 min_vruntime = cfs_rq
[all...]
sched.h
347 struct cfs_rq;
386 struct cfs_rq **cfs_rq; member in struct:task_group
438 * The weight of a cfs_rq is the sum of the weights of the entities
439 * queued on this cfs_rq, so the weight of an entity should not be
481 extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
488 extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);
515 struct cfs_rq *pre
573 struct cfs_rq { struct
1189 rq_of(struct cfs_rq *cfs_rq) argument
1196 rq_of(struct cfs_rq *cfs_rq) argument
[all...]
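The hits at sched.h:386 and 438-439 describe the group-scheduling layout: a task group owns one cfs_rq and one group sched_entity per CPU, and a cfs_rq's weight is the sum of the weights of the entities queued on it. A skeletal sketch with an illustrative name; the real struct task_group carries many more fields:

    struct task_group_sketch {
        struct sched_entity **se;      /* one group entity per CPU */
        struct cfs_rq       **cfs_rq;  /* one runqueue per CPU */
        unsigned long       shares;    /* group weight, split across CPUs */
    };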
core.c
4540 p->se.cfs_rq = NULL;
5560 struct sched_entity *curr = (&p->se)->cfs_rq->curr;
9949 root_task_group.cfs_rq = (struct cfs_rq **)ptr;
10436 * that tg_unthrottle_up() won't add decayed cfs_rq's to it.
10480 * now. This function just updates tsk->se.cfs_rq and tsk->se.parent to reflect
10845 * Prevent race between setting of cfs_rq->runtime_enabled and
10880 struct cfs_rq *cfs_rq = tg->cfs_rq[ local
[all...]
/linux-master/include/trace/events/
sched.h
744 TP_PROTO(struct cfs_rq *cfs_rq),
745 TP_ARGS(cfs_rq));
776 TP_PROTO(struct cfs_rq *cfs_rq),
777 TP_ARGS(cfs_rq));
/linux-master/include/linux/
sched.h
57 struct cfs_rq;
445 * For cfs_rq, they are the aggregated values of all runnable and blocked
460 * with the highest load (=88761), always runnable on a single cfs_rq,
559 struct cfs_rq *cfs_rq; member in struct:sched_entity
561 struct cfs_rq *my_q;
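The fragments at lines 445 and 460 come from the documentation block for struct sched_avg. From memory of that comment (hedged paraphrase, not a verbatim quote), the two main signals are defined roughly as:

    load_avg = runnable% * scale_load_down(load)
    util_avg = running%  * SCHED_CAPACITY_SCALE

The "(=88761)" at line 460 is the weight of a nice -20 task, the heaviest entry in the kernel's nice-to-weight table; the comment uses one such task, always runnable on a single cfs_rq, as the worst case when arguing that the averages stay bounded.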

Completed in 368 milliseconds