Searched refs:load_avg (Results 1 - 6 of 6) sorted by relevance

/linux-master/kernel/sched/
pelt.h
15 return READ_ONCE(rq->avg_thermal.load_avg);
debug.c
527 P(se->avg.load_avg);
681 SEQ_printf(m, " .%-30s: %lu\n", "load_avg",
682 cfs_rq->avg.load_avg);
689 SEQ_printf(m, " .%-30s: %ld\n", "removed.load_avg",
690 cfs_rq->removed.load_avg);
699 atomic_long_read(&cfs_rq->tg->load_avg));
1074 P(se.avg.load_avg);
pelt.c
176 * load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
264 sa->load_avg = div_u64(load * sa->load_sum, divider);
277 * se_weight() = tg->weight * grq->load_avg / tg->load_avg
284 * load_avg = se_weight(se) * load_sum
292 * load_avg = \Sum se->avg.load_avg
342 * load_avg and runnable_avg are not supported and meaningless.
368 * load_avg and runnable_avg are not supported and meaningless.
397 * tracked through load_avg
[all...]
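The pelt.c hits above (lines 176-292) quote the PELT scheme: load_sum accumulates as a geometric series whose decay ratio y satisfies y^32 = 1/2 over ~1024us periods, and load_avg is derived as load * load_sum / divider (line 264). The toy program below is a minimal userspace sketch of that decay, not kernel code: the constants, function names, and use of floating point are illustrative assumptions, since the kernel works in fixed point and uses a slightly different divider (LOAD_AVG_MAX - 1024 + period_contrib).

#include <stdio.h>

/*
 * Sketch of the PELT decay quoted above: load_sum is a geometric
 * series with ratio y where y^32 == 1/2, and load_avg is derived as
 * load * load_sum / divider, mirroring the pelt.c hit at line 264.
 * All names and constants here are illustrative, not kernel API.
 */
#define PELT_Y       0.9785720620877002  /* 2^(-1/32), so y^32 == 0.5 */
#define LOAD_AVG_MAX 47742.0             /* kernel's max of the series */

static double decay(double sum, unsigned int periods)
{
	while (periods >= 32) {  /* every 32 full periods halve the sum */
		sum *= 0.5;
		periods -= 32;
	}
	while (periods--)        /* remaining periods: multiply by y each */
		sum *= PELT_Y;
	return sum;
}

int main(void)
{
	double load_sum = 0.0;
	unsigned long weight = 1024;  /* nice-0 weight, scaled down */
	int i;

	/* a task runnable for 64 consecutive periods: sum nears its max */
	for (i = 0; i < 64; i++)
		load_sum = decay(load_sum, 1) + 1024.0;

	printf("load_sum=%.0f load_avg=%.0f\n",
	       load_sum, weight * load_sum / LOAD_AVG_MAX);
	return 0;
}

Run long enough, load_sum converges toward LOAD_AVG_MAX and load_avg toward the weight itself, which is why the series maximum works as the divider.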
fair.c
1031 sa->load_avg = scale_load_down(se->load.weight);
1033 /* when this task enqueue'ed, it will contribute to its cfs_rq's load_avg */
1040 * util_avg = cfs_rq->util_avg / (cfs_rq->load_avg + 1) * se.load.weight
1088 sa->util_avg /= (cfs_rq->avg.load_avg + 1);
3665 cfs_rq->avg.load_avg += se->avg.load_avg;
3672 sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
3676 cfs_rq->avg.load_avg * PELT_MIN_DIVIDER);
3823 se->avg.load_avg
4342 unsigned long load_avg; local
[all...]
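The fair.c hits at lines 1031-1088 quote the heuristic for seeding a new entity's averages: load_avg starts at the entity's scaled-down weight, and util_avg is apportioned from its cfs_rq as util_avg = cfs_rq->util_avg / (cfs_rq->load_avg + 1) * se.load.weight, the "+ 1" guarding against division by zero on an idle queue. Below is a hedged sketch of just that arithmetic; the struct and function names are hypothetical, not the kernel's.

#include <stdio.h>

/*
 * Illustrative mirror of the seeding heuristic quoted from fair.c
 * (lines 1040 and 1088): a new entity's util_avg is its weight's
 * proportional share of the cfs_rq's current utilization.
 */
struct toy_cfs_rq { unsigned long util_avg, load_avg; };

static unsigned long seed_util_avg(const struct toy_cfs_rq *cfs_rq,
				   unsigned long se_weight)
{
	/* "+ 1" avoids a divide-by-zero on an idle cfs_rq */
	return cfs_rq->util_avg * se_weight / (cfs_rq->load_avg + 1);
}

int main(void)
{
	struct toy_cfs_rq rq = { .util_avg = 600, .load_avg = 2048 };

	/* a nice-0 task (weight 1024) holds half the queue's load, so
	 * it is seeded with roughly half of util_avg 600 */
	printf("seeded util_avg = %lu\n", seed_util_avg(&rq, 1024));
	return 0;
}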
sched.h
394 * load_avg can be heavily contended at clock tick time, so put
398 atomic_long_t load_avg ____cacheline_aligned;
618 unsigned long load_avg; member in struct:cfs_rq::__anon103
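The kernel/sched/sched.h hit at lines 394-398 shows a false-sharing fix: because every CPU updates the task group's load_avg at clock tick time, the field is given its own cache line via ____cacheline_aligned so hot writes do not invalidate the line holding neighbouring read-mostly fields. A minimal C11 sketch of the same pattern follows; the 64-byte line size and all names are assumptions, not kernel code.

#include <stdalign.h>
#include <stdatomic.h>

#define CACHE_LINE 64  /* assumed line size; 64 bytes on common x86/arm64 */

struct toy_task_group {
	long weight;  /* read-mostly */
	/* heavily written from every CPU at tick time: isolate it on
	 * its own cache line so the writes do not ping-pong the line
	 * holding weight */
	alignas(CACHE_LINE) atomic_long load_avg;
};

static void add_load(struct toy_task_group *tg, long delta)
{
	atomic_fetch_add_explicit(&tg->load_avg, delta,
				  memory_order_relaxed);
}

int main(void)
{
	struct toy_task_group tg = { .weight = 1024 };

	add_load(&tg, 512);
	return atomic_load(&tg.load_avg) == 512 ? 0 : 1;
}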
/linux-master/include/linux/
sched.h
409 * has a few: load, load_avg, util_avg, freq, and capacity.
430 * [load_avg definition]
432 * load_avg = runnable% * scale_load_down(load)
466 * Max(load_avg) <= Max(load.weight)
477 unsigned long load_avg; member in struct:sched_avg
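The [load_avg definition] block quoted above from include/linux/sched.h gives load_avg = runnable% * scale_load_down(load), with the invariant Max(load_avg) <= Max(load.weight) (line 466). A worked instance of that formula, assuming the nice-0 scaled weight of 1024:

#include <stdio.h>

/*
 * Worked instance of the quoted definition
 * load_avg = runnable% * scale_load_down(load); the weight and
 * runnable fractions below are illustrative values only.
 */
int main(void)
{
	unsigned long weight = 1024;  /* scale_load_down(load), nice-0 */
	double runnable_pct[] = { 0.25, 0.75, 1.0 };
	int i;

	for (i = 0; i < 3; i++)
		printf("runnable %3.0f%% -> load_avg ~= %4.0f\n",
		       runnable_pct[i] * 100, runnable_pct[i] * weight);

	/* at 100% runnable, load_avg saturates at load.weight, which is
	 * exactly the Max(load_avg) <= Max(load.weight) bound above */
	return 0;
}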

Completed in 165 milliseconds