Lines Matching defs:delta

287  * delta /= w
289 static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)
292 delta = __calc_delta(delta, NICE_0_LOAD, &se->load);
294 return delta;
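
Lines 287-294 are calc_delta_fair(): runtime is scaled by NICE_0_LOAD relative to the entity's weight, with __calc_delta() doing a fixed-point multiply/shift instead of a division. A minimal userspace model of the arithmetic (the _model names are made up here; NICE_0_LOAD is taken as 1024 purely for illustration, the kernel's value depends on its load-weight resolution):

#include <stdint.h>
#include <stdio.h>

#define NICE_0_LOAD_MODEL 1024ULL	/* illustrative value only */

/*
 * Wall-clock runtime is charged as vruntime scaled by NICE_0_LOAD / weight,
 * so heavier (lower-nice) entities accrue vruntime more slowly. The kernel's
 * __calc_delta() avoids the division with a precomputed inverse weight and
 * shifts; the plain division below only models the result.
 */
static uint64_t calc_delta_fair_model(uint64_t delta_ns, uint64_t weight)
{
	if (weight == NICE_0_LOAD_MODEL)	/* fast path: nice-0 runs at wall-clock rate */
		return delta_ns;
	return delta_ns * NICE_0_LOAD_MODEL / weight;
}

int main(void)
{
	/* 10 ms of runtime charged at nice-0 weight vs. at twice that weight */
	printf("%llu\n", (unsigned long long)calc_delta_fair_model(10000000ULL, 1024ULL));
	printf("%llu\n", (unsigned long long)calc_delta_fair_model(10000000ULL, 2048ULL));
	return 0;
}
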
528 s64 delta = (s64)(vruntime - max_vruntime);
529 if (delta > 0)
537 s64 delta = (s64)(vruntime - min_vruntime);
538 if (delta < 0)
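
Lines 528-538 are max_vruntime()/min_vruntime(): the u64 vruntimes are compared through a signed delta so the result stays correct even after the counters wrap around. A standalone model of the same wrap-safe comparison (the _model names are hypothetical):

#include <stdint.h>

static uint64_t max_vruntime_model(uint64_t max_vruntime, uint64_t vruntime)
{
	int64_t delta = (int64_t)(vruntime - max_vruntime);	/* wrap-safe signed distance */
	if (delta > 0)
		max_vruntime = vruntime;
	return max_vruntime;
}

static uint64_t min_vruntime_model(uint64_t min_vruntime, uint64_t vruntime)
{
	int64_t delta = (int64_t)(vruntime - min_vruntime);
	if (delta < 0)
		min_vruntime = vruntime;
	return min_vruntime;
}
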
641 void avg_vruntime_update(struct cfs_rq *cfs_rq, s64 delta)
646 cfs_rq->avg_vruntime -= cfs_rq->avg_load * delta;
753 s64 delta = (s64)(vruntime - min_vruntime);
754 if (delta > 0) {
755 avg_vruntime_update(cfs_rq, delta);
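
Lines 641-755 tie min_vruntime advancement to the average-vruntime bookkeeping: avg_vruntime stores \Sum (v_i - min_vruntime) * w_i, so when min_vruntime moves forward by delta the whole weighted sum drops by avg_load * delta. A simplified model under that assumption (struct and function names are illustrative only):

#include <stdint.h>

struct cfs_rq_avg_model {
	int64_t  avg_vruntime;	/* \Sum (v_i - min_vruntime) * w_i */
	uint64_t avg_load;	/* \Sum w_i */
	uint64_t min_vruntime;
};

/* Advancing the reference point by delta shrinks every key (v_i - v0) by
 * delta, so the weighted sum loses avg_load * delta (line 646). */
static void avg_vruntime_update_model(struct cfs_rq_avg_model *cfs_rq, int64_t delta)
{
	cfs_rq->avg_vruntime -= (int64_t)cfs_rq->avg_load * delta;
}

/* Mirrors the open-coded max_vruntime() at lines 753-755: only move
 * min_vruntime forward, and adjust the average bookkeeping first. */
static void update_min_vruntime_model(struct cfs_rq_avg_model *cfs_rq, uint64_t vruntime)
{
	int64_t delta = (int64_t)(vruntime - cfs_rq->min_vruntime);
	if (delta > 0) {
		avg_vruntime_update_model(cfs_rq, delta);
		cfs_rq->min_vruntime = vruntime;
	}
}
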
1214 * will be 0, so the computed delta would be wrong. We need to avoid this
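
The comment at line 1214 describes a schedstats corner case: if stats were enabled while an entity was already queued, its wait_start timestamp is still 0 and "now - wait_start" would be a huge, meaningless delta, so such samples are skipped. A tiny sketch of that guard (hypothetical helper, not the kernel function):

#include <stdint.h>

static uint64_t wait_delta_or_skip(uint64_t now, uint64_t wait_start)
{
	if (!wait_start)	/* no valid start timestamp recorded: discard the sample */
		return 0;
	return now - wait_start;
}
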
2688 u64 runtime, delta, now;
2694 delta = runtime - p->last_sum_exec_runtime;
2701 delta = p->se.avg.load_sum;
2708 return delta;
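
Lines 2688-2708 are the NUMA-balancing average-runtime sampling: delta is the exec runtime accumulated since the previous placement, and on the very first sample (no previous timestamp) the PELT load_sum is used as a stand-in. A simplified model of that pattern (field names are stand-ins for the real task_struct/sched_avg fields):

#include <stdint.h>

struct numa_sample_model {
	uint64_t sum_exec_runtime;
	uint64_t last_sum_exec_runtime;
	uint64_t last_placement;	/* 0 means "no sample taken yet" */
	uint64_t load_sum_fallback;	/* stands in for se.avg.load_sum */
};

static uint64_t runtime_since_last_placement(struct numa_sample_model *p, uint64_t now)
{
	uint64_t runtime = p->sum_exec_runtime;
	uint64_t delta;

	if (p->last_placement)
		delta = runtime - p->last_sum_exec_runtime;	/* line 2694 */
	else
		delta = p->load_sum_fallback;			/* line 2701: first sample */

	p->last_sum_exec_runtime = runtime;
	p->last_placement = now;
	return delta;
}
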
4100 long delta;
4121 delta = cfs_rq->avg.load_avg - cfs_rq->tg_load_avg_contrib;
4122 if (abs(delta) > cfs_rq->tg_load_avg_contrib / 64) {
4123 atomic_long_add(delta, &cfs_rq->tg->load_avg);
4131 long delta;
4141 delta = 0 - cfs_rq->tg_load_avg_contrib;
4142 atomic_long_add(delta, &cfs_rq->tg->load_avg);
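
Lines 4100-4142 are the task-group load propagation: the difference between the cfs_rq's current load_avg and what it last contributed is only folded into the shared tg->load_avg when it exceeds 1/64 of the previous contribution, which limits traffic on that global atomic; on teardown the full contribution is removed (lines 4141-4142). A userspace model of the 1/64 filter (plain longs stand in for the kernel's atomic_long_t):

#include <stdint.h>
#include <stdlib.h>

struct tg_model { long load_avg; };

struct tg_cfs_rq_model {
	unsigned long load_avg;		/* current local average */
	long tg_load_avg_contrib;	/* what was last folded into tg->load_avg */
	struct tg_model *tg;
};

static void update_tg_load_avg_model(struct tg_cfs_rq_model *cfs_rq)
{
	long delta = (long)cfs_rq->load_avg - cfs_rq->tg_load_avg_contrib;

	/* Only publish changes bigger than 1/64 of the last contribution. */
	if (labs(delta) > cfs_rq->tg_load_avg_contrib / 64) {
		cfs_rq->tg->load_avg += delta;	/* atomic_long_add() in the kernel */
		cfs_rq->tg_load_avg_contrib = (long)cfs_rq->load_avg;
	}
}
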
4513 * cfs_idle_lag (delta between rq's update and cfs_rq's update)
4516 * rq_idle_lag (delta between now and rq's update)
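
Lines 4513-4516 come from the comment explaining how a migrating entity's "now" is estimated while its old CPU was idle: the cfs_rq's last load_avg update time, plus the lag between the cfs_rq's and the rq's updates, plus the lag between the rq's update and the present. A one-line model of that sum (hypothetical helper name):

#include <stdint.h>

static uint64_t estimated_now(uint64_t last_update_time,
			      uint64_t cfs_idle_lag, uint64_t rq_idle_lag)
{
	/* estimated "now" = last load_avg update + cfs idle lag + rq idle lag */
	return last_update_time + cfs_idle_lag + rq_idle_lag;
}
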
5730 u64 delta = rq_clock(rq) - cfs_rq->throttled_clock_self;
5734 if (SCHED_WARN_ON((s64)delta < 0))
5735 delta = 0;
5737 cfs_rq->throttled_clock_self_time += delta;
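
Lines 5730-5737 account the time a cfs_rq spent throttled: the delta between "now" and the timestamp taken when throttling began is clamped at zero (a negative value would indicate inconsistent clocks, hence the warning) before being added to the running total. A compact model (struct and helper names are illustrative):

#include <stdint.h>

struct throttle_stats_model {
	uint64_t throttled_clock_self;		/* timestamp when throttling started */
	uint64_t throttled_clock_self_time;	/* accumulated throttled time */
};

static void account_throttled_time(struct throttle_stats_model *st, uint64_t now)
{
	int64_t delta = (int64_t)(now - st->throttled_clock_self);

	st->throttled_clock_self = 0;
	if (delta < 0)		/* SCHED_WARN_ON() in the kernel (line 5734) */
		delta = 0;

	st->throttled_clock_self_time += (uint64_t)delta;
}
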
6646 s64 delta = slice - ran;
6648 if (delta < 0) {
6653 hrtick_start(rq, delta);
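
Lines 6646-6653 are the hrtick arming decision: delta is how much of the current slice remains; if it has already gone negative the task is overdue for preemption, otherwise a high-resolution timer is armed for the remainder. A sketch of that decision (hypothetical helper, returning true when an immediate reschedule is needed):

#include <stdint.h>
#include <stdbool.h>

static bool hrtick_remaining(uint64_t slice_ns, uint64_t ran_ns, uint64_t *remaining_ns)
{
	int64_t delta = (int64_t)(slice_ns - ran_ns);	/* line 6646 */

	if (delta < 0) {
		*remaining_ns = 0;
		return true;		/* resched_curr() in the kernel */
	}
	*remaining_ns = (uint64_t)delta;	/* hrtick_start(rq, delta) at line 6653 */
	return false;
}
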
8883 s64 delta;
8917 delta = rq_clock_task(env->src_rq) - p->se.exec_start;
8919 return delta < (s64)sysctl_sched_migration_cost;
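
Lines 8883-8919 are part of task_hot(): a task is treated as cache-hot, and so is skipped by the load balancer where possible, when it last started executing less than sysctl_sched_migration_cost nanoseconds ago (500us by default). A minimal model of that check:

#include <stdint.h>
#include <stdbool.h>

static bool task_is_cache_hot(uint64_t now_ns, uint64_t exec_start_ns,
			      uint64_t migration_cost_ns)
{
	int64_t delta = (int64_t)(now_ns - exec_start_ns);	/* line 8917 */

	return delta < (int64_t)migration_cost_ns;		/* line 8919 */
}
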
12616 s64 delta;
12646 * Find delta after normalizing se's vruntime with its cfs_rq's
12650 delta = (s64)(sea->vruntime - seb->vruntime) +
12653 return delta > 0;
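
Lines 12616-12653 are the core-scheduling priority comparison: vruntimes from two different runqueues are only comparable after normalizing each against its own cfs_rq's min_vruntime, so the delta is (va - min_a) - (vb - min_b), rearranged into two wrap-safe signed differences; a positive delta means task A's normalized vruntime is larger, i.e. B is the more deserving task. A standalone model of that comparison (the _model name is made up here):

#include <stdint.h>
#include <stdbool.h>

static bool prio_less_model(uint64_t vruntime_a, uint64_t min_vruntime_a,
			    uint64_t vruntime_b, uint64_t min_vruntime_b)
{
	int64_t delta = (int64_t)(vruntime_a - vruntime_b) +
			(int64_t)(min_vruntime_b - min_vruntime_a);	/* lines 12650-12651 */

	return delta > 0;	/* line 12653 */
}
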