Lines matching refs: now

339 		 * The branch is now connected to its tree so we can
1068 update_cfs_rq_load_avg(now, cfs_rq);
1108 u64 now = rq_clock_task(rq);
1111 delta_exec = now - curr->exec_start;
1115 curr->exec_start = now;
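The three hits above (1108-1115) are the classic update_curr() accounting pattern: sample the clock once, charge the elapsed delta, restamp. A minimal userspace sketch of that pattern, with monotonic_ns() standing in for rq_clock_task(); all names here are illustrative, not kernel API:

	#include <stdint.h>
	#include <stdio.h>
	#include <time.h>

	/* Stand-in for rq_clock_task(): a monotonic nanosecond clock. */
	static uint64_t monotonic_ns(void)
	{
		struct timespec ts;

		clock_gettime(CLOCK_MONOTONIC, &ts);
		return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
	}

	struct task_stats {
		uint64_t exec_start;	/* last time this task was charged */
		uint64_t sum_exec;	/* total charged runtime */
	};

	/* Charge elapsed time since the last stamp, then restamp. */
	static void charge_runtime(struct task_stats *t)
	{
		uint64_t now = monotonic_ns();
		uint64_t delta_exec = now - t->exec_start;

		t->sum_exec += delta_exec;
		t->exec_start = now;	/* next call charges only the new interval */
	}

	int main(void)
	{
		struct task_stats t = { .exec_start = monotonic_ns() };

		for (volatile long i = 0; i < 10000000; i++)
			;
		charge_runtime(&t);
		printf("charged %llu ns\n", (unsigned long long)t.sum_exec);
		return 0;
	}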
1783 unsigned int now, start;
1785 now = jiffies_to_msecs(jiffies);
1789 if (now - start > MSEC_PER_SEC &&
1790 cmpxchg(&pgdat->nbp_rl_start, start, now) == start)
1803 unsigned int now, start, th_period, unit_th, th;
1806 now = jiffies_to_msecs(jiffies);
1809 if (now - start > th_period &&
1810 cmpxchg(&pgdat->nbp_th_start, start, now) == start) {
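Lines 1783-1810 show the NUMA-balancing promotion rate-limit idiom: compare the elapsed window against a threshold, and let exactly one racing CPU reset the window start via cmpxchg(). A hedged userspace sketch of the same idiom using C11 atomics; window_start, WINDOW_MSEC and window_expired() are illustrative names, not kernel API:

	#include <stdatomic.h>
	#include <stdbool.h>

	#define WINDOW_MSEC 1000u	/* mirrors MSEC_PER_SEC in the hits above */

	static _Atomic unsigned int window_start;	/* ms stamp of window open */

	static bool window_expired(unsigned int now_msec)
	{
		unsigned int start = atomic_load(&window_start);

		/*
		 * Unsigned subtraction keeps the comparison correct across
		 * timestamp wraparound; the CAS lets exactly one racing
		 * caller open a new window, everyone else keeps charging
		 * against the current one.
		 */
		return now_msec - start > WINDOW_MSEC &&
		       atomic_compare_exchange_strong(&window_start, &start, now_msec);
	}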
2104 * Clear previous best_cpu/rq numa-migrate flag, since task now
2688 u64 runtime, delta, now;
2690 now = p->se.exec_start;
2695 *period = now - p->last_task_numa_placement;
2706 p->last_task_numa_placement = now;
3202 unsigned long migrate, next_scan, now = jiffies;
3229 mm->numa_next_scan = now +
3237 if (time_before(now, migrate))
3245 next_scan = now + msecs_to_jiffies(p->numa_scan_period);
3320 vma->numab_state->next_scan = now +
3422 * scanner to the start so check it now.
3495 u64 period, now;
3509 now = curr->se.sum_exec_runtime;
3512 if (now > curr->node_stamp + period) {
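The two clusters above schedule the NUMA scanner against deadlines: 3202-3245 use wrap-safe jiffies comparisons (time_before()) and rearm next_scan one scan period out, while 3495-3512 do the same check against the task's own runtime clock (sum_exec_runtime vs. node_stamp + period). A small sketch of the wrap-safe deadline check those helpers rely on; tick_t and scan_due() are illustrative:

	#include <stdbool.h>
	#include <stdint.h>

	typedef uint32_t tick_t;	/* stand-in for unsigned long jiffies */

	/* Wrap-safe "a is strictly before b", as in the kernel's time_before(). */
	static bool tick_before(tick_t a, tick_t b)
	{
		return (int32_t)(a - b) < 0;
	}

	/* Returns true when a scan is due, rearming the deadline one period out. */
	static bool scan_due(tick_t now, tick_t *next_scan, tick_t period)
	{
		if (tick_before(now, *next_scan))
			return false;		/* deadline not reached yet */
		*next_scan = now + period;
		return true;
	}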
4101 u64 now;
4117 now = sched_clock_cpu(cpu_of(rq_of(cfs_rq)));
4118 if (now - cfs_rq->last_update_tg_load_avg < NSEC_PER_MSEC)
4125 cfs_rq->last_update_tg_load_avg = now;
4132 u64 now;
4140 now = sched_clock_cpu(cpu_of(rq_of(cfs_rq)));
4144 cfs_rq->last_update_tg_load_avg = now;
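Lines 4101-4144 throttle tg_load_avg propagation: bail out unless at least NSEC_PER_MSEC has elapsed since last_update_tg_load_avg, then restamp. A sketch of that throttle under the same 1 ms assumption; struct and function names are illustrative:

	#include <stdbool.h>
	#include <stdint.h>

	#define NSEC_PER_MSEC 1000000ull

	struct throttled_stat {
		uint64_t last_update;	/* ns stamp of the last propagated update */
		long value;
	};

	/* Propagate at most once per millisecond; callers in between see a
	 * slightly stale value, which is acceptable for a load estimate. */
	static bool maybe_update(struct throttled_stat *s, uint64_t now_ns, long v)
	{
		if (now_ns - s->last_update < NSEC_PER_MSEC)
			return false;	/* rate-limited: skip this update */
		s->value = v;
		s->last_update = now_ns;
		return true;
	}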
4482 u64 throttled = 0, now, lut;
4506 * Estimated "now" is: last_update_time + cfs_idle_lag + rq_idle_lag, where:
4516 * rq_idle_lag (delta between now and rq's update)
4521 * now = rq_clock_pelt()@rq_idle - cfs->throttled_clock_pelt_time +
4536 now = u64_u32_load(rq->clock_pelt_idle);
4546 now -= throttled;
4547 if (now < lut)
4552 now = lut;
4554 now += sched_clock_cpu(cpu_of(rq)) - u64_u32_load(rq->clock_idle);
4556 __update_load_avg_blocked_se(now, se);
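Lines 4482-4556 reconstruct an estimated "now" for a blocked entity after its runqueue went idle: start from the rq's idle PELT stamp, subtract throttled time, clamp so the estimate never runs behind the entity's own last update, then add the wall-clock lag since the rq idled. A condensed sketch of that clamp-and-add arithmetic; parameter names are illustrative:

	#include <stdint.h>

	static uint64_t estimate_now(uint64_t clock_pelt_idle,	/* PELT clock @ rq idle */
				     uint64_t throttled,	/* throttled PELT time */
				     uint64_t last_update_time,	/* entity's last update */
				     uint64_t wallclock_lag)	/* clock delta since idle */
	{
		uint64_t now = clock_pelt_idle - throttled;

		/*
		 * If the entity was updated after the rq's idle stamp, the
		 * estimate would run backwards; clamp it to the entity's
		 * own stamp before adding the wall-clock lag.
		 */
		if (now < last_update_time)
			now = last_update_time;

		return now + wallclock_lag;
	}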
4564 * @now: current time, as per cfs_rq_clock_pelt()
4578 update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
4634 decayed |= __update_load_avg_cfs_rq(now, cfs_rq);
4737 u64 now = cfs_rq_clock_pelt(cfs_rq);
4745 __update_load_avg_se(now, cfs_rq, se);
4747 decayed = update_cfs_rq_load_avg(now, cfs_rq);
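Lines 4737-4747 read cfs_rq_clock_pelt() once and feed the same 'now' to both the entity update and the cfs_rq update, so both averages decay against an identical timestamp (the point the comments at 5298 and 5384 below make explicit). A toy sketch of that shape; the halving decay here is illustrative, not the PELT formula:

	#include <stdint.h>

	#define PERIOD_NS (1024 * 1024)	/* toy decay period */

	struct load_avg {
		uint64_t last_update;
		unsigned long avg;
	};

	/* Toy geometric decay: halve the average once per elapsed period. */
	static void decay(struct load_avg *la, uint64_t now)
	{
		uint64_t periods = (now - la->last_update) / PERIOD_NS;

		la->avg = periods >= 32 ? 0 : la->avg >> periods;
		la->last_update += periods * PERIOD_NS;
	}

	/* One clock read, one 'now', both averages stay in sync. */
	static void update_load(uint64_t now, struct load_avg *se,
				struct load_avg *cfs_rq)
	{
		decay(se, now);
		decay(cfs_rq, now);
	}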
4792 * itself from the cfs_rq (task must be off the queue now).
5298 * - Update loads to have both entity and cfs_rq synced with now.
5315 * XXX now that the entity has been re-weighted, and its lag adjusted,
5384 * - Update loads to have both entity and cfs_rq synced with now.
7047 * wake_affine_idle() - only considers 'now', it checks if the waking CPU is
7586 * per-cpu kthread that is now complete and the wakeup is
9105 * Right now, this is only the second place where
9249 * Right now, this is one of only two places we collect this stat
9356 u64 now = rq_clock_pelt(rq);
9368 decayed = update_rt_rq_load_avg(now, rq, curr_class == &rt_sched_class) |
9369 update_dl_rq_load_avg(now, rq, curr_class == &dl_sched_class) |
9370 update_hw_load_avg(now, rq, hw_pressure) |
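Lines 9356-9370 combine several PELT updates with bitwise '|' rather than logical '||': short-circuit evaluation would skip the later updates as soon as one reports decay, and every class's average must be advanced each time. A sketch of that detail; the update functions here are placeholders:

	#include <stdbool.h>
	#include <stdint.h>

	static bool update_rt(uint64_t now) { return now & 1; }	/* placeholder */
	static bool update_dl(uint64_t now) { return now & 2; }	/* placeholder */
	static bool update_hw(uint64_t now) { return now & 4; }	/* placeholder */

	static bool update_all(uint64_t now)
	{
		/*
		 * Bitwise '|' evaluates every operand; '||' would stop
		 * after the first update that reports decay and leave the
		 * later averages stale.
		 */
		return update_rt(now) | update_dl(now) | update_hw(now);
	}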
9433 unsigned long now = jiffies;
9436 if (cfs_rq->last_h_load_update == now)
9443 if (cfs_rq->last_h_load_update == now)
9449 cfs_rq->last_h_load_update = now;
9458 cfs_rq->last_h_load_update = now;
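Lines 9433-9459 memoize the hierarchical load per jiffy: if last_h_load_update already equals this tick, the cached value is reused; otherwise the hierarchy is walked and the stamp refreshed. A sketch of that per-tick caching; names are illustrative:

	#include <stdint.h>

	struct cached {
		uint64_t stamp;		/* tick at which 'value' was computed */
		unsigned long value;
	};

	static unsigned long recompute(void)
	{
		return 42;	/* stands in for the expensive hierarchy walk */
	}

	/* At most one recomputation per tick; later callers within the
	 * same tick share the cached result. */
	static unsigned long get_value(struct cached *c, uint64_t now_tick)
	{
		if (c->stamp != now_tick) {
			c->value = recompute();
			c->stamp = now_tick;
		}
		return c->value;
	}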
10113 * XXX for now avg_load is not computed and always 0 so we
11411 * nohz-idle), we now have balance_cpu in a position to move
11916 unsigned long now = jiffies;
11939 time_after(now, READ_ONCE(nohz.next_blocked)))
11942 if (time_before(now, nohz.next_balance))
12163 unsigned long now = jiffies;
12164 unsigned long next_balance = now + 60*HZ;
12251 now + msecs_to_jiffies(LOAD_AVG_PERIOD));
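Lines 11916-12251 gate nohz balancing on jiffies deadlines: kick only once 'now' has passed nohz.next_blocked / nohz.next_balance, and start each rebalance pass with next_balance = now + 60*HZ as a far-future default that every visited runqueue can pull earlier. A sketch of that pull-earlier fold; tick_t, HZ value and function names are illustrative:

	#include <stdint.h>

	#define HZ 250u		/* illustrative tick rate */

	typedef uint32_t tick_t;

	static tick_t earlier(tick_t a, tick_t b)
	{
		return (int32_t)(a - b) < 0 ? a : b;	/* wrap-safe min */
	}

	/* Fold each runqueue's deadline into the earliest next balance time. */
	static tick_t next_balance_of(tick_t now, const tick_t *rq_deadline, int n)
	{
		tick_t next = now + 60 * HZ;	/* far-future default, as above */

		for (int i = 0; i < n; i++)
			next = earlier(next, rq_deadline[i]);
		return next;
	}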
12439 * now runnable tasks on this rq.