Lines Matching refs:rq

93 struct rq;
105 extern void calc_global_load_tick(struct rq *this_rq);
106 extern long calc_load_fold_active(struct rq *this_rq, long adjust);
108 extern void call_trace_sched_update_nr_running(struct rq *rq, int count);
270 /* nests inside the rq lock: */
322 * dl_se::rq -- runqueue we belong to.
341 extern void dl_server_init(struct sched_dl_entity *dl_se, struct rq *rq,
642 struct rq *rq; /* CPU runqueue to which this cfs_rq is attached */
713 /* Nests inside the rq lock: */
719 struct rq *rq;
739 * earliest ready task on this rq. Caching these facilitates
751 * Tasks on this rq that can be pushed away. They are kept in
780 * tasks of this rq. Used in calculation of reclaimable bandwidth (GRUB).
919 extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
943 * struct uclamp_rq - rq's utilization clamp
944 * @value: currently active clamp values for a rq
945 * @bucket: utilization clamp buckets affecting a rq
947 * Keep track of RUNNABLE tasks on a rq to aggregate their clamp values.
948 * A clamp value is affecting a rq when there is at least one task RUNNABLE
962 * the metrics required to compute all the per-rq utilization clamp values.
972 struct rq;
975 void (*func)(struct rq *rq);
985 struct rq {
1134 /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
1160 /* per rq */
1161 struct rq *core;
1189 static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
1191 return cfs_rq->rq;
1196 static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
1198 return container_of(cfs_rq, struct rq, cfs);
1202 static inline int cpu_of(struct rq *rq)
1205 return rq->cpu;
1222 DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
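The accessors above (rq_of(), cpu_of()) and the per-CPU 'runqueues' declaration are how the scheduler hops between a cfs_rq, its owning rq and a CPU number; sched.h layers its cpu_rq()/this_rq()/task_rq()-style macros on top of the same per-CPU array. A minimal sketch with hypothetical helper names, assuming only the standard per_cpu() accessor:

/* Hypothetical helpers, for illustration only. */
static inline struct rq *my_cpu_rq(int cpu)
{
        return &per_cpu(runqueues, cpu);        /* the per-CPU 'struct rq' instance */
}

static inline int cfs_rq_cpu(struct cfs_rq *cfs_rq)
{
        return cpu_of(rq_of(cfs_rq));           /* cfs_rq -> owning rq -> CPU id */
}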
1236 static inline bool sched_core_enabled(struct rq *rq)
1238 return static_branch_unlikely(&__sched_core_enabled) && rq->core_enabled;
1248 * stable unless you actually hold a relevant rq->__lock.
1250 static inline raw_spinlock_t *rq_lockp(struct rq *rq)
1252 if (sched_core_enabled(rq))
1253 return &rq->core->__lock;
1255 return &rq->__lock;
1258 static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
1260 if (rq->core_enabled)
1261 return &rq->core->__lock;
1263 return &rq->__lock;
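With core scheduling enabled, rq_lockp()/__rq_lockp() may return the core-wide lock rather than the rq's own __lock, and that choice is only stable while the lock is actually held. A minimal sketch of the retry pattern a lock helper therefore needs (the helper name is hypothetical; the real raw_spin_rq_lock_nested() in kernel/sched/core.c uses a similar re-check):

/* Hypothetical: take whatever lock currently guards @rq. */
static void my_rq_lock(struct rq *rq)
{
        raw_spinlock_t *lock;

        for (;;) {
                lock = __rq_lockp(rq);
                raw_spin_lock(lock);
                /* The core lock pointer may have changed while we were spinning. */
                if (likely(lock == __rq_lockp(rq)))
                        return;
                raw_spin_unlock(lock);
        }
}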
1268 void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi);
1276 static inline bool sched_cpu_cookie_match(struct rq *rq, struct task_struct *p)
1279 if (!sched_core_enabled(rq))
1282 return rq->core->core_cookie == p->core_cookie;
1285 static inline bool sched_core_cookie_match(struct rq *rq, struct task_struct *p)
1291 if (!sched_core_enabled(rq))
1294 for_each_cpu(cpu, cpu_smt_mask(cpu_of(rq))) {
1305 return idle_core || rq->core->core_cookie == p->core_cookie;
1308 static inline bool sched_group_cookie_match(struct rq *rq,
1315 if (!sched_core_enabled(rq))
1330 extern void sched_core_enqueue(struct rq *rq, struct task_struct *p);
1331 extern void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags);
1338 static inline bool sched_core_enabled(struct rq *rq)
1348 static inline raw_spinlock_t *rq_lockp(struct rq *rq)
1350 return &rq->__lock;
1353 static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
1355 return &rq->__lock;
1358 static inline bool sched_cpu_cookie_match(struct rq *rq, struct task_struct *p)
1363 static inline bool sched_core_cookie_match(struct rq *rq, struct task_struct *p)
1368 static inline bool sched_group_cookie_match(struct rq *rq,
1376 static inline void lockdep_assert_rq_held(struct rq *rq)
1378 lockdep_assert_held(__rq_lockp(rq));
1381 extern void raw_spin_rq_lock_nested(struct rq *rq, int subclass);
1382 extern bool raw_spin_rq_trylock(struct rq *rq);
1383 extern void raw_spin_rq_unlock(struct rq *rq);
1385 static inline void raw_spin_rq_lock(struct rq *rq)
1387 raw_spin_rq_lock_nested(rq, 0);
1390 static inline void raw_spin_rq_lock_irq(struct rq *rq)
1393 raw_spin_rq_lock(rq);
1396 static inline void raw_spin_rq_unlock_irq(struct rq *rq)
1398 raw_spin_rq_unlock(rq);
1402 static inline unsigned long _raw_spin_rq_lock_irqsave(struct rq *rq)
1406 raw_spin_rq_lock(rq);
1410 static inline void raw_spin_rq_unlock_irqrestore(struct rq *rq, unsigned long flags)
1412 raw_spin_rq_unlock(rq);
1416 #define raw_spin_rq_lock_irqsave(rq, flags) \
1418 flags = _raw_spin_rq_lock_irqsave(rq); \
1422 extern void __update_idle_core(struct rq *rq);
1424 static inline void update_idle_core(struct rq *rq)
1427 __update_idle_core(rq);
1431 static inline void update_idle_core(struct rq *rq) { }
1470 struct rq *rq = task_rq(p);
1472 return &rq->cfs;
1482 extern void update_rq_clock(struct rq *rq);
1485 * rq::clock_update_flags bits
1489 * neighbouring rq clock updates.
1495 * made to update_rq_clock() since the last time rq::lock was pinned.
1501 * if (rq->clock_update_flags >= RQCF_UPDATED)
1511 static inline void assert_clock_updated(struct rq *rq)
1517 SCHED_WARN_ON(rq->clock_update_flags < RQCF_ACT_SKIP);
1520 static inline u64 rq_clock(struct rq *rq)
1522 lockdep_assert_rq_held(rq);
1523 assert_clock_updated(rq);
1525 return rq->clock;
1528 static inline u64 rq_clock_task(struct rq *rq)
1530 lockdep_assert_rq_held(rq);
1531 assert_clock_updated(rq);
1533 return rq->clock_task;
1549 static inline u64 rq_clock_thermal(struct rq *rq)
1551 return rq_clock_task(rq) >> sched_thermal_decay_shift;
1554 static inline void rq_clock_skip_update(struct rq *rq)
1556 lockdep_assert_rq_held(rq);
1557 rq->clock_update_flags |= RQCF_REQ_SKIP;
1564 static inline void rq_clock_cancel_skipupdate(struct rq *rq)
1566 lockdep_assert_rq_held(rq);
1567 rq->clock_update_flags &= ~RQCF_REQ_SKIP;
1571 * During cpu offlining and rq wide unthrottling, we can trigger
1577 * to clear RQCF_ACT_SKIP of rq->clock_update_flags.
1579 static inline void rq_clock_start_loop_update(struct rq *rq)
1581 lockdep_assert_rq_held(rq);
1582 SCHED_WARN_ON(rq->clock_update_flags & RQCF_ACT_SKIP);
1583 rq->clock_update_flags |= RQCF_ACT_SKIP;
1586 static inline void rq_clock_stop_loop_update(struct rq *rq)
1588 lockdep_assert_rq_held(rq);
1589 rq->clock_update_flags &= ~RQCF_ACT_SKIP;
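rq_clock() and rq_clock_task() assert (under SCHED_DEBUG) that the rq lock is held and that update_rq_clock() has run since the lock was pinned, so the usual pattern is lock, update, read; rq_clock_skip_update() lets a path that knows __schedule() will follow shortly skip the next update. A minimal sketch, with a hypothetical function name:

/* Hypothetical: sample the task clock of a runqueue. */
static u64 read_rq_task_clock(struct rq *rq)
{
        struct rq_flags rf;
        u64 now;

        rq_lock(rq, &rf);
        update_rq_clock(rq);            /* satisfies assert_clock_updated() */
        now = rq_clock_task(rq);
        rq_unlock(rq, &rf);

        return now;
}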
1597 * A copy of (rq::clock_update_flags & RQCF_UPDATED) for the
1611 * This prevents code that has access to 'struct rq *rq' (basically everything in
1612 * the scheduler) from accidentally unlocking the rq if it does not also have a
1617 static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf)
1619 rf->cookie = lockdep_pin_lock(__rq_lockp(rq));
1622 rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP);
1625 SCHED_WARN_ON(rq->balance_callback && rq->balance_callback != &balance_push_callback);
1630 static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf)
1633 if (rq->clock_update_flags > RQCF_ACT_SKIP)
1637 lockdep_unpin_lock(__rq_lockp(rq), rf->cookie);
1640 static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf)
1642 lockdep_repin_lock(__rq_lockp(rq), rf->cookie);
1648 rq->clock_update_flags |= rf->clock_update_flags;
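The pin/unpin/repin trio brackets any region where the rq lock is legitimately dropped while a 'struct rq_flags' cookie is live, so lockdep can catch code that releases the lock without owning the cookie. A minimal sketch of the drop-and-reacquire pattern used by callers that must temporarily let go of the lock:

        rq_unpin_lock(rq, rf);          /* hand the lockdep cookie back first */
        raw_spin_rq_unlock(rq);

        /* ... work that must run without the rq lock held ... */

        raw_spin_rq_lock(rq);
        rq_repin_lock(rq, rf);          /* re-arm the cookie, restore saved clock flags */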
1652 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
1653 __acquires(rq->lock);
1655 struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
1657 __acquires(rq->lock);
1659 static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
1660 __releases(rq->lock)
1662 rq_unpin_lock(rq, rf);
1663 raw_spin_rq_unlock(rq);
1667 task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
1668 __releases(rq->lock)
1671 rq_unpin_lock(rq, rf);
1672 raw_spin_rq_unlock(rq);
1677 _T->rq = task_rq_lock(_T->lock, &_T->rf),
1678 task_rq_unlock(_T->rq, _T->lock, &_T->rf),
1679 struct rq *rq; struct rq_flags rf)
1682 rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
1683 __acquires(rq->lock)
1685 raw_spin_rq_lock_irqsave(rq, rf->flags);
1686 rq_pin_lock(rq, rf);
1690 rq_lock_irq(struct rq *rq, struct rq_flags *rf)
1691 __acquires(rq->lock)
1693 raw_spin_rq_lock_irq(rq);
1694 rq_pin_lock(rq, rf);
1698 rq_lock(struct rq *rq, struct rq_flags *rf)
1699 __acquires(rq->lock)
1701 raw_spin_rq_lock(rq);
1702 rq_pin_lock(rq, rf);
1706 rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)
1707 __releases(rq->lock)
1709 rq_unpin_lock(rq, rf);
1710 raw_spin_rq_unlock_irqrestore(rq, rf->flags);
1714 rq_unlock_irq(struct rq *rq, struct rq_flags *rf)
1715 __releases(rq->lock)
1717 rq_unpin_lock(rq, rf);
1718 raw_spin_rq_unlock_irq(rq);
1722 rq_unlock(struct rq *rq, struct rq_flags *rf)
1723 __releases(rq->lock)
1725 rq_unpin_lock(rq, rf);
1726 raw_spin_rq_unlock(rq);
1729 DEFINE_LOCK_GUARD_1(rq_lock, struct rq,
1734 DEFINE_LOCK_GUARD_1(rq_lock_irq, struct rq,
1739 DEFINE_LOCK_GUARD_1(rq_lock_irqsave, struct rq,
1744 static inline struct rq *
1746 __acquires(rq->lock)
1748 struct rq *rq;
1751 rq = this_rq();
1752 rq_lock(rq, rf);
1753 return rq;
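The rq_lock*() wrappers pair the raw lock with rq_pin_lock(), and the DEFINE_LOCK_GUARD_1() lines above additionally generate scope-based guards for them. Two equivalent ways a caller might take a runqueue lock, sketched assuming the standard guard()/scoped_guard() helpers from linux/cleanup.h:

        /* Explicit form. */
        struct rq_flags rf;

        rq_lock_irqsave(rq, &rf);
        /* ... rq locked and pinned, IRQs saved ... */
        rq_unlock_irqrestore(rq, &rf);

        /* Guard form: the unlock runs automatically when the scope ends. */
        scoped_guard(rq_lock_irqsave, rq) {
                /* ... rq locked and pinned here ... */
        }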
1804 queue_balance_callback(struct rq *rq,
1806 void (*func)(struct rq *rq))
1808 lockdep_assert_rq_held(rq);
1815 if (unlikely(head->next || rq->balance_callback == &balance_push_callback))
1819 head->next = rq->balance_callback;
1820 rq->balance_callback = head;
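queue_balance_callback() chains work onto rq->balance_callback while the rq lock is held; the callback only runs once the lock has been released, which is how the RT and deadline classes defer their push/pull balancing. A minimal sketch, assuming the 'struct balance_callback' head type used by the scheduler core; the per-CPU head and both function names are hypothetical:

/* Hypothetical: defer some balancing work until the rq lock is dropped. */
static DEFINE_PER_CPU(struct balance_callback, my_balance_head);

static void my_balance_fn(struct rq *rq)
{
        /* Runs after the queuing path has released the rq lock. */
}

static void my_queue_balance_work(struct rq *rq)
{
        lockdep_assert_rq_held(rq);
        queue_balance_callback(rq, &per_cpu(my_balance_head, cpu_of(rq)),
                               my_balance_fn);
}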
1828 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
1984 extern void __sched_core_account_forceidle(struct rq *rq);
1986 static inline void sched_core_account_forceidle(struct rq *rq)
1989 __sched_core_account_forceidle(rq);
1992 extern void __sched_core_tick(struct rq *rq);
1994 static inline void sched_core_tick(struct rq *rq)
1996 if (sched_core_enabled(rq) && schedstat_enabled())
1997 __sched_core_tick(rq);
2002 static inline void sched_core_account_forceidle(struct rq *rq) {}
2004 static inline void sched_core_tick(struct rq *rq) {}
2021 * holding both task_struct::pi_lock and rq::lock.
2153 static inline int task_current(struct rq *rq, struct task_struct *p)
2155 return rq->curr == p;
2158 static inline int task_on_cpu(struct rq *rq, struct task_struct *p)
2163 return task_current(rq, p);
2259 extern s64 update_curr_common(struct rq *rq);
2267 void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
2268 void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
2269 void (*yield_task) (struct rq *rq);
2270 bool (*yield_to_task)(struct rq *rq, struct task_struct *p);
2272 void (*wakeup_preempt)(struct rq *rq, struct task_struct *p, int flags);
2274 struct task_struct *(*pick_next_task)(struct rq *rq);
2276 void (*put_prev_task)(struct rq *rq, struct task_struct *p);
2277 void (*set_next_task)(struct rq *rq, struct task_struct *p, bool first);
2280 int (*balance)(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
2283 struct task_struct * (*pick_task)(struct rq *rq);
2287 void (*task_woken)(struct rq *this_rq, struct task_struct *task);
2291 void (*rq_online)(struct rq *rq);
2292 void (*rq_offline)(struct rq *rq);
2294 struct rq *(*find_lock_rq)(struct task_struct *p, struct rq *rq);
2297 void (*task_tick)(struct rq *rq, struct task_struct *p, int queued);
2302 * The switched_from() call is allowed to drop rq->lock, therefore we
2304 * rq->lock. They are however serialized by p->pi_lock.
2306 void (*switched_from)(struct rq *this_rq, struct task_struct *task);
2307 void (*switched_to) (struct rq *this_rq, struct task_struct *task);
2308 void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
2311 unsigned int (*get_rr_interval)(struct rq *rq,
2314 void (*update_curr)(struct rq *rq);
2325 static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
2327 WARN_ON_ONCE(rq->curr != prev);
2328 prev->sched_class->put_prev_task(rq, prev);
2331 static inline void set_next_task(struct rq *rq, struct task_struct *next)
2333 next->sched_class->set_next_task(rq, next, false);
2370 static inline bool sched_stop_runnable(struct rq *rq)
2372 return rq->stop && task_on_rq_queued(rq->stop);
2375 static inline bool sched_dl_runnable(struct rq *rq)
2377 return rq->dl.dl_nr_running > 0;
2380 static inline bool sched_rt_runnable(struct rq *rq)
2382 return rq->rt.rt_queued > 0;
2385 static inline bool sched_fair_runnable(struct rq *rq)
2387 return rq->cfs.nr_running > 0;
2390 extern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
2391 extern struct task_struct *pick_next_task_idle(struct rq *rq);
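The sched_*_runnable() helpers let a caller short-circuit the full class walk: when nothing above CFS has runnable work, the fair picker (or the idle picker) can be invoked directly, which is roughly the shape of the core pick path's fast path. A simplified, hypothetical sketch:

/* Hypothetical fast path; callers fall back to the full class walk on NULL. */
static struct task_struct *
pick_fair_or_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
        if (sched_stop_runnable(rq) || sched_dl_runnable(rq) || sched_rt_runnable(rq))
                return NULL;

        if (sched_fair_runnable(rq))
                return pick_next_task_fair(rq, prev, rf);

        return pick_next_task_idle(rq);
}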
2402 extern void trigger_load_balance(struct rq *rq);
2406 static inline struct task_struct *get_push_task(struct rq *rq)
2408 struct task_struct *p = rq->curr;
2410 lockdep_assert_rq_held(rq);
2412 if (rq->push_busy)
2421 rq->push_busy = true;
2430 static inline void idle_set_state(struct rq *rq,
2433 rq->idle_state = idle_state;
2436 static inline struct cpuidle_state *idle_get_state(struct rq *rq)
2440 return rq->idle_state;
2443 static inline void idle_set_state(struct rq *rq,
2448 static inline struct cpuidle_state *idle_get_state(struct rq *rq)
2467 extern void resched_curr(struct rq *rq);
2487 extern bool sched_can_stop_tick(struct rq *rq);
2495 static inline void sched_update_tick_dependency(struct rq *rq)
2497 int cpu = cpu_of(rq);
2502 if (sched_can_stop_tick(rq))
2509 static inline void sched_update_tick_dependency(struct rq *rq) { }
2512 static inline void add_nr_running(struct rq *rq, unsigned count)
2514 unsigned prev_nr = rq->nr_running;
2516 rq->nr_running = prev_nr + count;
2518 call_trace_sched_update_nr_running(rq, count);
2522 if (prev_nr < 2 && rq->nr_running >= 2) {
2523 if (!READ_ONCE(rq->rd->overload))
2524 WRITE_ONCE(rq->rd->overload, 1);
2528 sched_update_tick_dependency(rq);
2531 static inline void sub_nr_running(struct rq *rq, unsigned count)
2533 rq->nr_running -= count;
2535 call_trace_sched_update_nr_running(rq, -count);
2539 sched_update_tick_dependency(rq);
2542 extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
2543 extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);
2545 extern void wakeup_preempt(struct rq *rq, struct task_struct *p, int flags);
2578 static inline int hrtick_enabled(struct rq *rq)
2580 if (!cpu_active(cpu_of(rq)))
2582 return hrtimer_is_hres_active(&rq->hrtick_timer);
2585 static inline int hrtick_enabled_fair(struct rq *rq)
2589 return hrtick_enabled(rq);
2592 static inline int hrtick_enabled_dl(struct rq *rq)
2596 return hrtick_enabled(rq);
2599 void hrtick_start(struct rq *rq, u64 delay);
2603 static inline int hrtick_enabled_fair(struct rq *rq)
2608 static inline int hrtick_enabled_dl(struct rq *rq)
2613 static inline int hrtick_enabled(struct rq *rq)
2648 * acquire rq lock instead of rq_lock(). So at the end of these two functions
2650 * rq->clock_update_flags to avoid the WARN_DOUBLE_CLOCK warning.
2652 static inline void double_rq_clock_clear_update(struct rq *rq1, struct rq *rq2)
2661 static inline void double_rq_clock_clear_update(struct rq *rq1, struct rq *rq2) {}
2672 static inline bool rq_order_less(struct rq *rq1, struct rq *rq2)
2698 extern void double_rq_lock(struct rq *rq1, struct rq *rq2);
2703 * fair double_lock_balance: Safely acquires both rq->locks in a fair
2710 static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
2729 static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
2757 static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
2764 static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
2815 static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
2826 extern void set_rq_online (struct rq *rq);
2827 extern void set_rq_offline(struct rq *rq);
2838 static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
2855 static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
2866 DEFINE_LOCK_GUARD_2(double_rq_lock, struct rq,
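double_rq_lock() always takes the two locks in a stable order (rq_order_less()) to avoid ABBA deadlocks, and the DEFINE_LOCK_GUARD_2() line above wraps the pair the same way the single-rq guards do. A simplified, hypothetical sketch of moving a queued task between two runqueues under both locks; real migration paths additionally handle p->pi_lock, affinity and migration-disabled state, all omitted here:

/* Hypothetical and simplified: both rq locks held for the whole move. */
static void move_task_simple(struct rq *src, struct rq *dst, struct task_struct *p)
{
        guard(double_rq_lock)(src, dst);

        if (task_rq(p) != src || !task_on_rq_queued(p))
                return;

        deactivate_task(src, p, 0);
        set_task_cpu(p, cpu_of(dst));
        activate_task(dst, p, 0);
        wakeup_preempt(dst, p, 0);
}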
2922 extern void nohz_balance_exit_idle(struct rq *rq);
2924 static inline void nohz_balance_exit_idle(struct rq *rq) { }
2968 * @rq: Runqueue to carry out the update for.
2988 static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
2993 cpu_of(rq)));
2995 data->func(data, rq_clock(rq), flags);
2998 static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
3034 static inline unsigned long cpu_bw_dl(struct rq *rq)
3036 return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT;
3039 static inline unsigned long cpu_util_dl(struct rq *rq)
3041 return READ_ONCE(rq->avg_dl.util_avg);
3048 static inline unsigned long cpu_util_rt(struct rq *rq)
3050 return READ_ONCE(rq->avg_rt.util_avg);
3057 static inline unsigned long uclamp_rq_get(struct rq *rq,
3060 return READ_ONCE(rq->uclamp[clamp_id].value);
3063 static inline void uclamp_rq_set(struct rq *rq, enum uclamp_id clamp_id,
3066 WRITE_ONCE(rq->uclamp[clamp_id].value, value);
3069 static inline bool uclamp_rq_is_idle(struct rq *rq)
3071 return rq->uclamp_flags & UCLAMP_FLAG_IDLE;
3074 /* Is the rq being capped/throttled by uclamp_max? */
3075 static inline bool uclamp_rq_is_capped(struct rq *rq)
3083 rq_util = cpu_util_cfs(cpu_of(rq)) + cpu_util_rt(rq);
3084 max_util = READ_ONCE(rq->uclamp[UCLAMP_MAX].value);
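uclamp_rq_get()/uclamp_rq_set() are the accessors for the per-rq aggregated clamp values, and uclamp_rq_is_capped() above compares the rq's utilization against the aggregated UCLAMP_MAX. A minimal, hypothetical sketch of applying the rq's active clamps to a raw utilization value; real users also fold in the task's own effective clamps and handle the case where the aggregated min exceeds the max:

/* Hypothetical: clamp a utilization value with the rq's active clamps. */
static unsigned long clamp_util_with_rq(struct rq *rq, unsigned long util)
{
        unsigned long lo = uclamp_rq_get(rq, UCLAMP_MIN);
        unsigned long hi = uclamp_rq_get(rq, UCLAMP_MAX);

        util = min(util, hi);   /* cap first ... */
        util = max(util, lo);   /* ... so the min request wins if lo > hi */

        return util;
}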
3090 * When uclamp is compiled in, the aggregation at rq level is 'turned off'
3094 * Returns true if userspace opted-in to use uclamp and aggregation at rq level
3111 static inline bool uclamp_rq_is_capped(struct rq *rq) { return false; }
3118 static inline unsigned long uclamp_rq_get(struct rq *rq,
3127 static inline void uclamp_rq_set(struct rq *rq, enum uclamp_id clamp_id,
3132 static inline bool uclamp_rq_is_idle(struct rq *rq)
3139 static inline unsigned long cpu_util_irq(struct rq *rq)
3141 return READ_ONCE(rq->avg_irq.util_avg);
3154 static inline unsigned long cpu_util_irq(struct rq *rq)
3189 * - prior user-space memory accesses and store to rq->membarrier_state,
3190 * - store to rq->membarrier_state and following user-space memory accesses.
3191 * In the same way it provides those guarantees around store to rq->curr.
3193 static inline void membarrier_switch_mm(struct rq *rq,
3203 if (READ_ONCE(rq->membarrier_state) == membarrier_state)
3206 WRITE_ONCE(rq->membarrier_state, membarrier_state);
3209 static inline void membarrier_switch_mm(struct rq *rq,
3249 extern void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t);
3250 extern void task_tick_mm_cid(struct rq *rq, struct task_struct *curr);
3262 * the MM_CID_UNSET state without holding the rq lock, but the rq lock needs to
3323 * which owns a cid without holding a rq lock.
3340 static inline void mm_cid_snapshot_time(struct rq *rq, struct mm_struct *mm)
3342 struct mm_cid *pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu_of(rq));
3344 lockdep_assert_rq_held(rq);
3345 WRITE_ONCE(pcpu_cid->time, rq->clock);
3348 static inline int __mm_cid_get(struct rq *rq, struct mm_struct *mm)
3396 mm_cid_snapshot_time(rq, mm);
3400 static inline int mm_cid_get(struct rq *rq, struct mm_struct *mm)
3406 lockdep_assert_rq_held(rq);
3410 mm_cid_snapshot_time(rq, mm);
3417 cid = __mm_cid_get(rq, mm);
3422 static inline void switch_mm_cid(struct rq *rq,
3427 * Provide a memory barrier between rq->curr store and load of
3428 * {prev,next}->mm->pcpu_cid[cpu] on rq->curr->mm transition.
3441 * kernel -> kernel transition does not change rq->curr->mm
3447 * between rq->curr store and load of {prev,next}->mm->pcpu_cid[cpu].
3465 mm_cid_snapshot_time(rq, prev->mm);
3470 next->last_mm_cid = next->mm_cid = mm_cid_get(rq, next->mm);
3474 static inline void switch_mm_cid(struct rq *rq, struct task_struct *prev, struct task_struct *next) { }
3476 static inline void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t) { }
3477 static inline void task_tick_mm_cid(struct rq *rq, struct task_struct *curr) { }