Searched refs:this_rq (Results 1 - 10 of 10) sorted by relevance

/linux-master/kernel/sched/
loadavg.c
78 long calc_load_fold_active(struct rq *this_rq, long adjust) argument
82 nr_active = this_rq->nr_running - adjust;
83 nr_active += (int)this_rq->nr_uninterruptible;
85 if (nr_active != this_rq->calc_load_active) {
86 delta = nr_active - this_rq->calc_load_active;
87 this_rq->calc_load_active = nr_active;
251 calc_load_nohz_fold(this_rq());
265 struct rq *this_rq = this_rq(); local
270 this_rq
385 calc_global_load_tick(struct rq *this_rq) argument
[all...]
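Note: the loadavg.c hits (lines 78-87) cover calc_load_fold_active(), which folds a runqueue's runnable plus uninterruptible task count into the global load-average bookkeeping and returns only the change since the last fold. A reconstruction of that body from the snippet above (surrounding lines are filled in from memory and may differ slightly from the tree being browsed):

long calc_load_fold_active(struct rq *this_rq, long adjust)
{
	long nr_active, delta = 0;

	/* Runnable tasks, minus any the caller asks to ignore. */
	nr_active = this_rq->nr_running - adjust;
	/* Uninterruptible sleepers count toward the load average too. */
	nr_active += (int)this_rq->nr_uninterruptible;

	/* Report only the delta since this rq last folded its count. */
	if (nr_active != this_rq->calc_load_active) {
		delta = nr_active - this_rq->calc_load_active;
		this_rq->calc_load_active = nr_active;
	}

	return delta;
}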
sched.h
105 extern void calc_global_load_tick(struct rq *this_rq);
106 extern long calc_load_fold_active(struct rq *this_rq, long adjust);
1225 #define this_rq() this_cpu_ptr(&runqueues) macro
1751 rq = this_rq();
2287 void (*task_woken)(struct rq *this_rq, struct task_struct *task);
2306 void (*switched_from)(struct rq *this_rq, struct task_struct *task);
2307 void (*switched_to) (struct rq *this_rq, struct task_struct *task);
2308 void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
2710 static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
2711 __releases(this_rq
2715 raw_spin_rq_unlock(this_rq); variable
2746 raw_spin_rq_unlock(this_rq); variable
2757 double_lock_balance(struct rq *this_rq, struct rq *busiest) argument
[all...]
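Note: the sched.h hits show that this_rq() is nothing more than a per-CPU pointer lookup (line 1225, this_cpu_ptr(&runqueues)), so it must be used with preemption disabled, plus the per-class hooks that receive the local runqueue (lines 2287-2308). The _double_lock_balance()/double_lock_balance() hits (lines 2710-2757) are the "lock two runqueues without deadlocking" helper: if the second lock cannot be taken while the first is held, the local lock is dropped and both are re-acquired in a fixed order, and the caller is told its lock was released. A simplified sketch of that idea (abridged, not a verbatim copy of the header; the fast path and helper names follow upstream as I recall them):

/* Returns 1 if this_rq's lock was dropped and re-acquired, 0 otherwise. */
static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
{
	/* Fast path: grab the second lock without releasing our own. */
	if (likely(raw_spin_rq_trylock(busiest)))
		return 0;

	/* Slow path: drop our lock, then take both in a fixed order. */
	raw_spin_rq_unlock(this_rq);
	double_rq_lock(this_rq, busiest);

	return 1;
}

Callers such as pull_rt_task()/pull_dl_task() must therefore re-validate any state derived from this_rq after calling double_lock_balance().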
rt.c
588 return this_rq()->rd->span;
2270 rq = this_rq();
2300 static void pull_rt_task(struct rq *this_rq) argument
2302 int this_cpu = this_rq->cpu, cpu;
2306 int rt_overload_count = rt_overloaded(this_rq);
2319 cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask))
2324 tell_cpu_to_push(this_rq);
2329 for_each_cpu(cpu, this_rq->rd->rto_mask) {
2343 this_rq
[all...]
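Note: the rt.c hits trace pull_rt_task(): when another runqueue advertises RT overload, the local CPU either asks the overloaded CPU to push (the RT_PUSH_IPI path via tell_cpu_to_push(), line 2324) or walks rd->rto_mask and pulls a higher-priority task itself. An abridged sketch of the pull loop (the real function also rechecks priorities after double_lock_balance() may have dropped this_rq->lock, and handles the IPI path shown above):

static void pull_rt_task(struct rq *this_rq)
{
	int this_cpu = this_rq->cpu, cpu;
	bool resched = false;
	struct task_struct *p;
	struct rq *src_rq;

	/* Nothing to do unless some runqueue advertises RT overload. */
	if (likely(!rt_overloaded(this_rq)))
		return;

	for_each_cpu(cpu, this_rq->rd->rto_mask) {
		if (cpu == this_cpu)
			continue;

		src_rq = cpu_rq(cpu);

		/* May drop this_rq->lock to take both locks in order. */
		double_lock_balance(this_rq, src_rq);

		p = pick_highest_pushable_task(src_rq, this_cpu);
		if (p && p->prio < this_rq->rt.highest_prio.curr) {
			deactivate_task(src_rq, p, 0);
			set_task_cpu(p, this_cpu);
			activate_task(this_rq, p, 0);
			resched = true;
		}

		double_unlock_balance(this_rq, src_rq);
	}

	if (resched)
		resched_curr(this_rq);
}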
deadline.c
2468 static void pull_dl_task(struct rq *this_rq) argument
2470 int this_cpu = this_rq->cpu, cpu;
2476 if (likely(!dl_overloaded(this_rq)))
2485 for_each_cpu(cpu, this_rq->rd->dlo_mask) {
2495 if (this_rq->dl.dl_nr_running &&
2496 dl_time_before(this_rq->dl.earliest_dl.curr,
2500 /* Might drop this_rq->lock */
2502 double_lock_balance(this_rq, src_rq);
2519 dl_task_is_earliest_deadline(p, this_rq)) {
2536 activate_task(this_rq,
[all...]
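Note: pull_dl_task() (lines 2468-2536) is the deadline-class counterpart of pull_rt_task(): it only runs when dl_overloaded(this_rq) reports that another runqueue has pushable deadline tasks, walks rd->dlo_mask, and pulls a task only if it would improve the earliest deadline queued locally. The key guard, condensed from the truncated snippet at lines 2495-2496 (the second argument to dl_time_before() is reconstructed from memory):

	/*
	 * Skip a source CPU when the local runqueue already has an
	 * earlier deadline queued than anything that CPU could offer.
	 */
	if (this_rq->dl.dl_nr_running &&
	    dl_time_before(this_rq->dl.earliest_dl.curr,
			   src_rq->dl.earliest_dl.next))
		continue;

As with the RT path, double_lock_balance(this_rq, src_rq) may drop this_rq->lock, which is why the deadline comparison is repeated (dl_task_is_earliest_deadline(), line 2519) before the task is actually activated on this_rq.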
cputime.c
225 struct rq *rq = this_rq();
260 steal -= this_rq()->prev_steal_time;
263 this_rq()->prev_steal_time += steal;
403 } else if (p == this_rq()->idle) {
509 else if ((p != this_rq()->idle) || (irq_count() != HARDIRQ_OFFSET))
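Note: the cputime.c hits show two uses of the local runqueue: classifying ticks that land on the idle task (p == this_rq()->idle, lines 403/509) and paravirt steal-time accounting, where only the increment since the last sample is charged. A condensed sketch of the steal-time part around lines 260-263 (assumes CONFIG_PARAVIRT; surrounding code reconstructed from memory):

static u64 steal_account_process_time(u64 maxtime)
{
	if (static_key_false(&paravirt_steal_enabled)) {
		u64 steal;

		steal = paravirt_steal_clock(smp_processor_id());
		/* Charge only what accrued since the previous sample. */
		steal -= this_rq()->prev_steal_time;
		steal = min(steal, maxtime);
		account_steal_time(steal);
		this_rq()->prev_steal_time += steal;

		return steal;
	}
	return 0;
}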
fair.c
4831 static int newidle_balance(struct rq *this_rq, struct rq_flags *rf);
5976 if (rq == this_rq()) {
7561 this_rq()->nr_running <= 1 &&
7964 struct root_domain *rd = this_rq()->rd;
11259 static int load_balance(int this_cpu, struct rq *this_rq, argument
11272 .dst_rq = this_rq,
11981 SCHED_WARN_ON(rq != this_rq());
12093 static void _nohz_idle_balance(struct rq *this_rq, unsigned int flags) argument
12100 int this_cpu = this_rq->cpu;
12196 static bool nohz_idle_balance(struct rq *this_rq, enu argument
12242 nohz_newidle_balance(struct rq *this_rq) argument
12272 nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) argument
12277 nohz_newidle_balance(struct rq *this_rq) argument
12289 newidle_balance(struct rq *this_rq, struct rq_flags *rf) argument
12417 struct rq *this_rq = this_rq(); local
[all...]
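Note: the fair.c hits cluster around the load-balancing entry points that take the local runqueue: load_balance() fills its .dst_rq from this_rq (lines 11259-11272), and newidle_balance()/nohz_idle_balance() run when this_rq is about to go idle or on behalf of other idle CPUs. A heavily condensed sketch of the newidle_balance() contract (lines 4831/12289): it is entered with this_rq->lock held, drops it while scanning the sched domains, and returns whether it managed to pull work:

static int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
{
	int this_cpu = this_rq->cpu;
	int continue_balancing = 1;
	int pulled_task = 0;
	struct sched_domain *sd;

	if (!cpu_active(this_cpu))
		return 0;

	/* Drop this_rq->lock while other runqueues are scanned. */
	rq_unpin_lock(this_rq, rf);
	raw_spin_rq_unlock(this_rq);

	rcu_read_lock();
	for_each_domain(this_cpu, sd) {
		pulled_task = load_balance(this_cpu, this_rq, sd,
					   CPU_NEWLY_IDLE,
					   &continue_balancing);
		if (pulled_task || !continue_balancing)
			break;
	}
	rcu_read_unlock();

	raw_spin_rq_lock(this_rq);
	rq_repin_lock(this_rq, rf);

	return pulled_task;
}

The real function additionally weighs avg_idle against each domain's newidle balance cost, updates blocked load, and handles the case where a higher-priority class queued work while the lock was dropped.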
idle.c
19 idle_set_state(this_rq(), idle_state);
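Note: the idle.c hit records which cpuidle state the CPU is entering on its own runqueue, so other scheduler code can see how deeply this CPU is asleep. idle_set_state() itself is a trivial setter defined in sched.h (under CONFIG_CPU_IDLE):

static inline void idle_set_state(struct rq *rq,
				  struct cpuidle_state *idle_state)
{
	rq->idle_state = idle_state;
}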
membarrier.c
238 struct rq *rq = this_rq();
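Note: the membarrier.c hit sits in the code that caches the current mm's membarrier state on the local runqueue, letting the expedited paths skip IPIs to CPUs that do not need them. A condensed sketch, reconstructed from memory of membarrier_update_current_mm(), which I believe is where this hit lands (it may not match the browsed tree line-for-line):

void membarrier_update_current_mm(struct mm_struct *next_mm)
{
	struct rq *rq = this_rq();
	int membarrier_state = 0;

	if (next_mm)
		membarrier_state = atomic_read(&next_mm->membarrier_state);
	/* Keep the cached per-rq copy in sync with the mm's state. */
	if (READ_ONCE(rq->membarrier_state) == membarrier_state)
		return;
	WRITE_ONCE(rq->membarrier_state, membarrier_state);
}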
core.c
843 if (rq == this_rq())
2427 this_rq()->nr_pinned++;
2462 this_rq()->nr_pinned--;
2588 struct rq *rq = this_rq();
2691 struct rq *lowest_rq = NULL, *rq = this_rq();
3732 rq = this_rq();
3878 struct rq *rq = this_rq();
5237 * past. prev == current is still correct but we need to recalculate this_rq
5243 struct rq *rq = this_rq();
5987 schedstat_inc(this_rq()
[all...]
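Note: the core.c hits range from "are we on the local runqueue" checks (rq == this_rq(), line 843) to the migrate_disable()/migrate_enable() pair, which uses this_rq()->nr_pinned (lines 2427/2462) to tell the load balancer how many tasks are currently pinned to this CPU. A condensed sketch of that pair (error handling, the !SMP variant, and the cpumask restoration in migrate_enable() are omitted):

void migrate_disable(void)
{
	struct task_struct *p = current;

	if (p->migration_disabled) {
		p->migration_disabled++;
		return;
	}

	preempt_disable();
	this_rq()->nr_pinned++;
	p->migration_disabled = 1;
	preempt_enable();
}

void migrate_enable(void)
{
	struct task_struct *p = current;

	if (p->migration_disabled > 1) {
		p->migration_disabled--;
		return;
	}

	preempt_disable();
	p->migration_disabled = 0;
	this_rq()->nr_pinned--;
	preempt_enable();
}

nr_pinned is only touched with preemption disabled, so this_rq() cannot change underneath the increment or decrement.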
/linux-master/tools/testing/selftests/bpf/progs/
test_access_variable_array.c
11 int BPF_PROG(fentry_fentry, int this_cpu, struct rq *this_rq, argument
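Note: the selftest hit attaches a BPF fentry program to a scheduler function whose arguments include struct rq *this_rq, in order to exercise BTF-based reads of a variable-length array (struct sched_domain's trailing span[]). A sketch of what such a program looks like; the attach point (load_balance) and the trailing enum argument are my recollection of the selftest, not taken from the truncated snippet:

// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

unsigned long span = 0;

SEC("fentry/load_balance")
int BPF_PROG(fentry_fentry, int this_cpu, struct rq *this_rq,
	     struct sched_domain *sd, enum cpu_idle_type idle)
{
	/* sd->span[] is a flexible array member; reading element 0
	 * through BTF is what this test exercises. */
	span = sd->span[0];

	return 0;
}

char _license[] SEC("license") = "GPL";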

Completed in 257 milliseconds