Searched refs: this_rq (Results 1 - 10 of 10), sorted by last modified time

/linux-master/kernel/sched/
fair.c
4825 static int sched_balance_newidle(struct rq *this_rq, struct rq_flags *rf);
5983 if (rq == this_rq()) {
7593 this_rq()->nr_running <= 1 &&
7996 struct root_domain *rd = this_rq()->rd;
8209 if (!is_rd_overutilized(this_rq()->rd)) {
11304 static int sched_balance_rq(int this_cpu, struct rq *this_rq, argument
11317 .dst_rq = this_rq,
12048 SCHED_WARN_ON(rq != this_rq());
12160 static void _nohz_idle_balance(struct rq *this_rq, unsigned int flags) argument
12167 int this_cpu = this_rq
12263 nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) argument
12309 nohz_newidle_balance(struct rq *this_rq) argument
12339 nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) argument
12344 nohz_newidle_balance(struct rq *this_rq) argument
12356 sched_balance_newidle(struct rq *this_rq, struct rq_flags *rf) argument
12488 struct rq *this_rq = this_rq(); local
[all...]
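
The fair.c hits cluster around the periodic load balancer. A condensed sketch of how the matches at 11304 and 11317 fit together, from memory of the function's opening (most fields and all of the balancing logic elided):

    static int sched_balance_rq(int this_cpu, struct rq *this_rq,
                                struct sched_domain *sd, enum cpu_idle_type idle,
                                int *continue_balancing)
    {
            struct lb_env env = {
                    .sd      = sd,
                    .dst_cpu = this_cpu,
                    .dst_rq  = this_rq,  /* tasks are pulled toward the local rq */
                    /* ... */
            };

            /* ... find the busiest group and queue, then detach tasks
             * from busiest and attach them to this_rq ... */
    }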
core.c
843 if (rq == this_rq())
2427 this_rq()->nr_pinned++;
2462 this_rq()->nr_pinned--;
2588 struct rq *rq = this_rq();
2691 struct rq *lowest_rq = NULL, *rq = this_rq();
3732 rq = this_rq();
3878 struct rq *rq = this_rq();
5236 * past. prev == current is still correct but we need to recalculate this_rq
5242 struct rq *rq = this_rq();
5986 schedstat_inc(this_rq()
[all...]
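
The nr_pinned matches at 2427 and 2462 come from the migrate_disable()/migrate_enable() pair: a task that may not migrate is counted on its runqueue so the balancer can tell how much of the load is movable. A simplified sketch of the disable side (tracing and PREEMPT_RT details omitted):

    void migrate_disable(void)
    {
            struct task_struct *p = current;

            if (p->migration_disabled) {
                    p->migration_disabled++;
                    return;
            }

            preempt_disable();
            this_rq()->nr_pinned++;         /* the match at core.c:2427 */
            p->migration_disabled = 1;
            preempt_enable();
    }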
rt.c
587 return this_rq()->rd->span;
2269 rq = this_rq();
2299 static void pull_rt_task(struct rq *this_rq) argument
2301 int this_cpu = this_rq->cpu, cpu;
2305 int rt_overload_count = rt_overloaded(this_rq);
2318 cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask))
2323 tell_cpu_to_push(this_rq);
2328 for_each_cpu(cpu, this_rq->rd->rto_mask) {
2342 this_rq
[all...]
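
Stitching the rt.c fragments (2299-2342) back together gives the shape of the RT pull path; a condensed sketch with the per-CPU loop body elided:

    static void pull_rt_task(struct rq *this_rq)
    {
            int this_cpu = this_rq->cpu, cpu;
            bool resched = false;
            int rt_overload_count = rt_overloaded(this_rq);

            if (likely(!rt_overload_count))
                    return;

            /* Sole overloaded CPU is ourselves: nothing to pull. */
            if (rt_overload_count == 1 &&
                cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask))
                    return;

            /* With RT_PUSH_IPI, ask the overloaded CPUs to push instead:
             * tell_cpu_to_push(this_rq);  (the match at 2323) */

            for_each_cpu(cpu, this_rq->rd->rto_mask) {
                    if (this_cpu == cpu)
                            continue;
                    /* ... double_lock_balance(), pick the highest-priority
                     * pushable task on cpu_rq(cpu), move it to this_rq ... */
            }

            if (resched)
                    resched_curr(this_rq);
    }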
deadline.c
2467 static void pull_dl_task(struct rq *this_rq) argument
2469 int this_cpu = this_rq->cpu, cpu;
2475 if (likely(!dl_overloaded(this_rq)))
2484 for_each_cpu(cpu, this_rq->rd->dlo_mask) {
2494 if (this_rq->dl.dl_nr_running &&
2495 dl_time_before(this_rq->dl.earliest_dl.curr,
2499 /* Might drop this_rq->lock */
2501 double_lock_balance(this_rq, src_rq);
2518 dl_task_is_earliest_deadline(p, this_rq)) {
2535 activate_task(this_rq,
[all...]
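
pull_dl_task() (2467 onward) mirrors the RT version, but the pull is gated on deadline order: a remote task is only worth pulling if this_rq has no deadline tasks queued or the candidate's deadline beats the earliest one already queued locally. The check at 2494-2495, with its continuation filled in from memory:

    /* Skip this source CPU if what we already have runs sooner: */
    if (this_rq->dl.dl_nr_running &&
        dl_time_before(this_rq->dl.earliest_dl.curr,
                       src_rq->dl.earliest_dl.next))
            continue;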
sched.h
105 extern void calc_global_load_tick(struct rq *this_rq);
106 extern long calc_load_fold_active(struct rq *this_rq, long adjust);
1244 #define this_rq() this_cpu_ptr(&runqueues) macro
1752 rq = this_rq();
2288 void (*task_woken)(struct rq *this_rq, struct task_struct *task);
2307 void (*switched_from)(struct rq *this_rq, struct task_struct *task);
2308 void (*switched_to) (struct rq *this_rq, struct task_struct *task);
2309 void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
2709 static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
2710 __releases(this_rq
2714 raw_spin_rq_unlock(this_rq); variable
2745 raw_spin_rq_unlock(this_rq); variable
2756 double_lock_balance(struct rq *this_rq, struct rq *busiest) argument
[all...]
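
sched.h:1244 is the definition itself: this_rq() resolves the per-CPU runqueues variable for the local CPU. The neighbouring accessors, reproduced from memory (exact placement may differ):

    DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

    #define cpu_rq(cpu)     (&per_cpu(runqueues, (cpu)))
    #define this_rq()       this_cpu_ptr(&runqueues)
    #define task_rq(p)      cpu_rq(task_cpu(p))
    #define cpu_curr(cpu)   (cpu_rq(cpu)->curr)

The _double_lock_balance() fragments at 2709-2745 are about lock ordering: to hold two rq locks without ABBA deadlock, the preemptible variant drops the local lock and re-takes both in a fixed order. Roughly (a sketch of the CONFIG_PREEMPTION branch):

    static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
            __releases(this_rq->lock)
            __acquires(busiest->lock)
            __acquires(this_rq->lock)
    {
            raw_spin_rq_unlock(this_rq);       /* the matches at 2714/2745 */
            double_rq_lock(this_rq, busiest);  /* re-acquire in a stable order */

            return 1;  /* tells the caller this_rq->lock was dropped */
    }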
loadavg.c
78 long calc_load_fold_active(struct rq *this_rq, long adjust) argument
82 nr_active = this_rq->nr_running - adjust;
83 nr_active += (int)this_rq->nr_uninterruptible;
85 if (nr_active != this_rq->calc_load_active) {
86 delta = nr_active - this_rq->calc_load_active;
87 this_rq->calc_load_active = nr_active;
251 calc_load_nohz_fold(this_rq());
265 struct rq *this_rq = this_rq(); local
270 this_rq
385 calc_global_load_tick(struct rq *this_rq) argument
[all...]
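
The loadavg.c hits at 78-87 are nearly the whole of calc_load_fold_active(); reassembled from the snippets above, with only the local declarations filled in:

    long calc_load_fold_active(struct rq *this_rq, long adjust)
    {
            long nr_active, delta = 0;

            nr_active = this_rq->nr_running - adjust;
            nr_active += (int)this_rq->nr_uninterruptible;

            if (nr_active != this_rq->calc_load_active) {
                    delta = nr_active - this_rq->calc_load_active;
                    this_rq->calc_load_active = nr_active;
            }

            return delta;
    }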
cputime.c
225 struct rq *rq = this_rq();
260 steal -= this_rq()->prev_steal_time;
263 this_rq()->prev_steal_time += steal;
403 } else if (p == this_rq()->idle) {
496 else if ((p != this_rq()->idle) || (irq_count() != HARDIRQ_OFFSET))
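
The pair at 260/263 is the usual cumulative-counter pattern for paravirt steal time: the hypervisor clock only ever grows, so each tick accounts the delta since the per-rq snapshot. A simplified sketch (config guards and clamping omitted; paravirt_steal_clock() is the real hook):

    u64 steal = paravirt_steal_clock(smp_processor_id());

    steal -= this_rq()->prev_steal_time;    /* delta since the last tick */
    account_steal_time(steal);
    this_rq()->prev_steal_time += steal;    /* advance the snapshot */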
membarrier.c
238 struct rq *rq = this_rq();
idle.c
19 idle_set_state(this_rq(), idle_state);
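
idle.c:19 stashes the cpuidle state being entered on the local rq so other code can see how deep the CPU is sleeping; the setter is a plain store, roughly:

    static inline void idle_set_state(struct rq *rq,
                                      struct cpuidle_state *idle_state)
    {
            rq->idle_state = idle_state;
    }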
/linux-master/tools/testing/selftests/bpf/progs/
test_access_variable_array.c
11 int BPF_PROG(fentry_fentry, int this_cpu, struct rq *this_rq, argument
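
The lone non-scheduler hit is a BPF selftest whose fentry program receives the traced function's arguments, including a struct rq pointer. A minimal sketch of a program with that signature; the attach point and the field read are illustrative assumptions, not the selftest's actual body:

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>
    #include <bpf/bpf_core_read.h>

    char LICENSE[] SEC("license") = "GPL";

    unsigned long nr_running_seen;

    SEC("fentry/sched_balance_rq")   /* assumed attach point */
    int BPF_PROG(fentry_fentry, int this_cpu, struct rq *this_rq)
    {
            /* CO-RE read of one rq field (illustrative) */
            nr_running_seen = BPF_CORE_READ(this_rq, nr_running);
            return 0;
    }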

Completed in 477 milliseconds