Searched refs:on_rq (Results 1 - 13 of 13) sorted by relevance

/linux-master/kernel/
freezer.c:112        if (p->on_rq)
/linux-master/kernel/sched/
pelt.c:274           * se_runnable() = !!on_rq
308 if (___update_load_sum(now, &se->avg, !!se->on_rq, se_runnable(se),
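The pelt.c comment above ties a fair-scheduling entity's "runnable" contribution in the PELT load sum to whether the entity is enqueued at all: for a plain task entity it is literally !!se->on_rq (compare the sched.h hits at 804/816 further down). A minimal, self-contained model of that relationship is sketched below; struct sched_entity here is a stub with only the one field this illustration needs, not the kernel's definition.

    #include <stdio.h>

    /* Stub standing in for the kernel's struct sched_entity. */
    struct sched_entity {
        unsigned int on_rq;        /* 0 = dequeued, non-zero = enqueued */
    };

    /* Model of the se_runnable() relationship shown above: the "runnable"
     * input fed into the load sum is simply whether the entity sits on a
     * runqueue right now. */
    static long se_runnable(const struct sched_entity *se)
    {
        return !!se->on_rq;
    }

    int main(void)
    {
        struct sched_entity se = { .on_rq = 1 };
        printf("runnable = %ld\n", se_runnable(&se));  /* 1 */
        se.on_rq = 0;
        printf("runnable = %ld\n", se_runnable(&se));  /* 0 */
        return 0;
    }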
fair.c:666           if (curr && curr->on_rq) {
711 SCHED_WARN_ON(!se->on_rq);
739 if (curr && curr->on_rq) {
775 if (curr->on_rq)
896 return curr && curr->on_rq ? curr : se;
898 if (curr && (!curr->on_rq || !entity_eligible(cfs_rq, curr)))
3797 if (se->on_rq) {
3807 if (se->on_rq) {
3828 if (se->on_rq) {
5245 if (curr && curr->on_rq)
[all...]
core.c:510           * p->on_rq <- { 0, 1 = TASK_ON_RQ_QUEUED, 2 = TASK_ON_RQ_MIGRATING }:
666 * [S] ->on_rq = MIGRATING [L] rq = task_rq()
669 * [L] ->on_rq
2098 if (state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq)
2148 WRITE_ONCE(p->on_rq, TASK_ON_RQ_QUEUED);
2149 ASSERT_EXCLUSIVE_WRITER(p->on_rq);
2154 WRITE_ONCE(p->on_rq, (flags & DEQUEUE_SLEEP) ? 0 : TASK_ON_RQ_MIGRATING);
2155 ASSERT_EXCLUSIVE_WRITER(p->on_rq);
2615 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because
3357 WARN_ON_ONCE(state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq);
[all...]
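The core.c hits above spell out the encoding: p->on_rq is 0 for a task that is not on any runqueue, TASK_ON_RQ_QUEUED once it has been enqueued, and TASK_ON_RQ_MIGRATING while it is being moved between runqueues, with the writes at 2148 and 2154 performing the transitions. The following is a compilable userspace model of that state machine only; the struct, helper names and the DEQUEUE_SLEEP value are stand-ins, and the real code additionally holds rq->lock, calls the sched-class enqueue/dequeue hooks and annotates the field with ASSERT_EXCLUSIVE_WRITER().

    #include <stdio.h>

    /* task_struct::on_rq states, mirroring core.c:510. */
    #define TASK_ON_RQ_QUEUED    1
    #define TASK_ON_RQ_MIGRATING 2

    #define DEQUEUE_SLEEP 0x01            /* stand-in for the kernel's flag */

    struct task {                         /* stub: only the field we model */
        int on_rq;
    };

    /* Activation: a task placed on a runqueue becomes QUEUED. */
    static void activate(struct task *p)
    {
        p->on_rq = TASK_ON_RQ_QUEUED;
    }

    /*
     * Deactivation, modelled on the write at core.c:2154: a task going to
     * sleep drops back to 0, while one being pulled off this runqueue only
     * to be enqueued elsewhere is marked MIGRATING so that observers can
     * tell it is still in flight.
     */
    static void deactivate(struct task *p, int flags)
    {
        p->on_rq = (flags & DEQUEUE_SLEEP) ? 0 : TASK_ON_RQ_MIGRATING;
    }

    int main(void)
    {
        struct task p = { .on_rq = 0 };

        activate(&p);                      /* 0 -> QUEUED (wakeup/fork)     */
        deactivate(&p, 0);                 /* QUEUED -> MIGRATING (balance) */
        activate(&p);                      /* MIGRATING -> QUEUED on new rq */
        deactivate(&p, DEQUEUE_SLEEP);     /* QUEUED -> 0 (task blocks)     */

        printf("final on_rq = %d\n", p.on_rq);   /* 0 */
        return 0;
    }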
sched.h:96           /* task_struct::on_rq states: */
804 return !!se->on_rq;
816 return !!se->on_rq;
2169 return p->on_rq == TASK_ON_RQ_QUEUED;
2174 return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING;
2222 * MIGRATION - p->on_rq == TASK_ON_RQ_MIGRATING (used for DEADLINE)
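The two predicates at sched.h:2169 and 2174 are how most of the scheduler distinguishes these states instead of comparing p->on_rq by hand: "queued" means genuinely enqueued, and "migrating" is read with READ_ONCE(), presumably because it may be observed without the runqueue lock held. A hedged, self-contained paraphrase (stub task type, userspace READ_ONCE stand-in) looks like this:

    #include <stdio.h>

    #define TASK_ON_RQ_QUEUED    1
    #define TASK_ON_RQ_MIGRATING 2

    /* Userspace stand-in for the kernel's READ_ONCE(): a volatile read. */
    #define READ_ONCE(x) (*(volatile typeof(x) *)&(x))

    struct task {                          /* stub, not struct task_struct */
        int on_rq;
    };

    /* Paraphrase of sched.h:2169: true only for a task actually enqueued,
     * not for one parked in the MIGRATING state. */
    static int task_on_rq_queued(struct task *p)
    {
        return p->on_rq == TASK_ON_RQ_QUEUED;
    }

    /* Paraphrase of sched.h:2174. */
    static int task_on_rq_migrating(struct task *p)
    {
        return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING;
    }

    int main(void)
    {
        struct task p = { .on_rq = TASK_ON_RQ_MIGRATING };
        printf("queued=%d migrating=%d\n",
               task_on_rq_queued(&p), task_on_rq_migrating(&p));   /* 0 1 */
        return 0;
    }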
rt.c:443             return rt_se->on_rq;
1404 rt_se->on_rq = 1;
1418 rt_se->on_rq = 0;
deadline.c:1191      * prev->on_rq = 0;
1833 if (p->on_rq == TASK_ON_RQ_MIGRATING)
1849 if (p->on_rq == TASK_ON_RQ_MIGRATING)
/linux-master/include/linux/
sched.h:544          unsigned int on_rq; member in struct:sched_entity
582 unsigned short on_rq; member in struct:sched_rt_entity
790 int on_rq; member in struct:task_struct
912 * schedule() if (p->on_rq && ..) // false
915 * p->on_rq = 0; p->sched_remote_wakeup = Y;
/linux-master/kernel/trace/
trace_selftest.c:1224  while (p->on_rq) {
/linux-master/kernel/rcu/
tree_stall.h:577     gpk && !READ_ONCE(gpk->on_rq)) {
tasks.h:845          // t->on_rq and t->nvcsw transitions to complete. This works because
865 // t->on_rq and t->nvcsw transitions are seen by all CPUs and tasks
886 * Wait for all pre-existing t->on_rq and t->nvcsw transitions
907 if (!READ_ONCE(t->on_rq))
1053 * Because ->on_rq and ->nvcsw are not guaranteed to have a full
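The tasks.h hits above use on_rq as one of the signals that a task has passed a voluntary quiescent state for RCU Tasks: a task that is not on any runqueue has voluntarily scheduled out, so it cannot be inside a region the grace period must wait for. Below is a rough, self-contained model of that holdout test; the field and function names are illustrative, and the real code also tracks a per-task holdout flag, the idle tasks, and the memory-ordering caveats the comment at 1053 warns about.

    #include <stdbool.h>
    #include <stdio.h>

    struct task {                 /* stub with just the fields referenced above */
        int on_rq;                /* 0 while the task is blocked                */
        unsigned long nvcsw;      /* voluntary context-switch count             */
    };

    /*
     * Rough model of the check: a task stops being a holdout once it is
     * either off the runqueue entirely or has voluntarily context-switched
     * since the grace period started (its nvcsw count has moved on).
     */
    static bool passed_voluntary_qs(const struct task *t,
                                    unsigned long nvcsw_at_gp_start)
    {
        if (!t->on_rq)
            return true;          /* blocked: quiescent by definition         */
        if (t->nvcsw != nvcsw_at_gp_start)
            return true;          /* voluntarily scheduled since GP start     */
        return false;             /* still a holdout: keep waiting            */
    }

    int main(void)
    {
        struct task t = { .on_rq = 1, .nvcsw = 42 };
        printf("%d\n", passed_voluntary_qs(&t, 42));   /* 0: still a holdout */
        t.nvcsw++;
        printf("%d\n", passed_voluntary_qs(&t, 42));   /* 1: switch observed */
        return 0;
    }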
/linux-master/virt/kvm/
kvm_main.c:6337      if (current->on_rq) {
/linux-master/kernel/events/
core.c:9092          if (!sched_in && task->on_rq) {

Completed in 347 milliseconds