Lines matching refs: prev

4331 		 * this task as prev, considering queueing p on the remote CPUs wake_list
4354 * this task as prev, wait until it's done referencing the task.
5018 static inline void finish_task(struct task_struct *prev)
5022 * This must be the very last reference to @prev from this CPU. After
5027 * In particular, the load of prev->state in finish_task_switch() must
5032 smp_store_release(&prev->on_cpu, 0);
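
The smp_store_release() in finish_task() above is one half of an ordering pair: the wakeup path referenced by the comments at the top of the list spins on prev->on_cpu and may only proceed once every access the old CPU made to prev is visible. A minimal userspace C11 sketch of that pairing; task_sketch, switching_cpu_finish() and waking_cpu_wait() are illustrative stand-ins, not kernel identifiers, and the kernel itself uses smp_store_release()/smp_cond_load_acquire() rather than <stdatomic.h>:

#include <stdatomic.h>

struct task_sketch {
	atomic_int on_cpu;	/* stands in for task_struct::on_cpu */
	int state;		/* stands in for task_struct::__state */
};

/* Switching-out CPU: the release store guarantees that every earlier
 * access this CPU made to *prev is visible before on_cpu reads as 0. */
static void switching_cpu_finish(struct task_sketch *prev)
{
	atomic_store_explicit(&prev->on_cpu, 0, memory_order_release);
}

/* Waking CPU: spin until the old CPU is done with the task; the acquire
 * load orders everything that follows after the old CPU's accesses. */
static void waking_cpu_wait(struct task_sketch *p)
{
	while (atomic_load_explicit(&p->on_cpu, memory_order_acquire))
		;	/* busy-wait for illustration only */
}
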
5158 * prev into current:
5196 * @prev: the current task that is being switched out
5207 prepare_task_switch(struct rq *rq, struct task_struct *prev,
5210 kcov_prepare_switch(prev);
5211 sched_info_switch(rq, prev, next);
5212 perf_event_task_sched_out(prev, next);
5213 rseq_preempt(prev);
5214 fire_sched_out_preempt_notifiers(prev, next);
5222 * @prev: the thread we just switched away from.
5236 * past. prev == current is still correct but we need to recalculate this_rq
5237 * because prev may have moved to another CPU.
5239 static struct rq *finish_task_switch(struct task_struct *prev)
5270 * We must observe prev->state before clearing prev->on_cpu (in
5271 * finish_task), otherwise a concurrent wakeup can get prev
5275 prev_state = READ_ONCE(prev->__state);
5276 vtime_task_switch(prev);
5277 perf_event_task_sched_in(prev, current);
5278 finish_task(prev);
5311 if (prev->sched_class->task_dead)
5312 prev->sched_class->task_dead(prev);
5315 put_task_stack(prev);
5317 put_task_struct_rcu_user(prev);
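
finish_task_switch() must read prev->__state before finish_task() publishes on_cpu == 0; after that store another CPU can wake prev and change its state under us, which matters most when prev is dead and the final reference has to be dropped. A condensed C sketch of that ordering, with stand-in names (task_sketch, TASK_DEAD_SKETCH) and a hypothetical drop_last_reference() standing in for the put_task_stack()/put_task_struct_rcu_user() pair:

#include <stdatomic.h>

#define TASK_DEAD_SKETCH	0x0080	/* stand-in for TASK_DEAD */

struct task_sketch {
	atomic_int on_cpu;
	atomic_int state;
};

static void drop_last_reference(struct task_sketch *prev)
{
	(void)prev;	/* placeholder for the real reference drop */
}

static void finish_task_switch_sketch(struct task_sketch *prev)
{
	/* Read the state first: once on_cpu is 0 below, another CPU may
	 * wake prev and rewrite prev->state. */
	int prev_state = atomic_load_explicit(&prev->state, memory_order_relaxed);

	/* finish_task(): release this CPU's hold on prev. */
	atomic_store_explicit(&prev->on_cpu, 0, memory_order_release);

	if (prev_state & TASK_DEAD_SKETCH) {
		/* prev called schedule() one last time and will never run
		 * again; drop the reference it held as "current". */
		drop_last_reference(prev);
	}
}
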
5325 * @prev: the thread we just switched away from.
5327 asmlinkage __visible void schedule_tail(struct task_struct *prev)
5339 finish_task_switch(prev);
5352 context_switch(struct rq *rq, struct task_struct *prev,
5355 prepare_task_switch(rq, prev, next);
5362 arch_start_context_switch(prev);
5375 enter_lazy_tlb(prev->active_mm, next);
5377 next->active_mm = prev->active_mm;
5378 if (prev->mm) // from user
5379 mmgrab_lazy_tlb(prev->active_mm);
5381 prev->active_mm = NULL;
5383 membarrier_switch_mm(rq, prev->active_mm, next->mm);
5389 * case 'prev->active_mm == next->mm' through
5392 switch_mm_irqs_off(prev->active_mm, next->mm, next);
5395 if (!prev->mm) { // from kernel
5397 rq->prev_mm = prev->active_mm;
5398 prev->active_mm = NULL;
5403 switch_mm_cid(rq, prev, next);
5408 switch_to(prev, next, prev);
5411 return finish_task_switch(prev);
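
The context_switch() lines above implement the mm hand-off: a kernel thread (next->mm == NULL) has no address space of its own and keeps running on prev's active_mm (lazy TLB), while a switch to a user task does a real mm switch and, if prev was a kernel thread, arranges for the borrowed mm to be dropped after the switch. A reduced sketch of just that decision, under simplified types; mm_sketch, grab_lazy_tlb() and switch_mm_sketch() are stand-ins for the kernel's mmgrab_lazy_tlb() and switch_mm_irqs_off():

#include <stddef.h>

struct mm_sketch { int lazy_refs; };

struct task_sketch {
	struct mm_sketch *mm;		/* NULL for kernel threads */
	struct mm_sketch *active_mm;	/* address space in use, possibly borrowed */
};

static void grab_lazy_tlb(struct mm_sketch *mm)
{
	mm->lazy_refs++;	/* keep the borrowed mm alive */
}

static void switch_mm_sketch(struct mm_sketch *prev_mm, struct mm_sketch *next_mm)
{
	(void)prev_mm; (void)next_mm;	/* real work happens in switch_mm_irqs_off() */
}

/* Returns the mm whose lazy reference must be dropped once the switch has
 * completed (the kernel parks it in rq->prev_mm), or NULL. */
static struct mm_sketch *handoff_mm(struct task_sketch *prev, struct task_sketch *next)
{
	struct mm_sketch *defer_drop = NULL;

	if (!next->mm) {				/* to a kernel thread */
		next->active_mm = prev->active_mm;	/* borrow prev's address space */
		if (prev->mm)				/* from a user task */
			grab_lazy_tlb(prev->active_mm);
		else					/* kernel -> kernel */
			prev->active_mm = NULL;		/* the borrow moves to next */
	} else {					/* to a user task */
		switch_mm_sketch(prev->active_mm, next->mm);
		if (!prev->mm) {			/* from a kernel thread */
			defer_drop = prev->active_mm;	/* lazy reference dropped later */
			prev->active_mm = NULL;
		}
	}
	return defer_drop;
}

Deferring the drop is the point of rq->prev_mm at 5397: the borrowed mm cannot be released while this CPU may still be using it, so finish_task_switch() drops it only after the switch is complete.
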
5930 static noinline void __schedule_bug(struct task_struct *prev)
5939 prev->comm, prev->pid, preempt_count());
5941 debug_show_held_locks(prev);
5944 print_irqtrace_events(prev);
5958 static inline void schedule_debug(struct task_struct *prev, bool preempt)
5961 if (task_stack_end_corrupted(prev))
5964 if (task_scs_end_corrupted(prev))
5969 if (!preempt && READ_ONCE(prev->__state) && prev->non_block_count) {
5971 prev->comm, prev->pid, prev->non_block_count);
5978 __schedule_bug(prev);
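
schedule_debug() is a set of sanity checks on the outgoing task: stack and shadow-call-stack canaries, plus a warning when a task that set a sleeping state schedules voluntarily inside a section marked non-blocking (prev->non_block_count). A tiny sketch of just that last predicate, with a stand-in task_sketch type:

struct task_sketch {
	unsigned int state;	/* 0 means runnable, non-zero means sleeping */
	int non_block_count;	/* > 0 while inside a non-blocking section */
};

/* Nonzero when the outgoing task went to sleep from inside a non-blocking
 * section; only voluntary (non-preemptive) schedules count. */
static int slept_in_non_block_section(const struct task_sketch *prev, int preempt)
{
	return !preempt && prev->state && prev->non_block_count > 0;
}
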
5989 static void put_prev_task_balance(struct rq *rq, struct task_struct *prev,
6002 for_class_range(class, prev->sched_class, &idle_sched_class) {
6003 if (class->balance(rq, prev, rf))
6008 put_prev_task(rq, prev);
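
put_prev_task_balance() walks the scheduling classes from prev's class down to, but not including, the idle class, giving each a chance to pull work onto this runqueue before prev is put back; roughly, once a class reports runnable work there is no point balancing lower-priority ones. A loop sketch over a simplified, priority-ordered class table; sched_class_sketch and its two callbacks are stand-ins:

struct rq_sketch;
struct task_sketch;

struct sched_class_sketch {
	/* nonzero once this class has something runnable on this rq */
	int (*balance)(struct rq_sketch *rq, struct task_sketch *prev);
	void (*put_prev_task)(struct rq_sketch *rq, struct task_sketch *prev);
};

/* classes[] is ordered from highest to lowest priority, prev_idx is the
 * index of prev's class and idle_idx the (never balanced) idle class. */
static void put_prev_task_balance_sketch(struct rq_sketch *rq, struct task_sketch *prev,
					 const struct sched_class_sketch *classes,
					 int prev_idx, int idle_idx)
{
	for (int i = prev_idx; i < idle_idx; i++) {
		if (classes[i].balance(rq, prev))
			break;		/* runnable work found; stop pulling */
	}

	/* Balancing must run while prev is still "current"; only now is it
	 * safe to put prev back into its class's data structures. */
	classes[prev_idx].put_prev_task(rq, prev);
}
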
6015 __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
6022 * call that function directly, but only if the @prev task wasn't of a
6026 if (likely(!sched_class_above(prev->sched_class, &fair_sched_class) &&
6029 p = pick_next_task_fair(rq, prev, rf);
6035 put_prev_task(rq, prev);
6041 * therefore even if @p == @prev, ->dl_server must be NULL.
6050 put_prev_task_balance(rq, prev, rf);
6053 * We've updated @prev and no longer need the server link, clear it.
6057 if (prev->dl_server)
6058 prev->dl_server = NULL;
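
__pick_next_task() is built around an optimization: when prev's class is not above the fair class and every runnable task on the rq belongs to the fair class, it calls pick_next_task_fair() directly and skips the per-class walk; otherwise it balances, puts prev, and tries each class from highest priority down. A condensed and heavily simplified sketch of that shape; the *_sketch names, the class table and pick_fair_fast() are stand-ins, and the kernel's RETRY_TASK / idle handling is folded into the fallback loop:

#include <stddef.h>

struct rq_sketch {
	unsigned int nr_running;	/* all runnable tasks on this rq */
	unsigned int fair_nr_running;	/* runnable tasks in the fair class */
};

struct task_sketch { int class_prio; };	/* smaller value == higher-priority class */

struct sched_class_sketch {
	struct task_sketch *(*pick_next_task)(struct rq_sketch *rq);
};

#define FAIR_PRIO	2	/* stand-in position of the fair class */

/* Stand-in for the pick_next_task_fair() fast path; NULL means "no luck,
 * fall back to the generic walk". */
static struct task_sketch *pick_fair_fast(struct rq_sketch *rq, struct task_sketch *prev)
{
	(void)rq; (void)prev;
	return NULL;
}

static struct task_sketch *
pick_next_task_sketch(struct rq_sketch *rq, struct task_sketch *prev,
		      const struct sched_class_sketch *classes, int nr_classes)
{
	struct task_sketch *p;

	/* Fast path: everything runnable is fair-class work and prev is not
	 * from a higher-priority class, so ask the fair class directly. */
	if (prev->class_prio >= FAIR_PRIO && rq->nr_running == rq->fair_nr_running) {
		p = pick_fair_fast(rq, prev);
		if (p)
			return p;
	}

	/* Generic path: (balance + put_prev_task omitted) try each class in
	 * priority order; the idle class at the end always yields a task. */
	for (int i = 0; i < nr_classes; i++) {
		p = classes[i].pick_next_task(rq);
		if (p)
			return p;
	}
	return NULL;	/* unreachable in the kernel: the idle class never fails */
}
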
6107 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
6119 return __pick_next_task(rq, prev, rf);
6131 return __pick_next_task(rq, prev, rf);
6149 if (next != prev) {
6150 put_prev_task(rq, prev);
6158 put_prev_task_balance(rq, prev, rf);
6551 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
6553 return __pick_next_task(rq, prev, rf);
6617 struct task_struct *prev, *next;
6626 prev = rq->curr;
6628 schedule_debug(prev, !!sched_mode);
6661 switch_count = &prev->nivcsw;
6664 * We must load prev->state once (task_struct::state is volatile), such
6667 prev_state = READ_ONCE(prev->__state);
6669 if (signal_pending_state(prev_state, prev)) {
6670 WRITE_ONCE(prev->__state, TASK_RUNNING);
6672 prev->sched_contributes_to_load =
6677 if (prev->sched_contributes_to_load)
6682 * prev_state = prev->state; if (p->on_rq && ...)
6691 deactivate_task(rq, prev, DEQUEUE_SLEEP | DEQUEUE_NOCLOCK);
6693 if (prev->in_iowait) {
6698 switch_count = &prev->nvcsw;
6701 next = pick_next_task(rq, prev, &rf);
6702 clear_tsk_need_resched(prev);
6708 if (likely(prev != next)) {
6739 migrate_disable_switch(rq, prev);
6740 psi_sched_switch(prev, next, !task_on_rq_queued(prev));
6742 trace_sched_switch(sched_mode & SM_MASK_PREEMPT, prev, next, prev_state);
6745 rq = context_switch(rq, prev, next, &rf);
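
Taken together, the __schedule() lines above follow one shape: snapshot prev->__state once, let a pending signal turn the sleep back into TASK_RUNNING, otherwise dequeue the sleeping prev, pick the next task, and only context-switch when it differs from prev. A compressed control-flow sketch with locking, clock, statistics and iowait accounting left out; every helper here is a stand-in stub, not the kernel function:

struct rq_sketch;

struct task_sketch { unsigned int state; };	/* 0 == runnable */

/* Stand-in stubs for signal_pending_state(), deactivate_task(),
 * pick_next_task() and context_switch(). */
static int signal_pending_sketch(unsigned int state, struct task_sketch *p)
{
	(void)state; (void)p;
	return 0;
}

static void dequeue_sleeping(struct rq_sketch *rq, struct task_sketch *p)
{
	(void)rq; (void)p;
}

static struct task_sketch *pick_next_sketch(struct rq_sketch *rq, struct task_sketch *prev)
{
	(void)rq;
	return prev;
}

static void context_switch_sketch(struct rq_sketch *rq, struct task_sketch *prev,
				  struct task_sketch *next)
{
	(void)rq; (void)prev; (void)next;
}

static void schedule_sketch(struct rq_sketch *rq, struct task_sketch *prev, int preempt)
{
	/* Load the state exactly once; wakeups on other CPUs write it. */
	unsigned int prev_state = prev->state;
	struct task_sketch *next;

	if (!preempt && prev_state) {
		if (signal_pending_sketch(prev_state, prev)) {
			prev->state = 0;	/* pending signal cancels the sleep */
		} else {
			/* Really going to sleep: take prev off the runqueue
			 * (deactivate_task() with DEQUEUE_SLEEP in the kernel). */
			dequeue_sleeping(rq, prev);
		}
	}

	next = pick_next_sketch(rq, prev);

	if (next != prev) {
		/* Switch stacks and mm; prev resumes only when some CPU
		 * later switches back to it. */
		context_switch_sketch(rq, prev, next);
	}
	/* else: prev keeps running on this CPU. */
}
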
11729 * between store to rq->curr and load of prev and next task's
11846 * between store to rq->curr and load of prev and next task's