1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _ASM_X86_FPU_SCHED_H
3#define _ASM_X86_FPU_SCHED_H
4
5#include <linux/sched.h>
6
7#include <asm/cpufeature.h>
8#include <asm/fpu/types.h>
9
10#include <asm/trace/fpu.h>
11
12extern void save_fpregs_to_fpstate(struct fpu *fpu);
13extern void fpu__drop(struct fpu *fpu);
14extern int  fpu_clone(struct task_struct *dst, unsigned long clone_flags, bool minimal,
15		      unsigned long shstk_addr);
16extern void fpu_flush_thread(void);
17
18/*
19 * FPU state switching for scheduling.
20 *
21 * This is a two-stage process:
22 *
23 *  - switch_fpu_prepare() saves the old state.
24 *    This is done within the context of the old process.
25 *
26 *  - switch_fpu_finish() sets TIF_NEED_FPU_LOAD; the floating point state
27 *    will get loaded on return to userspace, or when the kernel needs it.
28 *
29 * If TIF_NEED_FPU_LOAD is cleared then the CPU's FPU registers
30 * are saved in the current thread's FPU register state.
31 *
 * If TIF_NEED_FPU_LOAD is set then the CPU's FPU registers may not
33 * hold current()'s FPU registers. It is required to load the
34 * registers before returning to userland or using the content
35 * otherwise.
36 *
37 * The FPU context is only stored/restored for a user task and
38 * PF_KTHREAD is used to distinguish between kernel and user threads.
39 */
40static inline void switch_fpu_prepare(struct task_struct *old, int cpu)
41{
42	if (cpu_feature_enabled(X86_FEATURE_FPU) &&
43	    !(old->flags & (PF_KTHREAD | PF_USER_WORKER))) {
44		struct fpu *old_fpu = &old->thread.fpu;
45
46		save_fpregs_to_fpstate(old_fpu);
47		/*
48		 * The save operation preserved register state, so the
49		 * fpu_fpregs_owner_ctx is still @old_fpu. Store the
50		 * current CPU number in @old_fpu, so the next return
51		 * to user space can avoid the FPU register restore
52		 * when is returns on the same CPU and still owns the
53		 * context.
54		 */
55		old_fpu->last_cpu = cpu;
56
57		trace_x86_fpu_regs_deactivated(old_fpu);
58	}
59}
60
61/*
62 * Delay loading of the complete FPU state until the return to userland.
63 * PKRU is handled separately.
64 */
65static inline void switch_fpu_finish(struct task_struct *new)
66{
67	if (cpu_feature_enabled(X86_FEATURE_FPU))
68		set_tsk_thread_flag(new, TIF_NEED_FPU_LOAD);
69}
70
71#endif /* _ASM_X86_FPU_SCHED_H */
72