/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PROCESSOR_H
#define _ASM_X86_PROCESSOR_H

#include <asm/processor-flags.h>

/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;
struct io_bitmap;
struct vm86;

#include <asm/math_emu.h>
#include <asm/segment.h>
#include <asm/types.h>
#include <uapi/asm/sigcontext.h>
#include <asm/current.h>
#include <asm/cpufeatures.h>
#include <asm/cpuid.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>
#include <asm/percpu.h>
#include <asm/desc_defs.h>
#include <asm/nops.h>
#include <asm/special_insns.h>
#include <asm/fpu/types.h>
#include <asm/unwind_hints.h>
#include <asm/vmxfeatures.h>
#include <asm/vdso/processor.h>
#include <asm/shstk.h>

#include <linux/personality.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/math64.h>
#include <linux/err.h>
#include <linux/irqflags.h>
#include <linux/mem_encrypt.h>

/*
 * We handle most unaligned accesses in hardware.  On the other hand
 * unaligned DMA can be quite expensive on some Nehalem processors.
 *
 * Based on this we disable the IP header alignment in network drivers.
 */
#define NET_IP_ALIGN	0

#define HBP_NUM 4

/*
 * These alignment constraints are for performance in the vSMP case,
 * but in the task_struct case we must also meet hardware imposed
 * alignment requirements of the FPU state:
 */
#ifdef CONFIG_X86_VSMP
# define ARCH_MIN_TASKALIGN		(1 << INTERNODE_CACHE_SHIFT)
# define ARCH_MIN_MMSTRUCT_ALIGN	(1 << INTERNODE_CACHE_SHIFT)
#else
# define ARCH_MIN_TASKALIGN		__alignof__(union fpregs_state)
# define ARCH_MIN_MMSTRUCT_ALIGN	0
#endif

enum tlb_infos {
	ENTRIES,
	NR_INFO
};

extern u16 __read_mostly tlb_lli_4k[NR_INFO];
extern u16 __read_mostly tlb_lli_2m[NR_INFO];
extern u16 __read_mostly tlb_lli_4m[NR_INFO];
extern u16 __read_mostly tlb_lld_4k[NR_INFO];
extern u16 __read_mostly tlb_lld_2m[NR_INFO];
extern u16 __read_mostly tlb_lld_4m[NR_INFO];
extern u16 __read_mostly tlb_lld_1g[NR_INFO];

/*
 * CPU type and hardware bug flags. Kept separately for each CPU.
 */

struct cpuinfo_topology {
	// Real APIC ID read from the local APIC
	u32			apicid;
	// The initial APIC ID provided by CPUID
	u32			initial_apicid;

	// Physical package ID
	u32			pkg_id;

	// Physical die ID on AMD, Relative on Intel
	u32			die_id;

	// Compute unit ID - AMD specific
	u32			cu_id;

	// Core ID relative to the package
	u32			core_id;

	// Logical ID mappings
	u32			logical_pkg_id;
	u32			logical_die_id;

	// AMD Node ID and Nodes per Package info
	u32			amd_node_id;

	// Cache level topology IDs
	u32			llc_id;
	u32			l2c_id;
};

struct cpuinfo_x86 {
	__u8			x86;		/* CPU family */
	__u8			x86_vendor;	/* CPU vendor */
	__u8			x86_model;
	__u8			x86_stepping;
#ifdef CONFIG_X86_64
	/* Number of 4K pages in DTLB/ITLB combined: */
	int			x86_tlbsize;
#endif
#ifdef CONFIG_X86_VMX_FEATURE_NAMES
	__u32			vmx_capability[NVMXINTS];
#endif
	__u8			x86_virt_bits;
	__u8			x86_phys_bits;
	/* Max extended CPUID function supported: */
	__u32			extended_cpuid_level;
	/* Maximum supported CPUID level, -1=no CPUID: */
	int			cpuid_level;
	/*
	 * Align to size of unsigned long because the x86_capability array
	 * is passed to bitops which require the alignment. Use unnamed
	 * union to enforce the array is aligned to size of unsigned long.
	 */
	union {
		__u32		x86_capability[NCAPINTS + NBUGINTS];
		unsigned long	x86_capability_alignment;
	};
	char			x86_vendor_id[16];
	char			x86_model_id[64];
	struct cpuinfo_topology	topo;
	/* in KB - valid for CPUs which support this call: */
	unsigned int		x86_cache_size;
	int			x86_cache_alignment;	/* In bytes */
	/* Cache QoS architectural values, valid only on the BSP: */
	int			x86_cache_max_rmid;	/* max index */
	int			x86_cache_occ_scale;	/* scale to bytes */
	int			x86_cache_mbm_width_offset;
	int			x86_power;
	unsigned long		loops_per_jiffy;
	/* protected processor identification number */
	u64			ppin;
	u16			x86_clflush_size;
	/* number of cores as seen by the OS: */
	u16			booted_cores;
	/* Index into per_cpu list: */
	u16			cpu_index;
	/*  Is SMT active on this core? */
	bool			smt_active;
	u32			microcode;
	/* Address space bits used by the cache internally */
	u8			x86_cache_bits;
	unsigned		initialized : 1;
} __randomize_layout;

#define X86_VENDOR_INTEL	0
#define X86_VENDOR_CYRIX	1
#define X86_VENDOR_AMD		2
#define X86_VENDOR_UMC		3
#define X86_VENDOR_CENTAUR	5
#define X86_VENDOR_TRANSMETA	7
#define X86_VENDOR_NSC		8
#define X86_VENDOR_HYGON	9
#define X86_VENDOR_ZHAOXIN	10
#define X86_VENDOR_VORTEX	11
#define X86_VENDOR_NUM		12

#define X86_VENDOR_UNKNOWN	0xff

/*
 * capabilities of CPUs
 */
extern struct cpuinfo_x86	boot_cpu_data;
extern struct cpuinfo_x86	new_cpu_data;

extern __u32			cpu_caps_cleared[NCAPINTS + NBUGINTS];
extern __u32			cpu_caps_set[NCAPINTS + NBUGINTS];

DECLARE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
#define cpu_data(cpu)		per_cpu(cpu_info, cpu)

extern const struct seq_operations cpuinfo_op;

#define cache_line_size()	(boot_cpu_data.x86_cache_alignment)

extern void cpu_detect(struct cpuinfo_x86 *c);

static inline unsigned long long l1tf_pfn_limit(void)
{
	return BIT_ULL(boot_cpu_data.x86_cache_bits - 1 - PAGE_SHIFT);
}
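
/*
 * Worked example (editor's illustration, not part of the original header):
 * the value is half of the cacheable physical address space, expressed in
 * 4K pages.  With an illustrative x86_cache_bits of 46 and PAGE_SHIFT of
 * 12, the limit is BIT_ULL(46 - 1 - 12) == 1ULL << 33, i.e. 2^33 pages
 * or 32 TiB worth of PFNs.
 */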

extern void early_cpu_init(void);
extern void identify_secondary_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
void print_cpu_msr(struct cpuinfo_x86 *);

/*
 * Friendlier CR3 helpers.
 */
static inline unsigned long read_cr3_pa(void)
{
	return __read_cr3() & CR3_ADDR_MASK;
}

static inline unsigned long native_read_cr3_pa(void)
{
	return __native_read_cr3() & CR3_ADDR_MASK;
}

static inline void load_cr3(pgd_t *pgdir)
{
	write_cr3(__sme_pa(pgdir));
}

/*
 * Note that while the legacy 'TSS' name comes from 'Task State Segment',
 * on modern x86 CPUs the TSS also holds information important to 64-bit mode,
 * unrelated to the task-switch mechanism:
 */
#ifdef CONFIG_X86_32
/* This is the TSS defined by the hardware. */
struct x86_hw_tss {
	unsigned short		back_link, __blh;
	unsigned long		sp0;
	unsigned short		ss0, __ss0h;
	unsigned long		sp1;

	/*
	 * We don't use ring 1, so ss1 is a convenient scratch space in
	 * the same cacheline as sp0.  We use ss1 to cache the value in
	 * MSR_IA32_SYSENTER_CS.  When we context switch
	 * MSR_IA32_SYSENTER_CS, we first check if the new value being
	 * written matches ss1, and, if it's not, then we wrmsr the new
	 * value and update ss1 (see the sketch after this struct).
	 *
	 * The only reason we context switch MSR_IA32_SYSENTER_CS is
	 * that we set it to zero in vm86 tasks to avoid corrupting the
	 * stack if we were to go through the sysenter path from vm86
	 * mode.
	 */
	unsigned short		ss1;	/* MSR_IA32_SYSENTER_CS */

	unsigned short		__ss1h;
	unsigned long		sp2;
	unsigned short		ss2, __ss2h;
	unsigned long		__cr3;
	unsigned long		ip;
	unsigned long		flags;
	unsigned long		ax;
	unsigned long		cx;
	unsigned long		dx;
	unsigned long		bx;
	unsigned long		sp;
	unsigned long		bp;
	unsigned long		si;
	unsigned long		di;
	unsigned short		es, __esh;
	unsigned short		cs, __csh;
	unsigned short		ss, __ssh;
	unsigned short		ds, __dsh;
	unsigned short		fs, __fsh;
	unsigned short		gs, __gsh;
	unsigned short		ldt, __ldth;
	unsigned short		trace;
	unsigned short		io_bitmap_base;

} __attribute__((packed));
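
/*
 * Illustrative sketch of the ss1 caching scheme described above (editor's
 * example, not the actual context-switch code; 'tss' and 'new_sysenter_cs'
 * are placeholder names).  The MSR write is skipped whenever the cached
 * value already matches:
 *
 *	if (tss->x86_tss.ss1 != new_sysenter_cs) {
 *		tss->x86_tss.ss1 = new_sysenter_cs;
 *		wrmsr(MSR_IA32_SYSENTER_CS, new_sysenter_cs, 0);
 *	}
 */
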
#else
struct x86_hw_tss {
	u32			reserved1;
	u64			sp0;
	u64			sp1;

	/*
	 * Since Linux does not use ring 2, the 'sp2' slot is unused by
	 * hardware.  entry_SYSCALL_64 uses it as scratch space to stash
	 * the user RSP value.
	 */
	u64			sp2;

	u64			reserved2;
	u64			ist[7];
	u32			reserved3;
	u32			reserved4;
	u16			reserved5;
	u16			io_bitmap_base;

} __attribute__((packed));
#endif

/*
 * IO-bitmap sizes:
 */
#define IO_BITMAP_BITS			65536
#define IO_BITMAP_BYTES			(IO_BITMAP_BITS / BITS_PER_BYTE)
#define IO_BITMAP_LONGS			(IO_BITMAP_BYTES / sizeof(long))

#define IO_BITMAP_OFFSET_VALID_MAP				\
	(offsetof(struct tss_struct, io_bitmap.bitmap) -	\
	 offsetof(struct tss_struct, x86_tss))

#define IO_BITMAP_OFFSET_VALID_ALL				\
	(offsetof(struct tss_struct, io_bitmap.mapall) -	\
	 offsetof(struct tss_struct, x86_tss))

#ifdef CONFIG_X86_IOPL_IOPERM
/*
 * sizeof(unsigned long) coming from an extra "long" at the end of the
 * iobitmap. The limit is inclusive, i.e. the last valid byte.
 */
# define __KERNEL_TSS_LIMIT	\
	(IO_BITMAP_OFFSET_VALID_ALL + IO_BITMAP_BYTES + \
	 sizeof(unsigned long) - 1)
#else
# define __KERNEL_TSS_LIMIT	\
	(offsetof(struct tss_struct, x86_tss) + sizeof(struct x86_hw_tss) - 1)
#endif

/* Base offset outside of TSS_LIMIT so unprivileged IO causes #GP */
#define IO_BITMAP_OFFSET_INVALID	(__KERNEL_TSS_LIMIT + 1)
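
/*
 * Size arithmetic for reference (editor's note): IO_BITMAP_BITS is 65536,
 * so IO_BITMAP_BYTES is 65536 / 8 == 8192 and, with an 8-byte long,
 * IO_BITMAP_LONGS is 8192 / 8 == 1024.  The bitmap arrays below carry one
 * extra long so that the terminating all-ones byte the CPU reads past the
 * end of the map stays inside the TSS limit, and the "- 1" in
 * __KERNEL_TSS_LIMIT reflects that the limit is an inclusive byte offset.
 */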

struct entry_stack {
	char	stack[PAGE_SIZE];
};

struct entry_stack_page {
	struct entry_stack stack;
} __aligned(PAGE_SIZE);

/*
 * All IO bitmap related data stored in the TSS:
 */
struct x86_io_bitmap {
	/* The sequence number of the last active bitmap. */
	u64			prev_sequence;

	/*
	 * Store the dirty size of the last io bitmap offender. The next
	 * one will have to do the cleanup as the switch out to a non io
	 * bitmap user will just set x86_tss.io_bitmap_base to a value
	 * outside of the TSS limit. So for sane tasks there is no need to
	 * actually touch the io_bitmap at all.
	 */
	unsigned int		prev_max;

	/*
	 * The extra 1 is there because the CPU will access an
	 * additional byte beyond the end of the IO permission
	 * bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit.
	 */
	unsigned long		bitmap[IO_BITMAP_LONGS + 1];

	/*
	 * Special I/O bitmap to emulate IOPL(3). All bytes zero,
	 * except the additional byte at the end.
	 */
	unsigned long		mapall[IO_BITMAP_LONGS + 1];
};

struct tss_struct {
	/*
	 * The fixed hardware portion.  This must not cross a page boundary
	 * at risk of violating the SDM's advice and potentially triggering
	 * errata.
	 */
	struct x86_hw_tss	x86_tss;

	struct x86_io_bitmap	io_bitmap;
} __aligned(PAGE_SIZE);

DECLARE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw);

/* Per CPU interrupt stacks */
struct irq_stack {
	char		stack[IRQ_STACK_SIZE];
} __aligned(IRQ_STACK_SIZE);

#ifdef CONFIG_X86_64
struct fixed_percpu_data {
	/*
	 * GCC hardcodes the stack canary as %gs:40.  Since the
	 * irq_stack is the object at %gs:0, we reserve the bottom
	 * 48 bytes of the irq stack for the canary.
	 *
	 * Once we are willing to require -mstack-protector-guard-symbol=
	 * support for x86_64 stackprotector, we can get rid of this.
	 */
	char		gs_base[40];
	unsigned long	stack_canary;
};
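
/*
 * Editor's note: the char gs_base[40] pad above is what places
 * stack_canary at offset 40 from the per-CPU GS base, matching the
 * %gs:40 slot that GCC hardcodes, and 40 + sizeof(unsigned long) == 48
 * accounts for the 48 reserved bytes mentioned in the comment.  An
 * illustrative assertion capturing the invariant would be:
 *
 *	BUILD_BUG_ON(offsetof(struct fixed_percpu_data, stack_canary) != 40);
 */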

DECLARE_PER_CPU_FIRST(struct fixed_percpu_data, fixed_percpu_data) __visible;
DECLARE_INIT_PER_CPU(fixed_percpu_data);

static inline unsigned long cpu_kernelmode_gs_base(int cpu)
{
	return (unsigned long)per_cpu(fixed_percpu_data.gs_base, cpu);
}

extern asmlinkage void entry_SYSCALL32_ignore(void);

/* Save actual FS/GS selectors and bases to current->thread */
void current_save_fsgs(void);
#else	/* X86_64 */
#ifdef CONFIG_STACKPROTECTOR
DECLARE_PER_CPU(unsigned long, __stack_chk_guard);
#endif
#endif	/* !X86_64 */

struct perf_event;

struct thread_struct {
	/* Cached TLS descriptors: */
	struct desc_struct	tls_array[GDT_ENTRY_TLS_ENTRIES];
#ifdef CONFIG_X86_32
	unsigned long		sp0;
#endif
	unsigned long		sp;
#ifdef CONFIG_X86_32
	unsigned long		sysenter_cs;
#else
	unsigned short		es;
	unsigned short		ds;
	unsigned short		fsindex;
	unsigned short		gsindex;
#endif

#ifdef CONFIG_X86_64
	unsigned long		fsbase;
	unsigned long		gsbase;
#else
	/*
	 * XXX: this could presumably be unsigned short.  Alternatively,
	 * 32-bit kernels could be taught to use fsindex instead.
	 */
	unsigned long fs;
	unsigned long gs;
#endif

	/* Save middle states of ptrace breakpoints */
	struct perf_event	*ptrace_bps[HBP_NUM];
	/* Debug status used for traps, single steps, etc... */
	unsigned long           virtual_dr6;
	/* Keep track of the exact dr7 value set by the user */
	unsigned long           ptrace_dr7;
	/* Fault info: */
	unsigned long		cr2;
	unsigned long		trap_nr;
	unsigned long		error_code;
#ifdef CONFIG_VM86
	/* Virtual 86 mode info */
	struct vm86		*vm86;
#endif
	/* IO permissions: */
	struct io_bitmap	*io_bitmap;

	/*
	 * IOPL. Privilege level dependent I/O permission which is
	 * emulated via the I/O bitmap to prevent user space from disabling
	 * interrupts.
	 */
	unsigned long		iopl_emul;

	unsigned int		iopl_warn:1;
	unsigned int		sig_on_uaccess_err:1;

	/*
	 * Protection Keys Register for Userspace.  Loaded immediately on
	 * context switch. Store it in thread_struct to avoid a lookup in
	 * the task's FPU xstate buffer. This value is only valid when a
	 * task is scheduled out. For 'current' the authoritative source of
	 * PKRU is the hardware itself.
	 */
	u32			pkru;

#ifdef CONFIG_X86_USER_SHADOW_STACK
	unsigned long		features;
	unsigned long		features_locked;

	struct thread_shstk	shstk;
#endif

	/* Floating point and extended processor state */
	struct fpu		fpu;
	/*
	 * WARNING: 'fpu' is dynamically-sized.  It *MUST* be at
	 * the end.
	 */
};

extern void fpu_thread_struct_whitelist(unsigned long *offset, unsigned long *size);

static inline void arch_thread_struct_whitelist(unsigned long *offset,
						unsigned long *size)
{
	fpu_thread_struct_whitelist(offset, size);
}

static inline void
native_load_sp0(unsigned long sp0)
{
	this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0);
}

static __always_inline void native_swapgs(void)
{
#ifdef CONFIG_X86_64
	asm volatile("swapgs" ::: "memory");
#endif
}

static __always_inline unsigned long current_top_of_stack(void)
{
	/*
	 *  We can't read directly from tss.sp0: sp0 on x86_32 is special in
	 *  and around vm86 mode and sp0 on x86_64 is special because of the
	 *  entry trampoline.
	 */
	if (IS_ENABLED(CONFIG_USE_X86_SEG_SUPPORT))
		return this_cpu_read_const(const_pcpu_hot.top_of_stack);

	return this_cpu_read_stable(pcpu_hot.top_of_stack);
}

static __always_inline bool on_thread_stack(void)
{
	return (unsigned long)(current_top_of_stack() -
			       current_stack_pointer) < THREAD_SIZE;
}
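
/*
 * Editor's note: the single unsigned comparison above covers both
 * directions at once.  If current_stack_pointer is above the top of the
 * stack, the subtraction wraps to a huge unsigned value and fails the
 * "< THREAD_SIZE" test; if it is more than THREAD_SIZE below the top,
 * the difference itself is too large.  Only pointers inside the
 * THREAD_SIZE window just below the top pass.
 */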

#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else

static inline void load_sp0(unsigned long sp0)
{
	native_load_sp0(sp0);
}

#endif /* CONFIG_PARAVIRT_XXL */

unsigned long __get_wchan(struct task_struct *p);

extern void select_idle_routine(void);
extern void amd_e400_c1e_apic_setup(void);

extern unsigned long		boot_option_idle_override;

enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
			 IDLE_POLL};

extern void enable_sep_cpu(void);


/* Defined in head.S */
extern struct desc_ptr		early_gdt_descr;

extern void switch_gdt_and_percpu_base(int);
extern void load_direct_gdt(int);
extern void load_fixmap_gdt(int);
extern void cpu_init(void);
extern void cpu_init_exception_handling(void);
extern void cr4_init(void);

extern void set_task_blockstep(struct task_struct *task, bool on);

/* Boot loader type from the setup header: */
extern int			bootloader_type;
extern int			bootloader_version;

extern char			ignore_fpu_irq;

#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
#define ARCH_HAS_PREFETCHW

#ifdef CONFIG_X86_32
# define BASE_PREFETCH		""
# define ARCH_HAS_PREFETCH
#else
# define BASE_PREFETCH		"prefetcht0 %P1"
#endif
/*
 * Prefetch instructions for Pentium III (+) and AMD Athlon (+)
 *
 * It's not worth caring about 3dnow prefetches for the K6
 * because they are microcoded there and very slow.
 */
static inline void prefetch(const void *x)
{
	alternative_input(BASE_PREFETCH, "prefetchnta %P1",
			  X86_FEATURE_XMM,
			  "m" (*(const char *)x));
}

/*
 * 3dnow prefetch to get an exclusive cache line.
 * Useful for spinlocks to avoid one state transition in the
 * cache coherency protocol:
 */
static __always_inline void prefetchw(const void *x)
{
	alternative_input(BASE_PREFETCH, "prefetchw %P1",
			  X86_FEATURE_3DNOWPREFETCH,
			  "m" (*(const char *)x));
}
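
/*
 * Illustrative use of prefetchw() (editor's sketch; 'lock' and 'owner'
 * are placeholders, not types from this header): warming a line for
 * ownership before a read-modify-write saves one coherency transition:
 *
 *	prefetchw(&lock->owner);
 *	...
 *	cmpxchg(&lock->owner, old, new);
 */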

#define TOP_OF_INIT_STACK ((unsigned long)&init_stack + sizeof(init_stack) - \
			   TOP_OF_KERNEL_STACK_PADDING)

#define task_top_of_stack(task) ((unsigned long)(task_pt_regs(task) + 1))

#define task_pt_regs(task) \
({									\
	unsigned long __ptr = (unsigned long)task_stack_page(task);	\
	__ptr += THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;		\
	((struct pt_regs *)__ptr) - 1;					\
})
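
/*
 * Stack layout implied by the macros above (editor's summary): the task
 * stack spans THREAD_SIZE bytes starting at task_stack_page(task), the
 * top TOP_OF_KERNEL_STACK_PADDING bytes are reserved, struct pt_regs
 * sits immediately below that padding, and task_top_of_stack() is
 * therefore task_pt_regs(task) + 1, the address just above the saved
 * user registers.
 */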

#ifdef CONFIG_X86_32
#define INIT_THREAD  {							  \
	.sp0			= TOP_OF_INIT_STACK,			  \
	.sysenter_cs		= __KERNEL_CS,				  \
}

#define KSTK_ESP(task)		(task_pt_regs(task)->sp)

#else
extern unsigned long __end_init_task[];

#define INIT_THREAD {							\
	.sp	= (unsigned long)&__end_init_task -			\
		  TOP_OF_KERNEL_STACK_PADDING -				\
		  sizeof(struct pt_regs),				\
}

extern unsigned long KSTK_ESP(struct task_struct *task);

#endif /* CONFIG_X86_64 */

extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
					       unsigned long new_sp);

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmaps.
 */
#define __TASK_UNMAPPED_BASE(task_size)	(PAGE_ALIGN(task_size / 3))
#define TASK_UNMAPPED_BASE		__TASK_UNMAPPED_BASE(TASK_SIZE_LOW)
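
/*
 * Editor's example: with the common 32-bit 3G/1G split where the task
 * size is 3 GiB (0xC0000000), the default mmap search base works out to
 * PAGE_ALIGN(0xC0000000 / 3) == 0x40000000, i.e. 1 GiB into the address
 * space.
 */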

#define KSTK_EIP(task)		(task_pt_regs(task)->ip)

/* Get/set a process' ability to use the timestamp counter instruction */
#define GET_TSC_CTL(adr)	get_tsc_mode((adr))
#define SET_TSC_CTL(val)	set_tsc_mode((val))

extern int get_tsc_mode(unsigned long adr);
extern int set_tsc_mode(unsigned int val);

DECLARE_PER_CPU(u64, msr_misc_features_shadow);

static inline u32 per_cpu_llc_id(unsigned int cpu)
{
	return per_cpu(cpu_info.topo.llc_id, cpu);
}

static inline u32 per_cpu_l2c_id(unsigned int cpu)
{
	return per_cpu(cpu_info.topo.l2c_id, cpu);
}

#ifdef CONFIG_CPU_SUP_AMD
extern u32 amd_get_highest_perf(void);
extern void amd_clear_divider(void);
extern void amd_check_microcode(void);
#else
static inline u32 amd_get_highest_perf(void)		{ return 0; }
static inline void amd_clear_divider(void)		{ }
static inline void amd_check_microcode(void)		{ }
#endif

extern unsigned long arch_align_stack(unsigned long sp);
void free_init_pages(const char *what, unsigned long begin, unsigned long end);
extern void free_kernel_image_pages(const char *what, void *begin, void *end);

void default_idle(void);
#ifdef	CONFIG_XEN
bool xen_set_default_idle(void);
#else
#define xen_set_default_idle 0
#endif

void __noreturn stop_this_cpu(void *dummy);
void microcode_check(struct cpuinfo_x86 *prev_info);
void store_cpu_caps(struct cpuinfo_x86 *info);

enum l1tf_mitigations {
	L1TF_MITIGATION_OFF,
	L1TF_MITIGATION_FLUSH_NOWARN,
	L1TF_MITIGATION_FLUSH,
	L1TF_MITIGATION_FLUSH_NOSMT,
	L1TF_MITIGATION_FULL,
	L1TF_MITIGATION_FULL_FORCE
};

extern enum l1tf_mitigations l1tf_mitigation;

enum mds_mitigations {
	MDS_MITIGATION_OFF,
	MDS_MITIGATION_FULL,
	MDS_MITIGATION_VMWERV,
};

extern bool gds_ucode_mitigated(void);

/*
 * Make previous memory operations globally visible before
 * a WRMSR.
 *
 * MFENCE makes writes visible, but only affects load/store
 * instructions.  WRMSR is unfortunately not a load/store
 * instruction and is unaffected by MFENCE.  The LFENCE ensures
 * that the WRMSR is not reordered.
 *
 * Most WRMSRs are full serializing instructions themselves and
 * do not require this barrier.  This is only required for the
 * IA32_TSC_DEADLINE and X2APIC MSRs.
 */
static inline void weak_wrmsr_fence(void)
{
	alternative("mfence; lfence", "", ALT_NOT(X86_FEATURE_APIC_MSRS_FENCE));
}
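
/*
 * Illustrative call site (editor's sketch, not code from this header;
 * 'deadline' is a placeholder value): before programming one of the
 * non-serializing MSRs named above, a caller orders prior stores first:
 *
 *	weak_wrmsr_fence();
 *	wrmsrl(MSR_IA32_TSC_DEADLINE, deadline);
 */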

#endif /* _ASM_X86_PROCESSOR_H */