/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _ASM_X86_CPU_ENTRY_AREA_H
#define _ASM_X86_CPU_ENTRY_AREA_H

#include <linux/percpu-defs.h>
#include <asm/processor.h>
#include <asm/intel_ds.h>
#include <asm/pgtable_areas.h>

#ifdef CONFIG_X86_64

#ifdef CONFIG_AMD_MEM_ENCRYPT
#define VC_EXCEPTION_STKSZ	EXCEPTION_STKSZ
#else
#define VC_EXCEPTION_STKSZ	0
#endif
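
/*
 * The #VC exception is only raised in SEV-ES guests, so its IST stacks
 * only need backing store when CONFIG_AMD_MEM_ENCRYPT is enabled. With
 * a size of 0, e.g.
 *
 *	sizeof(((struct exception_stacks *)0)->VC_stack) == 0
 *
 * the VC/VC2 stacks consume no physical storage, while the guard-paged
 * virtual layout of cea_exception_stacks below stays the same.
 */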

/* Macro to enforce the same ordering and stack sizes */
#define ESTACKS_MEMBERS(guardsize, optional_stack_size)		\
	char	DF_stack_guard[guardsize];			\
	char	DF_stack[EXCEPTION_STKSZ];			\
	char	NMI_stack_guard[guardsize];			\
	char	NMI_stack[EXCEPTION_STKSZ];			\
	char	DB_stack_guard[guardsize];			\
	char	DB_stack[EXCEPTION_STKSZ];			\
	char	MCE_stack_guard[guardsize];			\
	char	MCE_stack[EXCEPTION_STKSZ];			\
	char	VC_stack_guard[guardsize];			\
	char	VC_stack[optional_stack_size];			\
	char	VC2_stack_guard[guardsize];			\
	char	VC2_stack[optional_stack_size];			\
	char	IST_top_guard[guardsize];			\

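/*
 * Illustrative expansion (a sketch): struct exception_stacks below uses
 * ESTACKS_MEMBERS(0, VC_EXCEPTION_STKSZ), which begins as
 *
 *	char	DF_stack_guard[0];
 *	char	DF_stack[EXCEPTION_STKSZ];
 *	char	NMI_stack_guard[0];
 *	char	NMI_stack[EXCEPTION_STKSZ];
 *	...
 *
 * i.e. zero-sized guards for the physical storage, whereas the cea_
 * variant passes PAGE_SIZE so every stack gets a preceding guard page.
 */
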
/* The exception stacks' physical storage. No guard pages required. */
struct exception_stacks {
	ESTACKS_MEMBERS(0, VC_EXCEPTION_STKSZ)
};

/* The effective cpu entry area mapping with guard pages. */
struct cea_exception_stacks {
	ESTACKS_MEMBERS(PAGE_SIZE, EXCEPTION_STKSZ)
};
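
/*
 * The *_guard members are never mapped in the cpu_entry_area; an
 * overflowing IST stack therefore faults on a guard page instead of
 * silently corrupting the neighbouring stack. The mapping is set up in
 * arch/x86/mm/cpu_entry_area.c.
 */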

/*
 * The exception stack ordering in [cea_]exception_stacks
 */
enum exception_stack_ordering {
	ESTACK_DF,
	ESTACK_NMI,
	ESTACK_DB,
	ESTACK_MCE,
	ESTACK_VC,
	ESTACK_VC2,
	N_EXCEPTION_STACKS
};
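
/*
 * The enum order must match the member order in ESTACKS_MEMBERS();
 * lookup tables such as estack_pages[] in
 * arch/x86/kernel/dumpstack_64.c are indexed by ESTACK_* and rely on
 * the two staying in sync.
 */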

#define CEA_ESTACK_SIZE(st)					\
	sizeof(((struct cea_exception_stacks *)0)->st## _stack)

#define CEA_ESTACK_BOT(ceastp, st)				\
	((unsigned long)&(ceastp)->st## _stack)

#define CEA_ESTACK_TOP(ceastp, st)				\
	(CEA_ESTACK_BOT(ceastp, st) + CEA_ESTACK_SIZE(st))

#define CEA_ESTACK_OFFS(st)					\
	offsetof(struct cea_exception_stacks, st## _stack)

#define CEA_ESTACK_PAGES					\
	(sizeof(struct cea_exception_stacks) / PAGE_SIZE)
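
/*
 * Example (a sketch, with a hypothetical 'estacks' pointer to one CPU's
 * cea_exception_stacks):
 *
 *	unsigned long bot = CEA_ESTACK_BOT(estacks, DF);
 *	unsigned long top = CEA_ESTACK_TOP(estacks, DF);
 *
 * gives the usable #DF stack range, with
 * top - bot == CEA_ESTACK_SIZE(DF) == EXCEPTION_STKSZ.
 */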

#endif

#ifdef CONFIG_X86_32
struct doublefault_stack {
	unsigned long stack[(PAGE_SIZE - sizeof(struct x86_hw_tss)) / sizeof(unsigned long)];
	struct x86_hw_tss tss;
} __aligned(PAGE_SIZE);
#endif
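
/*
 * For the 32-bit doublefault_stack above: the stack array is sized to
 * fill whatever PAGE_SIZE leaves over after struct x86_hw_tss, so the
 * stack sits directly below the hardware TSS and the whole struct is
 * exactly one page.
 */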

/*
 * cpu_entry_area is a percpu region that contains things needed by the CPU
 * and early entry/exit code.  Real types aren't used for all fields here
 * to avoid circular header dependencies.
 *
 * Every field is a virtual alias of some other allocated backing store.
 * There is no direct allocation of a struct cpu_entry_area.
 */
struct cpu_entry_area {
	char gdt[PAGE_SIZE];

	/*
	 * The GDT is just below entry_stack and thus serves (on x86_64) as
	 * a read-only guard page. On 32-bit the GDT must be writeable, so
	 * it needs an extra guard page.
	 */
#ifdef CONFIG_X86_32
	char guard_entry_stack[PAGE_SIZE];
#endif
	struct entry_stack_page entry_stack_page;

#ifdef CONFIG_X86_32
	char guard_doublefault_stack[PAGE_SIZE];
	struct doublefault_stack doublefault_stack;
#endif

	/*
	 * On x86_64, the TSS is mapped RO.  On x86_32, it's mapped RW because
	 * we need task switches to work, and task switches write to the TSS.
	 */
	struct tss_struct tss;

#ifdef CONFIG_X86_64
	/*
	 * Exception stacks used for IST entries with guard pages.
	 */
	struct cea_exception_stacks estacks;
#endif
	/*
	 * Per CPU debug store for Intel performance monitoring. Wastes a
	 * full page at the moment.
	 */
	struct debug_store cpu_debug_store;
	/*
	 * The actual PEBS/BTS buffers must be mapped to user space.
	 * Reserve enough fixmap PTEs.
	 */
	struct debug_store_buffers cpu_debug_buffers;
};

#define CPU_ENTRY_AREA_SIZE		(sizeof(struct cpu_entry_area))

DECLARE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
DECLARE_PER_CPU(struct cea_exception_stacks *, cea_exception_stacks);

extern void setup_cpu_entry_areas(void);
extern void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags);

extern struct cpu_entry_area *get_cpu_entry_area(int cpu);
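
/*
 * get_cpu_entry_area() is defined in arch/x86/mm/cpu_entry_area.c.
 * Conceptually (a sketch; the implementation may randomize the per-CPU
 * offset) it returns:
 *
 *	(struct cpu_entry_area *)
 *		(CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE);
 */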

static __always_inline struct entry_stack *cpu_entry_stack(int cpu)
{
	return &get_cpu_entry_area(cpu)->entry_stack_page.stack;
}
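
/*
 * Example (a sketch): the top of the entry stack is one past the end of
 * the returned struct, e.g. a hypothetical
 *
 *	unsigned long top = (unsigned long)(cpu_entry_stack(cpu) + 1);
 *
 * which is the kind of value entry code uses as its initial kernel
 * stack pointer.
 */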

#define __this_cpu_ist_top_va(name)					\
	CEA_ESTACK_TOP(__this_cpu_read(cea_exception_stacks), name)

#define __this_cpu_ist_bottom_va(name)					\
	CEA_ESTACK_BOT(__this_cpu_read(cea_exception_stacks), name)
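
/*
 * Example (a sketch): __this_cpu_ist_top_va(DF) evaluates to the
 * (exclusive) top of the current CPU's #DF IST stack; the IST setup
 * code programs values like this into the TSS ist[] slots (see the IST
 * setup in arch/x86/kernel/cpu/common.c).
 */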

#endif