/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_DEBUGREG_H
#define _ASM_X86_DEBUGREG_H

#include <linux/bug.h>
#include <linux/percpu.h>
#include <uapi/asm/debugreg.h>

#include <asm/cpufeature.h>
#include <asm/msr.h>

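/*
 * Per-CPU shadow of the last DR7 value written on this CPU, maintained
 * by the hw_breakpoint code so DR7 does not have to be read back.
 */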
DECLARE_PER_CPU(unsigned long, cpu_dr7);

#ifndef CONFIG_PARAVIRT_XXL
/*
 * These special macros can be used to get or set a debugging register
 */
#define get_debugreg(var, register)				\
	(var) = native_get_debugreg(register)
#define set_debugreg(value, register)				\
	native_set_debugreg(register, value)
#endif
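
/*
 * Usage sketch (illustrative, not taken from in-tree callers): read
 * DR6 into a local variable, then clear it:
 *
 *	unsigned long dr6;
 *
 *	get_debugreg(dr6, 6);
 *	set_debugreg(0UL, 6);
 *
 * Under CONFIG_PARAVIRT_XXL the same macro names are provided by the
 * paravirt code instead.
 */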

static __always_inline unsigned long native_get_debugreg(int regno)
{
	unsigned long val = 0;	/* Damn you, gcc! */

	switch (regno) {
	case 0:
		asm("mov %%db0, %0" :"=r" (val));
		break;
	case 1:
		asm("mov %%db1, %0" :"=r" (val));
		break;
	case 2:
		asm("mov %%db2, %0" :"=r" (val));
		break;
	case 3:
		asm("mov %%db3, %0" :"=r" (val));
		break;
	case 6:
		asm("mov %%db6, %0" :"=r" (val));
		break;
	case 7:
		/*
		 * Apply __FORCE_ORDER to DR7 reads to forbid re-ordering them
		 * with other code.
		 *
		 * This is needed because a DR7 access can cause a #VC exception
		 * when running under SEV-ES. Taking a #VC exception is not a
		 * safe thing to do just anywhere in the entry code and
		 * re-ordering might place the access into an unsafe location.
		 *
		 * This happened in the NMI handler, where the DR7 read was
		 * re-ordered to happen before the call to sev_es_ist_enter(),
		 * causing stack recursion.
		 */
		asm volatile("mov %%db7, %0" : "=r" (val) : __FORCE_ORDER);
		break;
	default:
		BUG();
	}
	return val;
}

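/* Write @value into hardware debug register @regno (0-3, 6 or 7). */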
static __always_inline void native_set_debugreg(int regno, unsigned long value)
{
	switch (regno) {
	case 0:
		asm("mov %0, %%db0"	::"r" (value));
		break;
	case 1:
		asm("mov %0, %%db1"	::"r" (value));
		break;
	case 2:
		asm("mov %0, %%db2"	::"r" (value));
		break;
	case 3:
		asm("mov %0, %%db3"	::"r" (value));
		break;
	case 6:
		asm("mov %0, %%db6"	::"r" (value));
		break;
	case 7:
		/*
		 * Apply __FORCE_ORDER to DR7 writes to forbid re-ordering them
		 * with other code.
		 *
		 * While it didn't happen with a DR7 write (see the DR7 read
		 * comment above which explains where it happened), add the
		 * __FORCE_ORDER here too to avoid similar problems in the
		 * future.
		 */
		asm volatile("mov %0, %%db7"	::"r" (value), __FORCE_ORDER);
		break;
	default:
		BUG();
	}
}

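/*
 * Disable all hardware breakpoints on this CPU: clear the enable bits
 * in DR7 first, so that none of the address registers can still fire
 * while they are being zeroed.
 */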
static inline void hw_breakpoint_disable(void)
{
	/* Zero the control register for HW Breakpoint */
	set_debugreg(0UL, 7);

	/* Zero-out the individual HW breakpoint address registers */
	set_debugreg(0UL, 0);
	set_debugreg(0UL, 1);
	set_debugreg(0UL, 2);
	set_debugreg(0UL, 3);
}

static __always_inline bool hw_breakpoint_active(void)
{
	return __this_cpu_read(cpu_dr7) & DR_GLOBAL_ENABLE_MASK;
}

extern void hw_breakpoint_restore(void);

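/*
 * Temporarily disable any active breakpoints around a critical
 * section, returning the previous DR7 value for local_db_restore().
 * When running under a hypervisor with no breakpoints armed, the
 * (trapping, hence expensive) DR7 read is skipped entirely.
 */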
static __always_inline unsigned long local_db_save(void)
{
	unsigned long dr7;

	if (static_cpu_has(X86_FEATURE_HYPERVISOR) && !hw_breakpoint_active())
		return 0;

	get_debugreg(dr7, 7);
	dr7 &= ~0x400; /* architecturally set bit */
	if (dr7)
		set_debugreg(0, 7);
	/*
	 * Ensure the compiler doesn't lower the above statements into
	 * the critical section; disabling breakpoints late would not
	 * be good.
	 */
	barrier();

	return dr7;
}

static __always_inline void local_db_restore(unsigned long dr7)
{
	/*
	 * Ensure the compiler doesn't raise this statement into
	 * the critical section; enabling breakpoints early would
	 * not be good.
	 */
	barrier();
	if (dr7)
		set_debugreg(dr7, 7);
}
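
/*
 * Pairing sketch (illustrative, not taken from in-tree callers): an
 * entry path brackets code that must not trigger data breakpoints:
 *
 *	unsigned long dr7 = local_db_save();
 *
 *	... fragile entry code ...
 *
 *	local_db_restore(dr7);
 */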

#ifdef CONFIG_CPU_SUP_AMD
extern void amd_set_dr_addr_mask(unsigned long mask, unsigned int dr);
extern unsigned long amd_get_dr_addr_mask(unsigned int dr);
#else
static inline void amd_set_dr_addr_mask(unsigned long mask, unsigned int dr) { }
static inline unsigned long amd_get_dr_addr_mask(unsigned int dr)
{
	return 0;
}
#endif
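
/*
 * AMD CPUs that support breakpoint address masks let each debug
 * register cover a range of addresses rather than a single one; the
 * helpers above program and read back those per-register masks, and
 * compile away to no-ops (returning 0) when AMD support is not built
 * in.
 */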

static inline unsigned long get_debugctlmsr(void)
{
	unsigned long debugctlmsr = 0;

#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return 0;
#endif
	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);

	return debugctlmsr;
}

static inline void update_debugctlmsr(unsigned long debugctlmsr)
{
#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return;
#endif
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
}
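
/*
 * Usage sketch (illustrative): set the Branch Trap Flag so that a
 * single-step trap is taken on branches rather than on every
 * instruction (DEBUGCTLMSR_BTF comes from <asm/msr-index.h>):
 *
 *	unsigned long debugctl = get_debugctlmsr();
 *
 *	update_debugctlmsr(debugctl | DEBUGCTLMSR_BTF);
 */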

#endif /* _ASM_X86_DEBUGREG_H */