// SPDX-License-Identifier: GPL-2.0-only
/*
 * Debug and Guest Debug support
 *
 * Copyright (C) 2015 - Linaro Ltd
 * Author: Alex Bennée <alex.bennee@linaro.org>
 */

#include <linux/kvm_host.h>
#include <linux/hw_breakpoint.h>

#include <asm/debug-monitors.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_emulate.h>

#include "trace.h"

/* These are the bits of MDSCR_EL1 we may manipulate */
#define MDSCR_EL1_DEBUG_MASK	(DBG_MDSCR_SS | \
				DBG_MDSCR_KDE | \
				DBG_MDSCR_MDE)

static DEFINE_PER_CPU(u64, mdcr_el2);

/*
 * save/restore_guest_debug_regs
 *
 * For some debug operations we need to tweak some guest registers. As
 * a result we need to save the state of those registers before we
 * make those modifications.
 *
 * Guest access to MDSCR_EL1 is trapped by the hypervisor and handled
 * after we have restored the preserved value to the main context.
 *
 * When single-step is enabled by userspace, we tweak PSTATE.SS on every
 * guest entry. Preserve PSTATE.SS so we can restore the original value
 * for the vcpu after the single-step is disabled.
 */
static void save_guest_debug_regs(struct kvm_vcpu *vcpu)
{
	u64 val = vcpu_read_sys_reg(vcpu, MDSCR_EL1);

	vcpu->arch.guest_debug_preserved.mdscr_el1 = val;

	trace_kvm_arm_set_dreg32("Saved MDSCR_EL1",
				vcpu->arch.guest_debug_preserved.mdscr_el1);

	vcpu->arch.guest_debug_preserved.pstate_ss =
					(*vcpu_cpsr(vcpu) & DBG_SPSR_SS);
}

static void restore_guest_debug_regs(struct kvm_vcpu *vcpu)
{
	u64 val = vcpu->arch.guest_debug_preserved.mdscr_el1;

	vcpu_write_sys_reg(vcpu, val, MDSCR_EL1);

	trace_kvm_arm_set_dreg32("Restored MDSCR_EL1",
				vcpu_read_sys_reg(vcpu, MDSCR_EL1));

	if (vcpu->arch.guest_debug_preserved.pstate_ss)
		*vcpu_cpsr(vcpu) |= DBG_SPSR_SS;
	else
		*vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
}
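
/*
 * Note: these two helpers are used as a pair: kvm_arm_setup_debug() saves
 * the guest's MDSCR_EL1 and PSTATE.SS before tweaking them on guest entry,
 * and kvm_arm_clear_debug() restores them again after the guest exits.
 */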

/**
 * kvm_arm_init_debug - grab what we need for debug
 *
 * Currently the sole task of this function is to retrieve the initial
 * value of mdcr_el2 so we can preserve MDCR_EL2.HPMN which has
 * presumably been set-up by some knowledgeable bootcode.
 *
 * It is called once per-cpu during CPU hyp initialisation.
 */

void kvm_arm_init_debug(void)
{
	__this_cpu_write(mdcr_el2, kvm_call_hyp_ret(__kvm_get_mdcr_el2));
}

/**
 * kvm_arm_setup_mdcr_el2 - configure vcpu mdcr_el2 value
 *
 * @vcpu:	the vcpu pointer
 *
 * This ensures we will trap access to:
 *  - Performance monitors (MDCR_EL2_TPM/MDCR_EL2_TPMCR)
 *  - Debug ROM Address (MDCR_EL2_TDRA)
 *  - OS related registers (MDCR_EL2_TDOSA)
 *  - Statistical profiler (MDCR_EL2_TPMS/MDCR_EL2_E2PB)
 *  - Self-hosted Trace Filter controls (MDCR_EL2_TTRF)
 *  - Self-hosted Trace (MDCR_EL2_TTRF/MDCR_EL2_E2TB)
 */
static void kvm_arm_setup_mdcr_el2(struct kvm_vcpu *vcpu)
{
	/*
	 * This also clears MDCR_EL2_E2PB_MASK and MDCR_EL2_E2TB_MASK
	 * to disable guest access to the profiling and trace buffers
	 */
	vcpu->arch.mdcr_el2 = __this_cpu_read(mdcr_el2) & MDCR_EL2_HPMN_MASK;
	vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM |
				MDCR_EL2_TPMS |
				MDCR_EL2_TTRF |
				MDCR_EL2_TPMCR |
				MDCR_EL2_TDRA |
				MDCR_EL2_TDOSA);

	/* Is the VM being debugged by userspace? */
	if (vcpu->guest_debug)
		/* Route all software debug exceptions to EL2 */
		vcpu->arch.mdcr_el2 |= MDCR_EL2_TDE;

	/*
	 * Trap debug register access when one of the following is true:
	 *  - Userspace is using the hardware to debug the guest
	 *  (KVM_GUESTDBG_USE_HW is set).
	 *  - The guest is not using debug (DEBUG_DIRTY clear).
	 *  - The guest has enabled the OS Lock (debug exceptions are blocked).
	 */
	if ((vcpu->guest_debug & KVM_GUESTDBG_USE_HW) ||
	    !vcpu_get_flag(vcpu, DEBUG_DIRTY) ||
	    kvm_vcpu_os_lock_enabled(vcpu))
		vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA;

	trace_kvm_arm_set_dreg32("MDCR_EL2", vcpu->arch.mdcr_el2);
}
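
/*
 * Note: kvm_arm_setup_mdcr_el2() is not a one-off; it runs at vcpu init
 * (kvm_arm_vcpu_init_debug()) and again from kvm_arm_setup_debug() before
 * every guest entry, so the trap bits track changes to vcpu->guest_debug,
 * DEBUG_DIRTY and the guest's OS Lock state.
 */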

/**
 * kvm_arm_vcpu_init_debug - setup vcpu debug traps
 *
 * @vcpu:	the vcpu pointer
 *
 * Set vcpu initial mdcr_el2 value.
 */
void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arm_setup_mdcr_el2(vcpu);
	preempt_enable();
}

/**
 * kvm_arm_reset_debug_ptr - reset the debug ptr to point to the vcpu state
 * @vcpu:	the vcpu pointer
 */

void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu)
{
	vcpu->arch.debug_ptr = &vcpu->arch.vcpu_debug_state;
}
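
/*
 * The vcpu->guest_debug bits tested below are set by userspace through the
 * generic KVM_SET_GUEST_DEBUG ioctl. As a rough, hypothetical sketch of the
 * VMM side (vcpu_fd being an open vcpu file descriptor), enabling
 * single-step looks something like:
 *
 *	struct kvm_guest_debug dbg = {
 *		.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
 *
 * Each subsequent KVM_RUN then steps roughly one guest instruction and
 * returns to userspace with a KVM_EXIT_DEBUG exit.
 */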

/**
 * kvm_arm_setup_debug - set up debug related stuff
 *
 * @vcpu:	the vcpu pointer
 *
 * This is called before each entry into the hypervisor to setup any
 * debug related registers.
 *
 * Additionally, KVM only traps guest accesses to the debug registers if
 * the guest is not actively using them (see the DEBUG_DIRTY
 * flag on vcpu->arch.iflags).  Since the guest must not interfere
 * with the hardware state when debugging the guest, we must ensure that
 * trapping is enabled whenever we are debugging the guest using the
 * debug registers.
 */

void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
{
	unsigned long mdscr, orig_mdcr_el2 = vcpu->arch.mdcr_el2;

	trace_kvm_arm_setup_debug(vcpu, vcpu->guest_debug);

	kvm_arm_setup_mdcr_el2(vcpu);

	/* Check if we need to use the debug registers. */
	if (vcpu->guest_debug || kvm_vcpu_os_lock_enabled(vcpu)) {
		/* Save guest debug state */
		save_guest_debug_regs(vcpu);

		/*
		 * Single Step (ARM ARM D2.12.3 The software step state
		 * machine)
		 *
		 * If we are doing Single Step we need to manipulate
		 * the guest's MDSCR_EL1.SS and PSTATE.SS. Once the
		 * step has occurred the hypervisor will trap the
		 * debug exception and we return to userspace.
		 *
		 * If the guest attempts to single step its userspace
		 * we would have to deal with a trapped exception
		 * while in the guest kernel. Because this would be
		 * hard to unwind we suppress the guest's ability to
		 * do so by masking MDSCR_EL1.SS.
		 *
		 * This confuses guest debuggers which use
		 * single-step behind the scenes but everything
		 * returns to normal once the host is no longer
		 * debugging the system.
		 */
		if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
			/*
			 * If the software step state at the last guest exit
			 * was Active-pending, we don't set DBG_SPSR_SS so
			 * that the state is maintained (to not run another
			 * single-step until the pending Software Step
			 * exception is taken).
			 */
			if (!vcpu_get_flag(vcpu, DBG_SS_ACTIVE_PENDING))
				*vcpu_cpsr(vcpu) |= DBG_SPSR_SS;
			else
				*vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;

			mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
			mdscr |= DBG_MDSCR_SS;
			vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);
		} else {
			mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
			mdscr &= ~DBG_MDSCR_SS;
			vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);
		}

		trace_kvm_arm_set_dreg32("SPSR_EL2", *vcpu_cpsr(vcpu));

		/*
		 * HW Breakpoints and watchpoints
		 *
		 * We simply switch the debug_ptr to point to our new
		 * external_debug_state which has been populated by the
		 * debug ioctl. The existing DEBUG_DIRTY mechanism ensures
		 * the registers are updated on the world switch.
		 */
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
			/* Enable breakpoints/watchpoints */
			mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
			mdscr |= DBG_MDSCR_MDE;
			vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);

			vcpu->arch.debug_ptr = &vcpu->arch.external_debug_state;
			vcpu_set_flag(vcpu, DEBUG_DIRTY);

			trace_kvm_arm_set_regset("BKPTS", get_num_brps(),
						&vcpu->arch.debug_ptr->dbg_bcr[0],
						&vcpu->arch.debug_ptr->dbg_bvr[0]);

			trace_kvm_arm_set_regset("WAPTS", get_num_wrps(),
						&vcpu->arch.debug_ptr->dbg_wcr[0],
						&vcpu->arch.debug_ptr->dbg_wvr[0]);

		/*
		 * The OS Lock blocks debug exceptions in all ELs when it is
		 * enabled. If the guest has enabled the OS Lock, constrain its
		 * effects to the guest. Emulate the behavior by clearing
		 * MDSCR_EL1.MDE. In so doing, we ensure that host debug
		 * exceptions are unaffected by guest configuration of the OS
		 * Lock.
		 */
		} else if (kvm_vcpu_os_lock_enabled(vcpu)) {
			mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
			mdscr &= ~DBG_MDSCR_MDE;
			vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);
		}
	}

	BUG_ON(!vcpu->guest_debug &&
		vcpu->arch.debug_ptr != &vcpu->arch.vcpu_debug_state);

	/* If KDE or MDE are set, perform a full save/restore cycle. */
	if (vcpu_read_sys_reg(vcpu, MDSCR_EL1) & (DBG_MDSCR_KDE | DBG_MDSCR_MDE))
		vcpu_set_flag(vcpu, DEBUG_DIRTY);

	/* Write mdcr_el2 changes since vcpu_load on VHE systems */
	if (has_vhe() && orig_mdcr_el2 != vcpu->arch.mdcr_el2)
		write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);

	trace_kvm_arm_set_dreg32("MDSCR_EL1", vcpu_read_sys_reg(vcpu, MDSCR_EL1));
}
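
/*
 * For the KVM_GUESTDBG_USE_HW case above, external_debug_state is populated
 * from the arch part of struct kvm_guest_debug by the debug ioctl. A
 * hypothetical sketch of a VMM arming the first hardware breakpoint (addr
 * and bcr_value are placeholders; see the ARM ARM for the DBGBCR field
 * encodings):
 *
 *	struct kvm_guest_debug dbg = {
 *		.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW,
 *	};
 *
 *	dbg.arch.dbg_bvr[0] = addr;
 *	dbg.arch.dbg_bcr[0] = bcr_value;
 *	ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
 *
 * On the next guest entry this function points debug_ptr at that state and
 * sets DEBUG_DIRTY so the registers are loaded on the world switch.
 */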

void kvm_arm_clear_debug(struct kvm_vcpu *vcpu)
{
	trace_kvm_arm_clear_debug(vcpu->guest_debug);

	/*
	 * Restore the guest's debug registers if we were using them.
	 */
	if (vcpu->guest_debug || kvm_vcpu_os_lock_enabled(vcpu)) {
		if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
			if (!(*vcpu_cpsr(vcpu) & DBG_SPSR_SS))
				/*
				 * Mark the vcpu as ACTIVE_PENDING
				 * until Software Step exception is taken.
				 */
				vcpu_set_flag(vcpu, DBG_SS_ACTIVE_PENDING);
		}

		restore_guest_debug_regs(vcpu);

		/*
		 * If we were using HW debug we need to restore the
		 * debug_ptr to the guest debug state.
		 */
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
			kvm_arm_reset_debug_ptr(vcpu);

			trace_kvm_arm_set_regset("BKPTS", get_num_brps(),
						&vcpu->arch.debug_ptr->dbg_bcr[0],
						&vcpu->arch.debug_ptr->dbg_bvr[0]);

			trace_kvm_arm_set_regset("WAPTS", get_num_wrps(),
						&vcpu->arch.debug_ptr->dbg_wcr[0],
						&vcpu->arch.debug_ptr->dbg_wvr[0]);
		}
	}
}
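
/*
 * When a single-step or hardware break/watchpoint fires in the guest, the
 * exit is reported to userspace as KVM_EXIT_DEBUG. A rough sketch of the
 * VMM loop (run being the mmap'ed struct kvm_run for this vcpu):
 *
 *	ioctl(vcpu_fd, KVM_RUN, 0);
 *	if (run->exit_reason == KVM_EXIT_DEBUG) {
 *		... inspect run->debug.arch, then resume or detach ...
 *	}
 */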

void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu)
{
	u64 dfr0;

	/* For VHE, there is nothing to do */
	if (has_vhe())
		return;

	dfr0 = read_sysreg(id_aa64dfr0_el1);
	/*
	 * If SPE is present on this CPU and is available at current EL,
	 * we may need to check if the host state needs to be saved.
	 */
	if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_PMSVer_SHIFT) &&
	    !(read_sysreg_s(SYS_PMBIDR_EL1) & BIT(PMBIDR_EL1_P_SHIFT)))
		vcpu_set_flag(vcpu, DEBUG_STATE_SAVE_SPE);

	/* Check if we have TRBE implemented and available at the host */
	if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_TraceBuffer_SHIFT) &&
	    !(read_sysreg_s(SYS_TRBIDR_EL1) & TRBIDR_EL1_P))
		vcpu_set_flag(vcpu, DEBUG_STATE_SAVE_TRBE);
}
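
/*
 * These flags only matter on non-VHE systems (note the early return for VHE
 * above); they are presumably consumed by the hyp debug save/restore code to
 * decide whether the SPE/TRBE buffers need to be drained and saved around
 * guest entry.
 */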

void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu)
{
	vcpu_clear_flag(vcpu, DEBUG_STATE_SAVE_SPE);
	vcpu_clear_flag(vcpu, DEBUG_STATE_SAVE_TRBE);
}