// SPDX-License-Identifier: GPL-2.0-only
/*
 * Debug and Guest Debug support
 *
 * Copyright (C) 2015 - Linaro Ltd
 * Author: Alex Bennée <alex.bennee@linaro.org>
 */

#include <linux/kvm_host.h>
#include <linux/hw_breakpoint.h>

#include <asm/debug-monitors.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_emulate.h>

#include "trace.h"

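/*
 * Illustrative sketch only (not part of this file's code): the paths below
 * are driven by userspace through the KVM_SET_GUEST_DEBUG vcpu ioctl, which
 * is what populates vcpu->guest_debug. "vcpu_fd" is a placeholder for a vcpu
 * file descriptor obtained from KVM_CREATE_VCPU, and <linux/kvm.h> plus
 * <sys/ioctl.h> are assumed to be included; the flags shown are the ones
 * tested in this file.
 *
 *	struct kvm_guest_debug dbg = { 0 };
 *
 *	dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
 *	ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
 *	  (single-step the guest; clearing .control and re-issuing the
 *	   ioctl disables guest debug again)
 */
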
/* These are the bits of MDSCR_EL1 we may manipulate */
#define MDSCR_EL1_DEBUG_MASK	(DBG_MDSCR_SS | \
				DBG_MDSCR_KDE | \
				DBG_MDSCR_MDE)

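/*
 * Per-CPU copy of the host's boot-time MDCR_EL2 value, captured by
 * kvm_arm_init_debug() so that MDCR_EL2.HPMN can be preserved when the
 * guest's mdcr_el2 value is built in kvm_arm_setup_mdcr_el2().
 */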
static DEFINE_PER_CPU(u64, mdcr_el2);

/**
 * save/restore_guest_debug_regs
 *
 * For some debug operations we need to tweak some guest registers, so
 * we save the state of those registers before we make the
 * modifications and restore them afterwards.
 *
 * Guest access to MDSCR_EL1 is trapped by the hypervisor and handled
 * after we have restored the preserved value to the main context.
 *
 * When single-step is enabled by userspace, we tweak PSTATE.SS on every
 * guest entry. Preserve PSTATE.SS so we can restore the original value
 * for the vcpu after single-step is disabled.
 */
static void save_guest_debug_regs(struct kvm_vcpu *vcpu)
{
	__vcpu_save_guest_debug_regs(vcpu);

	trace_kvm_arm_set_dreg32("Saved MDSCR_EL1",
				 vcpu->arch.guest_debug_preserved.mdscr_el1);

	vcpu->arch.guest_debug_preserved.pstate_ss =
		(*vcpu_cpsr(vcpu) & DBG_SPSR_SS);
}

static void restore_guest_debug_regs(struct kvm_vcpu *vcpu)
{
	__vcpu_restore_guest_debug_regs(vcpu);

	trace_kvm_arm_set_dreg32("Restored MDSCR_EL1",
				 vcpu_read_sys_reg(vcpu, MDSCR_EL1));

	if (vcpu->arch.guest_debug_preserved.pstate_ss)
		*vcpu_cpsr(vcpu) |= DBG_SPSR_SS;
	else
		*vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
}

/**
 * kvm_arm_init_debug - grab what we need for debug
 *
 * Currently the sole task of this function is to retrieve the initial
 * value of mdcr_el2 so we can preserve MDCR_EL2.HPMN, which has
 * presumably been set up by some knowledgeable bootcode.
 *
 * It is called once per-cpu during CPU hyp initialisation.
 */
void kvm_arm_init_debug(void)
{
	__this_cpu_write(mdcr_el2, kvm_call_hyp_ret(__kvm_get_mdcr_el2));
}

/**
 * kvm_arm_setup_mdcr_el2 - configure vcpu mdcr_el2 value
 *
 * @vcpu: the vcpu pointer
 *
 * This ensures we will trap access to:
 *  - Performance monitors (MDCR_EL2_TPM/MDCR_EL2_TPMCR)
 *  - Debug ROM Address (MDCR_EL2_TDRA)
 *  - OS related registers (MDCR_EL2_TDOSA)
 *  - Statistical profiler (MDCR_EL2_TPMS/MDCR_EL2_E2PB)
 *  - Self-hosted Trace Filter controls (MDCR_EL2_TTRF)
 *  - Self-hosted Trace (MDCR_EL2_TTRF/MDCR_EL2_E2TB)
 */
static void kvm_arm_setup_mdcr_el2(struct kvm_vcpu *vcpu)
{
	/*
	 * This also clears MDCR_EL2_E2PB_MASK and MDCR_EL2_E2TB_MASK
	 * to disable guest access to the profiling and trace buffers
	 */
	vcpu->arch.mdcr_el2 = __this_cpu_read(mdcr_el2) & MDCR_EL2_HPMN_MASK;
	vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM |
				MDCR_EL2_TPMS |
				MDCR_EL2_TTRF |
				MDCR_EL2_TPMCR |
				MDCR_EL2_TDRA |
				MDCR_EL2_TDOSA);

	/* Is the VM being debugged by userspace? */
	if (vcpu->guest_debug)
		/* Route all software debug exceptions to EL2 */
		vcpu->arch.mdcr_el2 |= MDCR_EL2_TDE;

	/*
	 * Trap debug register access when one of the following is true:
	 *  - Userspace is using the hardware to debug the guest
	 *    (KVM_GUESTDBG_USE_HW is set).
	 *  - The guest is not using debug (DEBUG_DIRTY clear).
	 *  - The guest has enabled the OS Lock (debug exceptions are blocked).
	 */
	if ((vcpu->guest_debug & KVM_GUESTDBG_USE_HW) ||
	    !vcpu_get_flag(vcpu, DEBUG_DIRTY) ||
	    kvm_vcpu_os_lock_enabled(vcpu))
		vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA;

	trace_kvm_arm_set_dreg32("MDCR_EL2", vcpu->arch.mdcr_el2);
}

/**
 * kvm_arm_vcpu_init_debug - setup vcpu debug traps
 *
 * @vcpu: the vcpu pointer
 *
 * Set vcpu initial mdcr_el2 value.
 */
void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu)
{
	/* kvm_arm_setup_mdcr_el2() reads this CPU's cached mdcr_el2 value */
	preempt_disable();
	kvm_arm_setup_mdcr_el2(vcpu);
	preempt_enable();
}

/**
 * kvm_arm_reset_debug_ptr - reset the debug ptr to point to the vcpu state
 *
 * @vcpu: the vcpu pointer
 */
void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu)
{
	vcpu->arch.debug_ptr = &vcpu->arch.vcpu_debug_state;
}

/**
 * kvm_arm_setup_debug - set up debug related stuff
 *
 * @vcpu: the vcpu pointer
 *
 * This is called before each entry into the hypervisor to set up any
 * debug related registers.
 *
 * Additionally, KVM only traps guest accesses to the debug registers if
 * the guest is not actively using them (see the DEBUG_DIRTY flag on
 * vcpu->arch.iflags). Since the guest must not interfere with the
 * hardware state when we are debugging it with the debug registers, we
 * must ensure that trapping stays enabled whenever we are debugging the
 * guest that way.
 */
void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
{
	unsigned long mdscr, orig_mdcr_el2 = vcpu->arch.mdcr_el2;

	trace_kvm_arm_setup_debug(vcpu, vcpu->guest_debug);

	kvm_arm_setup_mdcr_el2(vcpu);

	/* Check if we need to use the debug registers. */
	if (kvm_vcpu_needs_debug_regs(vcpu)) {
		/* Save guest debug state */
		save_guest_debug_regs(vcpu);

		/*
		 * Single Step (ARM ARM D2.12.3 The software step state
		 * machine)
		 *
		 * If we are doing Single Step we need to manipulate
		 * the guest's MDSCR_EL1.SS and PSTATE.SS. Once the
		 * step has occurred the hypervisor will trap the
		 * debug exception and we return to userspace.
		 *
		 * If the guest attempts to single step its userspace
		 * we would have to deal with a trapped exception
		 * while in the guest kernel. Because this would be
		 * hard to unwind we suppress the guest's ability to
		 * do so by masking MDSCR_EL1.SS.
		 *
		 * This confuses guest debuggers which use
		 * single-step behind the scenes but everything
		 * returns to normal once the host is no longer
		 * debugging the system.
		 */
		if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
			/*
			 * If the software step state at the last guest exit
			 * was Active-pending, we don't set DBG_SPSR_SS so
			 * that the state is maintained (to not run another
			 * single-step until the pending Software Step
			 * exception is taken).
			 */
			if (!vcpu_get_flag(vcpu, DBG_SS_ACTIVE_PENDING))
				*vcpu_cpsr(vcpu) |= DBG_SPSR_SS;
			else
				*vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;

			mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
			mdscr |= DBG_MDSCR_SS;
			vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);
		} else {
			mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
			mdscr &= ~DBG_MDSCR_SS;
			vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);
		}

		trace_kvm_arm_set_dreg32("SPSR_EL2", *vcpu_cpsr(vcpu));

		/*
		 * HW Breakpoints and watchpoints
		 *
		 * We simply switch the debug_ptr to point to our new
		 * external_debug_state which has been populated by the
		 * debug ioctl. The existing DEBUG_DIRTY mechanism ensures
		 * the registers are updated on the world switch.
		 */
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
			/* Enable breakpoints/watchpoints */
			mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
			mdscr |= DBG_MDSCR_MDE;
			vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);

			vcpu->arch.debug_ptr = &vcpu->arch.external_debug_state;
			vcpu_set_flag(vcpu, DEBUG_DIRTY);

			trace_kvm_arm_set_regset("BKPTS", get_num_brps(),
						 &vcpu->arch.debug_ptr->dbg_bcr[0],
						 &vcpu->arch.debug_ptr->dbg_bvr[0]);

			trace_kvm_arm_set_regset("WAPTS", get_num_wrps(),
						 &vcpu->arch.debug_ptr->dbg_wcr[0],
						 &vcpu->arch.debug_ptr->dbg_wvr[0]);

		/*
		 * The OS Lock blocks debug exceptions in all ELs when it is
		 * enabled. If the guest has enabled the OS Lock, constrain its
		 * effects to the guest. Emulate the behavior by clearing
		 * MDSCR_EL1.MDE. In so doing, we ensure that host debug
		 * exceptions are unaffected by guest configuration of the OS
		 * Lock.
		 */
		} else if (kvm_vcpu_os_lock_enabled(vcpu)) {
			mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
			mdscr &= ~DBG_MDSCR_MDE;
			vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);
		}
	}

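	/*
	 * If userspace is not debugging this vcpu, debug_ptr must still be
	 * pointing at the vcpu's own debug state (see
	 * kvm_arm_reset_debug_ptr()).
	 */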
	BUG_ON(!vcpu->guest_debug &&
		vcpu->arch.debug_ptr != &vcpu->arch.vcpu_debug_state);

	/* If KDE or MDE are set, perform a full save/restore cycle. */
	if (vcpu_read_sys_reg(vcpu, MDSCR_EL1) & (DBG_MDSCR_KDE | DBG_MDSCR_MDE))
		vcpu_set_flag(vcpu, DEBUG_DIRTY);

	/* Write mdcr_el2 changes since vcpu_load on VHE systems */
	if (has_vhe() && orig_mdcr_el2 != vcpu->arch.mdcr_el2)
		write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);

	trace_kvm_arm_set_dreg32("MDSCR_EL1", vcpu_read_sys_reg(vcpu, MDSCR_EL1));
}

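/**
 * kvm_arm_clear_debug - restore the vcpu's debug state
 *
 * @vcpu: the vcpu pointer
 *
 * Undoes the changes made by kvm_arm_setup_debug(): the preserved
 * MDSCR_EL1 and PSTATE.SS values are restored and, if hardware-assisted
 * debug was in use, debug_ptr is pointed back at the vcpu's own state.
 */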
void kvm_arm_clear_debug(struct kvm_vcpu *vcpu)
{
	trace_kvm_arm_clear_debug(vcpu->guest_debug);

	/*
	 * Restore the guest's debug registers if we were using them.
	 */
	if (kvm_vcpu_needs_debug_regs(vcpu)) {
		if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
			if (!(*vcpu_cpsr(vcpu) & DBG_SPSR_SS))
				/*
				 * Mark the vcpu as ACTIVE_PENDING
				 * until a Software Step exception is taken.
				 */
				vcpu_set_flag(vcpu, DBG_SS_ACTIVE_PENDING);
		}

		restore_guest_debug_regs(vcpu);

		/*
		 * If we were using HW debug we need to restore the
		 * debug_ptr to the guest debug state.
		 */
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
			kvm_arm_reset_debug_ptr(vcpu);

			trace_kvm_arm_set_regset("BKPTS", get_num_brps(),
						 &vcpu->arch.debug_ptr->dbg_bcr[0],
						 &vcpu->arch.debug_ptr->dbg_bvr[0]);

			trace_kvm_arm_set_regset("WAPTS", get_num_wrps(),
						 &vcpu->arch.debug_ptr->dbg_wcr[0],
						 &vcpu->arch.debug_ptr->dbg_wvr[0]);
		}
	}
}

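/*
 * On non-VHE systems, note whether SPE or TRBE is implemented and usable
 * at the current EL, so the hyp code knows it may need to save and
 * restore the host's SPE/TRBE state around guest runs (see the
 * DEBUG_STATE_SAVE_SPE/TRBE flags).
 */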
void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu)
{
	u64 dfr0;

	/* For VHE, there is nothing to do */
	if (has_vhe())
		return;

	dfr0 = read_sysreg(id_aa64dfr0_el1);
	/*
	 * If SPE is present on this CPU and is available at current EL,
	 * we may need to check if the host state needs to be saved.
	 */
	if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_PMSVer_SHIFT) &&
	    !(read_sysreg_s(SYS_PMBIDR_EL1) & BIT(SYS_PMBIDR_EL1_P_SHIFT)))
		vcpu_set_flag(vcpu, DEBUG_STATE_SAVE_SPE);

	/* Check if we have TRBE implemented and available at the host */
	if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_TraceBuffer_SHIFT) &&
	    !(read_sysreg_s(SYS_TRBIDR_EL1) & TRBIDR_PROG))
		vcpu_set_flag(vcpu, DEBUG_STATE_SAVE_TRBE);
}

void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu)
{
	vcpu_clear_flag(vcpu, DEBUG_STATE_SAVE_SPE);
	vcpu_clear_flag(vcpu, DEBUG_STATE_SAVE_TRBE);
}