// SPDX-License-Identifier: GPL-2.0-only
/*
 * Debug and Guest Debug support
 *
 * Copyright (C) 2015 - Linaro Ltd
 * Author: Alex Bennée <alex.bennee@linaro.org>
 */

#include <linux/kvm_host.h>
#include <linux/hw_breakpoint.h>

#include <asm/debug-monitors.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_emulate.h>

#include <kvm_ptdump.h>

#include "trace.h"

/* These are the bits of MDSCR_EL1 we may manipulate */
#define MDSCR_EL1_DEBUG_MASK	(DBG_MDSCR_SS | \
				DBG_MDSCR_KDE | \
				DBG_MDSCR_MDE)

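/*
 * Per-CPU copy of the host's MDCR_EL2 value, captured at hyp initialisation
 * by kvm_arm_init_debug() and used as the base when building each vcpu's
 * mdcr_el2 in kvm_arm_setup_mdcr_el2().
 */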
static DEFINE_PER_CPU(u64, mdcr_el2);

/**
 * save/restore_guest_debug_regs
 *
 * For some debug operations we need to tweak some guest registers. As
 * a result we need to save the state of those registers before we
 * make those modifications.
 *
 * Guest access to MDSCR_EL1 is trapped by the hypervisor and handled
 * after we have restored the preserved value to the main context.
 *
 * When single-step is enabled by userspace, we tweak PSTATE.SS on every
 * guest entry. Preserve PSTATE.SS so we can restore the original value
 * for the vcpu after the single-step is disabled.
 */
static void save_guest_debug_regs(struct kvm_vcpu *vcpu)
{
	__vcpu_save_guest_debug_regs(vcpu);

	trace_kvm_arm_set_dreg32("Saved MDSCR_EL1",
				 vcpu->arch.guest_debug_preserved.mdscr_el1);

	vcpu->arch.guest_debug_preserved.pstate_ss =
					(*vcpu_cpsr(vcpu) & DBG_SPSR_SS);
}

static void restore_guest_debug_regs(struct kvm_vcpu *vcpu)
{
	__vcpu_restore_guest_debug_regs(vcpu);

	trace_kvm_arm_set_dreg32("Restored MDSCR_EL1",
				 vcpu_read_sys_reg(vcpu, MDSCR_EL1));

	if (vcpu->arch.guest_debug_preserved.pstate_ss)
		*vcpu_cpsr(vcpu) |= DBG_SPSR_SS;
	else
		*vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
}

/**
 * kvm_arm_init_debug - grab what we need for debug
 *
 * Currently the sole task of this function is to retrieve the initial
 * value of mdcr_el2 so we can preserve MDCR_EL2.HPMN which has
 * presumably been set up by some knowledgeable bootcode.
 *
 * It is called once per-cpu during CPU hyp initialisation.
 */
void kvm_arm_init_debug(void)
{
	__this_cpu_write(mdcr_el2, kvm_call_hyp_ret(__kvm_get_mdcr_el2));
}

/**
 * kvm_arm_setup_mdcr_el2 - configure vcpu mdcr_el2 value
 *
 * @vcpu: the vcpu pointer
 *
 * This ensures we will trap access to:
 *  - Performance monitors (MDCR_EL2_TPM/MDCR_EL2_TPMCR)
 *  - Debug ROM Address (MDCR_EL2_TDRA)
 *  - OS related registers (MDCR_EL2_TDOSA)
 *  - Statistical profiler (MDCR_EL2_TPMS/MDCR_EL2_E2PB)
 *  - Self-hosted Trace Filter controls (MDCR_EL2_TTRF)
 *  - Self-hosted Trace (MDCR_EL2_TTRF/MDCR_EL2_E2TB)
 */
static void kvm_arm_setup_mdcr_el2(struct kvm_vcpu *vcpu)
{
	/*
	 * This also clears MDCR_EL2_E2PB_MASK and MDCR_EL2_E2TB_MASK
	 * to disable guest access to the profiling and trace buffers
	 */
	vcpu->arch.mdcr_el2 = __this_cpu_read(mdcr_el2) & MDCR_EL2_HPMN_MASK;
	vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM |
				MDCR_EL2_TPMS |
				MDCR_EL2_TTRF |
				MDCR_EL2_TPMCR |
				MDCR_EL2_TDRA |
				MDCR_EL2_TDOSA);

	/* Is the VM being debugged by userspace? */
	if (vcpu->guest_debug)
		/* Route all software debug exceptions to EL2 */
		vcpu->arch.mdcr_el2 |= MDCR_EL2_TDE;

	/*
	 * Trap debug register access when one of the following is true:
	 *  - Userspace is using the hardware to debug the guest
	 *    (KVM_GUESTDBG_USE_HW is set).
	 *  - The guest is not using debug (DEBUG_DIRTY clear).
	 *  - The guest has enabled the OS Lock (debug exceptions are blocked).
	 */
	if ((vcpu->guest_debug & KVM_GUESTDBG_USE_HW) ||
	    !vcpu_get_flag(vcpu, DEBUG_DIRTY) ||
	    kvm_vcpu_os_lock_enabled(vcpu))
		vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA;

	trace_kvm_arm_set_dreg32("MDCR_EL2", vcpu->arch.mdcr_el2);
}

/**
 * kvm_arm_vcpu_init_debug - setup vcpu debug traps
 *
 * @vcpu: the vcpu pointer
 *
 * Set vcpu initial mdcr_el2 value.
 */
void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu)
{
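	/*
	 * kvm_arm_setup_mdcr_el2() reads the per-CPU mdcr_el2 value, so keep
	 * preemption disabled while it runs.
	 */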
	preempt_disable();
	kvm_arm_setup_mdcr_el2(vcpu);
	preempt_enable();
}

/**
 * kvm_arm_reset_debug_ptr - reset the debug ptr to point to the vcpu state
 *
 * @vcpu: the vcpu pointer
 */
void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu)
{
	vcpu->arch.debug_ptr = &vcpu->arch.vcpu_debug_state;
}

/**
 * kvm_arm_setup_debug - set up debug related stuff
 *
 * @vcpu: the vcpu pointer
 *
 * This is called before each entry into the hypervisor to set up any
 * debug related registers.
 *
 * Additionally, KVM only traps guest accesses to the debug registers if
 * the guest is not actively using them (see the DEBUG_DIRTY
 * flag on vcpu->arch.iflags). Since the guest must not interfere
 * with the hardware state when debugging the guest, we must ensure that
 * trapping is enabled whenever we are debugging the guest using the
 * debug registers.
 */
void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
{
	unsigned long mdscr, orig_mdcr_el2 = vcpu->arch.mdcr_el2;

	trace_kvm_arm_setup_debug(vcpu, vcpu->guest_debug);

	kvm_arm_setup_mdcr_el2(vcpu);

	/* Check if we need to use the debug registers. */
	if (kvm_vcpu_needs_debug_regs(vcpu)) {
		/* Save guest debug state */
		save_guest_debug_regs(vcpu);

		/*
		 * Single Step (ARM ARM D2.12.3 The software step state
		 * machine)
		 *
		 * If we are doing Single Step we need to manipulate
		 * the guest's MDSCR_EL1.SS and PSTATE.SS. Once the
		 * step has occurred the hypervisor will trap the
		 * debug exception and we return to userspace.
		 *
		 * If the guest attempts to single step its userspace
		 * we would have to deal with a trapped exception
		 * while in the guest kernel. Because this would be
		 * hard to unwind we suppress the guest's ability to
		 * do so by masking MDSCR_EL1.SS.
		 *
		 * This confuses guest debuggers which use
		 * single-step behind the scenes, but everything
		 * returns to normal once the host is no longer
		 * debugging the system.
		 */
		if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
			/*
			 * If the software step state at the last guest exit
			 * was Active-pending, we don't set DBG_SPSR_SS so
			 * that the state is maintained (to not run another
			 * single-step until the pending Software Step
			 * exception is taken).
			 */
			if (!vcpu_get_flag(vcpu, DBG_SS_ACTIVE_PENDING))
				*vcpu_cpsr(vcpu) |= DBG_SPSR_SS;
			else
				*vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;

			mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
			mdscr |= DBG_MDSCR_SS;
			vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);
		} else {
			mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
			mdscr &= ~DBG_MDSCR_SS;
			vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);
		}

		trace_kvm_arm_set_dreg32("SPSR_EL2", *vcpu_cpsr(vcpu));

		/*
		 * HW Breakpoints and watchpoints
		 *
		 * We simply switch the debug_ptr to point to our new
		 * external_debug_state which has been populated by the
		 * debug ioctl. The existing DEBUG_DIRTY mechanism ensures
		 * the registers are updated on the world switch.
		 */
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
			/* Enable breakpoints/watchpoints */
			mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
			mdscr |= DBG_MDSCR_MDE;
			vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);

			vcpu->arch.debug_ptr = &vcpu->arch.external_debug_state;
			vcpu_set_flag(vcpu, DEBUG_DIRTY);

			trace_kvm_arm_set_regset("BKPTS", get_num_brps(),
						 &vcpu->arch.debug_ptr->dbg_bcr[0],
						 &vcpu->arch.debug_ptr->dbg_bvr[0]);

			trace_kvm_arm_set_regset("WAPTS", get_num_wrps(),
						 &vcpu->arch.debug_ptr->dbg_wcr[0],
						 &vcpu->arch.debug_ptr->dbg_wvr[0]);

		/*
		 * The OS Lock blocks debug exceptions in all ELs when it is
		 * enabled. If the guest has enabled the OS Lock, constrain its
		 * effects to the guest. Emulate the behavior by clearing
		 * MDSCR_EL1.MDE. In so doing, we ensure that host debug
		 * exceptions are unaffected by guest configuration of the OS
		 * Lock.
		 */
		} else if (kvm_vcpu_os_lock_enabled(vcpu)) {
			mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
			mdscr &= ~DBG_MDSCR_MDE;
			vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);
		}
	}

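	/*
	 * Unless userspace is debugging the guest, debug_ptr must still point
	 * at the vcpu's own debug state.
	 */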
	BUG_ON(!vcpu->guest_debug &&
		vcpu->arch.debug_ptr != &vcpu->arch.vcpu_debug_state);

	/* If KDE or MDE are set, perform a full save/restore cycle. */
	if (vcpu_read_sys_reg(vcpu, MDSCR_EL1) & (DBG_MDSCR_KDE | DBG_MDSCR_MDE))
		vcpu_set_flag(vcpu, DEBUG_DIRTY);

	/* Write mdcr_el2 changes since vcpu_load on VHE systems */
	if (has_vhe() && orig_mdcr_el2 != vcpu->arch.mdcr_el2)
		write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);

	trace_kvm_arm_set_dreg32("MDSCR_EL1", vcpu_read_sys_reg(vcpu, MDSCR_EL1));
}

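/**
 * kvm_arm_clear_debug - restore guest debug state after a guest exit
 *
 * @vcpu: the vcpu pointer
 *
 * Undo the changes made by kvm_arm_setup_debug(): restore the preserved
 * debug registers and, if hardware-assisted debug was in use, point
 * debug_ptr back at the vcpu's own debug state.
 */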
void kvm_arm_clear_debug(struct kvm_vcpu *vcpu)
{
	trace_kvm_arm_clear_debug(vcpu->guest_debug);

	/*
	 * Restore the guest's debug registers if we were using them.
	 */
	if (kvm_vcpu_needs_debug_regs(vcpu)) {
		if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
			if (!(*vcpu_cpsr(vcpu) & DBG_SPSR_SS))
				/*
				 * Mark the vcpu as ACTIVE_PENDING
				 * until Software Step exception is taken.
				 */
				vcpu_set_flag(vcpu, DBG_SS_ACTIVE_PENDING);
		}

		restore_guest_debug_regs(vcpu);

		/*
		 * If we were using HW debug we need to restore the
		 * debug_ptr to the guest debug state.
		 */
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
			kvm_arm_reset_debug_ptr(vcpu);

			trace_kvm_arm_set_regset("BKPTS", get_num_brps(),
						 &vcpu->arch.debug_ptr->dbg_bcr[0],
						 &vcpu->arch.debug_ptr->dbg_bvr[0]);

			trace_kvm_arm_set_regset("WAPTS", get_num_wrps(),
						 &vcpu->arch.debug_ptr->dbg_wcr[0],
						 &vcpu->arch.debug_ptr->dbg_wvr[0]);
		}
	}
}

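/*
 * Record, when the vcpu is loaded on a non-VHE system, whether SPE and TRBE
 * are implemented and usable at the current EL, so that the host state of
 * those units can be saved around guest entry.
 */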
void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu)
{
	u64 dfr0;

	/* For VHE, there is nothing to do */
	if (has_vhe())
		return;

	dfr0 = read_sysreg(id_aa64dfr0_el1);
	/*
	 * If SPE is present on this CPU and is available at current EL,
	 * we may need to check if the host state needs to be saved.
	 */
	if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_PMSVer_SHIFT) &&
	    !(read_sysreg_s(SYS_PMBIDR_EL1) & BIT(PMBIDR_EL1_P_SHIFT)))
		vcpu_set_flag(vcpu, DEBUG_STATE_SAVE_SPE);

	/* Check if we have TRBE implemented and available at the host */
	if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_TraceBuffer_SHIFT) &&
	    !(read_sysreg_s(SYS_TRBIDR_EL1) & TRBIDR_EL1_P))
		vcpu_set_flag(vcpu, DEBUG_STATE_SAVE_TRBE);
}

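/* Drop the SPE/TRBE hints; they are recomputed on the next vcpu load. */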
void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu)
{
	vcpu_clear_flag(vcpu, DEBUG_STATE_SAVE_SPE);
	vcpu_clear_flag(vcpu, DEBUG_STATE_SAVE_TRBE);
}

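/*
 * Called when the VM's debugfs directory is created; registers the guest
 * page-table dump (ptdump) interface for this VM.
 */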
int kvm_arch_create_vm_debugfs(struct kvm *kvm)
{
	kvm_ptdump_guest_register(kvm);
	return 0;
}