// SPDX-License-Identifier: GPL-2.0-only
/*
 * Debug and Guest Debug support
 *
 * Copyright (C) 2015 - Linaro Ltd
 * Author: Alex Bennée <alex.bennee@linaro.org>
 */

#include <linux/kvm_host.h>
#include <linux/hw_breakpoint.h>

#include <asm/debug-monitors.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_emulate.h>

#include "trace.h"

/* These are the bits of MDSCR_EL1 we may manipulate */
#define MDSCR_EL1_DEBUG_MASK	(DBG_MDSCR_SS | \
				DBG_MDSCR_KDE | \
				DBG_MDSCR_MDE)

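/* Boot-time MDCR_EL2 value of each physical CPU, captured at hyp init */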
static DEFINE_PER_CPU(u64, mdcr_el2);

/*
 * save/restore_guest_debug_regs
 *
 * For some debug operations we need to tweak some guest registers. As
 * a result we need to save the state of those registers before we
 * make those modifications.
 *
 * Guest access to MDSCR_EL1 is trapped by the hypervisor and handled
 * after we have restored the preserved value to the main context.
 *
 * When single-step is enabled by userspace, we tweak PSTATE.SS on every
 * guest entry. Preserve PSTATE.SS so we can restore the original value
 * for the vcpu after the single-step is disabled.
 */
static void save_guest_debug_regs(struct kvm_vcpu *vcpu)
{
	__vcpu_save_guest_debug_regs(vcpu);

	trace_kvm_arm_set_dreg32("Saved MDSCR_EL1",
				vcpu->arch.guest_debug_preserved.mdscr_el1);

	vcpu->arch.guest_debug_preserved.pstate_ss =
					(*vcpu_cpsr(vcpu) & DBG_SPSR_SS);
}

static void restore_guest_debug_regs(struct kvm_vcpu *vcpu)
{
	__vcpu_restore_guest_debug_regs(vcpu);

	trace_kvm_arm_set_dreg32("Restored MDSCR_EL1",
				vcpu_read_sys_reg(vcpu, MDSCR_EL1));

	if (vcpu->arch.guest_debug_preserved.pstate_ss)
		*vcpu_cpsr(vcpu) |= DBG_SPSR_SS;
	else
		*vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
}

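/*
 * For context: the vcpu->guest_debug checks below are driven by userspace
 * through the KVM_SET_GUEST_DEBUG ioctl. A minimal sketch of a VMM enabling
 * single-step (the vcpu_fd name is illustrative):
 *
 *	struct kvm_guest_debug dbg = {
 *		.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
 *	};
 *	ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
 *
 * When KVM_GUESTDBG_ENABLE is set, dbg.control is what ends up in
 * vcpu->guest_debug.
 */
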
/**
 * kvm_arm_init_debug - grab what we need for debug
 *
 * Currently the sole task of this function is to retrieve the initial
 * value of mdcr_el2 so we can preserve MDCR_EL2.HPMN which has
 * presumably been set up by some knowledgeable bootcode.
 *
 * It is called once per-cpu during CPU hyp initialisation.
 */

void kvm_arm_init_debug(void)
{
	__this_cpu_write(mdcr_el2, kvm_call_hyp_ret(__kvm_get_mdcr_el2));
}

/**
 * kvm_arm_setup_mdcr_el2 - configure vcpu mdcr_el2 value
 *
 * @vcpu:	the vcpu pointer
 *
 * This ensures we will trap access to:
 *  - Performance monitors (MDCR_EL2_TPM/MDCR_EL2_TPMCR)
 *  - Debug ROM Address (MDCR_EL2_TDRA)
 *  - OS related registers (MDCR_EL2_TDOSA)
 *  - Statistical profiler (MDCR_EL2_TPMS/MDCR_EL2_E2PB)
 *  - Self-hosted Trace Filter controls (MDCR_EL2_TTRF)
 *  - Self-hosted Trace (MDCR_EL2_TTRF/MDCR_EL2_E2TB)
 */
static void kvm_arm_setup_mdcr_el2(struct kvm_vcpu *vcpu)
{
	/*
	 * This also clears MDCR_EL2_E2PB_MASK and MDCR_EL2_E2TB_MASK
	 * to disable guest access to the profiling and trace buffers
	 */
	vcpu->arch.mdcr_el2 = __this_cpu_read(mdcr_el2) & MDCR_EL2_HPMN_MASK;
	vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM |
				MDCR_EL2_TPMS |
				MDCR_EL2_TTRF |
				MDCR_EL2_TPMCR |
				MDCR_EL2_TDRA |
				MDCR_EL2_TDOSA);

	/* Is the VM being debugged by userspace? */
	if (vcpu->guest_debug)
		/* Route all software debug exceptions to EL2 */
		vcpu->arch.mdcr_el2 |= MDCR_EL2_TDE;

	/*
	 * Trap debug register access when one of the following is true:
	 *  - Userspace is using the hardware to debug the guest
	 *  (KVM_GUESTDBG_USE_HW is set).
	 *  - The guest is not using debug (DEBUG_DIRTY clear).
	 *  - The guest has enabled the OS Lock (debug exceptions are blocked).
	 */
	if ((vcpu->guest_debug & KVM_GUESTDBG_USE_HW) ||
	    !vcpu_get_flag(vcpu, DEBUG_DIRTY) ||
	    kvm_vcpu_os_lock_enabled(vcpu))
		vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA;

	trace_kvm_arm_set_dreg32("MDCR_EL2", vcpu->arch.mdcr_el2);
}

/**
 * kvm_arm_vcpu_init_debug - set up vcpu debug traps
 *
 * @vcpu:	the vcpu pointer
 *
 * Set vcpu initial mdcr_el2 value.
 */
void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu)
{
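	/*
	 * Disable preemption so that the per-CPU mdcr_el2 value read by
	 * kvm_arm_setup_mdcr_el2() comes from the CPU this vcpu is being
	 * initialised on.
	 */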
	preempt_disable();
	kvm_arm_setup_mdcr_el2(vcpu);
	preempt_enable();
}

/**
 * kvm_arm_reset_debug_ptr - reset the debug ptr to point to the vcpu state
 * @vcpu:	the vcpu pointer
 */

void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu)
{
	vcpu->arch.debug_ptr = &vcpu->arch.vcpu_debug_state;
}

/**
 * kvm_arm_setup_debug - set up debug related stuff
 *
 * @vcpu:	the vcpu pointer
 *
 * This is called before each entry into the hypervisor to set up any
 * debug related registers.
 *
 * Additionally, KVM only traps guest accesses to the debug registers if
 * the guest is not actively using them (see the DEBUG_DIRTY
 * flag on vcpu->arch.iflags).  Since the guest must not interfere
 * with the hardware state when debugging the guest, we must ensure that
 * trapping is enabled whenever we are debugging the guest using the
 * debug registers.
 */

void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
{
	unsigned long mdscr, orig_mdcr_el2 = vcpu->arch.mdcr_el2;

	trace_kvm_arm_setup_debug(vcpu, vcpu->guest_debug);

	kvm_arm_setup_mdcr_el2(vcpu);

	/* Check if we need to use the debug registers. */
	if (kvm_vcpu_needs_debug_regs(vcpu)) {
		/* Save guest debug state */
		save_guest_debug_regs(vcpu);

		/*
		 * Single Step (ARM ARM D2.12.3 The software step state
		 * machine)
		 *
		 * If we are doing Single Step we need to manipulate
		 * the guest's MDSCR_EL1.SS and PSTATE.SS. Once the
		 * step has occurred the hypervisor will trap the
		 * debug exception and we return to userspace.
		 *
		 * If the guest attempts to single step its userspace
		 * we would have to deal with a trapped exception
		 * while in the guest kernel. Because this would be
		 * hard to unwind we suppress the guest's ability to
		 * do so by masking MDSCR_EL1.SS.
		 *
		 * This confuses guest debuggers which use
		 * single-step behind the scenes but everything
		 * returns to normal once the host is no longer
		 * debugging the system.
		 */
		if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
			/*
			 * If the software step state at the last guest exit
			 * was Active-pending, we don't set DBG_SPSR_SS so
			 * that the state is maintained (to not run another
			 * single-step until the pending Software Step
			 * exception is taken).
			 */
			if (!vcpu_get_flag(vcpu, DBG_SS_ACTIVE_PENDING))
				*vcpu_cpsr(vcpu) |= DBG_SPSR_SS;
			else
				*vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;

			mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
			mdscr |= DBG_MDSCR_SS;
			vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);
		} else {
			mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
			mdscr &= ~DBG_MDSCR_SS;
			vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);
		}

		trace_kvm_arm_set_dreg32("SPSR_EL2", *vcpu_cpsr(vcpu));

		/*
		 * HW Breakpoints and watchpoints
		 *
		 * We simply switch the debug_ptr to point to our new
		 * external_debug_state which has been populated by the
		 * debug ioctl. The existing DEBUG_DIRTY mechanism ensures
		 * the registers are updated on the world switch.
		 */
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
			/* Enable breakpoints/watchpoints */
			mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
			mdscr |= DBG_MDSCR_MDE;
			vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);

			vcpu->arch.debug_ptr = &vcpu->arch.external_debug_state;
			vcpu_set_flag(vcpu, DEBUG_DIRTY);

			trace_kvm_arm_set_regset("BKPTS", get_num_brps(),
						&vcpu->arch.debug_ptr->dbg_bcr[0],
						&vcpu->arch.debug_ptr->dbg_bvr[0]);

			trace_kvm_arm_set_regset("WAPTS", get_num_wrps(),
						&vcpu->arch.debug_ptr->dbg_wcr[0],
						&vcpu->arch.debug_ptr->dbg_wvr[0]);

		/*
		 * The OS Lock blocks debug exceptions in all ELs when it is
		 * enabled. If the guest has enabled the OS Lock, constrain its
		 * effects to the guest. Emulate the behavior by clearing
		 * MDSCR_EL1.MDE. In so doing, we ensure that host debug
		 * exceptions are unaffected by guest configuration of the OS
		 * Lock.
		 */
		} else if (kvm_vcpu_os_lock_enabled(vcpu)) {
			mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
			mdscr &= ~DBG_MDSCR_MDE;
			vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);
		}
	}

	BUG_ON(!vcpu->guest_debug &&
		vcpu->arch.debug_ptr != &vcpu->arch.vcpu_debug_state);

	/* If KDE or MDE are set, perform a full save/restore cycle. */
	if (vcpu_read_sys_reg(vcpu, MDSCR_EL1) & (DBG_MDSCR_KDE | DBG_MDSCR_MDE))
		vcpu_set_flag(vcpu, DEBUG_DIRTY);

	/* Write mdcr_el2 changes since vcpu_load on VHE systems */
	if (has_vhe() && orig_mdcr_el2 != vcpu->arch.mdcr_el2)
		write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);

	trace_kvm_arm_set_dreg32("MDSCR_EL1", vcpu_read_sys_reg(vcpu, MDSCR_EL1));
}
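
/*
 * For context: when userspace sets KVM_GUESTDBG_USE_HW, the
 * external_debug_state used above is filled from the same
 * KVM_SET_GUEST_DEBUG ioctl. A sketch only; breakpoint_address and
 * bcr_value are placeholders (the DBGBCR enable/type bits are not
 * spelled out here):
 *
 *	struct kvm_guest_debug dbg = {
 *		.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW,
 *	};
 *	dbg.arch.dbg_bvr[0] = breakpoint_address;
 *	dbg.arch.dbg_bcr[0] = bcr_value;
 *	ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
 */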

void kvm_arm_clear_debug(struct kvm_vcpu *vcpu)
{
	trace_kvm_arm_clear_debug(vcpu->guest_debug);

	/*
	 * Restore the guest's debug registers if we were using them.
	 */
	if (kvm_vcpu_needs_debug_regs(vcpu)) {
		if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
			if (!(*vcpu_cpsr(vcpu) & DBG_SPSR_SS))
				/*
				 * Mark the vcpu as ACTIVE_PENDING
				 * until Software Step exception is taken.
				 */
				vcpu_set_flag(vcpu, DBG_SS_ACTIVE_PENDING);
		}

		restore_guest_debug_regs(vcpu);

		/*
		 * If we were using HW debug we need to restore the
		 * debug_ptr to the guest debug state.
		 */
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
			kvm_arm_reset_debug_ptr(vcpu);

			trace_kvm_arm_set_regset("BKPTS", get_num_brps(),
						&vcpu->arch.debug_ptr->dbg_bcr[0],
						&vcpu->arch.debug_ptr->dbg_bvr[0]);

			trace_kvm_arm_set_regset("WAPTS", get_num_wrps(),
						&vcpu->arch.debug_ptr->dbg_wcr[0],
						&vcpu->arch.debug_ptr->dbg_wvr[0]);
		}
	}
}

void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu)
{
	u64 dfr0;

	/* For VHE, there is nothing to do */
	if (has_vhe())
		return;

	dfr0 = read_sysreg(id_aa64dfr0_el1);
	/*
	 * If SPE is present on this CPU and is available at current EL,
	 * we may need to check if the host state needs to be saved.
	 */
	if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_PMSVer_SHIFT) &&
	    !(read_sysreg_s(SYS_PMBIDR_EL1) & BIT(PMBIDR_EL1_P_SHIFT)))
		vcpu_set_flag(vcpu, DEBUG_STATE_SAVE_SPE);

	/* Check if we have TRBE implemented and available at the host */
	if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_TraceBuffer_SHIFT) &&
	    !(read_sysreg_s(SYS_TRBIDR_EL1) & TRBIDR_EL1_P))
		vcpu_set_flag(vcpu, DEBUG_STATE_SAVE_TRBE);
}

void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu)
{
	vcpu_clear_flag(vcpu, DEBUG_STATE_SAVE_SPE);
	vcpu_clear_flag(vcpu, DEBUG_STATE_SAVE_TRBE);
}