1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2015 - ARM Ltd
4 * Author: Marc Zyngier <marc.zyngier@arm.com>
5 */
6
7 #include <linux/compiler.h>
8 #include <linux/kvm_host.h>
9
10 #include <asm/debug-monitors.h>
11 #include <asm/kvm_asm.h>
12 #include <asm/kvm_hyp.h>
13 #include <asm/kvm_mmu.h>
14
/*
 * Access a numbered hardware debug register by token-pasting the index:
 * read_debug(dbgbcr, 3) expands to read_sysreg(dbgbcr3_el1).
 */
#define read_debug(r,n)		read_sysreg(r##n##_el1)
#define write_debug(v,r,n)	write_sysreg(v, r##n##_el1)
17
/*
 * Save dbg<reg>{0..nr}_el1 into ptr[0..nr].
 *
 * Every case deliberately falls through to the next, so switching on the
 * highest implemented register index saves that register and all
 * lower-numbered ones. Register 0 always exists on ARMv8, hence it lives
 * in the default case and is always saved.
 */
#define save_debug(ptr,reg,nr)						\
	switch (nr) {							\
	case 15:	ptr[15] = read_debug(reg, 15);			\
			/* Fall through */				\
	case 14:	ptr[14] = read_debug(reg, 14);			\
			/* Fall through */				\
	case 13:	ptr[13] = read_debug(reg, 13);			\
			/* Fall through */				\
	case 12:	ptr[12] = read_debug(reg, 12);			\
			/* Fall through */				\
	case 11:	ptr[11] = read_debug(reg, 11);			\
			/* Fall through */				\
	case 10:	ptr[10] = read_debug(reg, 10);			\
			/* Fall through */				\
	case 9:		ptr[9] = read_debug(reg, 9);			\
			/* Fall through */				\
	case 8:		ptr[8] = read_debug(reg, 8);			\
			/* Fall through */				\
	case 7:		ptr[7] = read_debug(reg, 7);			\
			/* Fall through */				\
	case 6:		ptr[6] = read_debug(reg, 6);			\
			/* Fall through */				\
	case 5:		ptr[5] = read_debug(reg, 5);			\
			/* Fall through */				\
	case 4:		ptr[4] = read_debug(reg, 4);			\
			/* Fall through */				\
	case 3:		ptr[3] = read_debug(reg, 3);			\
			/* Fall through */				\
	case 2:		ptr[2] = read_debug(reg, 2);			\
			/* Fall through */				\
	case 1:		ptr[1] = read_debug(reg, 1);			\
			/* Fall through */				\
	default:	ptr[0] = read_debug(reg, 0);			\
	}
52
/*
 * Restore dbg<reg>{0..nr}_el1 from ptr[0..nr].
 *
 * Mirror image of save_debug(): the deliberate fallthrough writes the
 * register selected by the case label and every lower-numbered one,
 * ending with register 0 in the default case.
 */
#define restore_debug(ptr,reg,nr)					\
	switch (nr) {							\
	case 15:	write_debug(ptr[15], reg, 15);			\
			/* Fall through */				\
	case 14:	write_debug(ptr[14], reg, 14);			\
			/* Fall through */				\
	case 13:	write_debug(ptr[13], reg, 13);			\
			/* Fall through */				\
	case 12:	write_debug(ptr[12], reg, 12);			\
			/* Fall through */				\
	case 11:	write_debug(ptr[11], reg, 11);			\
			/* Fall through */				\
	case 10:	write_debug(ptr[10], reg, 10);			\
			/* Fall through */				\
	case 9:		write_debug(ptr[9], reg, 9);			\
			/* Fall through */				\
	case 8:		write_debug(ptr[8], reg, 8);			\
			/* Fall through */				\
	case 7:		write_debug(ptr[7], reg, 7);			\
			/* Fall through */				\
	case 6:		write_debug(ptr[6], reg, 6);			\
			/* Fall through */				\
	case 5:		write_debug(ptr[5], reg, 5);			\
			/* Fall through */				\
	case 4:		write_debug(ptr[4], reg, 4);			\
			/* Fall through */				\
	case 3:		write_debug(ptr[3], reg, 3);			\
			/* Fall through */				\
	case 2:		write_debug(ptr[2], reg, 2);			\
			/* Fall through */				\
	case 1:		write_debug(ptr[1], reg, 1);			\
			/* Fall through */				\
	default:	write_debug(ptr[0], reg, 0);			\
	}
87
/*
 * Stop host Statistical Profiling Extension (SPE) tracing before running
 * the guest (non-VHE only).
 *
 * Saves the host's PMSCR_EL1 into *pmscr_el1 so it can be restored
 * later, then disables data generation and drains the profiling buffer.
 * *pmscr_el1 is left at 0 (meaning "nothing to restore") if SPE is not
 * implemented, is owned by EL3, or its buffer is not enabled.
 */
static void __hyp_text __debug_save_spe_nvhe(u64 *pmscr_el1)
{
	u64 reg;

	/* Clear pmscr in case of early return */
	*pmscr_el1 = 0;

	/* SPE present on this CPU? */
	if (!cpuid_feature_extract_unsigned_field(read_sysreg(id_aa64dfr0_el1),
						  ID_AA64DFR0_PMSVER_SHIFT))
		return;

	/* Yes; is it owned by EL3? */
	reg = read_sysreg_s(SYS_PMBIDR_EL1);
	if (reg & BIT(SYS_PMBIDR_EL1_P_SHIFT))
		return;

	/* No; is the host actually using the thing? */
	reg = read_sysreg_s(SYS_PMBLIMITR_EL1);
	if (!(reg & BIT(SYS_PMBLIMITR_EL1_E_SHIFT)))
		return;

	/* Yes; save the control register and disable data generation */
	*pmscr_el1 = read_sysreg_s(SYS_PMSCR_EL1);
	write_sysreg_s(0, SYS_PMSCR_EL1);
	/* Make the write visible before draining */
	isb();

	/* Now drain all buffered data to memory */
	psb_csync();
	dsb(nsh);
}
119
/*
 * Re-enable host SPE tracing after a guest exit (non-VHE only).
 *
 * @pmscr_el1: value saved by __debug_save_spe_nvhe(); 0 means SPE was
 * not in use on entry and there is nothing to restore.
 */
static void __hyp_text __debug_restore_spe_nvhe(u64 pmscr_el1)
{
	if (!pmscr_el1)
		return;

	/* The host page table is installed, but not yet synchronised */
	isb();

	/* Re-enable data generation */
	write_sysreg_s(pmscr_el1, SYS_PMSCR_EL1);
}
131
__debug_save_state(struct kvm_vcpu * vcpu,struct kvm_guest_debug_arch * dbg,struct kvm_cpu_context * ctxt)132 static void __hyp_text __debug_save_state(struct kvm_vcpu *vcpu,
133 struct kvm_guest_debug_arch *dbg,
134 struct kvm_cpu_context *ctxt)
135 {
136 u64 aa64dfr0;
137 int brps, wrps;
138
139 aa64dfr0 = read_sysreg(id_aa64dfr0_el1);
140 brps = (aa64dfr0 >> 12) & 0xf;
141 wrps = (aa64dfr0 >> 20) & 0xf;
142
143 save_debug(dbg->dbg_bcr, dbgbcr, brps);
144 save_debug(dbg->dbg_bvr, dbgbvr, brps);
145 save_debug(dbg->dbg_wcr, dbgwcr, wrps);
146 save_debug(dbg->dbg_wvr, dbgwvr, wrps);
147
148 ctxt->sys_regs[MDCCINT_EL1] = read_sysreg(mdccint_el1);
149 }
150
__debug_restore_state(struct kvm_vcpu * vcpu,struct kvm_guest_debug_arch * dbg,struct kvm_cpu_context * ctxt)151 static void __hyp_text __debug_restore_state(struct kvm_vcpu *vcpu,
152 struct kvm_guest_debug_arch *dbg,
153 struct kvm_cpu_context *ctxt)
154 {
155 u64 aa64dfr0;
156 int brps, wrps;
157
158 aa64dfr0 = read_sysreg(id_aa64dfr0_el1);
159
160 brps = (aa64dfr0 >> 12) & 0xf;
161 wrps = (aa64dfr0 >> 20) & 0xf;
162
163 restore_debug(dbg->dbg_bcr, dbgbcr, brps);
164 restore_debug(dbg->dbg_bvr, dbgbvr, brps);
165 restore_debug(dbg->dbg_wcr, dbgwcr, wrps);
166 restore_debug(dbg->dbg_wvr, dbgwvr, wrps);
167
168 write_sysreg(ctxt->sys_regs[MDCCINT_EL1], mdccint_el1);
169 }
170
/*
 * Save and disable the host's SPE buffer state before entering the
 * guest. The saved PMSCR_EL1 lands in vcpu->arch.host_debug_state.
 */
void __hyp_text __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu)
{
	/*
	 * Non-VHE: Disable and flush SPE data generation
	 * VHE: The vcpu can run, but it can't hide.
	 */
	__debug_save_spe_nvhe(&vcpu->arch.host_debug_state.pmscr_el1);

}
180
/* Re-enable the host's SPE buffer state saved on guest entry, if any. */
void __hyp_text __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu)
{
	__debug_restore_spe_nvhe(vcpu->arch.host_debug_state.pmscr_el1);
}
185
__debug_switch_to_guest(struct kvm_vcpu * vcpu)186 void __hyp_text __debug_switch_to_guest(struct kvm_vcpu *vcpu)
187 {
188 struct kvm_cpu_context *host_ctxt;
189 struct kvm_cpu_context *guest_ctxt;
190 struct kvm_guest_debug_arch *host_dbg;
191 struct kvm_guest_debug_arch *guest_dbg;
192
193 if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
194 return;
195
196 host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
197 guest_ctxt = &vcpu->arch.ctxt;
198 host_dbg = &vcpu->arch.host_debug_state.regs;
199 guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr);
200
201 __debug_save_state(vcpu, host_dbg, host_ctxt);
202 __debug_restore_state(vcpu, guest_dbg, guest_ctxt);
203 }
204
__debug_switch_to_host(struct kvm_vcpu * vcpu)205 void __hyp_text __debug_switch_to_host(struct kvm_vcpu *vcpu)
206 {
207 struct kvm_cpu_context *host_ctxt;
208 struct kvm_cpu_context *guest_ctxt;
209 struct kvm_guest_debug_arch *host_dbg;
210 struct kvm_guest_debug_arch *guest_dbg;
211
212
213 if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
214 return;
215
216 host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
217 guest_ctxt = &vcpu->arch.ctxt;
218 host_dbg = &vcpu->arch.host_debug_state.regs;
219 guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr);
220
221 __debug_save_state(vcpu, guest_dbg, guest_ctxt);
222 __debug_restore_state(vcpu, host_dbg, host_ctxt);
223
224 vcpu->arch.flags &= ~KVM_ARM64_DEBUG_DIRTY;
225 }
226
/* Hyp-callable accessor returning the current MDCR_EL2 value. */
u32 __hyp_text __kvm_get_mdcr_el2(void)
{
	return read_sysreg(mdcr_el2);
}
231