// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2021 Google LLC
 * Author: Fuad Tabba <tabba@google.com>
 */

#include <linux/irqchip/arm-gic-v3.h>

#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>

#include <hyp/adjust_pc.h>

#include <nvhe/pkvm.h>

#include "../../sys_regs.h"

/*
 * Copies of the host's CPU feature registers holding sanitized values at hyp.
 */
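/*
 * Note: the host is expected to populate these during initialization;
 * hyp itself only reads them.
 */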
u64 id_aa64pfr0_el1_sys_val;
u64 id_aa64pfr1_el1_sys_val;
u64 id_aa64isar0_el1_sys_val;
u64 id_aa64isar1_el1_sys_val;
u64 id_aa64isar2_el1_sys_val;
u64 id_aa64mmfr0_el1_sys_val;
u64 id_aa64mmfr1_el1_sys_val;
u64 id_aa64mmfr2_el1_sys_val;

/*
 * Inject an unknown/undefined exception into an AArch64 guest while most of
 * its sysregs are live.
 */
static void inject_undef64(struct kvm_vcpu *vcpu)
{
	u32 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);

	*vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
	*vcpu_cpsr(vcpu) = read_sysreg_el2(SYS_SPSR);

	kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC);

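	/*
	 * Resolve the pending exception now: __kvm_adjust_pc() updates the
	 * vcpu's PC and CPSR to reflect entry at the guest's EL1 vector.
	 */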
	__kvm_adjust_pc(vcpu);

	write_sysreg_el1(esr, SYS_ESR);
	write_sysreg_el1(read_sysreg_el2(SYS_ELR), SYS_ELR);
	write_sysreg_el2(*vcpu_pc(vcpu), SYS_ELR);
	write_sysreg_el2(*vcpu_cpsr(vcpu), SYS_SPSR);
}

/*
 * Returns the restricted feature values of the feature register, based on
 * the limitations in restrict_fields.
 * A feature id field value of 0b0000 does not impose any restrictions.
 * Note: Use only for unsigned feature field values.
 */
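/*
 * Illustrative example (hypothetical values, assuming 4-bit fields):
 * sys_reg_val = 0x21 and restrict_fields = 0x12 yield 0x11, i.e. each
 * field is independently clamped to the unsigned minimum of the system
 * value and the restriction.
 */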
static u64 get_restricted_features_unsigned(u64 sys_reg_val,
					    u64 restrict_fields)
{
	u64 value = 0UL;
	u64 mask = GENMASK_ULL(ARM64_FEATURE_FIELD_BITS - 1, 0);

	/*
	 * According to the Arm Architecture Reference Manual, feature fields
	 * use increasing values to indicate increases in functionality.
	 * Iterate over the restricted feature fields and calculate the minimum
	 * unsigned value between the one supported by the system, and what the
	 * value is being restricted to.
	 */
	while (sys_reg_val && restrict_fields) {
		value |= min(sys_reg_val & mask, restrict_fields & mask);
		sys_reg_val &= ~mask;
		restrict_fields &= ~mask;
		mask <<= ARM64_FEATURE_FIELD_BITS;
	}

	return value;
}

/*
 * Functions that return the value of feature id registers for protected VMs
 * based on allowed features, system features, and KVM support.
 */

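/*
 * The common pattern below: start from hyp's sanitized copy of the register,
 * keep only the fields named in the PVM_ID_*_ALLOW mask, and, for fields that
 * must be capped rather than hidden outright, OR in the clamped value
 * computed by get_restricted_features_unsigned().
 */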
static u64 get_pvm_id_aa64pfr0(const struct kvm_vcpu *vcpu)
{
	u64 set_mask = 0;
	u64 allow_mask = PVM_ID_AA64PFR0_ALLOW;

	set_mask |= get_restricted_features_unsigned(id_aa64pfr0_el1_sys_val,
		PVM_ID_AA64PFR0_RESTRICT_UNSIGNED);

	return (id_aa64pfr0_el1_sys_val & allow_mask) | set_mask;
}

static u64 get_pvm_id_aa64pfr1(const struct kvm_vcpu *vcpu)
{
	const struct kvm *kvm = (const struct kvm *)kern_hyp_va(vcpu->kvm);
	u64 allow_mask = PVM_ID_AA64PFR1_ALLOW;

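	/* Only expose MTE to the guest if it has been enabled for this VM. */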
	if (!kvm_has_mte(kvm))
		allow_mask &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE);

	return id_aa64pfr1_el1_sys_val & allow_mask;
}

static u64 get_pvm_id_aa64zfr0(const struct kvm_vcpu *vcpu)
{
	/*
	 * No support for Scalable Vectors; therefore, hyp has no sanitized
	 * copy of the feature id register.
	 */
	BUILD_BUG_ON(PVM_ID_AA64ZFR0_ALLOW != 0ULL);
	return 0;
}

static u64 get_pvm_id_aa64dfr0(const struct kvm_vcpu *vcpu)
{
	/*
	 * No support for debug, including breakpoints and watchpoints;
	 * therefore, pKVM has no sanitized copy of the feature id register.
	 */
	BUILD_BUG_ON(PVM_ID_AA64DFR0_ALLOW != 0ULL);
	return 0;
}

static u64 get_pvm_id_aa64dfr1(const struct kvm_vcpu *vcpu)
{
	/*
	 * No support for debug; therefore, hyp has no sanitized copy of the
	 * feature id register.
	 */
	BUILD_BUG_ON(PVM_ID_AA64DFR1_ALLOW != 0ULL);
	return 0;
}

static u64 get_pvm_id_aa64afr0(const struct kvm_vcpu *vcpu)
{
	/*
	 * No support for implementation defined features; therefore, hyp has
	 * no sanitized copy of the feature id register.
	 */
	BUILD_BUG_ON(PVM_ID_AA64AFR0_ALLOW != 0ULL);
	return 0;
}

static u64 get_pvm_id_aa64afr1(const struct kvm_vcpu *vcpu)
{
	/*
	 * No support for implementation defined features; therefore, hyp has
	 * no sanitized copy of the feature id register.
	 */
	BUILD_BUG_ON(PVM_ID_AA64AFR1_ALLOW != 0ULL);
	return 0;
}

static u64 get_pvm_id_aa64isar0(const struct kvm_vcpu *vcpu)
{
	return id_aa64isar0_el1_sys_val & PVM_ID_AA64ISAR0_ALLOW;
}

static u64 get_pvm_id_aa64isar1(const struct kvm_vcpu *vcpu)
{
	u64 allow_mask = PVM_ID_AA64ISAR1_ALLOW;

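	/*
	 * Hide address and generic pointer authentication (both architected
	 * and imp-def flavours) unless ptrauth is enabled for this vcpu.
	 */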
	if (!vcpu_has_ptrauth(vcpu))
		allow_mask &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA) |
				ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API) |
				ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPA) |
				ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPI));

	return id_aa64isar1_el1_sys_val & allow_mask;
}

static u64 get_pvm_id_aa64isar2(const struct kvm_vcpu *vcpu)
{
	u64 allow_mask = PVM_ID_AA64ISAR2_ALLOW;

	if (!vcpu_has_ptrauth(vcpu))
		allow_mask &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3) |
				ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3));

	return id_aa64isar2_el1_sys_val & allow_mask;
}

static u64 get_pvm_id_aa64mmfr0(const struct kvm_vcpu *vcpu)
{
	u64 set_mask;

	set_mask = get_restricted_features_unsigned(id_aa64mmfr0_el1_sys_val,
		PVM_ID_AA64MMFR0_RESTRICT_UNSIGNED);

	return (id_aa64mmfr0_el1_sys_val & PVM_ID_AA64MMFR0_ALLOW) | set_mask;
}

static u64 get_pvm_id_aa64mmfr1(const struct kvm_vcpu *vcpu)
{
	return id_aa64mmfr1_el1_sys_val & PVM_ID_AA64MMFR1_ALLOW;
}

static u64 get_pvm_id_aa64mmfr2(const struct kvm_vcpu *vcpu)
{
	return id_aa64mmfr2_el1_sys_val & PVM_ID_AA64MMFR2_ALLOW;
}

/* Read a sanitized cpufeature ID register by its encoding. */
u64 pvm_read_id_reg(const struct kvm_vcpu *vcpu, u32 id)
{
	switch (id) {
	case SYS_ID_AA64PFR0_EL1:
		return get_pvm_id_aa64pfr0(vcpu);
	case SYS_ID_AA64PFR1_EL1:
		return get_pvm_id_aa64pfr1(vcpu);
	case SYS_ID_AA64ZFR0_EL1:
		return get_pvm_id_aa64zfr0(vcpu);
	case SYS_ID_AA64DFR0_EL1:
		return get_pvm_id_aa64dfr0(vcpu);
	case SYS_ID_AA64DFR1_EL1:
		return get_pvm_id_aa64dfr1(vcpu);
	case SYS_ID_AA64AFR0_EL1:
		return get_pvm_id_aa64afr0(vcpu);
	case SYS_ID_AA64AFR1_EL1:
		return get_pvm_id_aa64afr1(vcpu);
	case SYS_ID_AA64ISAR0_EL1:
		return get_pvm_id_aa64isar0(vcpu);
	case SYS_ID_AA64ISAR1_EL1:
		return get_pvm_id_aa64isar1(vcpu);
	case SYS_ID_AA64ISAR2_EL1:
		return get_pvm_id_aa64isar2(vcpu);
	case SYS_ID_AA64MMFR0_EL1:
		return get_pvm_id_aa64mmfr0(vcpu);
	case SYS_ID_AA64MMFR1_EL1:
		return get_pvm_id_aa64mmfr1(vcpu);
	case SYS_ID_AA64MMFR2_EL1:
		return get_pvm_id_aa64mmfr2(vcpu);
	default:
		/* Unhandled ID register, RAZ */
		return 0;
	}
}

static u64 read_id_reg(const struct kvm_vcpu *vcpu,
		       struct sys_reg_desc const *r)
{
	return pvm_read_id_reg(vcpu, reg_to_encoding(r));
}

/* Handler to RAZ/WI sysregs */
static bool pvm_access_raz_wi(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	if (!p->is_write)
		p->regval = 0;

	return true;
}

/*
 * Accessor for AArch32 feature id registers.
 *
 * The value of these registers is "unknown" according to the spec if AArch32
 * isn't supported.
 */
static bool pvm_access_id_aarch32(struct kvm_vcpu *vcpu,
				  struct sys_reg_params *p,
				  const struct sys_reg_desc *r)
{
	if (p->is_write) {
		inject_undef64(vcpu);
		return false;
	}

	/*
	 * No support for AArch32 guests; therefore, pKVM has no sanitized copy
	 * of the AArch32 feature id registers. The assertion below guarantees
	 * that EL1 is restricted to AArch64 only, so their values are
	 * architecturally UNKNOWN and returning zero (RAZ) is legitimate.
	 */
	BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL1),
		     PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) > ID_AA64PFR0_EL1_ELx_64BIT_ONLY);

	return pvm_access_raz_wi(vcpu, p, r);
}

/*
 * Accessor for AArch64 feature id registers.
 *
 * If access is allowed, set the regval to the protected VM's view of the
 * register and return true.
 * Otherwise, inject an undefined exception and return false.
 */
static bool pvm_access_id_aarch64(struct kvm_vcpu *vcpu,
				  struct sys_reg_params *p,
				  const struct sys_reg_desc *r)
{
	if (p->is_write) {
		inject_undef64(vcpu);
		return false;
	}

	p->regval = read_id_reg(vcpu, r);
	return true;
}

static bool pvm_gic_read_sre(struct kvm_vcpu *vcpu,
			     struct sys_reg_params *p,
			     const struct sys_reg_desc *r)
{
	/* pVMs only support GICv3. 'nuf said. */
	if (!p->is_write)
		p->regval = ICC_SRE_EL1_DIB | ICC_SRE_EL1_DFB | ICC_SRE_EL1_SRE;

	return true;
}

/* Mark the specified system register as an AArch32 feature id register. */
#define AARCH32(REG) { SYS_DESC(REG), .access = pvm_access_id_aarch32 }

/* Mark the specified system register as an AArch64 feature id register. */
#define AARCH64(REG) { SYS_DESC(REG), .access = pvm_access_id_aarch64 }

/*
 * sys_reg_desc initialiser for an architecturally unallocated cpufeature ID
 * register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2
 * (1 <= crm < 8, 0 <= Op2 < 8).
 */
#define ID_UNALLOCATED(crm, op2) {			\
	Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2),	\
	.access = pvm_access_id_aarch64,		\
}

/* Mark the specified system register as Read-As-Zero/Write-Ignored. */
#define RAZ_WI(REG) { SYS_DESC(REG), .access = pvm_access_raz_wi }

/* Mark the specified system register as not being handled in hyp. */
#define HOST_HANDLED(REG) { SYS_DESC(REG), .access = NULL }
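
/*
 * Note: kvm_handle_pvm_sysreg() below interprets a NULL .access handler as
 * "defer this access to the host".
 */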

/*
 * Architected system registers.
 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2.
 *
 * NOTE: Anything not explicitly listed here is *restricted by default*, i.e.,
 * it will lead to injecting an exception into the guest.
 */
static const struct sys_reg_desc pvm_sys_reg_descs[] = {
	/* Cache maintenance by set/way operations are restricted. */

	/* Debug and Trace Registers are restricted. */
	RAZ_WI(SYS_DBGBVRn_EL1(0)),
	RAZ_WI(SYS_DBGBCRn_EL1(0)),
	RAZ_WI(SYS_DBGWVRn_EL1(0)),
	RAZ_WI(SYS_DBGWCRn_EL1(0)),
	RAZ_WI(SYS_MDSCR_EL1),
	RAZ_WI(SYS_OSLAR_EL1),
	RAZ_WI(SYS_OSLSR_EL1),
	RAZ_WI(SYS_OSDLR_EL1),

	/* Group 1 ID registers */
	RAZ_WI(SYS_REVIDR_EL1),

	/* AArch64 mappings of the AArch32 ID registers */
	/* CRm=1 */
	AARCH32(SYS_ID_PFR0_EL1),
	AARCH32(SYS_ID_PFR1_EL1),
	AARCH32(SYS_ID_DFR0_EL1),
	AARCH32(SYS_ID_AFR0_EL1),
	AARCH32(SYS_ID_MMFR0_EL1),
	AARCH32(SYS_ID_MMFR1_EL1),
	AARCH32(SYS_ID_MMFR2_EL1),
	AARCH32(SYS_ID_MMFR3_EL1),

	/* CRm=2 */
	AARCH32(SYS_ID_ISAR0_EL1),
	AARCH32(SYS_ID_ISAR1_EL1),
	AARCH32(SYS_ID_ISAR2_EL1),
	AARCH32(SYS_ID_ISAR3_EL1),
	AARCH32(SYS_ID_ISAR4_EL1),
	AARCH32(SYS_ID_ISAR5_EL1),
	AARCH32(SYS_ID_MMFR4_EL1),
	AARCH32(SYS_ID_ISAR6_EL1),

	/* CRm=3 */
	AARCH32(SYS_MVFR0_EL1),
	AARCH32(SYS_MVFR1_EL1),
	AARCH32(SYS_MVFR2_EL1),
	ID_UNALLOCATED(3,3),
	AARCH32(SYS_ID_PFR2_EL1),
	AARCH32(SYS_ID_DFR1_EL1),
	AARCH32(SYS_ID_MMFR5_EL1),
	ID_UNALLOCATED(3,7),

	/* AArch64 ID registers */
	/* CRm=4 */
	AARCH64(SYS_ID_AA64PFR0_EL1),
	AARCH64(SYS_ID_AA64PFR1_EL1),
	ID_UNALLOCATED(4,2),
	ID_UNALLOCATED(4,3),
	AARCH64(SYS_ID_AA64ZFR0_EL1),
	ID_UNALLOCATED(4,5),
	ID_UNALLOCATED(4,6),
	ID_UNALLOCATED(4,7),
	AARCH64(SYS_ID_AA64DFR0_EL1),
	AARCH64(SYS_ID_AA64DFR1_EL1),
	ID_UNALLOCATED(5,2),
	ID_UNALLOCATED(5,3),
	AARCH64(SYS_ID_AA64AFR0_EL1),
	AARCH64(SYS_ID_AA64AFR1_EL1),
	ID_UNALLOCATED(5,6),
	ID_UNALLOCATED(5,7),
	AARCH64(SYS_ID_AA64ISAR0_EL1),
	AARCH64(SYS_ID_AA64ISAR1_EL1),
	AARCH64(SYS_ID_AA64ISAR2_EL1),
	ID_UNALLOCATED(6,3),
	ID_UNALLOCATED(6,4),
	ID_UNALLOCATED(6,5),
	ID_UNALLOCATED(6,6),
	ID_UNALLOCATED(6,7),
	AARCH64(SYS_ID_AA64MMFR0_EL1),
	AARCH64(SYS_ID_AA64MMFR1_EL1),
	AARCH64(SYS_ID_AA64MMFR2_EL1),
	ID_UNALLOCATED(7,3),
	ID_UNALLOCATED(7,4),
	ID_UNALLOCATED(7,5),
	ID_UNALLOCATED(7,6),
	ID_UNALLOCATED(7,7),

	/* Scalable Vector Registers are restricted. */

	RAZ_WI(SYS_ERRIDR_EL1),
	RAZ_WI(SYS_ERRSELR_EL1),
	RAZ_WI(SYS_ERXFR_EL1),
	RAZ_WI(SYS_ERXCTLR_EL1),
	RAZ_WI(SYS_ERXSTATUS_EL1),
	RAZ_WI(SYS_ERXADDR_EL1),
	RAZ_WI(SYS_ERXMISC0_EL1),
	RAZ_WI(SYS_ERXMISC1_EL1),

	/* Performance Monitoring Registers are restricted. */

	/* Limited Ordering Regions Registers are restricted. */

	HOST_HANDLED(SYS_ICC_SGI1R_EL1),
	HOST_HANDLED(SYS_ICC_ASGI1R_EL1),
	HOST_HANDLED(SYS_ICC_SGI0R_EL1),
	{ SYS_DESC(SYS_ICC_SRE_EL1), .access = pvm_gic_read_sre, },

	HOST_HANDLED(SYS_CCSIDR_EL1),
	HOST_HANDLED(SYS_CLIDR_EL1),
	HOST_HANDLED(SYS_CSSELR_EL1),
	HOST_HANDLED(SYS_CTR_EL0),

	/* Performance Monitoring Registers are restricted. */

	/* Activity Monitoring Registers are restricted. */

	HOST_HANDLED(SYS_CNTP_TVAL_EL0),
	HOST_HANDLED(SYS_CNTP_CTL_EL0),
	HOST_HANDLED(SYS_CNTP_CVAL_EL0),

	/* Performance Monitoring Registers are restricted. */
};

/* A structure to track reset values for system registers in protected vcpus. */
struct sys_reg_desc_reset {
	/* Index into sys_reg[]. */
	int reg;

	/* Reset function. */
	void (*reset)(struct kvm_vcpu *, const struct sys_reg_desc_reset *);

	/* Reset value. */
	u64 value;
};

static void reset_actlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc_reset *r)
{
	__vcpu_sys_reg(vcpu, r->reg) = read_sysreg(actlr_el1);
}

static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc_reset *r)
{
	__vcpu_sys_reg(vcpu, r->reg) = read_sysreg(amair_el1);
}

static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc_reset *r)
{
	__vcpu_sys_reg(vcpu, r->reg) = calculate_mpidr(vcpu);
}

static void reset_value(struct kvm_vcpu *vcpu, const struct sys_reg_desc_reset *r)
{
	__vcpu_sys_reg(vcpu, r->reg) = r->value;
}

/* Specify the register's reset value. */
#define RESET_VAL(REG, RESET_VAL) { REG, reset_value, RESET_VAL }

/* Specify a function that calculates the register's reset value. */
#define RESET_FUNC(REG, RESET_FUNC) { REG, RESET_FUNC, 0 }
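
/*
 * For example, RESET_VAL(TCR_EL1, 0) expands to { TCR_EL1, reset_value, 0 },
 * and RESET_FUNC(MPIDR_EL1, reset_mpidr) to { MPIDR_EL1, reset_mpidr, 0 }.
 */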

/*
 * Architected system register reset values for Protected VMs.
 * Important: Must be sorted ascending by REG (index into sys_reg[]).
 */
static const struct sys_reg_desc_reset pvm_sys_reg_reset_vals[] = {
	RESET_FUNC(MPIDR_EL1, reset_mpidr),
	RESET_VAL(SCTLR_EL1, 0x00C50078),
	RESET_FUNC(ACTLR_EL1, reset_actlr),
	RESET_VAL(CPACR_EL1, 0),
	RESET_VAL(ZCR_EL1, 0),
	RESET_VAL(TCR_EL1, 0),
	RESET_VAL(VBAR_EL1, 0),
	RESET_VAL(CONTEXTIDR_EL1, 0),
	RESET_FUNC(AMAIR_EL1, reset_amair_el1),
	RESET_VAL(CNTKCTL_EL1, 0),
	RESET_VAL(MDSCR_EL1, 0),
	RESET_VAL(MDCCINT_EL1, 0),
	RESET_VAL(DISR_EL1, 0),
	RESET_VAL(PMCCFILTR_EL0, 0),
	RESET_VAL(PMUSERENR_EL0, 0),
};

/*
 * Sets system registers to their reset values.
 *
 * Walks the reset table and sets each listed register on the protected vcpu
 * to its architecturally defined reset value.
 */
void kvm_reset_pvm_sys_regs(struct kvm_vcpu *vcpu)
{
	unsigned long i;

	for (i = 0; i < ARRAY_SIZE(pvm_sys_reg_reset_vals); i++) {
		const struct sys_reg_desc_reset *r = &pvm_sys_reg_reset_vals[i];

		r->reset(vcpu, r);
	}
}

/*
 * Checks that the sysreg tables are unique and sorted in ascending order.
 *
 * Returns 0 if the tables are consistent, or 1 otherwise.
 */
int kvm_check_pvm_sysreg_table(void)
{
	unsigned int i;

	for (i = 1; i < ARRAY_SIZE(pvm_sys_reg_descs); i++) {
		if (cmp_sys_reg(&pvm_sys_reg_descs[i-1], &pvm_sys_reg_descs[i]) >= 0)
			return 1;
	}

	for (i = 1; i < ARRAY_SIZE(pvm_sys_reg_reset_vals); i++) {
		if (pvm_sys_reg_reset_vals[i-1].reg >= pvm_sys_reg_reset_vals[i].reg)
			return 1;
	}

	return 0;
}
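
/*
 * This check is intended to run once while the hypervisor is being
 * initialized (assumption: callers treat a non-zero return as fatal).
 */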

/*
 * Handler for protected VM MSR, MRS or System instruction execution.
 *
 * Returns true if the hypervisor has handled the exit, and control should go
 * back to the guest, or false if it hasn't, to be handled by the host.
 */
bool kvm_handle_pvm_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	const struct sys_reg_desc *r;
	struct sys_reg_params params;
	unsigned long esr = kvm_vcpu_get_esr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);

	params = esr_sys64_to_params(esr);
	params.regval = vcpu_get_reg(vcpu, Rt);
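
	/*
	 * params now carries the Op0/Op1/CRn/CRm/Op2 encoding decoded from the
	 * ESR, plus the value to be written (ignored for reads).
	 */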
	r = find_reg(&params, pvm_sys_reg_descs, ARRAY_SIZE(pvm_sys_reg_descs));

	/* Undefined (RESTRICTED). */
	if (r == NULL) {
		inject_undef64(vcpu);
		return true;
	}

	/* Handled by the host (HOST_HANDLED). */
	if (r->access == NULL)
		return false;

	/* Handled by hyp: skip the instruction if instructed to do so. */
	if (r->access(vcpu, &params, r))
		__kvm_skip_instr(vcpu);

	if (!params.is_write)
		vcpu_set_reg(vcpu, Rt, params.regval);

	return true;
}

/*
 * Handler for protected VM restricted exceptions.
 *
 * Inject an undefined exception into the guest and return true to indicate
 * that the hypervisor has handled the exit, and control should go back to
 * the guest.
 */
bool kvm_handle_pvm_restricted(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	inject_undef64(vcpu);
	return true;
}