// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2021 Google LLC
 * Author: Fuad Tabba <tabba@google.com>
 */

#include <linux/irqchip/arm-gic-v3.h>

#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>

#include <hyp/adjust_pc.h>

#include <nvhe/pkvm.h>

#include "../../sys_regs.h"

/*
 * Copies of the host's CPU feature registers holding sanitized values at hyp.
 */
u64 id_aa64pfr0_el1_sys_val;
u64 id_aa64pfr1_el1_sys_val;
u64 id_aa64zfr0_el1_sys_val;
u64 id_aa64isar0_el1_sys_val;
u64 id_aa64isar1_el1_sys_val;
u64 id_aa64isar2_el1_sys_val;
u64 id_aa64mmfr0_el1_sys_val;
u64 id_aa64mmfr1_el1_sys_val;
u64 id_aa64mmfr2_el1_sys_val;
u64 id_aa64smfr0_el1_sys_val;

/*
 * Inject an unknown/undefined exception into an AArch64 guest while most of
 * its sysregs are live.
 */
static void inject_undef64(struct kvm_vcpu *vcpu)
{
	u64 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);

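	/*
	 * On entry from the guest, ELR_EL2/SPSR_EL2 still hold the guest's
	 * PC and PSTATE; snapshot them so that __kvm_adjust_pc() can compute
	 * the exception entry context from them.
	 */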
	*vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
	*vcpu_cpsr(vcpu) = read_sysreg_el2(SYS_SPSR);

	kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC);

	__kvm_adjust_pc(vcpu);

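	/*
	 * __kvm_adjust_pc() has folded the pending exception into the vcpu's
	 * PC/PSTATE. Describe the fault to the guest's EL1 handler via
	 * ESR_EL1/ELR_EL1, then point the hypervisor's exception return
	 * (ELR_EL2/SPSR_EL2) at the guest's vector.
	 */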
	write_sysreg_el1(esr, SYS_ESR);
	write_sysreg_el1(read_sysreg_el2(SYS_ELR), SYS_ELR);
	write_sysreg_el2(*vcpu_pc(vcpu), SYS_ELR);
	write_sysreg_el2(*vcpu_cpsr(vcpu), SYS_SPSR);
}

/*
 * Returns the restricted feature values of the feature register, based on
 * the limitations in restrict_fields.
 * Note: Use only for unsigned feature field values.
 */
static u64 get_restricted_features_unsigned(u64 sys_reg_val,
					    u64 restrict_fields)
{
	u64 value = 0UL;
	u64 mask = GENMASK_ULL(ARM64_FEATURE_FIELD_BITS - 1, 0);

	/*
	 * According to the Arm Architecture Reference Manual, feature fields
	 * use increasing values to indicate increases in functionality.
	 * Iterate over the restricted feature fields and calculate the minimum
	 * unsigned value between the one supported by the system, and what the
	 * value is being restricted to.
	 */
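	/*
	 * For example, with 4-bit fields, sys_reg_val == 0x120 restricted by
	 * restrict_fields == 0x210 yields 0x110: each field is clamped to the
	 * smaller of the system-supported value and the allowed value.
	 */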
	while (sys_reg_val && restrict_fields) {
		value |= min(sys_reg_val & mask, restrict_fields & mask);
		sys_reg_val &= ~mask;
		restrict_fields &= ~mask;
		mask <<= ARM64_FEATURE_FIELD_BITS;
	}

	return value;
}

/*
 * Functions that return the value of feature id registers for protected VMs
 * based on allowed features, system features, and KVM support.
 */

static u64 get_pvm_id_aa64pfr0(const struct kvm_vcpu *vcpu)
{
	u64 value = get_restricted_features_unsigned(id_aa64pfr0_el1_sys_val,
						     PVM_ID_AA64PFR0_ALLOW);

	if (!vcpu_has_sve(vcpu))
		value &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE);

	return value;
}

static u64 get_pvm_id_aa64pfr1(const struct kvm_vcpu *vcpu)
{
	const struct kvm *kvm = (const struct kvm *)kern_hyp_va(vcpu->kvm);
	u64 value = get_restricted_features_unsigned(id_aa64pfr1_el1_sys_val,
						     PVM_ID_AA64PFR1_ALLOW);

	if (!kvm_has_mte(kvm))
		value &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE);

	return value;
}

static u64 get_pvm_id_aa64zfr0(const struct kvm_vcpu *vcpu)
{
	if (vcpu_has_sve(vcpu))
		return get_restricted_features_unsigned(id_aa64zfr0_el1_sys_val,
							PVM_ID_AA64ZFR0_ALLOW);

	return 0;
}

static u64 get_pvm_id_aa64dfr0(const struct kvm_vcpu *vcpu)
{
	/*
	 * No support for debug, including breakpoints and watchpoints;
	 * therefore, pKVM has no sanitized copy of the feature id register.
	 */
	BUILD_BUG_ON(PVM_ID_AA64DFR0_ALLOW != 0ULL);
	return 0;
}

static u64 get_pvm_id_aa64dfr1(const struct kvm_vcpu *vcpu)
{
	/*
	 * No support for debug; therefore, hyp has no sanitized copy of the
	 * feature id register.
	 */
	BUILD_BUG_ON(PVM_ID_AA64DFR1_ALLOW != 0ULL);
	return 0;
}

static u64 get_pvm_id_aa64afr0(const struct kvm_vcpu *vcpu)
{
	/*
	 * No support for implementation-defined features; therefore, hyp has
	 * no sanitized copy of the feature id register.
	 */
	BUILD_BUG_ON(PVM_ID_AA64AFR0_ALLOW != 0ULL);
	return 0;
}

static u64 get_pvm_id_aa64afr1(const struct kvm_vcpu *vcpu)
{
	/*
	 * No support for implementation-defined features; therefore, hyp has
	 * no sanitized copy of the feature id register.
	 */
	BUILD_BUG_ON(PVM_ID_AA64AFR1_ALLOW != 0ULL);
	return 0;
}

static u64 get_pvm_id_aa64isar0(const struct kvm_vcpu *vcpu)
{
	return id_aa64isar0_el1_sys_val & PVM_ID_AA64ISAR0_ALLOW;
}

static u64 get_pvm_id_aa64isar1(const struct kvm_vcpu *vcpu)
{
	u64 value = get_restricted_features_unsigned(id_aa64isar1_el1_sys_val,
						     PVM_ID_AA64ISAR1_ALLOW);

	if (!vcpu_has_ptrauth(vcpu))
		value &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA) |
			   ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API) |
			   ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPA) |
			   ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPI));

	return value;
}

static u64 get_pvm_id_aa64isar2(const struct kvm_vcpu *vcpu)
{
	u64 value = get_restricted_features_unsigned(id_aa64isar2_el1_sys_val,
						     PVM_ID_AA64ISAR2_ALLOW);

	if (!vcpu_has_ptrauth(vcpu))
		value &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3) |
			   ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3));

	/*
	 * Masking the result with the raw system value again would corrupt
	 * fields already clamped to the minimum above, so return it as is
	 * (matching get_pvm_id_aa64isar1()).
	 */
	return value;
}

static u64 get_pvm_id_aa64mmfr0(const struct kvm_vcpu *vcpu)
{
	return get_restricted_features_unsigned(id_aa64mmfr0_el1_sys_val,
						PVM_ID_AA64MMFR0_ALLOW);
}

static u64 get_pvm_id_aa64mmfr1(const struct kvm_vcpu *vcpu)
{
	return get_restricted_features_unsigned(id_aa64mmfr1_el1_sys_val,
						PVM_ID_AA64MMFR1_ALLOW);
}

static u64 get_pvm_id_aa64mmfr2(const struct kvm_vcpu *vcpu)
{
	return get_restricted_features_unsigned(id_aa64mmfr2_el1_sys_val,
						PVM_ID_AA64MMFR2_ALLOW);
}

/* Read a sanitized cpufeature ID register by its encoding */
u64 pvm_read_id_reg(const struct kvm_vcpu *vcpu, u32 id)
{
	switch (id) {
	case SYS_ID_AA64PFR0_EL1:
		return get_pvm_id_aa64pfr0(vcpu);
	case SYS_ID_AA64PFR1_EL1:
		return get_pvm_id_aa64pfr1(vcpu);
	case SYS_ID_AA64ZFR0_EL1:
		return get_pvm_id_aa64zfr0(vcpu);
	case SYS_ID_AA64DFR0_EL1:
		return get_pvm_id_aa64dfr0(vcpu);
	case SYS_ID_AA64DFR1_EL1:
		return get_pvm_id_aa64dfr1(vcpu);
	case SYS_ID_AA64AFR0_EL1:
		return get_pvm_id_aa64afr0(vcpu);
	case SYS_ID_AA64AFR1_EL1:
		return get_pvm_id_aa64afr1(vcpu);
	case SYS_ID_AA64ISAR0_EL1:
		return get_pvm_id_aa64isar0(vcpu);
	case SYS_ID_AA64ISAR1_EL1:
		return get_pvm_id_aa64isar1(vcpu);
	case SYS_ID_AA64ISAR2_EL1:
		return get_pvm_id_aa64isar2(vcpu);
	case SYS_ID_AA64MMFR0_EL1:
		return get_pvm_id_aa64mmfr0(vcpu);
	case SYS_ID_AA64MMFR1_EL1:
		return get_pvm_id_aa64mmfr1(vcpu);
	case SYS_ID_AA64MMFR2_EL1:
		return get_pvm_id_aa64mmfr2(vcpu);
	default:
		/* Unhandled ID register, RAZ */
		return 0;
	}
}

static u64 read_id_reg(const struct kvm_vcpu *vcpu,
		       struct sys_reg_desc const *r)
{
	return pvm_read_id_reg(vcpu, reg_to_encoding(r));
}

/* Handler to RAZ/WI sysregs */
static bool pvm_access_raz_wi(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	if (!p->is_write)
		p->regval = 0;

	return true;
}

/*
 * Accessor for AArch32 feature id registers.
 *
 * The value of these registers is "unknown" according to the spec if AArch32
 * isn't supported.
 */
static bool pvm_access_id_aarch32(struct kvm_vcpu *vcpu,
				  struct sys_reg_params *p,
				  const struct sys_reg_desc *r)
{
	if (p->is_write) {
		inject_undef64(vcpu);
		return false;
	}

	/*
	 * No support for AArch32 guests; therefore, pKVM has no sanitized copy
	 * of AArch32 feature id registers.
	 */
	BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL1),
		     PVM_ID_AA64PFR0_ALLOW) > ID_AA64PFR0_EL1_ELx_64BIT_ONLY);

	return pvm_access_raz_wi(vcpu, p, r);
}

/*
 * Accessor for AArch64 feature id registers.
 *
 * If access is allowed, set the regval to the protected VM's view of the
 * register and return true.
 * Otherwise, inject an undefined exception and return false.
 */
static bool pvm_access_id_aarch64(struct kvm_vcpu *vcpu,
				  struct sys_reg_params *p,
				  const struct sys_reg_desc *r)
{
	if (p->is_write) {
		inject_undef64(vcpu);
		return false;
	}

	p->regval = read_id_reg(vcpu, r);
	return true;
}

static bool pvm_access_unallocated(struct kvm_vcpu *vcpu,
				   struct sys_reg_params *p,
				   const struct sys_reg_desc *r)
{
	if (p->is_write) {
		inject_undef64(vcpu);
		return false;
	}

	p->regval = 0;
	return true;
}

static bool pvm_gic_read_sre(struct kvm_vcpu *vcpu,
			     struct sys_reg_params *p,
			     const struct sys_reg_desc *r)
{
	/* pVMs only support GICv3. 'nuf said. */
	if (!p->is_write)
		p->regval = ICC_SRE_EL1_DIB | ICC_SRE_EL1_DFB | ICC_SRE_EL1_SRE;

	return true;
}

/* Mark the specified system register as an AArch32 feature id register. */
#define AARCH32(REG) { SYS_DESC(REG), .access = pvm_access_id_aarch32 }

/* Mark the specified system register as an AArch64 feature id register. */
#define AARCH64(REG) { SYS_DESC(REG), .access = pvm_access_id_aarch64 }

/*
 * sys_reg_desc initialiser for architecturally unallocated cpufeature ID
 * register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2
 * (1 <= crm < 8, 0 <= Op2 < 8).
 */
#define ID_UNALLOCATED(crm, op2) {			\
	Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2),	\
	.access = pvm_access_unallocated,		\
}
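
/*
 * For example, ID_UNALLOCATED(3,3) in pvm_sys_reg_descs below covers the
 * unallocated encoding between MVFR2_EL1 (Op2=2) and ID_PFR2_EL1 (Op2=4):
 * reads are RAZ, writes inject an undefined exception.
 */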

/*
 * sys_reg_desc initialiser for known ID registers that we hide from guests.
 * For now, these are handled just like unallocated ID regs.
 */
#define ID_HIDDEN(REG) {			\
	SYS_DESC(REG),				\
	.access = pvm_access_unallocated,	\
}

/* Mark the specified system register as Read-As-Zero/Write-Ignored */
#define RAZ_WI(REG) { SYS_DESC(REG), .access = pvm_access_raz_wi }

/* Mark the specified system register as not being handled in hyp. */
#define HOST_HANDLED(REG) { SYS_DESC(REG), .access = NULL }

/*
 * Architected system registers.
 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
 *
 * NOTE: Anything not explicitly listed here is *restricted by default*, i.e.,
 * accessing it will inject an exception into the guest.
 */
static const struct sys_reg_desc pvm_sys_reg_descs[] = {
	/* Cache maintenance by set/way operations are restricted. */

	/* Debug and Trace Registers are restricted. */
	RAZ_WI(SYS_DBGBVRn_EL1(0)),
	RAZ_WI(SYS_DBGBCRn_EL1(0)),
	RAZ_WI(SYS_DBGWVRn_EL1(0)),
	RAZ_WI(SYS_DBGWCRn_EL1(0)),
	RAZ_WI(SYS_MDSCR_EL1),
	RAZ_WI(SYS_OSLAR_EL1),
	RAZ_WI(SYS_OSLSR_EL1),
	RAZ_WI(SYS_OSDLR_EL1),

	/* Group 1 ID registers */
	RAZ_WI(SYS_REVIDR_EL1),

	/* AArch64 mappings of the AArch32 ID registers */
	/* CRm=1 */
	AARCH32(SYS_ID_PFR0_EL1),
	AARCH32(SYS_ID_PFR1_EL1),
	AARCH32(SYS_ID_DFR0_EL1),
	AARCH32(SYS_ID_AFR0_EL1),
	AARCH32(SYS_ID_MMFR0_EL1),
	AARCH32(SYS_ID_MMFR1_EL1),
	AARCH32(SYS_ID_MMFR2_EL1),
	AARCH32(SYS_ID_MMFR3_EL1),

	/* CRm=2 */
	AARCH32(SYS_ID_ISAR0_EL1),
	AARCH32(SYS_ID_ISAR1_EL1),
	AARCH32(SYS_ID_ISAR2_EL1),
	AARCH32(SYS_ID_ISAR3_EL1),
	AARCH32(SYS_ID_ISAR4_EL1),
	AARCH32(SYS_ID_ISAR5_EL1),
	AARCH32(SYS_ID_MMFR4_EL1),
	AARCH32(SYS_ID_ISAR6_EL1),

	/* CRm=3 */
	AARCH32(SYS_MVFR0_EL1),
	AARCH32(SYS_MVFR1_EL1),
	AARCH32(SYS_MVFR2_EL1),
	ID_UNALLOCATED(3,3),
	AARCH32(SYS_ID_PFR2_EL1),
	AARCH32(SYS_ID_DFR1_EL1),
	AARCH32(SYS_ID_MMFR5_EL1),
	ID_UNALLOCATED(3,7),

	/* AArch64 ID registers */
	/* CRm=4 */
	AARCH64(SYS_ID_AA64PFR0_EL1),
	AARCH64(SYS_ID_AA64PFR1_EL1),
	ID_UNALLOCATED(4,2),
	ID_UNALLOCATED(4,3),
	AARCH64(SYS_ID_AA64ZFR0_EL1),
	ID_HIDDEN(SYS_ID_AA64SMFR0_EL1),
	ID_UNALLOCATED(4,6),
	ID_UNALLOCATED(4,7),
	AARCH64(SYS_ID_AA64DFR0_EL1),
	AARCH64(SYS_ID_AA64DFR1_EL1),
	ID_UNALLOCATED(5,2),
	ID_UNALLOCATED(5,3),
	ID_HIDDEN(SYS_ID_AA64AFR0_EL1),
	ID_HIDDEN(SYS_ID_AA64AFR1_EL1),
	ID_UNALLOCATED(5,6),
	ID_UNALLOCATED(5,7),
	AARCH64(SYS_ID_AA64ISAR0_EL1),
	AARCH64(SYS_ID_AA64ISAR1_EL1),
	AARCH64(SYS_ID_AA64ISAR2_EL1),
	ID_UNALLOCATED(6,3),
	ID_UNALLOCATED(6,4),
	ID_UNALLOCATED(6,5),
	ID_UNALLOCATED(6,6),
	ID_UNALLOCATED(6,7),
	AARCH64(SYS_ID_AA64MMFR0_EL1),
	AARCH64(SYS_ID_AA64MMFR1_EL1),
	AARCH64(SYS_ID_AA64MMFR2_EL1),
	ID_UNALLOCATED(7,3),
	ID_UNALLOCATED(7,4),
	ID_UNALLOCATED(7,5),
	ID_UNALLOCATED(7,6),
	ID_UNALLOCATED(7,7),

	RAZ_WI(SYS_ERRIDR_EL1),
	RAZ_WI(SYS_ERRSELR_EL1),
	RAZ_WI(SYS_ERXFR_EL1),
	RAZ_WI(SYS_ERXCTLR_EL1),
	RAZ_WI(SYS_ERXSTATUS_EL1),
	RAZ_WI(SYS_ERXADDR_EL1),
	RAZ_WI(SYS_ERXMISC0_EL1),
	RAZ_WI(SYS_ERXMISC1_EL1),

	/* Performance Monitoring Registers are restricted. */

	/* Limited Ordering Regions Registers are restricted. */

	HOST_HANDLED(SYS_ICC_SGI1R_EL1),
	HOST_HANDLED(SYS_ICC_ASGI1R_EL1),
	HOST_HANDLED(SYS_ICC_SGI0R_EL1),
	{ SYS_DESC(SYS_ICC_SRE_EL1), .access = pvm_gic_read_sre, },

	HOST_HANDLED(SYS_CCSIDR_EL1),
	HOST_HANDLED(SYS_CLIDR_EL1),
	HOST_HANDLED(SYS_CSSELR_EL1),
	HOST_HANDLED(SYS_CTR_EL0),

	/* Performance Monitoring Registers are restricted. */

	/* Activity Monitoring Registers are restricted. */

	HOST_HANDLED(SYS_CNTP_TVAL_EL0),
	HOST_HANDLED(SYS_CNTP_CTL_EL0),
	HOST_HANDLED(SYS_CNTP_CVAL_EL0),

	/* Performance Monitoring Registers are restricted. */
};

/* A structure to track reset values for system registers in protected vcpus. */
struct sys_reg_desc_reset {
	/* Index into sys_reg[]. */
	int reg;

	/* Reset function. */
	void (*reset)(struct kvm_vcpu *, const struct sys_reg_desc_reset *);

	/* Reset value. */
	u64 value;
};

static void reset_actlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc_reset *r)
{
	__vcpu_sys_reg(vcpu, r->reg) = read_sysreg(actlr_el1);
}

static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc_reset *r)
{
	__vcpu_sys_reg(vcpu, r->reg) = read_sysreg(amair_el1);
}

static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc_reset *r)
{
	__vcpu_sys_reg(vcpu, r->reg) = calculate_mpidr(vcpu);
}

static void reset_value(struct kvm_vcpu *vcpu, const struct sys_reg_desc_reset *r)
{
	__vcpu_sys_reg(vcpu, r->reg) = r->value;
}

/* Specify the register's reset value. */
#define RESET_VAL(REG, RESET_VAL) { REG, reset_value, RESET_VAL }

/* Specify a function that calculates the register's reset value. */
#define RESET_FUNC(REG, RESET_FUNC) { REG, RESET_FUNC, 0 }

/*
 * Reset values for the architected system registers of protected VMs.
 * Important: Must be sorted ascending by REG (index into sys_reg[]).
 */
static const struct sys_reg_desc_reset pvm_sys_reg_reset_vals[] = {
	RESET_FUNC(MPIDR_EL1, reset_mpidr),
	RESET_VAL(SCTLR_EL1, 0x00C50078),
	RESET_FUNC(ACTLR_EL1, reset_actlr),
	RESET_VAL(CPACR_EL1, 0),
	RESET_VAL(ZCR_EL1, 0),
	RESET_VAL(TCR_EL1, 0),
	RESET_VAL(VBAR_EL1, 0),
	RESET_VAL(CONTEXTIDR_EL1, 0),
	RESET_FUNC(AMAIR_EL1, reset_amair_el1),
	RESET_VAL(CNTKCTL_EL1, 0),
	RESET_VAL(MDSCR_EL1, 0),
	RESET_VAL(MDCCINT_EL1, 0),
	RESET_VAL(DISR_EL1, 0),
	RESET_VAL(PMCCFILTR_EL0, 0),
	RESET_VAL(PMUSERENR_EL0, 0),
};

/*
 * Sets system registers to their reset values.
 *
 * This function walks the reset table and sets the registers on the protected
 * vcpu to their architecturally defined reset values.
 */
void kvm_reset_pvm_sys_regs(struct kvm_vcpu *vcpu)
{
	unsigned long i;

	for (i = 0; i < ARRAY_SIZE(pvm_sys_reg_reset_vals); i++) {
		const struct sys_reg_desc_reset *r = &pvm_sys_reg_reset_vals[i];

		r->reset(vcpu, r);
	}
}

/*
 * Checks that the sysreg tables are unique and in order.
 *
 * Returns 0 if the tables are consistent, or 1 otherwise.
 */
int kvm_check_pvm_sysreg_table(void)
{
	unsigned int i;

	for (i = 1; i < ARRAY_SIZE(pvm_sys_reg_descs); i++) {
		if (cmp_sys_reg(&pvm_sys_reg_descs[i-1], &pvm_sys_reg_descs[i]) >= 0)
			return 1;
	}

	for (i = 1; i < ARRAY_SIZE(pvm_sys_reg_reset_vals); i++) {
		if (pvm_sys_reg_reset_vals[i-1].reg >= pvm_sys_reg_reset_vals[i].reg)
			return 1;
	}

	return 0;
}

/*
 * Handler for protected VM MSR, MRS or System instruction execution.
 *
 * Returns true if the hypervisor has handled the exit, and control should go
 * back to the guest, or false if it hasn't, to be handled by the host.
 */
bool kvm_handle_pvm_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	const struct sys_reg_desc *r;
	struct sys_reg_params params;
	unsigned long esr = kvm_vcpu_get_esr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);

	params = esr_sys64_to_params(esr);
	params.regval = vcpu_get_reg(vcpu, Rt);

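	/*
	 * Look the encoding up in pvm_sys_reg_descs; this relies on the
	 * table being sorted (checked by kvm_check_pvm_sysreg_table()).
	 */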
	r = find_reg(&params, pvm_sys_reg_descs, ARRAY_SIZE(pvm_sys_reg_descs));

	/* Undefined (RESTRICTED). */
	if (r == NULL) {
		inject_undef64(vcpu);
		return true;
	}

	/* Handled by the host (HOST_HANDLED) */
	if (r->access == NULL)
		return false;

	/* Handled by hyp: skip instruction if instructed to do so. */
	if (r->access(vcpu, &params, r))
		__kvm_skip_instr(vcpu);

	if (!params.is_write)
		vcpu_set_reg(vcpu, Rt, params.regval);

	return true;
}

/*
 * Handler for protected VM restricted exceptions.
 *
 * Inject an undefined exception into the guest and return true to indicate that
 * the hypervisor has handled the exit, and control should go back to the guest.
 */
bool kvm_handle_pvm_restricted(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	inject_undef64(vcpu);
	return true;
}