// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/guest.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/nospec.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <kvm/arm_psci.h>
#include <asm/cputype.h>
#include <linux/uaccess.h>
#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_host.h>
#include <asm/sigcontext.h>

#include "trace.h"

#define VM_STAT(x) { #x, offsetof(struct kvm, stat.x), KVM_STAT_VM }
#define VCPU_STAT(x) { #x, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU }

struct kvm_stats_debugfs_item debugfs_entries[] = {
	VCPU_STAT(hvc_exit_stat),
	VCPU_STAT(wfe_exit_stat),
	VCPU_STAT(wfi_exit_stat),
	VCPU_STAT(mmio_exit_user),
	VCPU_STAT(mmio_exit_kernel),
	VCPU_STAT(exits),
	{ NULL }
};

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return 0;
}

static bool core_reg_offset_is_vreg(u64 off)
{
	return off >= KVM_REG_ARM_CORE_REG(fp_regs.vregs) &&
		off < KVM_REG_ARM_CORE_REG(fp_regs.fpsr);
}

static u64 core_reg_offset_from_id(u64 id)
{
	return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
}
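
/*
 * Note (illustrative): a core register ID is built by ORing
 * KVM_REG_ARM64 | KVM_REG_SIZE_* | KVM_REG_ARM_CORE with the register's
 * offset into struct kvm_regs expressed in 32-bit words, so the helper
 * above simply masks the fixed parts away to recover that offset.
 */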

static int core_reg_size_from_offset(const struct kvm_vcpu *vcpu, u64 off)
{
	int size;

	switch (off) {
	case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
	     KVM_REG_ARM_CORE_REG(regs.regs[30]):
	case KVM_REG_ARM_CORE_REG(regs.sp):
	case KVM_REG_ARM_CORE_REG(regs.pc):
	case KVM_REG_ARM_CORE_REG(regs.pstate):
	case KVM_REG_ARM_CORE_REG(sp_el1):
	case KVM_REG_ARM_CORE_REG(elr_el1):
	case KVM_REG_ARM_CORE_REG(spsr[0]) ...
	     KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
		size = sizeof(__u64);
		break;

	case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
	     KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
		size = sizeof(__uint128_t);
		break;

	case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
	case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
		size = sizeof(__u32);
		break;

	default:
		return -EINVAL;
	}

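	/*
	 * "off" counts 32-bit words, so e.g. a 64-bit register must sit
	 * on an even index and a 128-bit V-register on a multiple of
	 * four; size / sizeof(__u32) is exactly that required alignment.
	 */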
	if (!IS_ALIGNED(off, size / sizeof(__u32)))
		return -EINVAL;

	/*
	 * The KVM_REG_ARM64_SVE regs must be used instead of
	 * KVM_REG_ARM_CORE for accessing the FPSIMD V-registers on
	 * SVE-enabled vcpus:
	 */
	if (vcpu_has_sve(vcpu) && core_reg_offset_is_vreg(off))
		return -EINVAL;

	return size;
}

static int validate_core_offset(const struct kvm_vcpu *vcpu,
				const struct kvm_one_reg *reg)
{
	u64 off = core_reg_offset_from_id(reg->id);
	int size = core_reg_size_from_offset(vcpu, off);

	if (size < 0)
		return -EINVAL;

	if (KVM_REG_SIZE(reg->id) != size)
		return -EINVAL;

	return 0;
}

static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/*
	 * Because the kvm_regs structure is a mix of 32, 64 and
	 * 128bit fields, we index it as if it was a 32bit
	 * array. Hence below, nr_regs is the number of entries, and
	 * off the index in the "array".
	 */
	__u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
	struct kvm_regs *regs = vcpu_gp_regs(vcpu);
	int nr_regs = sizeof(*regs) / sizeof(__u32);
	u32 off;

	/* Our ID is an index into the kvm_regs struct. */
	off = core_reg_offset_from_id(reg->id);
	if (off >= nr_regs ||
	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
		return -ENOENT;

	if (validate_core_offset(vcpu, reg))
		return -EINVAL;

	if (copy_to_user(uaddr, ((u32 *)regs) + off, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}
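
/*
 * Userspace-side sketch (illustration only, not part of this file): a
 * core register is read with KVM_GET_ONE_REG using an ID built from
 * KVM_REG_ARM_CORE_REG(), e.g.
 *
 *	__u64 pc;
 *	struct kvm_one_reg r = {
 *		.id   = KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
 *			KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.pc),
 *		.addr = (__u64)&pc,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &r);
 *
 * set_core_reg() below is the KVM_SET_ONE_REG counterpart.
 */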

static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	__u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
	struct kvm_regs *regs = vcpu_gp_regs(vcpu);
	int nr_regs = sizeof(*regs) / sizeof(__u32);
	__uint128_t tmp;
	void *valp = &tmp;
	u64 off;
	int err = 0;

	/* Our ID is an index into the kvm_regs struct. */
	off = core_reg_offset_from_id(reg->id);
	if (off >= nr_regs ||
	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
		return -ENOENT;

	if (validate_core_offset(vcpu, reg))
		return -EINVAL;

	if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(valp, uaddr, KVM_REG_SIZE(reg->id))) {
		err = -EFAULT;
		goto out;
	}

	if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) {
		u64 mode = (*(u64 *)valp) & PSR_AA32_MODE_MASK;
		switch (mode) {
		case PSR_AA32_MODE_USR:
			if (!system_supports_32bit_el0())
				return -EINVAL;
			break;
		case PSR_AA32_MODE_FIQ:
		case PSR_AA32_MODE_IRQ:
		case PSR_AA32_MODE_SVC:
		case PSR_AA32_MODE_ABT:
		case PSR_AA32_MODE_UND:
			if (!vcpu_el1_is_32bit(vcpu))
				return -EINVAL;
			break;
		case PSR_MODE_EL0t:
		case PSR_MODE_EL1t:
		case PSR_MODE_EL1h:
			if (vcpu_el1_is_32bit(vcpu))
				return -EINVAL;
			break;
		default:
			err = -EINVAL;
			goto out;
		}
	}

	memcpy((u32 *)regs + off, valp, KVM_REG_SIZE(reg->id));

	if (*vcpu_cpsr(vcpu) & PSR_MODE32_BIT) {
		int i;

		for (i = 0; i < 16; i++)
			*vcpu_reg32(vcpu, i) = (u32)*vcpu_reg32(vcpu, i);
	}
out:
	return err;
}

#define vq_word(vq) (((vq) - SVE_VQ_MIN) / 64)
#define vq_mask(vq) ((u64)1 << ((vq) - SVE_VQ_MIN) % 64)
#define vq_present(vqs, vq) (!!((vqs)[vq_word(vq)] & vq_mask(vq)))
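
/*
 * The vqs[] array is a bitmap of vector lengths: each candidate vector
 * length, expressed in 128-bit quadwords (vq), maps to bit
 * (vq - SVE_VQ_MIN) % 64 of word vqs[(vq - SVE_VQ_MIN) / 64], which is
 * precisely what the three helpers above compute.
 */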

static int get_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	unsigned int max_vq, vq;
	u64 vqs[KVM_ARM64_SVE_VLS_WORDS];

	if (!vcpu_has_sve(vcpu))
		return -ENOENT;

	if (WARN_ON(!sve_vl_valid(vcpu->arch.sve_max_vl)))
		return -EINVAL;

	memset(vqs, 0, sizeof(vqs));

	max_vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
	for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq)
		if (sve_vq_available(vq))
			vqs[vq_word(vq)] |= vq_mask(vq);

	if (copy_to_user((void __user *)reg->addr, vqs, sizeof(vqs)))
		return -EFAULT;

	return 0;
}

static int set_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	unsigned int max_vq, vq;
	u64 vqs[KVM_ARM64_SVE_VLS_WORDS];

	if (!vcpu_has_sve(vcpu))
		return -ENOENT;

	if (kvm_arm_vcpu_sve_finalized(vcpu))
		return -EPERM; /* too late! */

	if (WARN_ON(vcpu->arch.sve_state))
		return -EINVAL;

	if (copy_from_user(vqs, (const void __user *)reg->addr, sizeof(vqs)))
		return -EFAULT;

	max_vq = 0;
	for (vq = SVE_VQ_MIN; vq <= SVE_VQ_MAX; ++vq)
		if (vq_present(vqs, vq))
			max_vq = vq;

	if (max_vq > sve_vq_from_vl(kvm_sve_max_vl))
		return -EINVAL;

	/*
	 * Vector lengths supported by the host can't currently be
	 * hidden from the guest individually: instead we can only set a
	 * maximum via ZCR_EL2.LEN.  So, make sure the available vector
	 * lengths match the set requested exactly up to the requested
	 * maximum:
	 */
	for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq)
		if (vq_present(vqs, vq) != sve_vq_available(vq))
			return -EINVAL;

	/* Can't run with no vector lengths at all: */
	if (max_vq < SVE_VQ_MIN)
		return -EINVAL;

	/* vcpu->arch.sve_state will be alloc'd by kvm_vcpu_finalize_sve() */
	vcpu->arch.sve_max_vl = sve_vl_from_vq(max_vq);

	return 0;
}
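
/*
 * Ordering note (as described in the KVM API documentation): userspace
 * may set KVM_REG_ARM64_SVE_VLS only before the vcpu's SVE configuration
 * is finalized with KVM_ARM_VCPU_FINALIZE(KVM_ARM_VCPU_SVE); the other
 * SVE registers only become accessible afterwards.
 */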

#define SVE_REG_SLICE_SHIFT	0
#define SVE_REG_SLICE_BITS	5
#define SVE_REG_ID_SHIFT	(SVE_REG_SLICE_SHIFT + SVE_REG_SLICE_BITS)
#define SVE_REG_ID_BITS		5

#define SVE_REG_SLICE_MASK					\
	GENMASK(SVE_REG_SLICE_SHIFT + SVE_REG_SLICE_BITS - 1,	\
		SVE_REG_SLICE_SHIFT)
#define SVE_REG_ID_MASK							\
	GENMASK(SVE_REG_ID_SHIFT + SVE_REG_ID_BITS - 1, SVE_REG_ID_SHIFT)

#define SVE_NUM_SLICES (1 << SVE_REG_SLICE_BITS)
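
/*
 * With the values above, the low bits of an SVE register ID carry the
 * slice index in bits [4:0] and the Z/P register number in bits [9:5],
 * matching the KVM_REG_ARM64_SVE_ZREG()/_PREG() encodings in the UAPI
 * header.
 */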

#define KVM_SVE_ZREG_SIZE KVM_REG_SIZE(KVM_REG_ARM64_SVE_ZREG(0, 0))
#define KVM_SVE_PREG_SIZE KVM_REG_SIZE(KVM_REG_ARM64_SVE_PREG(0, 0))

/*
 * Number of register slices required to cover each whole SVE register.
 * NOTE: Only the first slice ever exists, for now.
 * If you are tempted to modify this, you must also rework sve_reg_to_region()
 * to match:
 */
#define vcpu_sve_slices(vcpu) 1

/* Bounds of a single SVE register slice within vcpu->arch.sve_state */
struct sve_state_reg_region {
	unsigned int koffset;	/* offset into sve_state in kernel memory */
	unsigned int klen;	/* length in kernel memory */
	unsigned int upad;	/* extra trailing padding in user memory */
};

/*
 * Validate SVE register ID and get sanitised bounds for user/kernel SVE
 * register copy
 */
static int sve_reg_to_region(struct sve_state_reg_region *region,
			     struct kvm_vcpu *vcpu,
			     const struct kvm_one_reg *reg)
{
	/* reg ID ranges for Z-registers */
	const u64 zreg_id_min = KVM_REG_ARM64_SVE_ZREG(0, 0);
	const u64 zreg_id_max = KVM_REG_ARM64_SVE_ZREG(SVE_NUM_ZREGS - 1,
						       SVE_NUM_SLICES - 1);

	/* reg ID ranges for P-registers and FFR (which are contiguous) */
	const u64 preg_id_min = KVM_REG_ARM64_SVE_PREG(0, 0);
	const u64 preg_id_max = KVM_REG_ARM64_SVE_FFR(SVE_NUM_SLICES - 1);

	unsigned int vq;
	unsigned int reg_num;

	unsigned int reqoffset, reqlen; /* User-requested offset and length */
	unsigned int maxlen; /* Maximum permitted length */

	size_t sve_state_size;

	const u64 last_preg_id = KVM_REG_ARM64_SVE_PREG(SVE_NUM_PREGS - 1,
							SVE_NUM_SLICES - 1);

	/* Verify that the P-regs and FFR really do have contiguous IDs: */
	BUILD_BUG_ON(KVM_REG_ARM64_SVE_FFR(0) != last_preg_id + 1);

	/* Verify that we match the UAPI header: */
	BUILD_BUG_ON(SVE_NUM_SLICES != KVM_ARM64_SVE_MAX_SLICES);

	reg_num = (reg->id & SVE_REG_ID_MASK) >> SVE_REG_ID_SHIFT;

	if (reg->id >= zreg_id_min && reg->id <= zreg_id_max) {
		if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
			return -ENOENT;

		vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);

		reqoffset = SVE_SIG_ZREG_OFFSET(vq, reg_num) -
				SVE_SIG_REGS_OFFSET;
		reqlen = KVM_SVE_ZREG_SIZE;
		maxlen = SVE_SIG_ZREG_SIZE(vq);
	} else if (reg->id >= preg_id_min && reg->id <= preg_id_max) {
		if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
			return -ENOENT;

		vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);

		reqoffset = SVE_SIG_PREG_OFFSET(vq, reg_num) -
				SVE_SIG_REGS_OFFSET;
		reqlen = KVM_SVE_PREG_SIZE;
		maxlen = SVE_SIG_PREG_SIZE(vq);
	} else {
		return -EINVAL;
	}

	sve_state_size = vcpu_sve_state_size(vcpu);
	if (WARN_ON(!sve_state_size))
		return -EINVAL;

	region->koffset = array_index_nospec(reqoffset, sve_state_size);
	region->klen = min(maxlen, reqlen);
	region->upad = reqlen - region->klen;

	return 0;
}

static int get_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	int ret;
	struct sve_state_reg_region region;
	char __user *uptr = (char __user *)reg->addr;

	/* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
	if (reg->id == KVM_REG_ARM64_SVE_VLS)
		return get_sve_vls(vcpu, reg);

	/* Try to interpret reg ID as an architectural SVE register... */
	ret = sve_reg_to_region(&region, vcpu, reg);
	if (ret)
		return ret;

	if (!kvm_arm_vcpu_sve_finalized(vcpu))
		return -EPERM;

	if (copy_to_user(uptr, vcpu->arch.sve_state + region.koffset,
			 region.klen) ||
	    clear_user(uptr + region.klen, region.upad))
		return -EFAULT;

	return 0;
}

static int set_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	int ret;
	struct sve_state_reg_region region;
	const char __user *uptr = (const char __user *)reg->addr;

	/* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
	if (reg->id == KVM_REG_ARM64_SVE_VLS)
		return set_sve_vls(vcpu, reg);

	/* Try to interpret reg ID as an architectural SVE register... */
	ret = sve_reg_to_region(&region, vcpu, reg);
	if (ret)
		return ret;

	if (!kvm_arm_vcpu_sve_finalized(vcpu))
		return -EPERM;

	if (copy_from_user(vcpu->arch.sve_state + region.koffset, uptr,
			   region.klen))
		return -EFAULT;

	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

static int copy_core_reg_indices(const struct kvm_vcpu *vcpu,
				 u64 __user *uindices)
{
	unsigned int i;
	int n = 0;

	for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) {
		u64 reg = KVM_REG_ARM64 | KVM_REG_ARM_CORE | i;
		int size = core_reg_size_from_offset(vcpu, i);

		if (size < 0)
			continue;

		switch (size) {
		case sizeof(__u32):
			reg |= KVM_REG_SIZE_U32;
			break;

		case sizeof(__u64):
			reg |= KVM_REG_SIZE_U64;
			break;

		case sizeof(__uint128_t):
			reg |= KVM_REG_SIZE_U128;
			break;

		default:
			WARN_ON(1);
			continue;
		}

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}

		n++;
	}

	return n;
}

static unsigned long num_core_regs(const struct kvm_vcpu *vcpu)
{
	return copy_core_reg_indices(vcpu, NULL);
}

/**
 * ARM64 versions of the TIMER registers, always available on arm64
 */

#define NUM_TIMER_REGS 3

static bool is_timer_reg(u64 index)
{
	switch (index) {
	case KVM_REG_ARM_TIMER_CTL:
	case KVM_REG_ARM_TIMER_CNT:
	case KVM_REG_ARM_TIMER_CVAL:
		return true;
	}
	return false;
}

static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	if (put_user(KVM_REG_ARM_TIMER_CTL, uindices))
		return -EFAULT;
	uindices++;
	if (put_user(KVM_REG_ARM_TIMER_CNT, uindices))
		return -EFAULT;
	uindices++;
	if (put_user(KVM_REG_ARM_TIMER_CVAL, uindices))
		return -EFAULT;

	return 0;
}

static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;
	int ret;

	ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id));
	if (ret != 0)
		return -EFAULT;

	return kvm_arm_timer_set_reg(vcpu, reg->id, val);
}

static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;

	val = kvm_arm_timer_get_reg(vcpu, reg->id);
	return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
}

static unsigned long num_sve_regs(const struct kvm_vcpu *vcpu)
{
	const unsigned int slices = vcpu_sve_slices(vcpu);

	if (!vcpu_has_sve(vcpu))
		return 0;

	/* Policed by KVM_GET_REG_LIST: */
	WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu));

	return slices * (SVE_NUM_PREGS + SVE_NUM_ZREGS + 1 /* FFR */)
		+ 1; /* KVM_REG_ARM64_SVE_VLS */
}
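
/*
 * For the single-slice case above this amounts to 32 Z-regs + 16 P-regs
 * + FFR + the KVM_REG_ARM64_SVE_VLS pseudo-register, i.e. 50 register
 * IDs reported per SVE-enabled vcpu.
 */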

static int copy_sve_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	const unsigned int slices = vcpu_sve_slices(vcpu);
	u64 reg;
	unsigned int i, n;
	int num_regs = 0;

	if (!vcpu_has_sve(vcpu))
		return 0;

	/* Policed by KVM_GET_REG_LIST: */
	WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu));

	/*
	 * Enumerate this first, so that userspace can save/restore in
	 * the order reported by KVM_GET_REG_LIST:
	 */
	reg = KVM_REG_ARM64_SVE_VLS;
	if (put_user(reg, uindices++))
		return -EFAULT;
	++num_regs;

	for (i = 0; i < slices; i++) {
		for (n = 0; n < SVE_NUM_ZREGS; n++) {
			reg = KVM_REG_ARM64_SVE_ZREG(n, i);
			if (put_user(reg, uindices++))
				return -EFAULT;
			num_regs++;
		}

		for (n = 0; n < SVE_NUM_PREGS; n++) {
			reg = KVM_REG_ARM64_SVE_PREG(n, i);
			if (put_user(reg, uindices++))
				return -EFAULT;
			num_regs++;
		}

		reg = KVM_REG_ARM64_SVE_FFR(i);
		if (put_user(reg, uindices++))
			return -EFAULT;
		num_regs++;
	}

	return num_regs;
}

/**
 * kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG
 *
 * This is for all registers.
 */
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
{
	unsigned long res = 0;

	res += num_core_regs(vcpu);
	res += num_sve_regs(vcpu);
	res += kvm_arm_num_sys_reg_descs(vcpu);
	res += kvm_arm_get_fw_num_regs(vcpu);
	res += NUM_TIMER_REGS;

	return res;
}

/**
 * kvm_arm_copy_reg_indices - get indices of all registers.
 *
 * We do core registers right here, then we append system regs.
 */
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	int ret;

	ret = copy_core_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_sve_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += kvm_arm_get_fw_num_regs(vcpu);

	ret = copy_timer_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += NUM_TIMER_REGS;

	return kvm_arm_copy_sys_reg_indices(vcpu, uindices);
}
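
/*
 * Together, kvm_arm_num_regs() and kvm_arm_copy_reg_indices() back the
 * KVM_GET_REG_LIST ioctl: userspace first queries the count, then fetches
 * the ID list and feeds individual IDs to KVM_GET_ONE_REG/KVM_SET_ONE_REG
 * via kvm_arm_get_reg()/kvm_arm_set_reg() below.
 */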

int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/* We currently use nothing arch-specific in upper 32 bits */
	if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
		return -EINVAL;

	switch (reg->id & KVM_REG_ARM_COPROC_MASK) {
	case KVM_REG_ARM_CORE:	return get_core_reg(vcpu, reg);
	case KVM_REG_ARM_FW:	return kvm_arm_get_fw_reg(vcpu, reg);
	case KVM_REG_ARM64_SVE:	return get_sve_reg(vcpu, reg);
	}

	if (is_timer_reg(reg->id))
		return get_timer_reg(vcpu, reg);

	return kvm_arm_sys_reg_get_reg(vcpu, reg);
}

int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/* We currently use nothing arch-specific in upper 32 bits */
	if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
		return -EINVAL;

	switch (reg->id & KVM_REG_ARM_COPROC_MASK) {
	case KVM_REG_ARM_CORE:	return set_core_reg(vcpu, reg);
	case KVM_REG_ARM_FW:	return kvm_arm_set_fw_reg(vcpu, reg);
	case KVM_REG_ARM64_SVE:	return set_sve_reg(vcpu, reg);
	}

	if (is_timer_reg(reg->id))
		return set_timer_reg(vcpu, reg);

	return kvm_arm_sys_reg_set_reg(vcpu, reg);
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events)
{
	events->exception.serror_pending = !!(vcpu->arch.hcr_el2 & HCR_VSE);
	events->exception.serror_has_esr = cpus_have_const_cap(ARM64_HAS_RAS_EXTN);

	if (events->exception.serror_pending && events->exception.serror_has_esr)
		events->exception.serror_esr = vcpu_get_vsesr(vcpu);

	return 0;
}
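
/*
 * __kvm_arm_vcpu_get_events() above and __kvm_arm_vcpu_set_events() below
 * implement the arm64 side of KVM_GET_VCPU_EVENTS / KVM_SET_VCPU_EVENTS,
 * which lets userspace save and restore a pending virtual SError (and,
 * with the RAS extension, its syndrome).
 */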

int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events)
{
	bool serror_pending = events->exception.serror_pending;
	bool has_esr = events->exception.serror_has_esr;

	if (serror_pending && has_esr) {
		if (!cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
			return -EINVAL;

		if (!((events->exception.serror_esr) & ~ESR_ELx_ISS_MASK))
			kvm_set_sei_esr(vcpu, events->exception.serror_esr);
		else
			return -EINVAL;
	} else if (serror_pending) {
		kvm_inject_vabt(vcpu);
	}

	return 0;
}

int __attribute_const__ kvm_target_cpu(void)
{
	unsigned long implementor = read_cpuid_implementor();
	unsigned long part_number = read_cpuid_part_number();

	switch (implementor) {
	case ARM_CPU_IMP_ARM:
		switch (part_number) {
		case ARM_CPU_PART_AEM_V8:
			return KVM_ARM_TARGET_AEM_V8;
		case ARM_CPU_PART_FOUNDATION:
			return KVM_ARM_TARGET_FOUNDATION_V8;
		case ARM_CPU_PART_CORTEX_A53:
			return KVM_ARM_TARGET_CORTEX_A53;
		case ARM_CPU_PART_CORTEX_A57:
			return KVM_ARM_TARGET_CORTEX_A57;
		}
		break;
	case ARM_CPU_IMP_APM:
		switch (part_number) {
		case APM_CPU_PART_POTENZA:
			return KVM_ARM_TARGET_XGENE_POTENZA;
		}
		break;
	}

	/* Return a default generic target */
	return KVM_ARM_TARGET_GENERIC_V8;
}

int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
{
	int target = kvm_target_cpu();

	if (target < 0)
		return -ENODEV;

	memset(init, 0, sizeof(*init));

	/*
	 * For now, we don't return any features.
	 * In future, we might use features to return target
	 * specific features available for the preferred
	 * target type.
	 */
	init->target = (__u32)target;

	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL;
}

#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE |    \
			    KVM_GUESTDBG_USE_SW_BP | \
			    KVM_GUESTDBG_USE_HW | \
			    KVM_GUESTDBG_SINGLESTEP)

/**
 * kvm_arch_vcpu_ioctl_set_guest_debug - set up guest debugging
 * @vcpu:	the vCPU pointer
 * @dbg:	the ioctl data buffer
 *
 * This sets up and enables the VM for guest debugging. Userspace
 * passes in a control flag to enable different debug types and
 * potentially other architecture specific information in the rest of
 * the structure.
 */
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int ret = 0;

	trace_kvm_set_guest_debug(vcpu, dbg->control);

	if (dbg->control & ~KVM_GUESTDBG_VALID_MASK) {
		ret = -EINVAL;
		goto out;
	}

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;

		/* Hardware assisted Break and Watch points */
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
			vcpu->arch.external_debug_state = dbg->arch;
		}

	} else {
		/* If not enabled clear all flags */
		vcpu->guest_debug = 0;
	}

out:
	return ret;
}

int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_ARM_VCPU_PMU_V3_CTRL:
		ret = kvm_arm_pmu_v3_set_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_TIMER_CTRL:
		ret = kvm_arm_timer_set_attr(vcpu, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_ARM_VCPU_PMU_V3_CTRL:
		ret = kvm_arm_pmu_v3_get_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_TIMER_CTRL:
		ret = kvm_arm_timer_get_attr(vcpu, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_ARM_VCPU_PMU_V3_CTRL:
		ret = kvm_arm_pmu_v3_has_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_TIMER_CTRL:
		ret = kvm_arm_timer_has_attr(vcpu, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}