// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/guest.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/nospec.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <kvm/arm_hypercalls.h>
#include <asm/cputype.h>
#include <linux/uaccess.h>
#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_nested.h>
#include <asm/sigcontext.h>

#include "trace.h"

const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
	KVM_GENERIC_VM_STATS(),
	STATS_DESC_ICOUNTER(VM, protected_hyp_mem),
	STATS_DESC_ICOUNTER(VM, protected_shared_mem),
	STATS_DESC_ICOUNTER(VM, protected_pgtable_mem),
};

const struct kvm_stats_header kvm_vm_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vm_stats_desc),
};

const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
	KVM_GENERIC_VCPU_STATS(),
	STATS_DESC_COUNTER(VCPU, hvc_exit_stat),
	STATS_DESC_COUNTER(VCPU, wfe_exit_stat),
	STATS_DESC_COUNTER(VCPU, wfi_exit_stat),
	STATS_DESC_COUNTER(VCPU, mmio_exit_user),
	STATS_DESC_COUNTER(VCPU, mmio_exit_kernel),
	STATS_DESC_COUNTER(VCPU, signal_exits),
	STATS_DESC_COUNTER(VCPU, exits)
};

const struct kvm_stats_header kvm_vcpu_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vcpu_stats_desc),
};

static bool core_reg_offset_is_vreg(u64 off)
{
	return off >= KVM_REG_ARM_CORE_REG(fp_regs.vregs) &&
		off < KVM_REG_ARM_CORE_REG(fp_regs.fpsr);
}

static u64 core_reg_offset_from_id(u64 id)
{
	return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
}

static int core_reg_size_from_offset(const struct kvm_vcpu *vcpu, u64 off)
{
	int size;

	switch (off) {
	case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
	     KVM_REG_ARM_CORE_REG(regs.regs[30]):
	case KVM_REG_ARM_CORE_REG(regs.sp):
	case KVM_REG_ARM_CORE_REG(regs.pc):
	case KVM_REG_ARM_CORE_REG(regs.pstate):
	case KVM_REG_ARM_CORE_REG(sp_el1):
	case KVM_REG_ARM_CORE_REG(elr_el1):
	case KVM_REG_ARM_CORE_REG(spsr[0]) ...
	     KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
		size = sizeof(__u64);
		break;

	case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
	     KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
		size = sizeof(__uint128_t);
		break;

	case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
	case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
		size = sizeof(__u32);
		break;

	default:
		return -EINVAL;
	}

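	/*
	 * Core reg offsets are expressed in 32-bit units, so the
	 * natural-alignment check below is done in those units too.
	 */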
	if (!IS_ALIGNED(off, size / sizeof(__u32)))
		return -EINVAL;

	/*
	 * The KVM_REG_ARM64_SVE regs must be used instead of
	 * KVM_REG_ARM_CORE for accessing the FPSIMD V-registers on
	 * SVE-enabled vcpus:
	 */
	if (vcpu_has_sve(vcpu) && core_reg_offset_is_vreg(off))
		return -EINVAL;

	return size;
}

static void *core_reg_addr(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	u64 off = core_reg_offset_from_id(reg->id);
	int size = core_reg_size_from_offset(vcpu, off);

	if (size < 0)
		return NULL;

	if (KVM_REG_SIZE(reg->id) != size)
		return NULL;

	switch (off) {
	case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
	     KVM_REG_ARM_CORE_REG(regs.regs[30]):
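		/* The offset is in 32-bit units; each 64-bit GPR covers two. */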
		off -= KVM_REG_ARM_CORE_REG(regs.regs[0]);
		off /= 2;
		return &vcpu->arch.ctxt.regs.regs[off];

	case KVM_REG_ARM_CORE_REG(regs.sp):
		return &vcpu->arch.ctxt.regs.sp;

	case KVM_REG_ARM_CORE_REG(regs.pc):
		return &vcpu->arch.ctxt.regs.pc;

	case KVM_REG_ARM_CORE_REG(regs.pstate):
		return &vcpu->arch.ctxt.regs.pstate;

	case KVM_REG_ARM_CORE_REG(sp_el1):
		return __ctxt_sys_reg(&vcpu->arch.ctxt, SP_EL1);

	case KVM_REG_ARM_CORE_REG(elr_el1):
		return __ctxt_sys_reg(&vcpu->arch.ctxt, ELR_EL1);

	case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_EL1]):
		return __ctxt_sys_reg(&vcpu->arch.ctxt, SPSR_EL1);

	case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_ABT]):
		return &vcpu->arch.ctxt.spsr_abt;

	case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_UND]):
		return &vcpu->arch.ctxt.spsr_und;

	case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_IRQ]):
		return &vcpu->arch.ctxt.spsr_irq;

	case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_FIQ]):
		return &vcpu->arch.ctxt.spsr_fiq;

	case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
	     KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
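		/* Each 128-bit V-register covers four 32-bit offset units. */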
		off -= KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]);
		off /= 4;
		return &vcpu->arch.ctxt.fp_regs.vregs[off];

	case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
		return &vcpu->arch.ctxt.fp_regs.fpsr;

	case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
		return &vcpu->arch.ctxt.fp_regs.fpcr;

	default:
		return NULL;
	}
}

static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/*
	 * Because the kvm_regs structure is a mix of 32, 64 and
	 * 128bit fields, we index it as if it was a 32bit
	 * array. Hence below, nr_regs is the number of entries, and
	 * off the index in the "array".
	 */
	__u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
	int nr_regs = sizeof(struct kvm_regs) / sizeof(__u32);
	void *addr;
	u32 off;

	/* Our ID is an index into the kvm_regs struct. */
	off = core_reg_offset_from_id(reg->id);
	if (off >= nr_regs ||
	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
		return -ENOENT;

	addr = core_reg_addr(vcpu, reg);
	if (!addr)
		return -EINVAL;

	if (copy_to_user(uaddr, addr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	__u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
	int nr_regs = sizeof(struct kvm_regs) / sizeof(__u32);
	__uint128_t tmp;
	void *valp = &tmp, *addr;
	u64 off;
	int err = 0;

	/* Our ID is an index into the kvm_regs struct. */
	off = core_reg_offset_from_id(reg->id);
	if (off >= nr_regs ||
	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
		return -ENOENT;

	addr = core_reg_addr(vcpu, reg);
	if (!addr)
		return -EINVAL;

	if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(valp, uaddr, KVM_REG_SIZE(reg->id))) {
		err = -EFAULT;
		goto out;
	}

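	/*
	 * PSTATE writes need extra scrutiny: the requested mode must be
	 * one the vcpu can actually run in, given its 32/64-bit and
	 * nested-virt configuration.
	 */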
	if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) {
		u64 mode = (*(u64 *)valp) & PSR_AA32_MODE_MASK;
		switch (mode) {
		case PSR_AA32_MODE_USR:
			if (!kvm_supports_32bit_el0())
				return -EINVAL;
			break;
		case PSR_AA32_MODE_FIQ:
		case PSR_AA32_MODE_IRQ:
		case PSR_AA32_MODE_SVC:
		case PSR_AA32_MODE_ABT:
		case PSR_AA32_MODE_UND:
		case PSR_AA32_MODE_SYS:
			if (!vcpu_el1_is_32bit(vcpu))
				return -EINVAL;
			break;
		case PSR_MODE_EL2h:
		case PSR_MODE_EL2t:
			if (!vcpu_has_nv(vcpu))
				return -EINVAL;
			fallthrough;
		case PSR_MODE_EL0t:
		case PSR_MODE_EL1t:
		case PSR_MODE_EL1h:
			if (vcpu_el1_is_32bit(vcpu))
				return -EINVAL;
			break;
		default:
			err = -EINVAL;
			goto out;
		}
	}

	memcpy(addr, valp, KVM_REG_SIZE(reg->id));

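	/*
	 * If the vcpu is now executing in an AArch32 mode, truncate the
	 * GPRs that are architecturally 32 bits wide so that no stale
	 * AArch64 upper bits survive the write.
	 */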
	if (*vcpu_cpsr(vcpu) & PSR_MODE32_BIT) {
		int i, nr_reg;

		switch (*vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK) {
		/*
		 * Either we are dealing with user mode, and only the
		 * first 15 registers (+ PC) must be narrowed to 32bit.
		 * AArch32 r0-r14 conveniently map to AArch64 x0-x14.
		 */
		case PSR_AA32_MODE_USR:
		case PSR_AA32_MODE_SYS:
			nr_reg = 15;
			break;

		/*
		 * Otherwise, this is a privileged mode, and *all* the
		 * registers must be narrowed to 32bit.
		 */
		default:
			nr_reg = 31;
			break;
		}

		for (i = 0; i < nr_reg; i++)
			vcpu_set_reg(vcpu, i, (u32)vcpu_get_reg(vcpu, i));

		*vcpu_pc(vcpu) = (u32)*vcpu_pc(vcpu);
	}
out:
	return err;
}

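/*
 * Each bit n of the vqs[] bitmap stands for vector quadword length
 * SVE_VQ_MIN + n; vq_word()/vq_mask() locate that bit within the array.
 */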
#define vq_word(vq) (((vq) - SVE_VQ_MIN) / 64)
#define vq_mask(vq) ((u64)1 << ((vq) - SVE_VQ_MIN) % 64)
#define vq_present(vqs, vq) (!!((vqs)[vq_word(vq)] & vq_mask(vq)))

static int get_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	unsigned int max_vq, vq;
	u64 vqs[KVM_ARM64_SVE_VLS_WORDS];

	if (!vcpu_has_sve(vcpu))
		return -ENOENT;

	if (WARN_ON(!sve_vl_valid(vcpu->arch.sve_max_vl)))
		return -EINVAL;

	memset(vqs, 0, sizeof(vqs));

	max_vq = vcpu_sve_max_vq(vcpu);
	for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq)
		if (sve_vq_available(vq))
			vqs[vq_word(vq)] |= vq_mask(vq);

	if (copy_to_user((void __user *)reg->addr, vqs, sizeof(vqs)))
		return -EFAULT;

	return 0;
}

static int set_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	unsigned int max_vq, vq;
	u64 vqs[KVM_ARM64_SVE_VLS_WORDS];

	if (!vcpu_has_sve(vcpu))
		return -ENOENT;

	if (kvm_arm_vcpu_sve_finalized(vcpu))
		return -EPERM; /* too late! */

	if (WARN_ON(vcpu->arch.sve_state))
		return -EINVAL;

	if (copy_from_user(vqs, (const void __user *)reg->addr, sizeof(vqs)))
		return -EFAULT;

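	/* Find the largest vector length userspace has asked for. */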
	max_vq = 0;
	for (vq = SVE_VQ_MIN; vq <= SVE_VQ_MAX; ++vq)
		if (vq_present(vqs, vq))
			max_vq = vq;

	if (max_vq > sve_vq_from_vl(kvm_sve_max_vl))
		return -EINVAL;

	/*
	 * Vector lengths supported by the host can't currently be
	 * hidden from the guest individually: instead we can only set a
	 * maximum via ZCR_EL2.LEN. So, make sure the available vector
	 * lengths match the set requested exactly up to the requested
	 * maximum:
	 */
	for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq)
		if (vq_present(vqs, vq) != sve_vq_available(vq))
			return -EINVAL;

	/* Can't run with no vector lengths at all: */
	if (max_vq < SVE_VQ_MIN)
		return -EINVAL;

	/* vcpu->arch.sve_state will be alloc'd by kvm_vcpu_finalize_sve() */
	vcpu->arch.sve_max_vl = sve_vl_from_vq(max_vq);

	return 0;
}

#define SVE_REG_SLICE_SHIFT	0
#define SVE_REG_SLICE_BITS	5
#define SVE_REG_ID_SHIFT	(SVE_REG_SLICE_SHIFT + SVE_REG_SLICE_BITS)
#define SVE_REG_ID_BITS		5

#define SVE_REG_SLICE_MASK					\
	GENMASK(SVE_REG_SLICE_SHIFT + SVE_REG_SLICE_BITS - 1,	\
		SVE_REG_SLICE_SHIFT)
#define SVE_REG_ID_MASK							\
	GENMASK(SVE_REG_ID_SHIFT + SVE_REG_ID_BITS - 1, SVE_REG_ID_SHIFT)

#define SVE_NUM_SLICES (1 << SVE_REG_SLICE_BITS)

#define KVM_SVE_ZREG_SIZE KVM_REG_SIZE(KVM_REG_ARM64_SVE_ZREG(0, 0))
#define KVM_SVE_PREG_SIZE KVM_REG_SIZE(KVM_REG_ARM64_SVE_PREG(0, 0))

/*
 * Number of register slices required to cover each whole SVE register.
 * NOTE: Only the first slice ever exists, for now.
 * If you are tempted to modify this, you must also rework sve_reg_to_region()
 * to match:
 */
#define vcpu_sve_slices(vcpu) 1

/* Bounds of a single SVE register slice within vcpu->arch.sve_state */
struct sve_state_reg_region {
	unsigned int koffset;	/* offset into sve_state in kernel memory */
	unsigned int klen;	/* length in kernel memory */
	unsigned int upad;	/* extra trailing padding in user memory */
};

/*
 * Validate SVE register ID and get sanitised bounds for user/kernel SVE
 * register copy
 */
static int sve_reg_to_region(struct sve_state_reg_region *region,
			     struct kvm_vcpu *vcpu,
			     const struct kvm_one_reg *reg)
{
	/* reg ID ranges for Z- registers */
	const u64 zreg_id_min = KVM_REG_ARM64_SVE_ZREG(0, 0);
	const u64 zreg_id_max = KVM_REG_ARM64_SVE_ZREG(SVE_NUM_ZREGS - 1,
						       SVE_NUM_SLICES - 1);

	/* reg ID ranges for P- registers and FFR (which are contiguous) */
	const u64 preg_id_min = KVM_REG_ARM64_SVE_PREG(0, 0);
	const u64 preg_id_max = KVM_REG_ARM64_SVE_FFR(SVE_NUM_SLICES - 1);

	unsigned int vq;
	unsigned int reg_num;

	unsigned int reqoffset, reqlen; /* User-requested offset and length */
	unsigned int maxlen; /* Maximum permitted length */

	size_t sve_state_size;

	const u64 last_preg_id = KVM_REG_ARM64_SVE_PREG(SVE_NUM_PREGS - 1,
						       SVE_NUM_SLICES - 1);

	/* Verify that the P-regs and FFR really do have contiguous IDs: */
	BUILD_BUG_ON(KVM_REG_ARM64_SVE_FFR(0) != last_preg_id + 1);

	/* Verify that we match the UAPI header: */
	BUILD_BUG_ON(SVE_NUM_SLICES != KVM_ARM64_SVE_MAX_SLICES);

	reg_num = (reg->id & SVE_REG_ID_MASK) >> SVE_REG_ID_SHIFT;

	if (reg->id >= zreg_id_min && reg->id <= zreg_id_max) {
		if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
			return -ENOENT;

		vq = vcpu_sve_max_vq(vcpu);

		reqoffset = SVE_SIG_ZREG_OFFSET(vq, reg_num) -
				SVE_SIG_REGS_OFFSET;
		reqlen = KVM_SVE_ZREG_SIZE;
		maxlen = SVE_SIG_ZREG_SIZE(vq);
	} else if (reg->id >= preg_id_min && reg->id <= preg_id_max) {
		if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
			return -ENOENT;

		vq = vcpu_sve_max_vq(vcpu);

		reqoffset = SVE_SIG_PREG_OFFSET(vq, reg_num) -
				SVE_SIG_REGS_OFFSET;
		reqlen = KVM_SVE_PREG_SIZE;
		maxlen = SVE_SIG_PREG_SIZE(vq);
	} else {
		return -EINVAL;
	}

	sve_state_size = vcpu_sve_state_size(vcpu);
	if (WARN_ON(!sve_state_size))
		return -EINVAL;

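	/*
	 * koffset is bounded with array_index_nospec() to avoid speculative
	 * out-of-bounds accesses into sve_state; the copy length is limited
	 * to what the vcpu's vector length actually stores, and the
	 * remainder is reported back as user-side padding.
	 */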
	region->koffset = array_index_nospec(reqoffset, sve_state_size);
	region->klen = min(maxlen, reqlen);
	region->upad = reqlen - region->klen;

	return 0;
}

static int get_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	int ret;
	struct sve_state_reg_region region;
	char __user *uptr = (char __user *)reg->addr;

	/* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
	if (reg->id == KVM_REG_ARM64_SVE_VLS)
		return get_sve_vls(vcpu, reg);

	/* Try to interpret reg ID as an architectural SVE register... */
	ret = sve_reg_to_region(&region, vcpu, reg);
	if (ret)
		return ret;

	if (!kvm_arm_vcpu_sve_finalized(vcpu))
		return -EPERM;

	if (copy_to_user(uptr, vcpu->arch.sve_state + region.koffset,
			 region.klen) ||
	    clear_user(uptr + region.klen, region.upad))
		return -EFAULT;

	return 0;
}

static int set_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	int ret;
	struct sve_state_reg_region region;
	const char __user *uptr = (const char __user *)reg->addr;

	/* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
	if (reg->id == KVM_REG_ARM64_SVE_VLS)
		return set_sve_vls(vcpu, reg);

	/* Try to interpret reg ID as an architectural SVE register... */
	ret = sve_reg_to_region(&region, vcpu, reg);
	if (ret)
		return ret;

	if (!kvm_arm_vcpu_sve_finalized(vcpu))
		return -EPERM;

	if (copy_from_user(vcpu->arch.sve_state + region.koffset, uptr,
			   region.klen))
		return -EFAULT;

	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

static int copy_core_reg_indices(const struct kvm_vcpu *vcpu,
				 u64 __user *uindices)
{
	unsigned int i;
	int n = 0;

	for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) {
		u64 reg = KVM_REG_ARM64 | KVM_REG_ARM_CORE | i;
		int size = core_reg_size_from_offset(vcpu, i);

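		/*
		 * A negative size means this offset is not a register that
		 * is exposed for this vcpu (e.g. the FPSIMD V-regs on an
		 * SVE-enabled vcpu), so simply skip it.
		 */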
		if (size < 0)
			continue;

		switch (size) {
		case sizeof(__u32):
			reg |= KVM_REG_SIZE_U32;
			break;

		case sizeof(__u64):
			reg |= KVM_REG_SIZE_U64;
			break;

		case sizeof(__uint128_t):
			reg |= KVM_REG_SIZE_U128;
			break;

		default:
			WARN_ON(1);
			continue;
		}

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}

		n++;
	}

	return n;
}

static unsigned long num_core_regs(const struct kvm_vcpu *vcpu)
{
	return copy_core_reg_indices(vcpu, NULL);
}

static const u64 timer_reg_list[] = {
	KVM_REG_ARM_TIMER_CTL,
	KVM_REG_ARM_TIMER_CNT,
	KVM_REG_ARM_TIMER_CVAL,
	KVM_REG_ARM_PTIMER_CTL,
	KVM_REG_ARM_PTIMER_CNT,
	KVM_REG_ARM_PTIMER_CVAL,
};

#define NUM_TIMER_REGS ARRAY_SIZE(timer_reg_list)

static bool is_timer_reg(u64 index)
{
	switch (index) {
	case KVM_REG_ARM_TIMER_CTL:
	case KVM_REG_ARM_TIMER_CNT:
	case KVM_REG_ARM_TIMER_CVAL:
	case KVM_REG_ARM_PTIMER_CTL:
	case KVM_REG_ARM_PTIMER_CNT:
	case KVM_REG_ARM_PTIMER_CVAL:
		return true;
	}
	return false;
}

static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	for (int i = 0; i < NUM_TIMER_REGS; i++) {
		if (put_user(timer_reg_list[i], uindices))
			return -EFAULT;
		uindices++;
	}

	return 0;
}

static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;
	int ret;

	ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id));
	if (ret != 0)
		return -EFAULT;

	return kvm_arm_timer_set_reg(vcpu, reg->id, val);
}

static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;

	val = kvm_arm_timer_get_reg(vcpu, reg->id);
	return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
}

static unsigned long num_sve_regs(const struct kvm_vcpu *vcpu)
{
	const unsigned int slices = vcpu_sve_slices(vcpu);

	if (!vcpu_has_sve(vcpu))
		return 0;

	/* Policed by KVM_GET_REG_LIST: */
	WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu));

	return slices * (SVE_NUM_PREGS + SVE_NUM_ZREGS + 1 /* FFR */)
		+ 1; /* KVM_REG_ARM64_SVE_VLS */
}

static int copy_sve_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	const unsigned int slices = vcpu_sve_slices(vcpu);
	u64 reg;
	unsigned int i, n;
	int num_regs = 0;

	if (!vcpu_has_sve(vcpu))
		return 0;

	/* Policed by KVM_GET_REG_LIST: */
	WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu));

	/*
	 * Enumerate this first, so that userspace can save/restore in
	 * the order reported by KVM_GET_REG_LIST:
	 */
	reg = KVM_REG_ARM64_SVE_VLS;
	if (put_user(reg, uindices++))
		return -EFAULT;
	++num_regs;

	for (i = 0; i < slices; i++) {
		for (n = 0; n < SVE_NUM_ZREGS; n++) {
			reg = KVM_REG_ARM64_SVE_ZREG(n, i);
			if (put_user(reg, uindices++))
				return -EFAULT;
			num_regs++;
		}

		for (n = 0; n < SVE_NUM_PREGS; n++) {
			reg = KVM_REG_ARM64_SVE_PREG(n, i);
			if (put_user(reg, uindices++))
				return -EFAULT;
			num_regs++;
		}

		reg = KVM_REG_ARM64_SVE_FFR(i);
		if (put_user(reg, uindices++))
			return -EFAULT;
		num_regs++;
	}

	return num_regs;
}

/**
 * kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG
 * @vcpu: the vCPU pointer
 *
 * This is for all registers.
 */
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
{
	unsigned long res = 0;

	res += num_core_regs(vcpu);
	res += num_sve_regs(vcpu);
	res += kvm_arm_num_sys_reg_descs(vcpu);
	res += kvm_arm_get_fw_num_regs(vcpu);
	res += NUM_TIMER_REGS;

	return res;
}

/**
 * kvm_arm_copy_reg_indices - get indices of all registers.
 * @vcpu: the vCPU pointer
 * @uindices: userspace array to receive the register indices
 *
 * We do core registers right here, then append the SVE, firmware, timer
 * and system registers.
 */
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	int ret;

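	/*
	 * The core and SVE helpers return how many indices they wrote;
	 * the firmware and timer counts are known up front, so uindices
	 * is advanced by those fixed amounts instead.
	 */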
	ret = copy_core_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_sve_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += kvm_arm_get_fw_num_regs(vcpu);

	ret = copy_timer_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += NUM_TIMER_REGS;

	return kvm_arm_copy_sys_reg_indices(vcpu, uindices);
}

int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/* We currently use nothing arch-specific in upper 32 bits */
	if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
		return -EINVAL;

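	/*
	 * Demultiplex on the coproc field: core, firmware/hypercall and
	 * SVE registers have dedicated accessors; anything else is tried
	 * as a timer register and finally as a system register.
	 */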
	switch (reg->id & KVM_REG_ARM_COPROC_MASK) {
	case KVM_REG_ARM_CORE:	return get_core_reg(vcpu, reg);
	case KVM_REG_ARM_FW:
	case KVM_REG_ARM_FW_FEAT_BMAP:
		return kvm_arm_get_fw_reg(vcpu, reg);
	case KVM_REG_ARM64_SVE:	return get_sve_reg(vcpu, reg);
	}

	if (is_timer_reg(reg->id))
		return get_timer_reg(vcpu, reg);

	return kvm_arm_sys_reg_get_reg(vcpu, reg);
}

int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/* We currently use nothing arch-specific in upper 32 bits */
	if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
		return -EINVAL;

	switch (reg->id & KVM_REG_ARM_COPROC_MASK) {
	case KVM_REG_ARM_CORE:	return set_core_reg(vcpu, reg);
	case KVM_REG_ARM_FW:
	case KVM_REG_ARM_FW_FEAT_BMAP:
		return kvm_arm_set_fw_reg(vcpu, reg);
	case KVM_REG_ARM64_SVE:	return set_sve_reg(vcpu, reg);
	}

	if (is_timer_reg(reg->id))
		return set_timer_reg(vcpu, reg);

	return kvm_arm_sys_reg_set_reg(vcpu, reg);
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events)
{
	events->exception.serror_pending = !!(vcpu->arch.hcr_el2 & HCR_VSE);
	events->exception.serror_has_esr = cpus_have_const_cap(ARM64_HAS_RAS_EXTN);

	if (events->exception.serror_pending && events->exception.serror_has_esr)
		events->exception.serror_esr = vcpu_get_vsesr(vcpu);

	/*
	 * We never return a pending ext_dabt here because we deliver it to
	 * the virtual CPU directly when setting the event and it's no longer
	 * 'pending' at this point.
	 */

	return 0;
}

int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events)
{
	bool serror_pending = events->exception.serror_pending;
	bool has_esr = events->exception.serror_has_esr;
	bool ext_dabt_pending = events->exception.ext_dabt_pending;

	if (serror_pending && has_esr) {
		if (!cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
			return -EINVAL;

		if (!((events->exception.serror_esr) & ~ESR_ELx_ISS_MASK))
			kvm_set_sei_esr(vcpu, events->exception.serror_esr);
		else
			return -EINVAL;
	} else if (serror_pending) {
		kvm_inject_vabt(vcpu);
	}

	if (ext_dabt_pending)
		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));

	return 0;
}

u32 __attribute_const__ kvm_target_cpu(void)
{
	unsigned long implementor = read_cpuid_implementor();
	unsigned long part_number = read_cpuid_part_number();

	switch (implementor) {
	case ARM_CPU_IMP_ARM:
		switch (part_number) {
		case ARM_CPU_PART_AEM_V8:
			return KVM_ARM_TARGET_AEM_V8;
		case ARM_CPU_PART_FOUNDATION:
			return KVM_ARM_TARGET_FOUNDATION_V8;
		case ARM_CPU_PART_CORTEX_A53:
			return KVM_ARM_TARGET_CORTEX_A53;
		case ARM_CPU_PART_CORTEX_A57:
			return KVM_ARM_TARGET_CORTEX_A57;
		}
		break;
	case ARM_CPU_IMP_APM:
		switch (part_number) {
		case APM_CPU_PART_XGENE:
			return KVM_ARM_TARGET_XGENE_POTENZA;
		}
		break;
	}

	/* Return a default generic target */
	return KVM_ARM_TARGET_GENERIC_V8;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL;
}

/**
 * kvm_arch_vcpu_ioctl_set_guest_debug - set up guest debugging
 * @vcpu: the vCPU pointer
 * @dbg: the ioctl data buffer
 *
 * This sets up and enables the VM for guest debugging. Userspace
 * passes in a control flag to enable different debug types and
 * potentially other architecture specific information in the rest of
 * the structure.
 */
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int ret = 0;

	trace_kvm_set_guest_debug(vcpu, dbg->control);

	if (dbg->control & ~KVM_GUESTDBG_VALID_MASK) {
		ret = -EINVAL;
		goto out;
	}

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;

		/* Hardware assisted Break and Watch points */
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
			vcpu->arch.external_debug_state = dbg->arch;
		}

	} else {
		/* If not enabled clear all flags */
		vcpu->guest_debug = 0;
		vcpu_clear_flag(vcpu, DBG_SS_ACTIVE_PENDING);
	}

out:
	return ret;
}

int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_ARM_VCPU_PMU_V3_CTRL:
		mutex_lock(&vcpu->kvm->arch.config_lock);
		ret = kvm_arm_pmu_v3_set_attr(vcpu, attr);
		mutex_unlock(&vcpu->kvm->arch.config_lock);
		break;
	case KVM_ARM_VCPU_TIMER_CTRL:
		ret = kvm_arm_timer_set_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_PVTIME_CTRL:
		ret = kvm_arm_pvtime_set_attr(vcpu, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_ARM_VCPU_PMU_V3_CTRL:
		ret = kvm_arm_pmu_v3_get_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_TIMER_CTRL:
		ret = kvm_arm_timer_get_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_PVTIME_CTRL:
		ret = kvm_arm_pvtime_get_attr(vcpu, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_ARM_VCPU_PMU_V3_CTRL:
		ret = kvm_arm_pmu_v3_has_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_TIMER_CTRL:
		ret = kvm_arm_timer_has_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_PVTIME_CTRL:
		ret = kvm_arm_pvtime_has_attr(vcpu, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

int kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
			       struct kvm_arm_copy_mte_tags *copy_tags)
{
	gpa_t guest_ipa = copy_tags->guest_ipa;
	size_t length = copy_tags->length;
	void __user *tags = copy_tags->addr;
	gpa_t gfn;
	bool write = !(copy_tags->flags & KVM_ARM_TAGS_FROM_GUEST);
	int ret = 0;

	if (!kvm_has_mte(kvm))
		return -EINVAL;

	if (copy_tags->reserved[0] || copy_tags->reserved[1])
		return -EINVAL;

	if (copy_tags->flags & ~KVM_ARM_TAGS_FROM_GUEST)
		return -EINVAL;

	if (length & ~PAGE_MASK || guest_ipa & ~PAGE_MASK)
		return -EINVAL;

	/* Lengths above INT_MAX cannot be represented in the return value */
	if (length > INT_MAX)
		return -EINVAL;

	gfn = gpa_to_gfn(guest_ipa);

	mutex_lock(&kvm->slots_lock);

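	/*
	 * Walk the guest IPA range one page at a time, copying the MTE
	 * tags for every granule in each page to or from the userspace
	 * buffer, depending on the requested direction.
	 */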
	while (length > 0) {
		kvm_pfn_t pfn = gfn_to_pfn_prot(kvm, gfn, write, NULL);
		void *maddr;
		unsigned long num_tags;
		struct page *page;

		if (is_error_noslot_pfn(pfn)) {
			ret = -EFAULT;
			goto out;
		}

		page = pfn_to_online_page(pfn);
		if (!page) {
			/* Reject ZONE_DEVICE memory */
			ret = -EFAULT;
			goto out;
		}
		maddr = page_address(page);

		if (!write) {
			if (page_mte_tagged(page))
				num_tags = mte_copy_tags_to_user(tags, maddr,
							MTE_GRANULES_PER_PAGE);
			else
				/* No tags in memory, so write zeros */
				num_tags = MTE_GRANULES_PER_PAGE -
					clear_user(tags, MTE_GRANULES_PER_PAGE);
			kvm_release_pfn_clean(pfn);
		} else {
			/*
			 * Only locking to serialise with a concurrent
			 * __set_ptes() in the VMM but still overriding the
			 * tags, hence ignoring the return value.
			 */
			try_page_mte_tagging(page);
			num_tags = mte_copy_tags_from_user(maddr, tags,
							MTE_GRANULES_PER_PAGE);

			/* uaccess failed, don't leave stale tags */
			if (num_tags != MTE_GRANULES_PER_PAGE)
				mte_clear_page_tags(maddr);
			set_page_mte_tagged(page);

			kvm_release_pfn_dirty(pfn);
		}

		if (num_tags != MTE_GRANULES_PER_PAGE) {
			ret = -EFAULT;
			goto out;
		}

		gfn++;
		tags += num_tags;
		length -= PAGE_SIZE;
	}

out:
	mutex_unlock(&kvm->slots_lock);
	/* If some data has been copied report the number of bytes copied */
	if (length != copy_tags->length)
		return copy_tags->length - length;
	return ret;
}