// SPDX-License-Identifier: GPL-2.0-only
/*
 * VGIC: KVM DEVICE API
 *
 * Copyright (C) 2015 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */
#include <linux/kvm_host.h>
#include <kvm/arm_vgic.h>
#include <linux/uaccess.h>
#include <asm/kvm_mmu.h>
#include <asm/cputype.h>
#include "vgic.h"

/* common helpers */

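/**
 * vgic_check_ioaddr - sanity check a candidate guest MMIO base address
 * @kvm:       kvm struct pointer
 * @ioaddr:    the currently assigned base address (VGIC_ADDR_UNDEF if unset)
 * @addr:      the candidate base address
 * @alignment: the required alignment of @addr
 *
 * Returns 0 on success, -E2BIG if @addr lies outside the guest's physical
 * address space, -EINVAL if it is misaligned, or -EEXIST if the region has
 * already been set.
 */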
int vgic_check_ioaddr(struct kvm *kvm, phys_addr_t *ioaddr,
                      phys_addr_t addr, phys_addr_t alignment)
{
        if (addr & ~kvm_phys_mask(kvm))
                return -E2BIG;

        if (!IS_ALIGNED(addr, alignment))
                return -EINVAL;

        if (!IS_VGIC_ADDR_UNDEF(*ioaddr))
                return -EEXIST;

        return 0;
}

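/* Check that the VM's GIC model matches the one the attribute targets. */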
static int vgic_check_type(struct kvm *kvm, int type_needed)
{
        if (kvm->arch.vgic.vgic_model != type_needed)
                return -ENODEV;
        else
                return 0;
}

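/*
 * Userspace drives kvm_vgic_addr() below through the KVM device API.
 * A minimal sketch of setting the GICv2 distributor base from userspace
 * (the device fd and the base address are made up for illustration;
 * error handling omitted):
 *
 *	__u64 dist_addr = 0x8000000;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_ARM_VGIC_GRP_ADDR,
 *		.attr  = KVM_VGIC_V2_ADDR_TYPE_DIST,
 *		.addr  = (__u64)&dist_addr,
 *	};
 *
 *	ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &attr);
 */
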
/**
 * kvm_vgic_addr - set or get vgic VM base addresses
 * @kvm:   pointer to the vm struct
 * @type:  the VGIC addr type, one of KVM_VGIC_V[23]_ADDR_TYPE_XXX
 * @addr:  pointer to address value
 * @write: if true set the address in the VM address space, if false read the
 *         address
 *
 * Set or get the vgic base addresses for the distributor and the virtual CPU
 * interface in the VM physical address space. These addresses are properties
 * of the emulated core/SoC and therefore user space initially knows this
 * information.
 * Check them for sanity (alignment, double assignment). We can't check for
 * overlapping regions in case of a virtual GICv3 here, since we don't know
 * the number of VCPUs yet, so we defer this check to map_resources().
 */
int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
{
        int r = 0;
        struct vgic_dist *vgic = &kvm->arch.vgic;
        phys_addr_t *addr_ptr, alignment;
        u64 undef_value = VGIC_ADDR_UNDEF;

        mutex_lock(&kvm->lock);
        switch (type) {
        case KVM_VGIC_V2_ADDR_TYPE_DIST:
                r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
                addr_ptr = &vgic->vgic_dist_base;
                alignment = SZ_4K;
                break;
        case KVM_VGIC_V2_ADDR_TYPE_CPU:
                r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
                addr_ptr = &vgic->vgic_cpu_base;
                alignment = SZ_4K;
                break;
        case KVM_VGIC_V3_ADDR_TYPE_DIST:
                r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V3);
                addr_ptr = &vgic->vgic_dist_base;
                alignment = SZ_64K;
                break;
        case KVM_VGIC_V3_ADDR_TYPE_REDIST: {
                struct vgic_redist_region *rdreg;

                r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V3);
                if (r)
                        break;
                if (write) {
                        r = vgic_v3_set_redist_base(kvm, 0, *addr, 0);
                        goto out;
                }
                rdreg = list_first_entry_or_null(&vgic->rd_regions,
                                                 struct vgic_redist_region, list);
                if (!rdreg)
                        addr_ptr = &undef_value;
                else
                        addr_ptr = &rdreg->base;
                break;
        }
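        /*
         * The 64-bit attribute value packs, per the KVM_VGIC_V3_RDIST_*
         * masks: the region index in bits [11:0], flags in bits [15:12]
         * (must currently be zero), the 64K-aligned base address in bits
         * [51:16] and the redistributor count in bits [63:52].
         */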
        case KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION: {
                struct vgic_redist_region *rdreg;
                u8 index;

                r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V3);
                if (r)
                        break;

                index = *addr & KVM_VGIC_V3_RDIST_INDEX_MASK;

                if (write) {
                        gpa_t base = *addr & KVM_VGIC_V3_RDIST_BASE_MASK;
                        u32 count = (*addr & KVM_VGIC_V3_RDIST_COUNT_MASK)
                                        >> KVM_VGIC_V3_RDIST_COUNT_SHIFT;
                        u8 flags = (*addr & KVM_VGIC_V3_RDIST_FLAGS_MASK)
                                        >> KVM_VGIC_V3_RDIST_FLAGS_SHIFT;

                        if (!count || flags)
                                r = -EINVAL;
                        else
                                r = vgic_v3_set_redist_base(kvm, index,
                                                            base, count);
                        goto out;
                }

                rdreg = vgic_v3_rdist_region_from_index(kvm, index);
                if (!rdreg) {
                        r = -ENOENT;
                        goto out;
                }

                *addr = index;
                *addr |= rdreg->base;
                *addr |= (u64)rdreg->count << KVM_VGIC_V3_RDIST_COUNT_SHIFT;
                goto out;
        }
        default:
                r = -ENODEV;
        }

        if (r)
                goto out;

        if (write) {
                r = vgic_check_ioaddr(kvm, addr_ptr, *addr, alignment);
                if (!r)
                        *addr_ptr = *addr;
        } else {
                *addr = *addr_ptr;
        }

out:
        mutex_unlock(&kvm->lock);
        return r;
}

static int vgic_set_common_attr(struct kvm_device *dev,
                                struct kvm_device_attr *attr)
{
        int r;

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_ADDR: {
                u64 __user *uaddr = (u64 __user *)(long)attr->addr;
                u64 addr;
                unsigned long type = (unsigned long)attr->attr;

                if (copy_from_user(&addr, uaddr, sizeof(addr)))
                        return -EFAULT;

                r = kvm_vgic_addr(dev->kvm, type, &addr, true);
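                /*
                 * kvm_vgic_addr() reports an unknown type or a GIC model
                 * mismatch with -ENODEV, but the device attribute API
                 * expects -ENXIO for an unsupported attribute.
                 */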
                return (r == -ENODEV) ? -ENXIO : r;
        }
        case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
                u32 __user *uaddr = (u32 __user *)(long)attr->addr;
                u32 val;
                int ret = 0;

                if (get_user(val, uaddr))
                        return -EFAULT;

                /*
                 * We require:
                 * - at least 32 SPIs on top of the 16 SGIs and 16 PPIs
                 * - at most 1024 interrupts
                 * - a multiple of 32 interrupts
                 */
                if (val < (VGIC_NR_PRIVATE_IRQS + 32) ||
                    val > VGIC_MAX_RESERVED ||
                    (val & 31))
                        return -EINVAL;

                mutex_lock(&dev->kvm->lock);

                if (vgic_ready(dev->kvm) || dev->kvm->arch.vgic.nr_spis)
                        ret = -EBUSY;
                else
                        dev->kvm->arch.vgic.nr_spis =
                                val - VGIC_NR_PRIVATE_IRQS;

                mutex_unlock(&dev->kvm->lock);

                return ret;
        }
        case KVM_DEV_ARM_VGIC_GRP_CTRL: {
                switch (attr->attr) {
                case KVM_DEV_ARM_VGIC_CTRL_INIT:
                        mutex_lock(&dev->kvm->lock);
                        r = vgic_init(dev->kvm);
                        mutex_unlock(&dev->kvm->lock);
                        return r;
                }
                break;
        }
        }

        return -ENXIO;
}

static int vgic_get_common_attr(struct kvm_device *dev,
                                struct kvm_device_attr *attr)
{
        int r = -ENXIO;

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_ADDR: {
                u64 __user *uaddr = (u64 __user *)(long)attr->addr;
                u64 addr;
                unsigned long type = (unsigned long)attr->attr;

                if (copy_from_user(&addr, uaddr, sizeof(addr)))
                        return -EFAULT;

                r = kvm_vgic_addr(dev->kvm, type, &addr, false);
                if (r)
                        return (r == -ENODEV) ? -ENXIO : r;

                if (copy_to_user(uaddr, &addr, sizeof(addr)))
                        return -EFAULT;
                break;
        }
        case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
                u32 __user *uaddr = (u32 __user *)(long)attr->addr;

                r = put_user(dev->kvm->arch.vgic.nr_spis +
                             VGIC_NR_PRIVATE_IRQS, uaddr);
                break;
        }
        }

        return r;
}

static int vgic_create(struct kvm_device *dev, u32 type)
{
        return kvm_vgic_create(dev->kvm, type);
}

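/*
 * Only the device wrapper is freed here; the vgic state itself lives in
 * struct kvm and is torn down when the VM goes away.
 */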
static void vgic_destroy(struct kvm_device *dev)
{
        kfree(dev);
}

int kvm_register_vgic_device(unsigned long type)
{
        int ret = -ENODEV;

        switch (type) {
        case KVM_DEV_TYPE_ARM_VGIC_V2:
                ret = kvm_register_device_ops(&kvm_arm_vgic_v2_ops,
                                              KVM_DEV_TYPE_ARM_VGIC_V2);
                break;
        case KVM_DEV_TYPE_ARM_VGIC_V3:
                ret = kvm_register_device_ops(&kvm_arm_vgic_v3_ops,
                                              KVM_DEV_TYPE_ARM_VGIC_V3);

                if (ret)
                        break;
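                /*
                 * The ITS is only usable with a GICv3, so its device type
                 * is registered alongside it.
                 */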
                ret = kvm_vgic_register_its_device();
                break;
        }

        return ret;
}

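/*
 * For GICv2, the attribute value encodes both the target VCPU id
 * (KVM_DEV_ARM_VGIC_CPUID_MASK) and the register offset within the
 * group (KVM_DEV_ARM_VGIC_OFFSET_MASK).
 */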
int vgic_v2_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
                       struct vgic_reg_attr *reg_attr)
{
        int cpuid;

        cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
                 KVM_DEV_ARM_VGIC_CPUID_SHIFT;

        if (cpuid >= atomic_read(&dev->kvm->online_vcpus))
                return -EINVAL;

        reg_attr->vcpu = kvm_get_vcpu(dev->kvm, cpuid);
        reg_attr->addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;

        return 0;
}

/* unlocks vcpus from @vcpu_lock_idx and smaller */
static void unlock_vcpus(struct kvm *kvm, int vcpu_lock_idx)
{
        struct kvm_vcpu *tmp_vcpu;

        for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
                tmp_vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
                mutex_unlock(&tmp_vcpu->mutex);
        }
}

void unlock_all_vcpus(struct kvm *kvm)
{
        unlock_vcpus(kvm, atomic_read(&kvm->online_vcpus) - 1);
}

/* Returns true if all vcpus were locked, false otherwise */
bool lock_all_vcpus(struct kvm *kvm)
{
        struct kvm_vcpu *tmp_vcpu;
        int c;

        /*
         * Any time a vcpu is run, vcpu_load is called which tries to grab the
         * vcpu->mutex. By grabbing the vcpu->mutex of all VCPUs we ensure
         * that no other VCPUs are run and fiddle with the vgic state while we
         * access it.
         */
        kvm_for_each_vcpu(c, tmp_vcpu, kvm) {
                if (!mutex_trylock(&tmp_vcpu->mutex)) {
                        unlock_vcpus(kvm, c - 1);
                        return false;
                }
        }

        return true;
}

/**
 * vgic_v2_attr_regs_access - allows user space to access VGIC v2 state
 *
 * @dev:      kvm device handle
 * @attr:     kvm device attribute
 * @reg:      pointer to the value to be read or written
 * @is_write: true if userspace is writing a register
 */
static int vgic_v2_attr_regs_access(struct kvm_device *dev,
                                    struct kvm_device_attr *attr,
                                    u32 *reg, bool is_write)
{
        struct vgic_reg_attr reg_attr;
        gpa_t addr;
        struct kvm_vcpu *vcpu;
        int ret;

        ret = vgic_v2_parse_attr(dev, attr, &reg_attr);
        if (ret)
                return ret;

        vcpu = reg_attr.vcpu;
        addr = reg_attr.addr;

        mutex_lock(&dev->kvm->lock);

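        /*
         * Userspace may access GICv2 state before any VCPU has run, so
         * initialize the vgic on demand.
         */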
        ret = vgic_init(dev->kvm);
        if (ret)
                goto out;

        if (!lock_all_vcpus(dev->kvm)) {
                ret = -EBUSY;
                goto out;
        }

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
                ret = vgic_v2_cpuif_uaccess(vcpu, is_write, addr, reg);
                break;
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
                ret = vgic_v2_dist_uaccess(vcpu, is_write, addr, reg);
                break;
        default:
                ret = -EINVAL;
                break;
        }

        unlock_all_vcpus(dev->kvm);
out:
        mutex_unlock(&dev->kvm->lock);
        return ret;
}

static int vgic_v2_set_attr(struct kvm_device *dev,
                            struct kvm_device_attr *attr)
{
        int ret;

        ret = vgic_set_common_attr(dev, attr);
        if (ret != -ENXIO)
                return ret;

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
                u32 __user *uaddr = (u32 __user *)(long)attr->addr;
                u32 reg;

                if (get_user(reg, uaddr))
                        return -EFAULT;

                return vgic_v2_attr_regs_access(dev, attr, &reg, true);
        }
        }

        return -ENXIO;
}

static int vgic_v2_get_attr(struct kvm_device *dev,
                            struct kvm_device_attr *attr)
{
        int ret;

        ret = vgic_get_common_attr(dev, attr);
        if (ret != -ENXIO)
                return ret;

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
                u32 __user *uaddr = (u32 __user *)(long)attr->addr;
                u32 reg = 0;

                ret = vgic_v2_attr_regs_access(dev, attr, &reg, false);
                if (ret)
                        return ret;
                return put_user(reg, uaddr);
        }
        }

        return -ENXIO;
}

static int vgic_v2_has_attr(struct kvm_device *dev,
                            struct kvm_device_attr *attr)
{
        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_ADDR:
                switch (attr->attr) {
                case KVM_VGIC_V2_ADDR_TYPE_DIST:
                case KVM_VGIC_V2_ADDR_TYPE_CPU:
                        return 0;
                }
                break;
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
                return vgic_v2_has_attr_regs(dev, attr);
        case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
                return 0;
        case KVM_DEV_ARM_VGIC_GRP_CTRL:
                switch (attr->attr) {
                case KVM_DEV_ARM_VGIC_CTRL_INIT:
                        return 0;
                }
        }
        return -ENXIO;
}

struct kvm_device_ops kvm_arm_vgic_v2_ops = {
        .name = "kvm-arm-vgic-v2",
        .create = vgic_create,
        .destroy = vgic_destroy,
        .set_attr = vgic_v2_set_attr,
        .get_attr = vgic_v2_get_attr,
        .has_attr = vgic_v2_has_attr,
};

int vgic_v3_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
                       struct vgic_reg_attr *reg_attr)
{
        unsigned long vgic_mpidr, mpidr_reg;

        /*
         * For the KVM_DEV_ARM_VGIC_GRP_DIST_REGS group,
         * attr might not hold an MPIDR. Hence assume vcpu0.
         */
        if (attr->group != KVM_DEV_ARM_VGIC_GRP_DIST_REGS) {
                vgic_mpidr = (attr->attr & KVM_DEV_ARM_VGIC_V3_MPIDR_MASK) >>
                              KVM_DEV_ARM_VGIC_V3_MPIDR_SHIFT;

                mpidr_reg = VGIC_TO_MPIDR(vgic_mpidr);
                reg_attr->vcpu = kvm_mpidr_to_vcpu(dev->kvm, mpidr_reg);
        } else {
                reg_attr->vcpu = kvm_get_vcpu(dev->kvm, 0);
        }

        if (!reg_attr->vcpu)
                return -EINVAL;

        reg_attr->addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;

        return 0;
}

/**
 * vgic_v3_attr_regs_access - allows user space to access VGIC v3 state
 *
 * @dev:      kvm device handle
 * @attr:     kvm device attribute
 * @reg:      pointer to the value to be read or written
 * @is_write: true if userspace is writing a register
 */
static int vgic_v3_attr_regs_access(struct kvm_device *dev,
                                    struct kvm_device_attr *attr,
                                    u64 *reg, bool is_write)
{
        struct vgic_reg_attr reg_attr;
        gpa_t addr;
        struct kvm_vcpu *vcpu;
        int ret;
        u32 tmp32;

        ret = vgic_v3_parse_attr(dev, attr, &reg_attr);
        if (ret)
                return ret;

        vcpu = reg_attr.vcpu;
        addr = reg_attr.addr;

        mutex_lock(&dev->kvm->lock);

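        /*
         * Unlike the GICv2 path, GICv3 state can only be accessed once
         * the vgic has been initialized.
         */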
        if (unlikely(!vgic_initialized(dev->kvm))) {
                ret = -EBUSY;
                goto out;
        }

        if (!lock_all_vcpus(dev->kvm)) {
                ret = -EBUSY;
                goto out;
        }

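        /*
         * Distributor and redistributor registers are accessed as 32-bit
         * quantities, hence the round-trip through tmp32; CPU system
         * registers are a full 64 bits wide.
         */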
        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
                if (is_write)
                        tmp32 = *reg;

                ret = vgic_v3_dist_uaccess(vcpu, is_write, addr, &tmp32);
                if (!is_write)
                        *reg = tmp32;
                break;
        case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
                if (is_write)
                        tmp32 = *reg;

                ret = vgic_v3_redist_uaccess(vcpu, is_write, addr, &tmp32);
                if (!is_write)
                        *reg = tmp32;
                break;
        case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS: {
                u64 regid;

                regid = (attr->attr & KVM_DEV_ARM_VGIC_SYSREG_INSTR_MASK);
                ret = vgic_v3_cpu_sysregs_uaccess(vcpu, is_write,
                                                  regid, reg);
                break;
        }
        case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO: {
                unsigned int info, intid;

                info = (attr->attr & KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK) >>
                        KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT;
                if (info == VGIC_LEVEL_INFO_LINE_LEVEL) {
                        intid = attr->attr &
                                KVM_DEV_ARM_VGIC_LINE_LEVEL_INTID_MASK;
                        ret = vgic_v3_line_level_info_uaccess(vcpu, is_write,
                                                              intid, reg);
                } else {
                        ret = -EINVAL;
                }
                break;
        }
        default:
                ret = -EINVAL;
                break;
        }

        unlock_all_vcpus(dev->kvm);
out:
        mutex_unlock(&dev->kvm->lock);
        return ret;
}

static int vgic_v3_set_attr(struct kvm_device *dev,
                            struct kvm_device_attr *attr)
{
        int ret;

        ret = vgic_set_common_attr(dev, attr);
        if (ret != -ENXIO)
                return ret;

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS: {
                u32 __user *uaddr = (u32 __user *)(long)attr->addr;
                u32 tmp32;
                u64 reg;

                if (get_user(tmp32, uaddr))
                        return -EFAULT;

                reg = tmp32;
                return vgic_v3_attr_regs_access(dev, attr, &reg, true);
        }
        case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS: {
                u64 __user *uaddr = (u64 __user *)(long)attr->addr;
                u64 reg;

                if (get_user(reg, uaddr))
                        return -EFAULT;

                return vgic_v3_attr_regs_access(dev, attr, &reg, true);
        }
        case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO: {
                u32 __user *uaddr = (u32 __user *)(long)attr->addr;
                u64 reg;
                u32 tmp32;

                if (get_user(tmp32, uaddr))
                        return -EFAULT;

                reg = tmp32;
                return vgic_v3_attr_regs_access(dev, attr, &reg, true);
        }
        case KVM_DEV_ARM_VGIC_GRP_CTRL: {
                int ret;

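                /*
                 * Saving the pending tables writes the LPI pending state
                 * back to the tables in guest RAM (e.g. ahead of
                 * migration); no VCPU may run while this is in progress.
                 */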
                switch (attr->attr) {
                case KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES:
                        mutex_lock(&dev->kvm->lock);

                        if (!lock_all_vcpus(dev->kvm)) {
                                mutex_unlock(&dev->kvm->lock);
                                return -EBUSY;
                        }
                        ret = vgic_v3_save_pending_tables(dev->kvm);
                        unlock_all_vcpus(dev->kvm);
                        mutex_unlock(&dev->kvm->lock);
                        return ret;
                }
                break;
        }
        }
        return -ENXIO;
}

static int vgic_v3_get_attr(struct kvm_device *dev,
                            struct kvm_device_attr *attr)
{
        int ret;

        ret = vgic_get_common_attr(dev, attr);
        if (ret != -ENXIO)
                return ret;

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS: {
                u32 __user *uaddr = (u32 __user *)(long)attr->addr;
                u64 reg;
                u32 tmp32;

                ret = vgic_v3_attr_regs_access(dev, attr, &reg, false);
                if (ret)
                        return ret;
                tmp32 = reg;
                return put_user(tmp32, uaddr);
        }
        case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS: {
                u64 __user *uaddr = (u64 __user *)(long)attr->addr;
                u64 reg;

                ret = vgic_v3_attr_regs_access(dev, attr, &reg, false);
                if (ret)
                        return ret;
                return put_user(reg, uaddr);
        }
        case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO: {
                u32 __user *uaddr = (u32 __user *)(long)attr->addr;
                u64 reg;
                u32 tmp32;

                ret = vgic_v3_attr_regs_access(dev, attr, &reg, false);
                if (ret)
                        return ret;
                tmp32 = reg;
                return put_user(tmp32, uaddr);
        }
        }
        return -ENXIO;
}

static int vgic_v3_has_attr(struct kvm_device *dev,
                            struct kvm_device_attr *attr)
{
        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_ADDR:
                switch (attr->attr) {
                case KVM_VGIC_V3_ADDR_TYPE_DIST:
                case KVM_VGIC_V3_ADDR_TYPE_REDIST:
                case KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION:
                        return 0;
                }
                break;
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
                return vgic_v3_has_attr_regs(dev, attr);
        case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
                return 0;
        case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO: {
                if (((attr->attr & KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK) >>
                      KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT) ==
                      VGIC_LEVEL_INFO_LINE_LEVEL)
                        return 0;
                break;
        }
        case KVM_DEV_ARM_VGIC_GRP_CTRL:
                switch (attr->attr) {
                case KVM_DEV_ARM_VGIC_CTRL_INIT:
                        return 0;
                case KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES:
                        return 0;
                }
        }
        return -ENXIO;
}

struct kvm_device_ops kvm_arm_vgic_v3_ops = {
        .name = "kvm-arm-vgic-v3",
        .create = vgic_create,
        .destroy = vgic_destroy,
        .set_attr = vgic_v3_set_attr,
        .get_attr = vgic_v3_get_attr,
        .has_attr = vgic_v3_has_attr,
};