// SPDX-License-Identifier: GPL-2.0-only
/*
 * VGIC: KVM DEVICE API
 *
 * Copyright (C) 2015 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */
#include <linux/kvm_host.h>
#include <kvm/arm_vgic.h>
#include <linux/uaccess.h>
#include <asm/kvm_mmu.h>
#include <asm/cputype.h>
#include "vgic.h"

/* common helpers */

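/*
 * Sanity-check a candidate MMIO range for a VGIC region: the base must not
 * already be set, both address and size must be aligned to @alignment, the
 * range must not wrap around (addr + size < addr catches 64-bit overflow),
 * and it must fit entirely within the VM's IPA space.
 */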
int vgic_check_iorange(struct kvm *kvm, phys_addr_t ioaddr,
                       phys_addr_t addr, phys_addr_t alignment,
                       phys_addr_t size)
{
        if (!IS_VGIC_ADDR_UNDEF(ioaddr))
                return -EEXIST;

        if (!IS_ALIGNED(addr, alignment) || !IS_ALIGNED(size, alignment))
                return -EINVAL;

        if (addr + size < addr)
                return -EINVAL;

        if (addr & ~kvm_phys_mask(kvm) || addr + size > kvm_phys_size(kvm))
                return -E2BIG;

        return 0;
}

static int vgic_check_type(struct kvm *kvm, int type_needed)
{
        if (kvm->arch.vgic.vgic_model != type_needed)
                return -ENODEV;
        else
                return 0;
}

/**
 * kvm_vgic_addr - set or get vgic VM base addresses
 * @kvm: pointer to the vm struct
 * @type: the VGIC addr type, one of KVM_VGIC_V[23]_ADDR_TYPE_XXX
 * @addr: pointer to address value
 * @write: if true set the address in the VM address space, if false read the
 *         address
 *
 * Set or get the vgic base addresses for the distributor and the virtual CPU
 * interface in the VM physical address space. These addresses are properties
 * of the emulated core/SoC and therefore user space initially knows this
 * information.
 * Check them for sanity (alignment, double assignment). We can't check for
 * overlapping regions in case of a virtual GICv3 here, since we don't know
 * the number of VCPUs yet, so we defer this check to map_resources().
 */
int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
{
        int r = 0;
        struct vgic_dist *vgic = &kvm->arch.vgic;
        phys_addr_t *addr_ptr, alignment, size;
        u64 undef_value = VGIC_ADDR_UNDEF;

        mutex_lock(&kvm->lock);
        switch (type) {
        case KVM_VGIC_V2_ADDR_TYPE_DIST:
                r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
                addr_ptr = &vgic->vgic_dist_base;
                alignment = SZ_4K;
                size = KVM_VGIC_V2_DIST_SIZE;
                break;
        case KVM_VGIC_V2_ADDR_TYPE_CPU:
                r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
                addr_ptr = &vgic->vgic_cpu_base;
                alignment = SZ_4K;
                size = KVM_VGIC_V2_CPU_SIZE;
                break;
        case KVM_VGIC_V3_ADDR_TYPE_DIST:
                r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V3);
                addr_ptr = &vgic->vgic_dist_base;
                alignment = SZ_64K;
                size = KVM_VGIC_V3_DIST_SIZE;
                break;
        case KVM_VGIC_V3_ADDR_TYPE_REDIST: {
                struct vgic_redist_region *rdreg;

                r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V3);
                if (r)
                        break;
                if (write) {
                        r = vgic_v3_set_redist_base(kvm, 0, *addr, 0);
                        goto out;
                }
                rdreg = list_first_entry_or_null(&vgic->rd_regions,
                                                 struct vgic_redist_region, list);
                if (!rdreg)
                        addr_ptr = &undef_value;
                else
                        addr_ptr = &rdreg->base;
                break;
        }
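        /*
         * For KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, everything is packed
         * into the one 64-bit value: the region index in bits [11:0],
         * flags in bits [15:12], the 64K-aligned base address in bits
         * [51:16] and the redistributor count in bits [63:52], per the
         * KVM_VGIC_V3_RDIST_* masks used below.
         */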
        case KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION:
        {
                struct vgic_redist_region *rdreg;
                u8 index;

                r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V3);
                if (r)
                        break;

                index = *addr & KVM_VGIC_V3_RDIST_INDEX_MASK;

                if (write) {
                        gpa_t base = *addr & KVM_VGIC_V3_RDIST_BASE_MASK;
                        u32 count = (*addr & KVM_VGIC_V3_RDIST_COUNT_MASK)
                                >> KVM_VGIC_V3_RDIST_COUNT_SHIFT;
                        u8 flags = (*addr & KVM_VGIC_V3_RDIST_FLAGS_MASK)
                                >> KVM_VGIC_V3_RDIST_FLAGS_SHIFT;

                        if (!count || flags)
                                r = -EINVAL;
                        else
                                r = vgic_v3_set_redist_base(kvm, index,
                                                            base, count);
                        goto out;
                }

                rdreg = vgic_v3_rdist_region_from_index(kvm, index);
                if (!rdreg) {
                        r = -ENOENT;
                        goto out;
                }

                *addr = index;
                *addr |= rdreg->base;
                *addr |= (u64)rdreg->count << KVM_VGIC_V3_RDIST_COUNT_SHIFT;
                goto out;
        }
        default:
                r = -ENODEV;
        }

        if (r)
                goto out;

        if (write) {
                r = vgic_check_iorange(kvm, *addr_ptr, *addr, alignment, size);
                if (!r)
                        *addr_ptr = *addr;
        } else {
                *addr = *addr_ptr;
        }

out:
        mutex_unlock(&kvm->lock);
        return r;
}
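
/*
 * Illustrative only: userspace typically reaches kvm_vgic_addr() through
 * KVM_SET_DEVICE_ATTR on the vgic device fd, along the lines of:
 *
 *	u64 dist_base = 0x8000000;	// example value only
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_ARM_VGIC_GRP_ADDR,
 *		.attr  = KVM_VGIC_V2_ADDR_TYPE_DIST,
 *		.addr  = (u64)(unsigned long)&dist_base,
 *	};
 *	ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &attr);
 *
 * Any base passing vgic_check_iorange() is acceptable.
 */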

static int vgic_set_common_attr(struct kvm_device *dev,
                                struct kvm_device_attr *attr)
{
        int r;

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_ADDR: {
                u64 __user *uaddr = (u64 __user *)(long)attr->addr;
                u64 addr;
                unsigned long type = (unsigned long)attr->attr;

                if (copy_from_user(&addr, uaddr, sizeof(addr)))
                        return -EFAULT;

                r = kvm_vgic_addr(dev->kvm, type, &addr, true);
                return (r == -ENODEV) ? -ENXIO : r;
        }
        case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
                u32 __user *uaddr = (u32 __user *)(long)attr->addr;
                u32 val;
                int ret = 0;

                if (get_user(val, uaddr))
                        return -EFAULT;

                /*
                 * We require:
                 * - at least 32 SPIs on top of the 16 SGIs and 16 PPIs
                 * - at most 1024 interrupts
                 * - a multiple of 32 interrupts
                 */
                if (val < (VGIC_NR_PRIVATE_IRQS + 32) ||
                    val > VGIC_MAX_RESERVED ||
                    (val & 31))
                        return -EINVAL;

                mutex_lock(&dev->kvm->lock);

                if (vgic_ready(dev->kvm) || dev->kvm->arch.vgic.nr_spis)
                        ret = -EBUSY;
                else
                        dev->kvm->arch.vgic.nr_spis =
                                val - VGIC_NR_PRIVATE_IRQS;

                mutex_unlock(&dev->kvm->lock);

                return ret;
        }
        case KVM_DEV_ARM_VGIC_GRP_CTRL: {
                switch (attr->attr) {
                case KVM_DEV_ARM_VGIC_CTRL_INIT:
                        mutex_lock(&dev->kvm->lock);
                        r = vgic_init(dev->kvm);
                        mutex_unlock(&dev->kvm->lock);
                        return r;
                }
                break;
        }
        }

        return -ENXIO;
}
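
/*
 * Illustrative only: configuring the total number of IRQs from userspace
 * lands in the KVM_DEV_ARM_VGIC_GRP_NR_IRQS case above, e.g.
 *
 *	u32 nr_irqs = 128;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_ARM_VGIC_GRP_NR_IRQS,
 *		.addr  = (u64)(unsigned long)&nr_irqs,
 *	};
 *	ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &attr);
 *
 * which stores nr_spis = 128 - VGIC_NR_PRIVATE_IRQS = 96.
 */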

static int vgic_get_common_attr(struct kvm_device *dev,
                                struct kvm_device_attr *attr)
{
        int r = -ENXIO;

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_ADDR: {
                u64 __user *uaddr = (u64 __user *)(long)attr->addr;
                u64 addr;
                unsigned long type = (unsigned long)attr->attr;

                if (copy_from_user(&addr, uaddr, sizeof(addr)))
                        return -EFAULT;

                r = kvm_vgic_addr(dev->kvm, type, &addr, false);
                if (r)
                        return (r == -ENODEV) ? -ENXIO : r;

                if (copy_to_user(uaddr, &addr, sizeof(addr)))
                        return -EFAULT;
                break;
        }
        case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
                u32 __user *uaddr = (u32 __user *)(long)attr->addr;

                r = put_user(dev->kvm->arch.vgic.nr_spis +
                             VGIC_NR_PRIVATE_IRQS, uaddr);
                break;
        }
        }

        return r;
}

static int vgic_create(struct kvm_device *dev, u32 type)
{
        return kvm_vgic_create(dev->kvm, type);
}
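
/*
 * Illustrative only: vgic_create() is invoked when userspace creates the
 * in-kernel GIC via KVM_CREATE_DEVICE, e.g.
 *
 *	struct kvm_create_device cd = {
 *		.type = KVM_DEV_TYPE_ARM_VGIC_V2,
 *	};
 *	ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);
 *
 * On success, cd.fd is the device fd used for the *_DEVICE_ATTR ioctls.
 */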

static void vgic_destroy(struct kvm_device *dev)
{
        kfree(dev);
}

int kvm_register_vgic_device(unsigned long type)
{
        int ret = -ENODEV;

        switch (type) {
        case KVM_DEV_TYPE_ARM_VGIC_V2:
                ret = kvm_register_device_ops(&kvm_arm_vgic_v2_ops,
                                              KVM_DEV_TYPE_ARM_VGIC_V2);
                break;
        case KVM_DEV_TYPE_ARM_VGIC_V3:
                ret = kvm_register_device_ops(&kvm_arm_vgic_v3_ops,
                                              KVM_DEV_TYPE_ARM_VGIC_V3);

                if (ret)
                        break;
                ret = kvm_vgic_register_its_device();
                break;
        }

        return ret;
}

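/*
 * For the v2 register groups, attr->attr encodes the target vCPU index in
 * bits [39:32] (KVM_DEV_ARM_VGIC_CPUID_MASK) and the register offset in
 * bits [31:0] (KVM_DEV_ARM_VGIC_OFFSET_MASK).
 */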
int vgic_v2_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
                       struct vgic_reg_attr *reg_attr)
{
        int cpuid;

        cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
                 KVM_DEV_ARM_VGIC_CPUID_SHIFT;

        if (cpuid >= atomic_read(&dev->kvm->online_vcpus))
                return -EINVAL;

        reg_attr->vcpu = kvm_get_vcpu(dev->kvm, cpuid);
        reg_attr->addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;

        return 0;
}

/* unlocks vcpus from @vcpu_lock_idx and smaller */
static void unlock_vcpus(struct kvm *kvm, int vcpu_lock_idx)
{
        struct kvm_vcpu *tmp_vcpu;

        for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
                tmp_vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
                mutex_unlock(&tmp_vcpu->mutex);
        }
}

void unlock_all_vcpus(struct kvm *kvm)
{
        unlock_vcpus(kvm, atomic_read(&kvm->online_vcpus) - 1);
}

/* Returns true if all vcpus were locked, false otherwise */
bool lock_all_vcpus(struct kvm *kvm)
{
        struct kvm_vcpu *tmp_vcpu;
        int c;

        /*
         * Any time a vcpu is run, vcpu_load is called which tries to grab the
         * vcpu->mutex. By grabbing the vcpu->mutex of all VCPUs we ensure
         * that no other VCPUs are run and fiddle with the vgic state while we
         * access it.
         */
        kvm_for_each_vcpu(c, tmp_vcpu, kvm) {
                if (!mutex_trylock(&tmp_vcpu->mutex)) {
                        unlock_vcpus(kvm, c - 1);
                        return false;
                }
        }

        return true;
}

/**
 * vgic_v2_attr_regs_access - allows user space to access VGIC v2 state
 *
 * @dev:      kvm device handle
 * @attr:     kvm device attribute
 * @reg:      address of the value to be read or written
 * @is_write: true if userspace is writing a register
 */
static int vgic_v2_attr_regs_access(struct kvm_device *dev,
                                    struct kvm_device_attr *attr,
                                    u32 *reg, bool is_write)
{
        struct vgic_reg_attr reg_attr;
        gpa_t addr;
        struct kvm_vcpu *vcpu;
        int ret;

        ret = vgic_v2_parse_attr(dev, attr, &reg_attr);
        if (ret)
                return ret;

        vcpu = reg_attr.vcpu;
        addr = reg_attr.addr;

        mutex_lock(&dev->kvm->lock);

        ret = vgic_init(dev->kvm);
        if (ret)
                goto out;

        if (!lock_all_vcpus(dev->kvm)) {
                ret = -EBUSY;
                goto out;
        }

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
                ret = vgic_v2_cpuif_uaccess(vcpu, is_write, addr, reg);
                break;
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
                ret = vgic_v2_dist_uaccess(vcpu, is_write, addr, reg);
                break;
        default:
                ret = -EINVAL;
                break;
        }

        unlock_all_vcpus(dev->kvm);
out:
        mutex_unlock(&dev->kvm->lock);
        return ret;
}

static int vgic_v2_set_attr(struct kvm_device *dev,
                            struct kvm_device_attr *attr)
{
        int ret;

        ret = vgic_set_common_attr(dev, attr);
        if (ret != -ENXIO)
                return ret;

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
                u32 __user *uaddr = (u32 __user *)(long)attr->addr;
                u32 reg;

                if (get_user(reg, uaddr))
                        return -EFAULT;

                return vgic_v2_attr_regs_access(dev, attr, &reg, true);
        }
        }

        return -ENXIO;
}

static int vgic_v2_get_attr(struct kvm_device *dev,
                            struct kvm_device_attr *attr)
{
        int ret;

        ret = vgic_get_common_attr(dev, attr);
        if (ret != -ENXIO)
                return ret;

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
                u32 __user *uaddr = (u32 __user *)(long)attr->addr;
                u32 reg = 0;

                ret = vgic_v2_attr_regs_access(dev, attr, &reg, false);
                if (ret)
                        return ret;
                return put_user(reg, uaddr);
        }
        }

        return -ENXIO;
}

static int vgic_v2_has_attr(struct kvm_device *dev,
                            struct kvm_device_attr *attr)
{
        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_ADDR:
                switch (attr->attr) {
                case KVM_VGIC_V2_ADDR_TYPE_DIST:
                case KVM_VGIC_V2_ADDR_TYPE_CPU:
                        return 0;
                }
                break;
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
                return vgic_v2_has_attr_regs(dev, attr);
        case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
                return 0;
        case KVM_DEV_ARM_VGIC_GRP_CTRL:
                switch (attr->attr) {
                case KVM_DEV_ARM_VGIC_CTRL_INIT:
                        return 0;
                }
        }
        return -ENXIO;
}

struct kvm_device_ops kvm_arm_vgic_v2_ops = {
        .name = "kvm-arm-vgic-v2",
        .create = vgic_create,
        .destroy = vgic_destroy,
        .set_attr = vgic_v2_set_attr,
        .get_attr = vgic_v2_get_attr,
        .has_attr = vgic_v2_has_attr,
};

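/*
 * For most v3 register groups, attr->attr carries the target vCPU's MPIDR
 * affinity fields in bits [63:32] (KVM_DEV_ARM_VGIC_V3_MPIDR_MASK); the
 * distributor group is the exception, as its registers are per-VM.
 */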
int vgic_v3_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
                       struct vgic_reg_attr *reg_attr)
{
        unsigned long vgic_mpidr, mpidr_reg;

        /*
         * For the KVM_DEV_ARM_VGIC_GRP_DIST_REGS group,
         * attr might not hold an MPIDR. Hence assume vcpu0.
         */
        if (attr->group != KVM_DEV_ARM_VGIC_GRP_DIST_REGS) {
                vgic_mpidr = (attr->attr & KVM_DEV_ARM_VGIC_V3_MPIDR_MASK) >>
                              KVM_DEV_ARM_VGIC_V3_MPIDR_SHIFT;

                mpidr_reg = VGIC_TO_MPIDR(vgic_mpidr);
                reg_attr->vcpu = kvm_mpidr_to_vcpu(dev->kvm, mpidr_reg);
        } else {
                reg_attr->vcpu = kvm_get_vcpu(dev->kvm, 0);
        }

        if (!reg_attr->vcpu)
                return -EINVAL;

        reg_attr->addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;

        return 0;
}

/*
 * vgic_v3_attr_regs_access - allows user space to access VGIC v3 state
 *
 * @dev:      kvm device handle
 * @attr:     kvm device attribute
 * @reg:      address of the value to be read or written
 * @is_write: true if userspace is writing a register
 */
static int vgic_v3_attr_regs_access(struct kvm_device *dev,
                                    struct kvm_device_attr *attr,
                                    u64 *reg, bool is_write)
{
        struct vgic_reg_attr reg_attr;
        gpa_t addr;
        struct kvm_vcpu *vcpu;
        int ret;
        u32 tmp32;

        ret = vgic_v3_parse_attr(dev, attr, &reg_attr);
        if (ret)
                return ret;

        vcpu = reg_attr.vcpu;
        addr = reg_attr.addr;

        mutex_lock(&dev->kvm->lock);

        if (unlikely(!vgic_initialized(dev->kvm))) {
                ret = -EBUSY;
                goto out;
        }

        if (!lock_all_vcpus(dev->kvm)) {
                ret = -EBUSY;
                goto out;
        }
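
        /*
         * Distributor and redistributor registers are always accessed as
         * 32-bit quantities through this interface, so their values are
         * bounced through tmp32; system register accesses use the full
         * 64-bit *reg.
         */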
        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
                if (is_write)
                        tmp32 = *reg;

                ret = vgic_v3_dist_uaccess(vcpu, is_write, addr, &tmp32);
                if (!is_write)
                        *reg = tmp32;
                break;
        case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
                if (is_write)
                        tmp32 = *reg;

                ret = vgic_v3_redist_uaccess(vcpu, is_write, addr, &tmp32);
                if (!is_write)
                        *reg = tmp32;
                break;
        case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS: {
                u64 regid;

                regid = (attr->attr & KVM_DEV_ARM_VGIC_SYSREG_INSTR_MASK);
                ret = vgic_v3_cpu_sysregs_uaccess(vcpu, is_write,
                                                  regid, reg);
                break;
        }
        case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO: {
                unsigned int info, intid;

                info = (attr->attr & KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK) >>
                        KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT;
                if (info == VGIC_LEVEL_INFO_LINE_LEVEL) {
                        intid = attr->attr &
                                KVM_DEV_ARM_VGIC_LINE_LEVEL_INTID_MASK;
                        ret = vgic_v3_line_level_info_uaccess(vcpu, is_write,
                                                              intid, reg);
                } else {
                        ret = -EINVAL;
                }
                break;
        }
        default:
                ret = -EINVAL;
                break;
        }

        unlock_all_vcpus(dev->kvm);
out:
        mutex_unlock(&dev->kvm->lock);
        return ret;
}

static int vgic_v3_set_attr(struct kvm_device *dev,
                            struct kvm_device_attr *attr)
{
        int ret;

        ret = vgic_set_common_attr(dev, attr);
        if (ret != -ENXIO)
                return ret;

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS: {
                u32 __user *uaddr = (u32 __user *)(long)attr->addr;
                u32 tmp32;
                u64 reg;

                if (get_user(tmp32, uaddr))
                        return -EFAULT;

                reg = tmp32;
                return vgic_v3_attr_regs_access(dev, attr, &reg, true);
        }
        case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS: {
                u64 __user *uaddr = (u64 __user *)(long)attr->addr;
                u64 reg;

                if (get_user(reg, uaddr))
                        return -EFAULT;

                return vgic_v3_attr_regs_access(dev, attr, &reg, true);
        }
        case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO: {
                u32 __user *uaddr = (u32 __user *)(long)attr->addr;
                u64 reg;
                u32 tmp32;

                if (get_user(tmp32, uaddr))
                        return -EFAULT;

                reg = tmp32;
                return vgic_v3_attr_regs_access(dev, attr, &reg, true);
        }
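        /*
         * KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES flushes the pending state
         * of LPIs into the guest RAM pending tables so that userspace can
         * migrate it; all vCPUs must be locked out while this runs.
         */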
        case KVM_DEV_ARM_VGIC_GRP_CTRL: {
                int ret;

                switch (attr->attr) {
                case KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES:
                        mutex_lock(&dev->kvm->lock);

                        if (!lock_all_vcpus(dev->kvm)) {
                                mutex_unlock(&dev->kvm->lock);
                                return -EBUSY;
                        }
                        ret = vgic_v3_save_pending_tables(dev->kvm);
                        unlock_all_vcpus(dev->kvm);
                        mutex_unlock(&dev->kvm->lock);
                        return ret;
                }
                break;
        }
        }
        return -ENXIO;
}

static int vgic_v3_get_attr(struct kvm_device *dev,
                            struct kvm_device_attr *attr)
{
        int ret;

        ret = vgic_get_common_attr(dev, attr);
        if (ret != -ENXIO)
                return ret;

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS: {
                u32 __user *uaddr = (u32 __user *)(long)attr->addr;
                u64 reg;
                u32 tmp32;

                ret = vgic_v3_attr_regs_access(dev, attr, &reg, false);
                if (ret)
                        return ret;
                tmp32 = reg;
                return put_user(tmp32, uaddr);
        }
        case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS: {
                u64 __user *uaddr = (u64 __user *)(long)attr->addr;
                u64 reg;

                ret = vgic_v3_attr_regs_access(dev, attr, &reg, false);
                if (ret)
                        return ret;
                return put_user(reg, uaddr);
        }
        case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO: {
                u32 __user *uaddr = (u32 __user *)(long)attr->addr;
                u64 reg;
                u32 tmp32;

                ret = vgic_v3_attr_regs_access(dev, attr, &reg, false);
                if (ret)
                        return ret;
                tmp32 = reg;
                return put_user(tmp32, uaddr);
        }
        }
        return -ENXIO;
}

static int vgic_v3_has_attr(struct kvm_device *dev,
                            struct kvm_device_attr *attr)
{
        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_ADDR:
                switch (attr->attr) {
                case KVM_VGIC_V3_ADDR_TYPE_DIST:
                case KVM_VGIC_V3_ADDR_TYPE_REDIST:
                case KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION:
                        return 0;
                }
                break;
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
                return vgic_v3_has_attr_regs(dev, attr);
        case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
                return 0;
        case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO: {
                if (((attr->attr & KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK) >>
                      KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT) ==
                      VGIC_LEVEL_INFO_LINE_LEVEL)
                        return 0;
                break;
        }
        case KVM_DEV_ARM_VGIC_GRP_CTRL:
                switch (attr->attr) {
                case KVM_DEV_ARM_VGIC_CTRL_INIT:
                        return 0;
                case KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES:
                        return 0;
                }
        }
        return -ENXIO;
}

struct kvm_device_ops kvm_arm_vgic_v3_ops = {
        .name = "kvm-arm-vgic-v3",
        .create = vgic_create,
        .destroy = vgic_destroy,
        .set_attr = vgic_v3_set_attr,
        .get_attr = vgic_v3_get_attr,
        .has_attr = vgic_v3_has_attr,
};
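
/*
 * Illustrative only: the canonical userspace init sequence probes for and
 * then triggers KVM_DEV_ARM_VGIC_CTRL_INIT once all vCPUs are created:
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_ARM_VGIC_GRP_CTRL,
 *		.attr  = KVM_DEV_ARM_VGIC_CTRL_INIT,
 *	};
 *	if (!ioctl(vgic_fd, KVM_HAS_DEVICE_ATTR, &attr))
 *		ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &attr);
 */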