// SPDX-License-Identifier: GPL-2.0-only
/*
 * VGIC: KVM DEVICE API
 *
 * Copyright (C) 2015 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */
#include <linux/kvm_host.h>
#include <kvm/arm_vgic.h>
#include <linux/uaccess.h>
#include <asm/kvm_mmu.h>
#include <asm/cputype.h>
#include "vgic.h"

/* common helpers */

int vgic_check_iorange(struct kvm *kvm, phys_addr_t ioaddr,
                       phys_addr_t addr, phys_addr_t alignment,
                       phys_addr_t size)
{
        if (!IS_VGIC_ADDR_UNDEF(ioaddr))
                return -EEXIST;

        if (!IS_ALIGNED(addr, alignment) || !IS_ALIGNED(size, alignment))
                return -EINVAL;

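        /* Reject ranges whose end wraps around the guest IPA space */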
        if (addr + size < addr)
                return -EINVAL;

        if (addr & ~kvm_phys_mask(kvm) || addr + size > kvm_phys_size(kvm))
                return -E2BIG;

        return 0;
}
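
/*
 * Worked example (illustrative values only): a GICv2 CPU interface uses
 * SZ_4K alignment and KVM_VGIC_V2_CPU_SIZE, so a base of 0x08010100
 * fails the IS_ALIGNED() check with -EINVAL, while a 4K-aligned base
 * whose range ends beyond kvm_phys_size(kvm) fails with -E2BIG.
 */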

static int vgic_check_type(struct kvm *kvm, int type_needed)
{
        if (kvm->arch.vgic.vgic_model != type_needed)
                return -ENODEV;
        else
                return 0;
}

int kvm_set_legacy_vgic_v2_addr(struct kvm *kvm, struct kvm_arm_device_addr *dev_addr)
{
        struct vgic_dist *vgic = &kvm->arch.vgic;
        int r;

        mutex_lock(&kvm->arch.config_lock);
        switch (FIELD_GET(KVM_ARM_DEVICE_TYPE_MASK, dev_addr->id)) {
        case KVM_VGIC_V2_ADDR_TYPE_DIST:
                r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
                if (!r)
                        r = vgic_check_iorange(kvm, vgic->vgic_dist_base, dev_addr->addr,
                                               SZ_4K, KVM_VGIC_V2_DIST_SIZE);
                if (!r)
                        vgic->vgic_dist_base = dev_addr->addr;
                break;
        case KVM_VGIC_V2_ADDR_TYPE_CPU:
                r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
                if (!r)
                        r = vgic_check_iorange(kvm, vgic->vgic_cpu_base, dev_addr->addr,
                                               SZ_4K, KVM_VGIC_V2_CPU_SIZE);
                if (!r)
                        vgic->vgic_cpu_base = dev_addr->addr;
                break;
        default:
                r = -ENODEV;
        }

        mutex_unlock(&kvm->arch.config_lock);

        return r;
}
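
/*
 * Sketch of the legacy userspace path served here (illustrative only;
 * vm_fd is hypothetical): the KVM_ARM_SET_DEVICE_ADDR vm ioctl packs
 * the device and address type into the id field:
 *
 *	struct kvm_arm_device_addr dev_addr = {
 *		.id   = FIELD_PREP(KVM_ARM_DEVICE_ID_MASK, KVM_ARM_DEVICE_VGIC_V2) |
 *			FIELD_PREP(KVM_ARM_DEVICE_TYPE_MASK, KVM_VGIC_V2_ADDR_TYPE_DIST),
 *		.addr = 0x08000000,	// example 4K-aligned base
 *	};
 *	ioctl(vm_fd, KVM_ARM_SET_DEVICE_ADDR, &dev_addr);
 */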

/**
 * kvm_vgic_addr - set or get vgic VM base addresses
 * @kvm: pointer to the vm struct
 * @attr: pointer to the attribute being retrieved/updated
 * @write: if true set the address in the VM address space, if false read the
 *         address
 *
 * Set or get the vgic base addresses for the distributor and the virtual CPU
 * interface in the VM physical address space. These addresses are properties
 * of the emulated core/SoC and therefore user space initially knows this
 * information.
 * Check them for sanity (alignment, double assignment). We can't check for
 * overlapping regions in case of a virtual GICv3 here, since we don't know
 * the number of VCPUs yet, so we defer this check to map_resources().
 */
static int kvm_vgic_addr(struct kvm *kvm, struct kvm_device_attr *attr, bool write)
{
        u64 __user *uaddr = (u64 __user *)attr->addr;
        struct vgic_dist *vgic = &kvm->arch.vgic;
        phys_addr_t *addr_ptr, alignment, size;
        u64 undef_value = VGIC_ADDR_UNDEF;
        u64 addr;
        int r;

        /* Reading a redistributor region addr implies getting the index */
        if (write || attr->attr == KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION)
                if (get_user(addr, uaddr))
                        return -EFAULT;

        /*
         * Since we can't hold config_lock while registering the redistributor
         * iodevs, take the slots_lock immediately.
         */
        mutex_lock(&kvm->slots_lock);
        switch (attr->attr) {
        case KVM_VGIC_V2_ADDR_TYPE_DIST:
                r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
                addr_ptr = &vgic->vgic_dist_base;
                alignment = SZ_4K;
                size = KVM_VGIC_V2_DIST_SIZE;
                break;
        case KVM_VGIC_V2_ADDR_TYPE_CPU:
                r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
                addr_ptr = &vgic->vgic_cpu_base;
                alignment = SZ_4K;
                size = KVM_VGIC_V2_CPU_SIZE;
                break;
        case KVM_VGIC_V3_ADDR_TYPE_DIST:
                r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V3);
                addr_ptr = &vgic->vgic_dist_base;
                alignment = SZ_64K;
                size = KVM_VGIC_V3_DIST_SIZE;
                break;
        case KVM_VGIC_V3_ADDR_TYPE_REDIST: {
                struct vgic_redist_region *rdreg;

                r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V3);
                if (r)
                        break;
                if (write) {
                        r = vgic_v3_set_redist_base(kvm, 0, addr, 0);
                        goto out;
                }
                rdreg = list_first_entry_or_null(&vgic->rd_regions,
                                                 struct vgic_redist_region, list);
                if (!rdreg)
                        addr_ptr = &undef_value;
                else
                        addr_ptr = &rdreg->base;
                break;
        }
        case KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION:
        {
                struct vgic_redist_region *rdreg;
                u8 index;

                r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V3);
                if (r)
                        break;

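                /*
                 * The 64-bit value at attr->addr encodes, per the KVM API
                 * docs: bits [63:52] count, [51:16] base, [15:12] flags,
                 * [11:0] region index.
                 */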
                index = addr & KVM_VGIC_V3_RDIST_INDEX_MASK;

                if (write) {
                        gpa_t base = addr & KVM_VGIC_V3_RDIST_BASE_MASK;
                        u32 count = FIELD_GET(KVM_VGIC_V3_RDIST_COUNT_MASK, addr);
                        u8 flags = FIELD_GET(KVM_VGIC_V3_RDIST_FLAGS_MASK, addr);

                        if (!count || flags)
                                r = -EINVAL;
                        else
                                r = vgic_v3_set_redist_base(kvm, index,
                                                            base, count);
                        goto out;
                }

                rdreg = vgic_v3_rdist_region_from_index(kvm, index);
                if (!rdreg) {
                        r = -ENOENT;
                        goto out;
                }

                addr = index;
                addr |= rdreg->base;
                addr |= (u64)rdreg->count << KVM_VGIC_V3_RDIST_COUNT_SHIFT;
                goto out;
        }
        default:
                r = -ENODEV;
        }

        if (r)
                goto out;

        mutex_lock(&kvm->arch.config_lock);
        if (write) {
                r = vgic_check_iorange(kvm, *addr_ptr, addr, alignment, size);
                if (!r)
                        *addr_ptr = addr;
        } else {
                addr = *addr_ptr;
        }
        mutex_unlock(&kvm->arch.config_lock);

out:
        mutex_unlock(&kvm->slots_lock);

        if (!r && !write)
                r = put_user(addr, uaddr);

        return r;
}
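
/*
 * Sketch of the userspace side (illustrative only; vgic_fd is a
 * hypothetical GICv3 device fd obtained via KVM_CREATE_DEVICE): the
 * base addresses above are programmed through KVM_SET_DEVICE_ATTR in
 * the KVM_DEV_ARM_VGIC_GRP_ADDR group, with attr->addr pointing at a
 * u64 in userspace:
 *
 *	u64 dist_base = 0x08000000;		// example 64K-aligned base
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_ARM_VGIC_GRP_ADDR,
 *		.attr  = KVM_VGIC_V3_ADDR_TYPE_DIST,
 *		.addr  = (u64)&dist_base,
 *	};
 *	ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &attr);
 */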

static int vgic_set_common_attr(struct kvm_device *dev,
                                struct kvm_device_attr *attr)
{
        int r;

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_ADDR:
                r = kvm_vgic_addr(dev->kvm, attr, true);
                return (r == -ENODEV) ? -ENXIO : r;
        case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
                u32 __user *uaddr = (u32 __user *)(long)attr->addr;
                u32 val;
                int ret = 0;

                if (get_user(val, uaddr))
                        return -EFAULT;

                /*
                 * We require:
                 * - at least 32 SPIs on top of the 16 SGIs and 16 PPIs
                 * - at most 1024 interrupts
                 * - a multiple of 32 interrupts
                 */
                if (val < (VGIC_NR_PRIVATE_IRQS + 32) ||
                    val > VGIC_MAX_RESERVED ||
                    (val & 31))
                        return -EINVAL;

                mutex_lock(&dev->kvm->arch.config_lock);

                if (vgic_ready(dev->kvm) || dev->kvm->arch.vgic.nr_spis)
                        ret = -EBUSY;
                else
                        dev->kvm->arch.vgic.nr_spis =
                                val - VGIC_NR_PRIVATE_IRQS;

                mutex_unlock(&dev->kvm->arch.config_lock);

                return ret;
        }
        case KVM_DEV_ARM_VGIC_GRP_CTRL: {
                switch (attr->attr) {
                case KVM_DEV_ARM_VGIC_CTRL_INIT:
                        mutex_lock(&dev->kvm->arch.config_lock);
                        r = vgic_init(dev->kvm);
                        mutex_unlock(&dev->kvm->arch.config_lock);
                        return r;
                case KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES:
                        /*
                         * OK, this one isn't common at all, but we
                         * want to handle all control group attributes
                         * in a single place.
                         */
                        if (vgic_check_type(dev->kvm, KVM_DEV_TYPE_ARM_VGIC_V3))
                                return -ENXIO;
                        mutex_lock(&dev->kvm->lock);

                        if (!lock_all_vcpus(dev->kvm)) {
                                mutex_unlock(&dev->kvm->lock);
                                return -EBUSY;
                        }

                        mutex_lock(&dev->kvm->arch.config_lock);
                        r = vgic_v3_save_pending_tables(dev->kvm);
                        mutex_unlock(&dev->kvm->arch.config_lock);
                        unlock_all_vcpus(dev->kvm);
                        mutex_unlock(&dev->kvm->lock);
                        return r;
                }
                break;
        }
        }

        return -ENXIO;
}
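
/*
 * Illustrative sketch of sizing the vgic from userspace (vgic_fd is
 * hypothetical): KVM_DEV_ARM_VGIC_GRP_NR_IRQS takes the total number
 * of interrupts, including the 32 private ones, so the value below
 * yields 128 SPIs and satisfies the constraints checked above:
 *
 *	u32 nr_irqs = 160;		// 32 private + 128 SPIs, multiple of 32
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_ARM_VGIC_GRP_NR_IRQS,
 *		.addr  = (u64)&nr_irqs,
 *	};
 *	ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &attr);
 */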

static int vgic_get_common_attr(struct kvm_device *dev,
                                struct kvm_device_attr *attr)
{
        int r = -ENXIO;

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_ADDR:
                r = kvm_vgic_addr(dev->kvm, attr, false);
                return (r == -ENODEV) ? -ENXIO : r;
        case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
                u32 __user *uaddr = (u32 __user *)(long)attr->addr;

                r = put_user(dev->kvm->arch.vgic.nr_spis +
                             VGIC_NR_PRIVATE_IRQS, uaddr);
                break;
        }
        }

        return r;
}

static int vgic_create(struct kvm_device *dev, u32 type)
{
        return kvm_vgic_create(dev->kvm, type);
}

static void vgic_destroy(struct kvm_device *dev)
{
        kfree(dev);
}

int kvm_register_vgic_device(unsigned long type)
{
        int ret = -ENODEV;

        switch (type) {
        case KVM_DEV_TYPE_ARM_VGIC_V2:
                ret = kvm_register_device_ops(&kvm_arm_vgic_v2_ops,
                                              KVM_DEV_TYPE_ARM_VGIC_V2);
                break;
        case KVM_DEV_TYPE_ARM_VGIC_V3:
                ret = kvm_register_device_ops(&kvm_arm_vgic_v3_ops,
                                              KVM_DEV_TYPE_ARM_VGIC_V3);

                if (ret)
                        break;
                ret = kvm_vgic_register_its_device();
                break;
        }

        return ret;
}

int vgic_v2_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
                       struct vgic_reg_attr *reg_attr)
{
        int cpuid;

        cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
                KVM_DEV_ARM_VGIC_CPUID_SHIFT;

        if (cpuid >= atomic_read(&dev->kvm->online_vcpus))
                return -EINVAL;

        reg_attr->vcpu = kvm_get_vcpu(dev->kvm, cpuid);
        reg_attr->addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;

        return 0;
}
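
/*
 * Illustrative use of the encoding above (vgic_fd and val are
 * hypothetical): attr->attr carries the target vCPU index in
 * KVM_DEV_ARM_VGIC_CPUID_MASK and the register's byte offset in
 * KVM_DEV_ARM_VGIC_OFFSET_MASK, e.g. reading GICD_CTLR (offset 0)
 * as seen by vCPU 1:
 *
 *	u32 val;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_ARM_VGIC_GRP_DIST_REGS,
 *		.attr  = (1ULL << KVM_DEV_ARM_VGIC_CPUID_SHIFT) | 0x0,
 *		.addr  = (u64)&val,
 *	};
 *	ioctl(vgic_fd, KVM_GET_DEVICE_ATTR, &attr);
 */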

/* unlocks vcpus from @vcpu_lock_idx and smaller */
static void unlock_vcpus(struct kvm *kvm, int vcpu_lock_idx)
{
        struct kvm_vcpu *tmp_vcpu;

        for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
                tmp_vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
                mutex_unlock(&tmp_vcpu->mutex);
        }
}

void unlock_all_vcpus(struct kvm *kvm)
{
        unlock_vcpus(kvm, atomic_read(&kvm->online_vcpus) - 1);
}

/* Returns true if all vcpus were locked, false otherwise */
bool lock_all_vcpus(struct kvm *kvm)
{
        struct kvm_vcpu *tmp_vcpu;
        unsigned long c;

        /*
         * Any time a vcpu is run, vcpu_load is called which tries to grab the
         * vcpu->mutex. By grabbing the vcpu->mutex of all VCPUs we ensure
         * that no other VCPUs are run and fiddle with the vgic state while we
         * access it.
         */
        kvm_for_each_vcpu(c, tmp_vcpu, kvm) {
                if (!mutex_trylock(&tmp_vcpu->mutex)) {
                        unlock_vcpus(kvm, c - 1);
                        return false;
                }
        }

        return true;
}

/**
 * vgic_v2_attr_regs_access - allows user space to access VGIC v2 state
 *
 * @dev: kvm device handle
 * @attr: kvm device attribute
 * @is_write: true if userspace is writing a register
 */
static int vgic_v2_attr_regs_access(struct kvm_device *dev,
                                    struct kvm_device_attr *attr,
                                    bool is_write)
{
        u32 __user *uaddr = (u32 __user *)(unsigned long)attr->addr;
        struct vgic_reg_attr reg_attr;
        gpa_t addr;
        struct kvm_vcpu *vcpu;
        int ret;
        u32 val;

        ret = vgic_v2_parse_attr(dev, attr, &reg_attr);
        if (ret)
                return ret;

        vcpu = reg_attr.vcpu;
        addr = reg_attr.addr;

        if (is_write)
                if (get_user(val, uaddr))
                        return -EFAULT;

        mutex_lock(&dev->kvm->lock);

        if (!lock_all_vcpus(dev->kvm)) {
                mutex_unlock(&dev->kvm->lock);
                return -EBUSY;
        }

        mutex_lock(&dev->kvm->arch.config_lock);

        ret = vgic_init(dev->kvm);
        if (ret)
                goto out;

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
                ret = vgic_v2_cpuif_uaccess(vcpu, is_write, addr, &val);
                break;
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
                ret = vgic_v2_dist_uaccess(vcpu, is_write, addr, &val);
                break;
        default:
                ret = -EINVAL;
                break;
        }

out:
        mutex_unlock(&dev->kvm->arch.config_lock);
        unlock_all_vcpus(dev->kvm);
        mutex_unlock(&dev->kvm->lock);

        if (!ret && !is_write)
                ret = put_user(val, uaddr);

        return ret;
}

static int vgic_v2_set_attr(struct kvm_device *dev,
                            struct kvm_device_attr *attr)
{
        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
                return vgic_v2_attr_regs_access(dev, attr, true);
        default:
                return vgic_set_common_attr(dev, attr);
        }
}

static int vgic_v2_get_attr(struct kvm_device *dev,
                            struct kvm_device_attr *attr)
{
        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
                return vgic_v2_attr_regs_access(dev, attr, false);
        default:
                return vgic_get_common_attr(dev, attr);
        }
}

static int vgic_v2_has_attr(struct kvm_device *dev,
                            struct kvm_device_attr *attr)
{
        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_ADDR:
                switch (attr->attr) {
                case KVM_VGIC_V2_ADDR_TYPE_DIST:
                case KVM_VGIC_V2_ADDR_TYPE_CPU:
                        return 0;
                }
                break;
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
                return vgic_v2_has_attr_regs(dev, attr);
        case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
                return 0;
        case KVM_DEV_ARM_VGIC_GRP_CTRL:
                switch (attr->attr) {
                case KVM_DEV_ARM_VGIC_CTRL_INIT:
                        return 0;
                }
        }
        return -ENXIO;
}

struct kvm_device_ops kvm_arm_vgic_v2_ops = {
        .name = "kvm-arm-vgic-v2",
        .create = vgic_create,
        .destroy = vgic_destroy,
        .set_attr = vgic_v2_set_attr,
        .get_attr = vgic_v2_get_attr,
        .has_attr = vgic_v2_has_attr,
};

int vgic_v3_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
                       struct vgic_reg_attr *reg_attr)
{
        unsigned long vgic_mpidr, mpidr_reg;

        /*
         * For KVM_DEV_ARM_VGIC_GRP_DIST_REGS group,
         * attr might not hold MPIDR. Hence assume vcpu0.
         */
        if (attr->group != KVM_DEV_ARM_VGIC_GRP_DIST_REGS) {
                vgic_mpidr = (attr->attr & KVM_DEV_ARM_VGIC_V3_MPIDR_MASK) >>
                              KVM_DEV_ARM_VGIC_V3_MPIDR_SHIFT;

                mpidr_reg = VGIC_TO_MPIDR(vgic_mpidr);
                reg_attr->vcpu = kvm_mpidr_to_vcpu(dev->kvm, mpidr_reg);
        } else {
                reg_attr->vcpu = kvm_get_vcpu(dev->kvm, 0);
        }

        if (!reg_attr->vcpu)
                return -EINVAL;

        reg_attr->addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;

        return 0;
}
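
/*
 * Illustrative attr encoding for the v3 register groups (values
 * hypothetical): the compressed MPIDR affinity of the target vCPU goes
 * in KVM_DEV_ARM_VGIC_V3_MPIDR_MASK and the register's byte offset in
 * KVM_DEV_ARM_VGIC_OFFSET_MASK, e.g. for GICR_CTLR (offset 0) of the
 * vCPU with affinity value mpidr:
 *
 *	attr.group = KVM_DEV_ARM_VGIC_GRP_REDIST_REGS;
 *	attr.attr  = ((u64)mpidr << KVM_DEV_ARM_VGIC_V3_MPIDR_SHIFT) | 0x0;
 */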

/**
 * vgic_v3_attr_regs_access - allows user space to access VGIC v3 state
 *
 * @dev: kvm device handle
 * @attr: kvm device attribute
 * @is_write: true if userspace is writing a register
 */
static int vgic_v3_attr_regs_access(struct kvm_device *dev,
                                    struct kvm_device_attr *attr,
                                    bool is_write)
{
        struct vgic_reg_attr reg_attr;
        gpa_t addr;
        struct kvm_vcpu *vcpu;
        bool uaccess;
        u32 val;
        int ret;

        ret = vgic_v3_parse_attr(dev, attr, &reg_attr);
        if (ret)
                return ret;

        vcpu = reg_attr.vcpu;
        addr = reg_attr.addr;

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
                /* Sysregs uaccess is performed by the sysreg handling code */
                uaccess = false;
                break;
        default:
                uaccess = true;
        }

        if (uaccess && is_write) {
                u32 __user *uaddr = (u32 __user *)(unsigned long)attr->addr;

                if (get_user(val, uaddr))
                        return -EFAULT;
        }

        mutex_lock(&dev->kvm->lock);

        if (!lock_all_vcpus(dev->kvm)) {
                mutex_unlock(&dev->kvm->lock);
                return -EBUSY;
        }

        mutex_lock(&dev->kvm->arch.config_lock);

        if (unlikely(!vgic_initialized(dev->kvm))) {
                ret = -EBUSY;
                goto out;
        }

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
                ret = vgic_v3_dist_uaccess(vcpu, is_write, addr, &val);
                break;
        case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
                ret = vgic_v3_redist_uaccess(vcpu, is_write, addr, &val);
                break;
        case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
                ret = vgic_v3_cpu_sysregs_uaccess(vcpu, attr, is_write);
                break;
        case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO: {
                unsigned int info, intid;

                info = (attr->attr & KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK) >>
                        KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT;
                if (info == VGIC_LEVEL_INFO_LINE_LEVEL) {
                        intid = attr->attr &
                                KVM_DEV_ARM_VGIC_LINE_LEVEL_INTID_MASK;
                        ret = vgic_v3_line_level_info_uaccess(vcpu, is_write,
                                                              intid, &val);
                } else {
                        ret = -EINVAL;
                }
                break;
        }
        default:
                ret = -EINVAL;
                break;
        }

out:
        mutex_unlock(&dev->kvm->arch.config_lock);
        unlock_all_vcpus(dev->kvm);
        mutex_unlock(&dev->kvm->lock);

        if (!ret && uaccess && !is_write) {
                u32 __user *uaddr = (u32 __user *)(unsigned long)attr->addr;

                ret = put_user(val, uaddr);
        }

        return ret;
}
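
/*
 * Illustrative sketch for the line-level group (vgic_fd and val are
 * hypothetical): userspace encodes VGIC_LEVEL_INFO_LINE_LEVEL and a
 * vINTID that is a multiple of 32 in attr->attr, and the u32 at
 * attr->addr then carries one level bit per interrupt:
 *
 *	u32 val;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO,
 *		.attr  = (VGIC_LEVEL_INFO_LINE_LEVEL <<
 *			  KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT) | 32,
 *		.addr  = (u64)&val,
 *	};
 *	ioctl(vgic_fd, KVM_GET_DEVICE_ATTR, &attr);	// levels of SPIs 32..63
 */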

static int vgic_v3_set_attr(struct kvm_device *dev,
                            struct kvm_device_attr *attr)
{
        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
        case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO:
                return vgic_v3_attr_regs_access(dev, attr, true);
        default:
                return vgic_set_common_attr(dev, attr);
        }
}

static int vgic_v3_get_attr(struct kvm_device *dev,
                            struct kvm_device_attr *attr)
{
        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
        case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO:
                return vgic_v3_attr_regs_access(dev, attr, false);
        default:
                return vgic_get_common_attr(dev, attr);
        }
}

static int vgic_v3_has_attr(struct kvm_device *dev,
                            struct kvm_device_attr *attr)
{
        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_ADDR:
                switch (attr->attr) {
                case KVM_VGIC_V3_ADDR_TYPE_DIST:
                case KVM_VGIC_V3_ADDR_TYPE_REDIST:
                case KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION:
                        return 0;
                }
                break;
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
                return vgic_v3_has_attr_regs(dev, attr);
        case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
                return 0;
        case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO: {
                if (((attr->attr & KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK) >>
                      KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT) ==
                      VGIC_LEVEL_INFO_LINE_LEVEL)
                        return 0;
                break;
        }
        case KVM_DEV_ARM_VGIC_GRP_CTRL:
                switch (attr->attr) {
                case KVM_DEV_ARM_VGIC_CTRL_INIT:
                        return 0;
                case KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES:
                        return 0;
                }
        }
        return -ENXIO;
}

struct kvm_device_ops kvm_arm_vgic_v3_ops = {
        .name = "kvm-arm-vgic-v3",
        .create = vgic_create,
        .destroy = vgic_destroy,
        .set_attr = vgic_v3_set_attr,
        .get_attr = vgic_v3_get_attr,
        .has_attr = vgic_v3_has_attr,
};