// SPDX-License-Identifier: GPL-2.0-only
/*
 * VGICv3 MMIO handling functions
 */

#include <linux/bitfield.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <kvm/iodev.h>
#include <kvm/arm_vgic.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>

#include "vgic.h"
#include "vgic-mmio.h"

/* extract @num bytes at @offset bytes offset in data */
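/* e.g. extract_bytes(0x1122334455667788ULL, 2, 2) returns 0x5566 */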
unsigned long extract_bytes(u64 data, unsigned int offset,
			    unsigned int num)
{
	return (data >> (offset * 8)) & GENMASK_ULL(num * 8 - 1, 0);
}

/* allows updates of any half of a 64-bit register (or the whole thing) */
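/* e.g. update_64bit_reg(reg, 4, 4, val) replaces bits [63:32] of @reg */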
u64 update_64bit_reg(u64 reg, unsigned int offset, unsigned int len,
		     unsigned long val)
{
	int lower = (offset & 4) * 8;
	int upper = lower + 8 * len - 1;

	reg &= ~GENMASK_ULL(upper, lower);
	val &= GENMASK_ULL(len * 8 - 1, 0);

	return reg | ((u64)val << lower);
}

bool vgic_has_its(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;

	if (dist->vgic_model != KVM_DEV_TYPE_ARM_VGIC_V3)
		return false;

	return dist->has_its;
}

bool vgic_supports_direct_msis(struct kvm *kvm)
{
	return (kvm_vgic_global_state.has_gicv4_1 ||
		(kvm_vgic_global_state.has_gicv4 && vgic_has_its(kvm)));
}

/*
 * The Revision field in the IIDR has the following meanings:
 *
 * Revision 2: Interrupt groups are guest-configurable and signaled using
 *	       their configured groups.
 */

static unsigned long vgic_mmio_read_v3_misc(struct kvm_vcpu *vcpu,
					    gpa_t addr, unsigned int len)
{
	struct vgic_dist *vgic = &vcpu->kvm->arch.vgic;
	u32 value = 0;

	switch (addr & 0x0c) {
	case GICD_CTLR:
		if (vgic->enabled)
			value |= GICD_CTLR_ENABLE_SS_G1;
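		/* ARE and DS read as one: affinity routing, single security state */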
		value |= GICD_CTLR_ARE_NS | GICD_CTLR_DS;
		if (vgic->nassgireq)
			value |= GICD_CTLR_nASSGIreq;
		break;
	case GICD_TYPER:
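		/*
		 * Encode the number of implemented INTIDs in the
		 * ITLinesNumber field: 32 * (N + 1) IDs are supported.
		 */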
		value = vgic->nr_spis + VGIC_NR_PRIVATE_IRQS;
		value = (value >> 5) - 1;
		if (vgic_has_its(vcpu->kvm)) {
			value |= (INTERRUPT_ID_BITS_ITS - 1) << 19;
			value |= GICD_TYPER_LPIS;
		} else {
			value |= (INTERRUPT_ID_BITS_SPIS - 1) << 19;
		}
		break;
	case GICD_TYPER2:
		if (kvm_vgic_global_state.has_gicv4_1 && gic_cpuif_has_vsgi())
			value = GICD_TYPER2_nASSGIcap;
		break;
	case GICD_IIDR:
		value = (PRODUCT_ID_KVM << GICD_IIDR_PRODUCT_ID_SHIFT) |
			(vgic->implementation_rev << GICD_IIDR_REVISION_SHIFT) |
			(IMPLEMENTER_ARM << GICD_IIDR_IMPLEMENTER_SHIFT);
		break;
	default:
		return 0;
	}

	return value;
}

static void vgic_mmio_write_v3_misc(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    unsigned long val)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	switch (addr & 0x0c) {
	case GICD_CTLR: {
		bool was_enabled, is_hwsgi;

		mutex_lock(&vcpu->kvm->lock);

		was_enabled = dist->enabled;
		is_hwsgi = dist->nassgireq;

		dist->enabled = val & GICD_CTLR_ENABLE_SS_G1;

		/* Not a GICv4.1? No HW SGIs */
		if (!kvm_vgic_global_state.has_gicv4_1 || !gic_cpuif_has_vsgi())
			val &= ~GICD_CTLR_nASSGIreq;

		/* Dist stays enabled? nASSGIreq is RO */
		if (was_enabled && dist->enabled) {
			val &= ~GICD_CTLR_nASSGIreq;
			val |= FIELD_PREP(GICD_CTLR_nASSGIreq, is_hwsgi);
		}

		/* Switching HW SGIs? */
		dist->nassgireq = val & GICD_CTLR_nASSGIreq;
		if (is_hwsgi != dist->nassgireq)
			vgic_v4_configure_vsgis(vcpu->kvm);

		if (kvm_vgic_global_state.has_gicv4_1 &&
		    was_enabled != dist->enabled)
			kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_RELOAD_GICv4);
		else if (!was_enabled && dist->enabled)
			vgic_kick_vcpus(vcpu->kvm);

		mutex_unlock(&vcpu->kvm->lock);
		break;
	}
	case GICD_TYPER:
	case GICD_TYPER2:
	case GICD_IIDR:
		/* This is at best for documentation purposes... */
		return;
	}
}

static int vgic_mmio_uaccess_write_v3_misc(struct kvm_vcpu *vcpu,
					   gpa_t addr, unsigned int len,
					   unsigned long val)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	switch (addr & 0x0c) {
	case GICD_TYPER2:
	case GICD_IIDR:
		if (val != vgic_mmio_read_v3_misc(vcpu, addr, len))
			return -EINVAL;
		return 0;
	case GICD_CTLR:
		/* Not a GICv4.1? No HW SGIs */
		if (!kvm_vgic_global_state.has_gicv4_1)
			val &= ~GICD_CTLR_nASSGIreq;

		dist->enabled = val & GICD_CTLR_ENABLE_SS_G1;
		dist->nassgireq = val & GICD_CTLR_nASSGIreq;
		return 0;
	}

	vgic_mmio_write_v3_misc(vcpu, addr, len, val);
	return 0;
}

static unsigned long vgic_mmio_read_irouter(struct kvm_vcpu *vcpu,
					    gpa_t addr, unsigned int len)
{
	int intid = VGIC_ADDR_TO_INTID(addr, 64);
	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid);
	unsigned long ret = 0;

	if (!irq)
		return 0;

	/* The upper word is RAZ for us. */
	if (!(addr & 4))
		ret = extract_bytes(READ_ONCE(irq->mpidr), addr & 7, len);

	vgic_put_irq(vcpu->kvm, irq);
	return ret;
}

static void vgic_mmio_write_irouter(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    unsigned long val)
{
	int intid = VGIC_ADDR_TO_INTID(addr, 64);
	struct vgic_irq *irq;
	unsigned long flags;

	/* The upper word is WI for us since we don't implement Aff3. */
	if (addr & 4)
		return;

	irq = vgic_get_irq(vcpu->kvm, NULL, intid);

	if (!irq)
		return;

	raw_spin_lock_irqsave(&irq->irq_lock, flags);

	/* We only care about and preserve Aff0, Aff1 and Aff2. */
	irq->mpidr = val & GENMASK(23, 0);
	irq->target_vcpu = kvm_mpidr_to_vcpu(vcpu->kvm, irq->mpidr);

	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
	vgic_put_irq(vcpu->kvm, irq);
}

static unsigned long vgic_mmio_read_v3r_ctlr(struct kvm_vcpu *vcpu,
					     gpa_t addr, unsigned int len)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	return vgic_cpu->lpis_enabled ? GICR_CTLR_ENABLE_LPIS : 0;
}

static void vgic_mmio_write_v3r_ctlr(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	bool was_enabled = vgic_cpu->lpis_enabled;

	if (!vgic_has_its(vcpu->kvm))
		return;

	vgic_cpu->lpis_enabled = val & GICR_CTLR_ENABLE_LPIS;

	if (was_enabled && !vgic_cpu->lpis_enabled) {
		vgic_flush_pending_lpis(vcpu);
		vgic_its_invalidate_cache(vcpu->kvm);
	}

	if (!was_enabled && vgic_cpu->lpis_enabled)
		vgic_enable_lpis(vcpu);
}

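/*
 * Compute GICR_TYPER.Last: a redistributor is "last" if it is the
 * final one of its redistributor region and no other region starts
 * contiguously after it.
 */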
static bool vgic_mmio_vcpu_rdist_is_last(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *vgic = &vcpu->kvm->arch.vgic;
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_redist_region *iter, *rdreg = vgic_cpu->rdreg;

	if (!rdreg)
		return false;

	if (vgic_cpu->rdreg_index < rdreg->free_index - 1) {
		return false;
	} else if (rdreg->count && vgic_cpu->rdreg_index == (rdreg->count - 1)) {
		struct list_head *rd_regions = &vgic->rd_regions;
		gpa_t end = rdreg->base + rdreg->count * KVM_VGIC_V3_REDIST_SIZE;

		/*
		 * The rdist is the last one of its redist region;
		 * check that no other rdist region starts contiguously
		 * after it.
		 */
		list_for_each_entry(iter, rd_regions, list) {
			if (iter->base == end && iter->free_index > 0)
				return false;
		}
	}
	return true;
}

static unsigned long vgic_mmio_read_v3r_typer(struct kvm_vcpu *vcpu,
					      gpa_t addr, unsigned int len)
{
	unsigned long mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
	int target_vcpu_id = vcpu->vcpu_id;
	u64 value;

	value = (u64)(mpidr & GENMASK(23, 0)) << 32;
	value |= ((target_vcpu_id & 0xffff) << 8);

	if (vgic_has_its(vcpu->kvm))
		value |= GICR_TYPER_PLPIS;

	if (vgic_mmio_vcpu_rdist_is_last(vcpu))
		value |= GICR_TYPER_LAST;

	return extract_bytes(value, addr & 7, len);
}

static unsigned long vgic_mmio_read_v3r_iidr(struct kvm_vcpu *vcpu,
					     gpa_t addr, unsigned int len)
{
	return (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
}

static unsigned long vgic_mmio_read_v3_idregs(struct kvm_vcpu *vcpu,
					      gpa_t addr, unsigned int len)
{
	switch (addr & 0xffff) {
	case GICD_PIDR2:
		/* report a GICv3 compliant implementation */
		return 0x3b;
	}

	return 0;
}

static unsigned long vgic_v3_uaccess_read_pending(struct kvm_vcpu *vcpu,
						  gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/*
	 * The pending state of an interrupt is latched in the
	 * pending_latch variable.  Userspace saves and restores the
	 * pending state and line_level separately.
	 * Refer to Documentation/virt/kvm/devices/arm-vgic-v3.rst
	 * for the handling of ISPENDR and ICPENDR.
	 */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		bool state = irq->pending_latch;

		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
			int err;

			err = irq_get_irqchip_state(irq->host_irq,
						    IRQCHIP_STATE_PENDING,
						    &state);
			WARN_ON(err);
		}

		if (state)
			value |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

static int vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu,
					 gpa_t addr, unsigned int len,
					 unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		/*
		 * pending_latch is set irrespective of irq type
		 * (level or edge) so that the VM is not required to
		 * restore the irq config before the pending info.
		 */
		irq->pending_latch = test_bit(i, &val);

		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
			irq_set_irqchip_state(irq->host_irq,
					      IRQCHIP_STATE_PENDING,
					      irq->pending_latch);
			irq->pending_latch = false;
		}

		if (irq->pending_latch)
			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		else
			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return 0;
}

/* We want to avoid outer shareable. */
u64 vgic_sanitise_shareability(u64 field)
{
	switch (field) {
	case GIC_BASER_OuterShareable:
		return GIC_BASER_InnerShareable;
	default:
		return field;
	}
}

/* Avoid any inner non-cacheable mapping. */
u64 vgic_sanitise_inner_cacheability(u64 field)
{
	switch (field) {
	case GIC_BASER_CACHE_nCnB:
	case GIC_BASER_CACHE_nC:
		return GIC_BASER_CACHE_RaWb;
	default:
		return field;
	}
}

/* Non-cacheable or same-as-inner are OK. */
u64 vgic_sanitise_outer_cacheability(u64 field)
{
	switch (field) {
	case GIC_BASER_CACHE_SameAsInner:
	case GIC_BASER_CACHE_nC:
		return field;
	default:
		return GIC_BASER_CACHE_SameAsInner;
	}
}

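/* Apply @sanitise_fn to the field of @reg selected by @field_mask/@field_shift. */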
u64 vgic_sanitise_field(u64 reg, u64 field_mask, int field_shift,
			u64 (*sanitise_fn)(u64))
{
	u64 field = (reg & field_mask) >> field_shift;

	field = sanitise_fn(field) << field_shift;
	return (reg & ~field_mask) | field;
}

#define PROPBASER_RES0_MASK						\
	(GENMASK_ULL(63, 59) | GENMASK_ULL(55, 52) | GENMASK_ULL(6, 5))
#define PENDBASER_RES0_MASK						\
	(BIT_ULL(63) | GENMASK_ULL(61, 59) | GENMASK_ULL(55, 52) |	\
	 GENMASK_ULL(15, 12) | GENMASK_ULL(6, 0))

static u64 vgic_sanitise_pendbaser(u64 reg)
{
	reg = vgic_sanitise_field(reg, GICR_PENDBASER_SHAREABILITY_MASK,
				  GICR_PENDBASER_SHAREABILITY_SHIFT,
				  vgic_sanitise_shareability);
	reg = vgic_sanitise_field(reg, GICR_PENDBASER_INNER_CACHEABILITY_MASK,
				  GICR_PENDBASER_INNER_CACHEABILITY_SHIFT,
				  vgic_sanitise_inner_cacheability);
	reg = vgic_sanitise_field(reg, GICR_PENDBASER_OUTER_CACHEABILITY_MASK,
				  GICR_PENDBASER_OUTER_CACHEABILITY_SHIFT,
				  vgic_sanitise_outer_cacheability);

	reg &= ~PENDBASER_RES0_MASK;

	return reg;
}

static u64 vgic_sanitise_propbaser(u64 reg)
{
	reg = vgic_sanitise_field(reg, GICR_PROPBASER_SHAREABILITY_MASK,
				  GICR_PROPBASER_SHAREABILITY_SHIFT,
				  vgic_sanitise_shareability);
	reg = vgic_sanitise_field(reg, GICR_PROPBASER_INNER_CACHEABILITY_MASK,
				  GICR_PROPBASER_INNER_CACHEABILITY_SHIFT,
				  vgic_sanitise_inner_cacheability);
	reg = vgic_sanitise_field(reg, GICR_PROPBASER_OUTER_CACHEABILITY_MASK,
				  GICR_PROPBASER_OUTER_CACHEABILITY_SHIFT,
				  vgic_sanitise_outer_cacheability);

	reg &= ~PROPBASER_RES0_MASK;
	return reg;
}

static unsigned long vgic_mmio_read_propbase(struct kvm_vcpu *vcpu,
					     gpa_t addr, unsigned int len)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return extract_bytes(dist->propbaser, addr & 7, len);
}

static void vgic_mmio_write_propbase(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	u64 old_propbaser, propbaser;

	/* Storing a value with LPIs already enabled is undefined */
	if (vgic_cpu->lpis_enabled)
		return;

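	/*
	 * Lockless update: fold the written bytes into the current value,
	 * sanitise the result and retry if another writer raced with us.
	 */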
	do {
		old_propbaser = READ_ONCE(dist->propbaser);
		propbaser = old_propbaser;
		propbaser = update_64bit_reg(propbaser, addr & 4, len, val);
		propbaser = vgic_sanitise_propbaser(propbaser);
	} while (cmpxchg64(&dist->propbaser, old_propbaser,
			   propbaser) != old_propbaser);
}

static unsigned long vgic_mmio_read_pendbase(struct kvm_vcpu *vcpu,
					     gpa_t addr, unsigned int len)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	u64 value = vgic_cpu->pendbaser;

	value &= ~GICR_PENDBASER_PTZ;

	return extract_bytes(value, addr & 7, len);
}

static void vgic_mmio_write_pendbase(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	u64 old_pendbaser, pendbaser;

	/* Storing a value with LPIs already enabled is undefined */
	if (vgic_cpu->lpis_enabled)
		return;

	do {
		old_pendbaser = READ_ONCE(vgic_cpu->pendbaser);
		pendbaser = old_pendbaser;
		pendbaser = update_64bit_reg(pendbaser, addr & 4, len, val);
		pendbaser = vgic_sanitise_pendbaser(pendbaser);
	} while (cmpxchg64(&vgic_cpu->pendbaser, old_pendbaser,
			   pendbaser) != old_pendbaser);
}

/*
 * The GICv3 per-IRQ registers are split to control PPIs and SGIs in the
 * redistributors, while SPIs are covered by registers in the distributor
 * block. Trying to set private IRQs in this block gets ignored.
 * We take some special care here to fix the calculation of the register
 * offset.
 */
#define REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(off, rd, wr, ur, uw, bpi, acc) \
	{								\
		.reg_offset = off,					\
		.bits_per_irq = bpi,					\
		.len = (bpi * VGIC_NR_PRIVATE_IRQS) / 8,		\
		.access_flags = acc,					\
		.read = vgic_mmio_read_raz,				\
		.write = vgic_mmio_write_wi,				\
	}, {								\
		.reg_offset = off + (bpi * VGIC_NR_PRIVATE_IRQS) / 8,	\
		.bits_per_irq = bpi,					\
		.len = (bpi * (1024 - VGIC_NR_PRIVATE_IRQS)) / 8,	\
		.access_flags = acc,					\
		.read = rd,						\
		.write = wr,						\
		.uaccess_read = ur,					\
		.uaccess_write = uw,					\
	}
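
/*
 * For example, with GICD_ISENABLER (one bit per interrupt) the first
 * entry covers the 32 private interrupts as RAZ/WI and the second one
 * covers the remaining 992 SPIs with the real accessors.
 */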

static const struct vgic_register_region vgic_v3_dist_registers[] = {
	REGISTER_DESC_WITH_LENGTH_UACCESS(GICD_CTLR,
		vgic_mmio_read_v3_misc, vgic_mmio_write_v3_misc,
		NULL, vgic_mmio_uaccess_write_v3_misc,
		16, VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICD_STATUSR,
		vgic_mmio_read_rao, vgic_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IGROUPR,
		vgic_mmio_read_group, vgic_mmio_write_group, NULL, NULL, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISENABLER,
		vgic_mmio_read_enable, vgic_mmio_write_senable,
		NULL, vgic_uaccess_write_senable, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICENABLER,
		vgic_mmio_read_enable, vgic_mmio_write_cenable,
		NULL, vgic_uaccess_write_cenable, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISPENDR,
		vgic_mmio_read_pending, vgic_mmio_write_spending,
		vgic_v3_uaccess_read_pending, vgic_v3_uaccess_write_pending, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICPENDR,
		vgic_mmio_read_pending, vgic_mmio_write_cpending,
		vgic_mmio_read_raz, vgic_mmio_uaccess_write_wi, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISACTIVER,
		vgic_mmio_read_active, vgic_mmio_write_sactive,
		vgic_uaccess_read_active, vgic_mmio_uaccess_write_sactive, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICACTIVER,
		vgic_mmio_read_active, vgic_mmio_write_cactive,
		vgic_uaccess_read_active, vgic_mmio_uaccess_write_cactive,
		1, VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IPRIORITYR,
		vgic_mmio_read_priority, vgic_mmio_write_priority, NULL, NULL,
		8, VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ITARGETSR,
		vgic_mmio_read_raz, vgic_mmio_write_wi, NULL, NULL, 8,
		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICFGR,
		vgic_mmio_read_config, vgic_mmio_write_config, NULL, NULL, 2,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IGRPMODR,
		vgic_mmio_read_raz, vgic_mmio_write_wi, NULL, NULL, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IROUTER,
		vgic_mmio_read_irouter, vgic_mmio_write_irouter, NULL, NULL, 64,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICD_IDREGS,
		vgic_mmio_read_v3_idregs, vgic_mmio_write_wi, 48,
		VGIC_ACCESS_32bit),
};

static const struct vgic_register_region vgic_v3_rd_registers[] = {
	/* RD_base registers */
	REGISTER_DESC_WITH_LENGTH(GICR_CTLR,
		vgic_mmio_read_v3r_ctlr, vgic_mmio_write_v3r_ctlr, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_STATUSR,
		vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_IIDR,
		vgic_mmio_read_v3r_iidr, vgic_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH_UACCESS(GICR_TYPER,
		vgic_mmio_read_v3r_typer, vgic_mmio_write_wi,
		NULL, vgic_mmio_uaccess_write_wi, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_WAKER,
		vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_PROPBASER,
		vgic_mmio_read_propbase, vgic_mmio_write_propbase, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_PENDBASER,
		vgic_mmio_read_pendbase, vgic_mmio_write_pendbase, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_IDREGS,
		vgic_mmio_read_v3_idregs, vgic_mmio_write_wi, 48,
		VGIC_ACCESS_32bit),
	/* SGI_base registers */
	REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_IGROUPR0,
		vgic_mmio_read_group, vgic_mmio_write_group, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ISENABLER0,
		vgic_mmio_read_enable, vgic_mmio_write_senable,
		NULL, vgic_uaccess_write_senable, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ICENABLER0,
		vgic_mmio_read_enable, vgic_mmio_write_cenable,
		NULL, vgic_uaccess_write_cenable, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ISPENDR0,
		vgic_mmio_read_pending, vgic_mmio_write_spending,
		vgic_v3_uaccess_read_pending, vgic_v3_uaccess_write_pending, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ICPENDR0,
		vgic_mmio_read_pending, vgic_mmio_write_cpending,
		vgic_mmio_read_raz, vgic_mmio_uaccess_write_wi, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ISACTIVER0,
		vgic_mmio_read_active, vgic_mmio_write_sactive,
		vgic_uaccess_read_active, vgic_mmio_uaccess_write_sactive, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ICACTIVER0,
		vgic_mmio_read_active, vgic_mmio_write_cactive,
		vgic_uaccess_read_active, vgic_mmio_uaccess_write_cactive, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_IPRIORITYR0,
		vgic_mmio_read_priority, vgic_mmio_write_priority, 32,
		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_ICFGR0,
		vgic_mmio_read_config, vgic_mmio_write_config, 8,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_IGRPMODR0,
		vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_NSACR,
		vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
};

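/*
 * Initialise the distributor iodev and return the size of its MMIO
 * window (a single 64K frame).
 */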
unsigned int vgic_v3_init_dist_iodev(struct vgic_io_device *dev)
{
	dev->regions = vgic_v3_dist_registers;
	dev->nr_regions = ARRAY_SIZE(vgic_v3_dist_registers);

	kvm_iodevice_init(&dev->dev, &kvm_io_gic_ops);

	return SZ_64K;
}

/**
 * vgic_register_redist_iodev - register a single redist iodev
 * @vcpu: The VCPU to which the redistributor belongs
 *
 * Register a KVM iodev for this VCPU's redistributor using the address
 * provided.
 *
 * Return 0 on success, -ERRNO otherwise.
 */
int vgic_register_redist_iodev(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	struct vgic_dist *vgic = &kvm->arch.vgic;
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_io_device *rd_dev = &vcpu->arch.vgic_cpu.rd_iodev;
	struct vgic_redist_region *rdreg;
	gpa_t rd_base;
	int ret;

	if (!IS_VGIC_ADDR_UNDEF(vgic_cpu->rd_iodev.base_addr))
		return 0;

	/*
	 * We may be creating VCPUs before having set the base address for the
	 * redistributor region, in which case we will come back to this
	 * function for all VCPUs when the base address is set.  Just return
	 * without doing any work for now.
	 */
	rdreg = vgic_v3_rdist_free_slot(&vgic->rd_regions);
	if (!rdreg)
		return 0;

	if (!vgic_v3_check_base(kvm))
		return -EINVAL;

	vgic_cpu->rdreg = rdreg;
	vgic_cpu->rdreg_index = rdreg->free_index;

	rd_base = rdreg->base + rdreg->free_index * KVM_VGIC_V3_REDIST_SIZE;

	kvm_iodevice_init(&rd_dev->dev, &kvm_io_gic_ops);
	rd_dev->base_addr = rd_base;
	rd_dev->iodev_type = IODEV_REDIST;
	rd_dev->regions = vgic_v3_rd_registers;
	rd_dev->nr_regions = ARRAY_SIZE(vgic_v3_rd_registers);
	rd_dev->redist_vcpu = vcpu;

	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, rd_base,
				      2 * SZ_64K, &rd_dev->dev);
	mutex_unlock(&kvm->slots_lock);

	if (ret)
		return ret;

	rdreg->free_index++;
	return 0;
}

void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu)
{
	struct vgic_io_device *rd_dev = &vcpu->arch.vgic_cpu.rd_iodev;

	kvm_io_bus_unregister_dev(vcpu->kvm, KVM_MMIO_BUS, &rd_dev->dev);
}

static int vgic_register_all_redist_iodevs(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int c, ret = 0;

	kvm_for_each_vcpu(c, vcpu, kvm) {
		ret = vgic_register_redist_iodev(vcpu);
		if (ret)
			break;
	}

	if (ret) {
		/* The current c failed, so iterate over the previous ones. */
		int i;

		mutex_lock(&kvm->slots_lock);
		for (i = 0; i < c; i++) {
			vcpu = kvm_get_vcpu(kvm, i);
			vgic_unregister_redist_iodev(vcpu);
		}
		mutex_unlock(&kvm->slots_lock);
	}

	return ret;
}

/**
 * vgic_v3_alloc_redist_region - Allocate a new redistributor region
 *
 * Performs various checks before inserting the rdist region in the list.
 * Those tests depend on whether the size of the rdist region is known
 * (i.e. count != 0). The list is sorted by rdist region index.
 *
 * @kvm: kvm handle
 * @index: redist region index
 * @base: base of the new rdist region
 * @count: number of redistributors the region is made of (0 in the old style
 * single region, whose size is induced from the number of vcpus)
 *
 * Return 0 on success, < 0 otherwise
 */
static int vgic_v3_alloc_redist_region(struct kvm *kvm, uint32_t index,
				       gpa_t base, uint32_t count)
{
	struct vgic_dist *d = &kvm->arch.vgic;
	struct vgic_redist_region *rdreg;
	struct list_head *rd_regions = &d->rd_regions;
	int nr_vcpus = atomic_read(&kvm->online_vcpus);
	size_t size = count ? count * KVM_VGIC_V3_REDIST_SIZE
			    : nr_vcpus * KVM_VGIC_V3_REDIST_SIZE;
	int ret;

	/* Does the region wrap around the end of the address space? */
	if (base + size < base)
		return -EINVAL;

	if (list_empty(rd_regions)) {
		if (index != 0)
			return -EINVAL;
	} else {
		rdreg = list_last_entry(rd_regions,
					struct vgic_redist_region, list);

		/* Don't mix single region and discrete redist regions */
		if (!count && rdreg->count)
			return -EINVAL;

		if (!count)
			return -EEXIST;

		if (index != rdreg->index + 1)
			return -EINVAL;
	}

	/*
	 * For legacy single-region redistributor regions (!count),
	 * check that the redistributor region does not overlap with the
	 * distributor's address space.
	 */
	if (!count && !IS_VGIC_ADDR_UNDEF(d->vgic_dist_base) &&
	    vgic_dist_overlap(kvm, base, size))
		return -EINVAL;

	/* collision with any other rdist region? */
	if (vgic_v3_rdist_overlap(kvm, base, size))
		return -EINVAL;

	rdreg = kzalloc(sizeof(*rdreg), GFP_KERNEL_ACCOUNT);
	if (!rdreg)
		return -ENOMEM;

	rdreg->base = VGIC_ADDR_UNDEF;

	ret = vgic_check_iorange(kvm, rdreg->base, base, SZ_64K, size);
	if (ret)
		goto free;

	rdreg->base = base;
	rdreg->count = count;
	rdreg->free_index = 0;
	rdreg->index = index;

	list_add_tail(&rdreg->list, rd_regions);
	return 0;
free:
	kfree(rdreg);
	return ret;
}

void vgic_v3_free_redist_region(struct vgic_redist_region *rdreg)
{
	list_del(&rdreg->list);
	kfree(rdreg);
}

int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count)
{
	int ret;

	ret = vgic_v3_alloc_redist_region(kvm, index, addr, count);
	if (ret)
		return ret;

	/*
	 * Register iodevs for each existing VCPU.  Adding more VCPUs
	 * afterwards will register the iodevs when needed.
	 */
	ret = vgic_register_all_redist_iodevs(kvm);
	if (ret) {
		struct vgic_redist_region *rdreg;

		rdreg = vgic_v3_rdist_region_from_index(kvm, index);
		vgic_v3_free_redist_region(rdreg);
		return ret;
	}

	return 0;
}

int vgic_v3_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	const struct vgic_register_region *region;
	struct vgic_io_device iodev;
	struct vgic_reg_attr reg_attr;
	struct kvm_vcpu *vcpu;
	gpa_t addr;
	int ret;

	ret = vgic_v3_parse_attr(dev, attr, &reg_attr);
	if (ret)
		return ret;

	vcpu = reg_attr.vcpu;
	addr = reg_attr.addr;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		iodev.regions = vgic_v3_dist_registers;
		iodev.nr_regions = ARRAY_SIZE(vgic_v3_dist_registers);
		iodev.base_addr = 0;
		break;
	case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS: {
		iodev.regions = vgic_v3_rd_registers;
		iodev.nr_regions = ARRAY_SIZE(vgic_v3_rd_registers);
		iodev.base_addr = 0;
		break;
	}
	case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS: {
		u64 reg, id;

		id = (attr->attr & KVM_DEV_ARM_VGIC_SYSREG_INSTR_MASK);
		return vgic_v3_has_cpu_sysregs_attr(vcpu, 0, id, &reg);
	}
	default:
		return -ENXIO;
	}

	/* We only support aligned 32-bit accesses. */
	if (addr & 3)
		return -ENXIO;

	region = vgic_get_mmio_region(vcpu, &iodev, addr, sizeof(u32));
	if (!region)
		return -ENXIO;

	return 0;
}

/*
 * Compare a given affinity (level 1-3 and a level 0 mask, from the SGI
 * generation register ICC_SGI1R_EL1) with a given VCPU.
 * If the VCPU's MPIDR matches, return the level0 affinity, otherwise
 * return -1.
 */
static int match_mpidr(u64 sgi_aff, u16 sgi_cpu_mask, struct kvm_vcpu *vcpu)
{
	unsigned long affinity;
	int level0;

	/*
	 * Split the current VCPU's MPIDR into affinity level 0 and the
	 * rest as this is what we have to compare against.
	 */
	affinity = kvm_vcpu_get_mpidr_aff(vcpu);
	level0 = MPIDR_AFFINITY_LEVEL(affinity, 0);
	affinity &= ~MPIDR_LEVEL_MASK;

	/* bail out if the upper three levels don't match */
	if (sgi_aff != affinity)
		return -1;

	/* Is this VCPU's bit set in the mask? */
	if (!(sgi_cpu_mask & BIT(level0)))
		return -1;

	return level0;
}

/*
 * The ICC_SGI* registers encode the affinity differently from the MPIDR,
 * so provide a wrapper to use the existing defines to isolate a certain
 * affinity level.
 */
#define SGI_AFFINITY_LEVEL(reg, level) \
	((((reg) & ICC_SGI1R_AFFINITY_## level ##_MASK) \
	>> ICC_SGI1R_AFFINITY_## level ##_SHIFT) << MPIDR_LEVEL_SHIFT(level))
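/* e.g. SGI_AFFINITY_LEVEL(reg, 3) places the Aff3 field of @reg at its MPIDR position */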

/**
 * vgic_v3_dispatch_sgi - handle SGI requests from VCPUs
 * @vcpu: The VCPU requesting a SGI
 * @reg: The value written into ICC_{ASGI1,SGI0,SGI1}R by that VCPU
 * @allow_group1: Does the sysreg access allow generation of G1 SGIs
 *
 * With GICv3 (and ARE=1) CPUs trigger SGIs by writing to a system register.
 * This will trap in sys_regs.c and call this function.
 * This ICC_SGI1R_EL1 register contains the upper three affinity levels of the
 * target processors as well as a bitmask of 16 Aff0 CPUs.
 * If the interrupt routing mode bit is not set, we iterate over all VCPUs to
 * check for matching ones. If this bit is set, we signal all, but not the
 * calling VCPU.
 */
void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_vcpu *c_vcpu;
	u16 target_cpus;
	u64 mpidr;
	int sgi, c;
	int vcpu_id = vcpu->vcpu_id;
	bool broadcast;
	unsigned long flags;

	sgi = (reg & ICC_SGI1R_SGI_ID_MASK) >> ICC_SGI1R_SGI_ID_SHIFT;
	broadcast = reg & BIT_ULL(ICC_SGI1R_IRQ_ROUTING_MODE_BIT);
	target_cpus = (reg & ICC_SGI1R_TARGET_LIST_MASK) >> ICC_SGI1R_TARGET_LIST_SHIFT;
	mpidr = SGI_AFFINITY_LEVEL(reg, 3);
	mpidr |= SGI_AFFINITY_LEVEL(reg, 2);
	mpidr |= SGI_AFFINITY_LEVEL(reg, 1);

	/*
	 * We iterate over all VCPUs to find the MPIDRs matching the request.
	 * If we have handled one CPU, we clear its bit to detect early
	 * whether we are already finished.  This avoids iterating through
	 * all VCPUs when most of the time we just signal a single VCPU.
	 */
	kvm_for_each_vcpu(c, c_vcpu, kvm) {
		struct vgic_irq *irq;

		/* Exit early if we have dealt with all requested CPUs */
		if (!broadcast && target_cpus == 0)
			break;

		/* Don't signal the calling VCPU */
		if (broadcast && c == vcpu_id)
			continue;

		if (!broadcast) {
			int level0;

			level0 = match_mpidr(mpidr, target_cpus, c_vcpu);
			if (level0 == -1)
				continue;

			/* remove this matching VCPU from the mask */
			target_cpus &= ~BIT(level0);
		}

		irq = vgic_get_irq(vcpu->kvm, c_vcpu, sgi);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		/*
		 * An access targeting Group0 SGIs can only generate
		 * those, while an access targeting Group1 SGIs can
		 * generate interrupts of either group.
		 */
		if (!irq->group || allow_group1) {
			if (!irq->hw) {
				irq->pending_latch = true;
				vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
			} else {
				/* HW SGI? Ask the GIC to inject it */
				int err;

				err = irq_set_irqchip_state(irq->host_irq,
							    IRQCHIP_STATE_PENDING,
							    true);
				WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
				raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
			}
		} else {
			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		}

		vgic_put_irq(vcpu->kvm, irq);
	}
}

int vgic_v3_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
			 int offset, u32 *val)
{
	struct vgic_io_device dev = {
		.regions = vgic_v3_dist_registers,
		.nr_regions = ARRAY_SIZE(vgic_v3_dist_registers),
	};

	return vgic_uaccess(vcpu, &dev, is_write, offset, val);
}

int vgic_v3_redist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
			   int offset, u32 *val)
{
	struct vgic_io_device rd_dev = {
		.regions = vgic_v3_rd_registers,
		.nr_regions = ARRAY_SIZE(vgic_v3_rd_registers),
	};

	return vgic_uaccess(vcpu, &rd_dev, is_write, offset, val);
}

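/* Line-level state is transferred to/from userspace 32 INTIDs at a time. */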
int vgic_v3_line_level_info_uaccess(struct kvm_vcpu *vcpu, bool is_write,
				    u32 intid, u64 *val)
{
	if (intid % 32)
		return -EINVAL;

	if (is_write)
		vgic_write_irq_line_level_info(vcpu, intid, *val);
	else
		*val = vgic_read_irq_line_level_info(vcpu, intid);

	return 0;
}