// SPDX-License-Identifier: GPL-2.0-only
/*
 * VGIC MMIO handling functions
 */

#include <linux/bitops.h>
#include <linux/bsearch.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/iodev.h>
#include <kvm/arm_arch_timer.h>
#include <kvm/arm_vgic.h>

#include "vgic.h"
#include "vgic-mmio.h"

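/*
 * Generic accessors for registers (or register bits) with fixed
 * behaviour: read-as-zero (RAZ), read-as-one (RAO) and write-ignore (WI).
 */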
unsigned long vgic_mmio_read_raz(struct kvm_vcpu *vcpu,
				 gpa_t addr, unsigned int len)
{
	return 0;
}

unsigned long vgic_mmio_read_rao(struct kvm_vcpu *vcpu,
				 gpa_t addr, unsigned int len)
{
	return -1UL;
}

void vgic_mmio_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
			unsigned int len, unsigned long val)
{
	/* Ignore */
}

int vgic_mmio_uaccess_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
			       unsigned int len, unsigned long val)
{
	/* Ignore */
	return 0;
}

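/*
 * Group registers (GICD_IGROUPR and friends): one bit per interrupt,
 * mirroring irq->group, so each 32-bit word covers 32 INTIDs. Writes
 * update the group under the irq_lock and requeue the interrupt so the
 * new group is taken into account.
 */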
unsigned long vgic_mmio_read_group(struct kvm_vcpu *vcpu,
				   gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->group)
			value |= BIT(i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

void vgic_mmio_write_group(struct kvm_vcpu *vcpu, gpa_t addr,
			   unsigned int len, unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->group = !!(val & BIT(i));
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}

/*
 * Read accesses to both GICD_ICENABLER and GICD_ISENABLER return the value
 * of the enabled bit, so there is only one function for both here.
 */
unsigned long vgic_mmio_read_enable(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->enabled)
			value |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

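/*
 * GICD_ISENABLER: a set bit enables the corresponding interrupt; clear
 * bits are ignored. For HW mapped level interrupts the line level is
 * resampled from the physical side, as the device state may have
 * changed while the interrupt was disabled at the VGIC level.
 */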
void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		if (vgic_irq_is_mapped_level(irq)) {
			bool was_high = irq->line_level;

			/*
			 * We need to update the state of the interrupt because
			 * the guest might have changed the state of the device
			 * while the interrupt was disabled at the VGIC level.
			 */
			irq->line_level = vgic_get_phys_line_level(irq);
			/*
			 * Deactivate the physical interrupt so the GIC will let
			 * us know when it is asserted again.
			 */
			if (!irq->active && was_high && !irq->line_level)
				vgic_irq_set_phys_active(irq, false);
		}
		irq->enabled = true;
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}

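/*
 * GICD_ICENABLER: a set bit disables the corresponding interrupt; clear
 * bits are ignored.
 */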
void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		irq->enabled = false;

		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

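/*
 * As with the enable registers, reads of both GICD_ISPENDR and
 * GICD_ICPENDR return the current pending state, so a single read
 * handler serves both.
 */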
unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		unsigned long flags;

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		if (irq_is_pending(irq))
			value |= (1U << i);
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

/*
 * Return the VCPU that performed the MMIO access and trapped from within
 * the VM, or NULL if this is a userspace access.
 *
 * Disabling preemption only around the per-CPU access and using the
 * resolved vcpu pointer after enabling preemption again is safe: even if
 * the current thread is migrated to another CPU, the preempt notifier
 * handlers update the per-CPU variable, so reading it later gives the
 * same value.
 */
static struct kvm_vcpu *vgic_get_mmio_requester_vcpu(void)
{
	struct kvm_vcpu *vcpu;

	preempt_disable();
	vcpu = kvm_arm_get_running_vcpu();
	preempt_enable();
	return vcpu;
}

/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_spending(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
				 bool is_uaccess)
{
	if (is_uaccess)
		return;

	irq->pending_latch = true;
	vgic_irq_set_phys_active(irq, true);
}

static bool is_vgic_v2_sgi(struct kvm_vcpu *vcpu, struct vgic_irq *irq)
{
	return (vgic_irq_is_sgi(irq->intid) &&
		vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2);
}

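/*
 * GICD_ISPENDR: a set bit latches the corresponding interrupt as
 * pending. A guest write to a HW mapped interrupt also sets the
 * physical active state, mirroring what vgic_hw_irq_cpending() undoes
 * below.
 */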
void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	bool is_uaccess = !vgic_get_mmio_requester_vcpu();
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		/* GICD_ISPENDR0 SGI bits are WI */
		if (is_vgic_v2_sgi(vcpu, irq)) {
			vgic_put_irq(vcpu->kvm, irq);
			continue;
		}

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		if (irq->hw)
			vgic_hw_irq_spending(vcpu, irq, is_uaccess);
		else
			irq->pending_latch = true;
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_cpending(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
				 bool is_uaccess)
{
	if (is_uaccess)
		return;

	irq->pending_latch = false;

	/*
	 * We don't want the guest to effectively mask the physical
	 * interrupt by doing a write to SPENDR followed by a write to
	 * CPENDR for HW interrupts, so we clear the active state on
	 * the physical side if the virtual interrupt is not active.
	 * This may lead to taking an additional interrupt on the
	 * host, but that should not be a problem as the worst that
	 * can happen is an additional vgic injection.  We also clear
	 * the pending state to maintain proper semantics for edge HW
	 * interrupts.
	 */
	vgic_irq_set_phys_pending(irq, false);
	if (!irq->active)
		vgic_irq_set_phys_active(irq, false);
}

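/*
 * GICD_ICPENDR: a set bit clears the corresponding pending latch; see
 * vgic_hw_irq_cpending() above for the extra handling of HW mapped
 * interrupts.
 */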
void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	bool is_uaccess = !vgic_get_mmio_requester_vcpu();
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		/* GICD_ICPENDR0 SGI bits are WI */
		if (is_vgic_v2_sgi(vcpu, irq)) {
			vgic_put_irq(vcpu->kvm, irq);
			continue;
		}

		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		if (irq->hw)
			vgic_hw_irq_cpending(vcpu, irq, is_uaccess);
		else
			irq->pending_latch = false;

		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

/*
 * If we are fiddling with an IRQ's active state, we have to make sure the IRQ
 * is not queued on some running VCPU's LRs, because then the change to the
 * active state can be overwritten when the VCPU's state is synced coming back
 * from the guest.
 *
 * For shared interrupts as well as GICv3 private interrupts, we have to
 * stop all the VCPUs because interrupts can be migrated while we don't hold
 * the IRQ locks and we don't want to be chasing moving targets.
 *
 * For GICv2 private interrupts we don't have to do anything because
 * userspace accesses to the VGIC state already require all VCPUs to be
 * stopped, and only the VCPU itself can modify its private interrupts'
 * active state, which guarantees that the VCPU is not running.
 */
static void vgic_access_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
{
	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
	    intid >= VGIC_NR_PRIVATE_IRQS)
		kvm_arm_halt_guest(vcpu->kvm);
}

/* See vgic_access_active_prepare */
static void vgic_access_active_finish(struct kvm_vcpu *vcpu, u32 intid)
{
	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
	    intid >= VGIC_NR_PRIVATE_IRQS)
		kvm_arm_resume_guest(vcpu->kvm);
}

static unsigned long __vgic_mmio_read_active(struct kvm_vcpu *vcpu,
					     gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		/*
		 * Even for HW interrupts, don't evaluate the HW state:
		 * all the guest is interested in is the virtual state.
		 */
		if (irq->active)
			value |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 val;

	mutex_lock(&vcpu->kvm->lock);
	vgic_access_active_prepare(vcpu, intid);

	val = __vgic_mmio_read_active(vcpu, addr, len);

	vgic_access_active_finish(vcpu, intid);
	mutex_unlock(&vcpu->kvm->lock);

	return val;
}

unsigned long vgic_uaccess_read_active(struct kvm_vcpu *vcpu,
				       gpa_t addr, unsigned int len)
{
	return __vgic_mmio_read_active(vcpu, addr, len);
}

/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
				      bool active, bool is_uaccess)
{
	if (is_uaccess)
		return;

	irq->active = active;
	vgic_irq_set_phys_active(irq, active);
}

static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
				    bool active)
{
	unsigned long flags;
	struct kvm_vcpu *requester_vcpu = vgic_get_mmio_requester_vcpu();

	raw_spin_lock_irqsave(&irq->irq_lock, flags);

	if (irq->hw) {
		vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu);
	} else {
		u32 model = vcpu->kvm->arch.vgic.vgic_model;
		u8 active_source;

		irq->active = active;

		/*
		 * The GICv2 architecture indicates that the source CPUID for
		 * an SGI should be provided during an EOI which implies that
		 * the active state is stored somewhere, but at the same time
		 * this state is not architecturally exposed anywhere and we
		 * have no way of knowing the right source.
		 *
		 * This may lead to a VCPU not being able to receive
		 * additional instances of a particular SGI after migration
		 * for a GICv2 VM on some GIC implementations.  Oh well.
		 */
		active_source = (requester_vcpu) ? requester_vcpu->vcpu_id : 0;

		if (model == KVM_DEV_TYPE_ARM_VGIC_V2 &&
		    active && vgic_irq_is_sgi(irq->intid))
			irq->active_source = active_source;
	}

	if (irq->active)
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
	else
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
}

static void __vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len,
				      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		vgic_mmio_change_active(vcpu, irq, false);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);

	mutex_lock(&vcpu->kvm->lock);
	vgic_access_active_prepare(vcpu, intid);

	__vgic_mmio_write_cactive(vcpu, addr, len, val);

	vgic_access_active_finish(vcpu, intid);
	mutex_unlock(&vcpu->kvm->lock);
}

int vgic_mmio_uaccess_write_cactive(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    unsigned long val)
{
	__vgic_mmio_write_cactive(vcpu, addr, len, val);
	return 0;
}

static void __vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len,
				      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		vgic_mmio_change_active(vcpu, irq, true);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);

	mutex_lock(&vcpu->kvm->lock);
	vgic_access_active_prepare(vcpu, intid);

	__vgic_mmio_write_sactive(vcpu, addr, len, val);

	vgic_access_active_finish(vcpu, intid);
	mutex_unlock(&vcpu->kvm->lock);
}

int vgic_mmio_uaccess_write_sactive(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    unsigned long val)
{
	__vgic_mmio_write_sactive(vcpu, addr, len, val);
	return 0;
}

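/*
 * GICD_IPRIORITYR: one byte per interrupt, read straight from the
 * shadowed irq->priority values.
 */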
unsigned long vgic_mmio_read_priority(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	int i;
	u64 val = 0;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		val |= (u64)irq->priority << (i * 8);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return val;
}

/*
 * We currently don't handle changing the priority of an interrupt that
 * is already pending on a VCPU. If there is a need for this, we would
 * need to make this VCPU exit and re-evaluate the priorities, potentially
 * leading to this interrupt getting presented now to the guest (if it was
 * previously masked by the priority mask).
 */
void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	int i;
	unsigned long flags;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		/* Narrow the priority range to what we actually support */
		irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS);
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}

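/*
 * GICD_ICFGR: two bits per interrupt; the upper bit of each field is
 * set for edge triggered interrupts and clear for level triggered ones.
 */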
unsigned long vgic_mmio_read_config(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
	u32 value = 0;
	int i;

	for (i = 0; i < len * 4; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->config == VGIC_CONFIG_EDGE)
			value |= (2U << (i * 2));

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
			    gpa_t addr, unsigned int len,
			    unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
	int i;
	unsigned long flags;

	for (i = 0; i < len * 4; i++) {
		struct vgic_irq *irq;

		/*
		 * The configuration cannot be changed for SGIs in general;
		 * for PPIs this is IMPLEMENTATION DEFINED. The arch timer
		 * code relies on PPIs being level triggered, so we also
		 * make them read-only here.
		 */
		if (intid + i < VGIC_NR_PRIVATE_IRQS)
			continue;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		if (test_bit(i * 2 + 1, &val))
			irq->config = VGIC_CONFIG_EDGE;
		else
			irq->config = VGIC_CONFIG_LEVEL;

		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

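/*
 * Read back the line level of a bank of 32 interrupts starting at
 * intid; SGIs and INTIDs beyond the implemented range read as zero.
 */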
u64 vgic_read_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid)
{
	int i;
	u64 val = 0;
	int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;

	for (i = 0; i < 32; i++) {
		struct vgic_irq *irq;

		if ((intid + i) < VGIC_NR_SGIS || (intid + i) >= nr_irqs)
			continue;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		if (irq->config == VGIC_CONFIG_LEVEL && irq->line_level)
			val |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return val;
}

void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid,
				    const u64 val)
{
	int i;
	int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
	unsigned long flags;

	for (i = 0; i < 32; i++) {
		struct vgic_irq *irq;
		bool new_level;

		if ((intid + i) < VGIC_NR_SGIS || (intid + i) >= nr_irqs)
			continue;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		/*
		 * The line level is set irrespective of the irq type
		 * (level or edge), so that restoring the line level does
		 * not depend on the VM having restored the irq
		 * configuration first.
		 */
		new_level = !!(val & (1U << i));
		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->line_level = new_level;
		if (new_level)
			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		else
			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}

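/*
 * The register region tables are sorted by offset, so a binary search
 * can map an offset to the region covering it.
 */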
static int match_region(const void *key, const void *elt)
{
	const unsigned int offset = (unsigned long)key;
	const struct vgic_register_region *region = elt;

	if (offset < region->reg_offset)
		return -1;

	if (offset >= region->reg_offset + region->len)
		return 1;

	return 0;
}

const struct vgic_register_region *
vgic_find_mmio_region(const struct vgic_register_region *regions,
		      int nr_regions, unsigned int offset)
{
	return bsearch((void *)(uintptr_t)offset, regions, nr_regions,
		       sizeof(regions[0]), match_region);
}

void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_set_vmcr(vcpu, vmcr);
	else
		vgic_v3_set_vmcr(vcpu, vmcr);
}

void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_get_vmcr(vcpu, vmcr);
	else
		vgic_v3_get_vmcr(vcpu, vmcr);
}

/*
 * kvm_mmio_read_buf() returns a value in a format such that, converted to
 * a byte array, it reads exactly as the guest wanted the value to appear
 * in memory had it done the store itself. For the GIC this is LE, as the
 * guest knows the GIC is always LE.
 *
 * We convert this value to the CPU's native format to deal with it as a
 * data value.
 */
unsigned long vgic_data_mmio_bus_to_host(const void *val, unsigned int len)
{
	unsigned long data = kvm_mmio_read_buf(val, len);

	switch (len) {
	case 1:
		return data;
	case 2:
		return le16_to_cpu(data);
	case 4:
		return le32_to_cpu(data);
	default:
		return le64_to_cpu(data);
	}
}

/*
 * kvm_mmio_write_buf() expects a value in a format such that, converted to
 * a byte array, it is observed as the guest would see it if it could
 * perform the load directly.  Since the GIC is LE, and the guest knows
 * this, the guest expects a value in little endian format.
 *
 * We convert the data value from the CPU's native format to LE so that the
 * value is returned in the proper format.
 */
void vgic_data_host_to_mmio_bus(void *buf, unsigned int len,
				unsigned long data)
{
	switch (len) {
	case 1:
		break;
	case 2:
		data = cpu_to_le16(data);
		break;
	case 4:
		data = cpu_to_le32(data);
		break;
	default:
		data = cpu_to_le64(data);
	}

	kvm_mmio_write_buf(buf, len, data);
}

static
struct vgic_io_device *kvm_to_vgic_iodev(const struct kvm_io_device *dev)
{
	return container_of(dev, struct vgic_io_device, dev);
}

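/*
 * Check that a given access is valid for this region: the access width
 * must be one the region supports, the address must be naturally
 * aligned, and per-IRQ regions must not be addressed beyond the number
 * of allocated IRQs.
 */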
static bool check_region(const struct kvm *kvm,
			 const struct vgic_register_region *region,
			 gpa_t addr, int len)
{
	int flags, nr_irqs = kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;

	switch (len) {
	case sizeof(u8):
		flags = VGIC_ACCESS_8bit;
		break;
	case sizeof(u32):
		flags = VGIC_ACCESS_32bit;
		break;
	case sizeof(u64):
		flags = VGIC_ACCESS_64bit;
		break;
	default:
		return false;
	}

	if ((region->access_flags & flags) && IS_ALIGNED(addr, len)) {
		if (!region->bits_per_irq)
			return true;

		/* Do we access a non-allocated IRQ? */
		return VGIC_ADDR_TO_INTID(addr, region->bits_per_irq) < nr_irqs;
	}

	return false;
}

const struct vgic_register_region *
vgic_get_mmio_region(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev,
		     gpa_t addr, int len)
{
	const struct vgic_register_region *region;

	region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions,
				       addr - iodev->base_addr);
	if (!region || !check_region(vcpu->kvm, region, addr, len))
		return NULL;

	return region;
}

static int vgic_uaccess_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			     gpa_t addr, u32 *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	struct kvm_vcpu *r_vcpu;

	region = vgic_get_mmio_region(vcpu, iodev, addr, sizeof(u32));
	if (!region) {
		*val = 0;
		return 0;
	}

	r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
	if (region->uaccess_read)
		*val = region->uaccess_read(r_vcpu, addr, sizeof(u32));
	else
		*val = region->read(r_vcpu, addr, sizeof(u32));

	return 0;
}

static int vgic_uaccess_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			      gpa_t addr, const u32 *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	struct kvm_vcpu *r_vcpu;

	region = vgic_get_mmio_region(vcpu, iodev, addr, sizeof(u32));
	if (!region)
		return 0;

	r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
	if (region->uaccess_write)
		return region->uaccess_write(r_vcpu, addr, sizeof(u32), *val);

	region->write(r_vcpu, addr, sizeof(u32), *val);
	return 0;
}

/*
 * Userland access to VGIC registers.
 */
int vgic_uaccess(struct kvm_vcpu *vcpu, struct vgic_io_device *dev,
		 bool is_write, int offset, u32 *val)
{
	if (is_write)
		return vgic_uaccess_write(vcpu, &dev->dev, offset, val);
	else
		return vgic_uaccess_read(vcpu, &dev->dev, offset, val);
}

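/*
 * In-kernel MMIO dispatch, called from the KVM IO bus: look up the
 * region for the access and hand it to the matching handler. Unmapped
 * offsets are RAZ/WI.
 */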
static int dispatch_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			      gpa_t addr, int len, void *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	unsigned long data = 0;

	region = vgic_get_mmio_region(vcpu, iodev, addr, len);
	if (!region) {
		memset(val, 0, len);
		return 0;
	}

	switch (iodev->iodev_type) {
	case IODEV_CPUIF:
		data = region->read(vcpu, addr, len);
		break;
	case IODEV_DIST:
		data = region->read(vcpu, addr, len);
		break;
	case IODEV_REDIST:
		data = region->read(iodev->redist_vcpu, addr, len);
		break;
	case IODEV_ITS:
		data = region->its_read(vcpu->kvm, iodev->its, addr, len);
		break;
	}

	vgic_data_host_to_mmio_bus(val, len, data);
	return 0;
}

static int dispatch_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			       gpa_t addr, int len, const void *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	unsigned long data = vgic_data_mmio_bus_to_host(val, len);

	region = vgic_get_mmio_region(vcpu, iodev, addr, len);
	if (!region)
		return 0;

	switch (iodev->iodev_type) {
	case IODEV_CPUIF:
		region->write(vcpu, addr, len, data);
		break;
	case IODEV_DIST:
		region->write(vcpu, addr, len, data);
		break;
	case IODEV_REDIST:
		region->write(iodev->redist_vcpu, addr, len, data);
		break;
	case IODEV_ITS:
		region->its_write(vcpu->kvm, iodev->its, addr, len, data);
		break;
	}

	return 0;
}

struct kvm_io_device_ops kvm_io_gic_ops = {
	.read = dispatch_mmio_read,
	.write = dispatch_mmio_write,
};

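/*
 * Register the distributor frame of the given VGIC model with the KVM
 * MMIO bus so that guest accesses trap into the handlers above.
 */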
int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
			     enum vgic_type type)
{
	struct vgic_io_device *io_device = &kvm->arch.vgic.dist_iodev;
	int ret = 0;
	unsigned int len;

	switch (type) {
	case VGIC_V2:
		len = vgic_v2_init_dist_iodev(io_device);
		break;
	case VGIC_V3:
		len = vgic_v3_init_dist_iodev(io_device);
		break;
	default:
		BUG_ON(1);
	}

	io_device->base_addr = dist_base_address;
	io_device->iodev_type = IODEV_DIST;
	io_device->redist_vcpu = NULL;

	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dist_base_address,
				      len, &io_device->dev);
	mutex_unlock(&kvm->slots_lock);

	return ret;
}
948