1 /*
2  * Copyright (C) 2012 ARM Ltd.
3  * Author: Marc Zyngier <marc.zyngier@arm.com>
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License version 2 as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17  */
18 
19 #include <linux/cpu.h>
20 #include <linux/kvm.h>
21 #include <linux/kvm_host.h>
22 #include <linux/interrupt.h>
23 #include <linux/io.h>
24 #include <linux/of.h>
25 #include <linux/of_address.h>
26 #include <linux/of_irq.h>
27 #include <linux/rculist.h>
28 #include <linux/uaccess.h>
29 
30 #include <asm/kvm_emulate.h>
31 #include <asm/kvm_arm.h>
32 #include <asm/kvm_mmu.h>
33 #include <trace/events/kvm.h>
34 #include <asm/kvm.h>
35 #include <kvm/iodev.h>
36 
37 #define CREATE_TRACE_POINTS
38 #include "trace.h"
39 
40 /*
41  * How the whole thing works (courtesy of Christoffer Dall):
42  *
43  * - At any time, the dist->irq_pending_on_cpu is the oracle that knows if
44  *   something is pending on the CPU interface.
45  * - Interrupts that are pending on the distributor are stored in the
46  *   vgic.irq_pending bitmap (this bitmap is updated by both userland
47  *   ioctls and guest mmio ops, and other in-kernel peripherals such as the
48  *   arch. timers).
49  * - Every time the bitmap changes, the irq_pending_on_cpu oracle is
50  *   recalculated.
51  * - To calculate the oracle, we need info for each cpu from
52  *   compute_pending_for_cpu, which considers:
53  *   - PPI: dist->irq_pending & dist->irq_enable
54  *   - SPI: dist->irq_pending & dist->irq_enable & dist->irq_spi_target
55  *   - irq_spi_target is a 'formatted' version of the GICD_ITARGETSRn
56  *     registers, stored on each vcpu. We only keep one bit of
57  *     information per interrupt, making sure that only one vcpu can
58  *     accept the interrupt.
59  * - If any of the above state changes, we must recalculate the oracle.
60  * - The same is true when injecting an interrupt, except that we only
61  *   consider a single interrupt at a time. The irq_spi_cpu array
62  *   contains the target CPU for each SPI.
63  *
64  * The handling of level interrupts adds some extra complexity. We
65  * need to track when the interrupt has been EOIed, so we can sample
66  * the 'line' again. This is achieved as follows:
67  *
68  * - When a level interrupt is moved onto a vcpu, the corresponding
69  *   bit in irq_queued is set. As long as this bit is set, the line
70  *   will be ignored for further interrupts. The interrupt is injected
71  *   into the vcpu with the GICH_LR_EOI bit set (generate a
72  *   maintenance interrupt on EOI).
73  * - When the interrupt is EOIed, the maintenance interrupt fires,
74  *   and clears the corresponding bit in irq_queued. This allows the
75  *   interrupt line to be sampled again.
76  * - Note that level-triggered interrupts can also be set to pending from
77  *   writes to GICD_ISPENDRn and lowering the external input line does not
78  *   cause the interrupt to become inactive in such a situation.
79  *   Conversely, writes to GICD_ICPENDRn do not cause the interrupt to become
80  *   inactive as long as the external input line is held high.
81  *
82  *
83  * Initialization rules: there are multiple stages to the vgic
84  * initialization, both for the distributor and the CPU interfaces.
85  *
86  * Distributor:
87  *
88  * - kvm_vgic_early_init(): initialization of static data that doesn't
89  *   depend on any sizing information or emulation type. No allocation
90  *   is allowed there.
91  *
92  * - vgic_init(): allocation and initialization of the generic data
93  *   structures that depend on sizing information (number of CPUs,
94  *   number of interrupts). Also initializes the vcpu specific data
95  *   structures. Can be executed lazily for GICv2.
96  *   [to be renamed to kvm_vgic_init??]
97  *
98  * CPU Interface:
99  *
100  * - kvm_vgic_cpu_early_init(): initialization of static data that
101  *   doesn't depend on any sizing information or emulation type. No
102  *   allocation is allowed there.
103  */
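/*
 * Editor's sketch (not part of the original source) of the oracle
 * recomputation described above, using the helpers defined later in
 * this file:
 *
 *	for each vcpu c:
 *		pend_percpu = irq_pending & irq_enabled;	// SGIs/PPIs
 *		pend_shared = irq_pending & irq_enabled
 *			      & irq_spi_target[c];		// SPIs
 *		if (pend_percpu or pend_shared is non-empty)
 *			set_bit(c, dist->irq_pending_on_cpu);
 *
 * See compute_pending_for_cpu() and vgic_update_state() below for the
 * real implementation.
 */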
104 
105 #include "vgic.h"
106 
107 static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu);
108 static void vgic_retire_lr(int lr_nr, struct kvm_vcpu *vcpu);
109 static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr);
110 static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc);
111 static u64 vgic_get_elrsr(struct kvm_vcpu *vcpu);
112 static struct irq_phys_map *vgic_irq_map_search(struct kvm_vcpu *vcpu,
113 						int virt_irq);
114 static int compute_pending_for_cpu(struct kvm_vcpu *vcpu);
115 
116 static const struct vgic_ops *vgic_ops;
117 static const struct vgic_params *vgic;
118 
119 static void add_sgi_source(struct kvm_vcpu *vcpu, int irq, int source)
120 {
121 	vcpu->kvm->arch.vgic.vm_ops.add_sgi_source(vcpu, irq, source);
122 }
123 
124 static bool queue_sgi(struct kvm_vcpu *vcpu, int irq)
125 {
126 	return vcpu->kvm->arch.vgic.vm_ops.queue_sgi(vcpu, irq);
127 }
128 
129 int kvm_vgic_map_resources(struct kvm *kvm)
130 {
131 	return kvm->arch.vgic.vm_ops.map_resources(kvm, vgic);
132 }
133 
134 /*
135  * struct vgic_bitmap contains a bitmap made of unsigned longs, but
136  * extracts u32s out of them.
137  *
138  * This does not work on 64-bit BE systems, because the bitmap access
139  * will store two consecutive 32-bit words with the higher-addressed
140  * register's bits at the lower index and the lower-addressed register's
141  * bits at the higher index.
142  *
143  * Therefore, swizzle the register index when accessing the 32-bit word
144  * registers to access the right register's value.
145  */
146 #if defined(CONFIG_CPU_BIG_ENDIAN) && BITS_PER_LONG == 64
147 #define REG_OFFSET_SWIZZLE	1
148 #else
149 #define REG_OFFSET_SWIZZLE	0
150 #endif
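/*
 * Worked example (editor's note): on a 64-bit BE host, the 32-bit
 * registers at word indices 0 and 1 share one unsigned long, but bits
 * 0-31 (register 0) live in the higher-addressed half and bits 32-63
 * (register 1) in the lower-addressed half. XOR-ing the word index
 * with REG_OFFSET_SWIZZLE swaps 0<->1, 2<->3, etc., so
 * vgic_bitmap_get_reg() below returns the u32 that actually holds the
 * requested register's bits.
 */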
151 
152 static int vgic_init_bitmap(struct vgic_bitmap *b, int nr_cpus, int nr_irqs)
153 {
154 	int nr_longs;
155 
156 	nr_longs = nr_cpus + BITS_TO_LONGS(nr_irqs - VGIC_NR_PRIVATE_IRQS);
157 
158 	b->private = kzalloc(sizeof(unsigned long) * nr_longs, GFP_KERNEL);
159 	if (!b->private)
160 		return -ENOMEM;
161 
162 	b->shared = b->private + nr_cpus;
163 
164 	return 0;
165 }
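/*
 * Example layout (editor's illustration): with nr_cpus = 4 and
 * nr_irqs = 256 on a 64-bit host, nr_longs = 4 + BITS_TO_LONGS(224)
 * = 8. The first 4 longs each hold one vcpu's 32 private interrupts
 * (SGIs + PPIs); b->shared points past them at the 4 longs covering
 * the 224 shared SPIs.
 */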
166 
167 static void vgic_free_bitmap(struct vgic_bitmap *b)
168 {
169 	kfree(b->private);
170 	b->private = NULL;
171 	b->shared = NULL;
172 }
173 
174 /*
175  * Call this function to convert a u64 value to an unsigned long * bitmask
176  * in a way that works on both 32-bit and 64-bit LE and BE platforms.
177  *
178  * Warning: Calling this function may modify *val.
179  */
180 static unsigned long *u64_to_bitmask(u64 *val)
181 {
182 #if defined(CONFIG_CPU_BIG_ENDIAN) && BITS_PER_LONG == 32
183 	*val = (*val >> 32) | (*val << 32);
184 #endif
185 	return (unsigned long *)val;
186 }
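/*
 * Example usage (editor's note), mirroring several callers below:
 *
 *	u64 elrsr = vgic_get_elrsr(vcpu);
 *	unsigned long *elrsr_ptr = u64_to_bitmask(&elrsr);
 *	lr = find_first_bit(elrsr_ptr, vgic->nr_lr);
 *
 * On a 32-bit BE host the two 32-bit halves of the u64 are stored
 * high-half-first, so swapping them first makes bit 0 of the value
 * land in bit 0 of the first unsigned long, as the bitops expect.
 */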
187 
188 u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x, int cpuid, u32 offset)
189 {
190 	offset >>= 2;
191 	if (!offset)
192 		return (u32 *)(x->private + cpuid) + REG_OFFSET_SWIZZLE;
193 	else
194 		return (u32 *)(x->shared) + ((offset - 1) ^ REG_OFFSET_SWIZZLE);
195 }
196 
197 static int vgic_bitmap_get_irq_val(struct vgic_bitmap *x,
198 				   int cpuid, int irq)
199 {
200 	if (irq < VGIC_NR_PRIVATE_IRQS)
201 		return test_bit(irq, x->private + cpuid);
202 
203 	return test_bit(irq - VGIC_NR_PRIVATE_IRQS, x->shared);
204 }
205 
206 void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid,
207 			     int irq, int val)
208 {
209 	unsigned long *reg;
210 
211 	if (irq < VGIC_NR_PRIVATE_IRQS) {
212 		reg = x->private + cpuid;
213 	} else {
214 		reg = x->shared;
215 		irq -= VGIC_NR_PRIVATE_IRQS;
216 	}
217 
218 	if (val)
219 		set_bit(irq, reg);
220 	else
221 		clear_bit(irq, reg);
222 }
223 
224 static unsigned long *vgic_bitmap_get_cpu_map(struct vgic_bitmap *x, int cpuid)
225 {
226 	return x->private + cpuid;
227 }
228 
229 unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x)
230 {
231 	return x->shared;
232 }
233 
234 static int vgic_init_bytemap(struct vgic_bytemap *x, int nr_cpus, int nr_irqs)
235 {
236 	int size;
237 
238 	size  = nr_cpus * VGIC_NR_PRIVATE_IRQS;
239 	size += nr_irqs - VGIC_NR_PRIVATE_IRQS;
240 
241 	x->private = kzalloc(size, GFP_KERNEL);
242 	if (!x->private)
243 		return -ENOMEM;
244 
245 	x->shared = x->private + nr_cpus * VGIC_NR_PRIVATE_IRQS / sizeof(u32);
246 	return 0;
247 }
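/*
 * Example layout (editor's illustration): with nr_cpus = 2 and
 * nr_irqs = 128, this allocates 2 * 32 + 96 = 160 bytes: one byte per
 * private interrupt per vcpu, then one byte per shared interrupt.
 * Since x->private is a u32 pointer, x->shared is advanced by
 * 2 * 32 / sizeof(u32) = 16 words to skip the private area.
 */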
248 
249 static void vgic_free_bytemap(struct vgic_bytemap *b)
250 {
251 	kfree(b->private);
252 	b->private = NULL;
253 	b->shared = NULL;
254 }
255 
256 u32 *vgic_bytemap_get_reg(struct vgic_bytemap *x, int cpuid, u32 offset)
257 {
258 	u32 *reg;
259 
260 	if (offset < VGIC_NR_PRIVATE_IRQS) {
261 		reg = x->private;
262 		offset += cpuid * VGIC_NR_PRIVATE_IRQS;
263 	} else {
264 		reg = x->shared;
265 		offset -= VGIC_NR_PRIVATE_IRQS;
266 	}
267 
268 	return reg + (offset / sizeof(u32));
269 }
270 
271 #define VGIC_CFG_LEVEL	0
272 #define VGIC_CFG_EDGE	1
273 
274 static bool vgic_irq_is_edge(struct kvm_vcpu *vcpu, int irq)
275 {
276 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
277 	int irq_val;
278 
279 	irq_val = vgic_bitmap_get_irq_val(&dist->irq_cfg, vcpu->vcpu_id, irq);
280 	return irq_val == VGIC_CFG_EDGE;
281 }
282 
283 static int vgic_irq_is_enabled(struct kvm_vcpu *vcpu, int irq)
284 {
285 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
286 
287 	return vgic_bitmap_get_irq_val(&dist->irq_enabled, vcpu->vcpu_id, irq);
288 }
289 
290 static int vgic_irq_is_queued(struct kvm_vcpu *vcpu, int irq)
291 {
292 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
293 
294 	return vgic_bitmap_get_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq);
295 }
296 
297 static int vgic_irq_is_active(struct kvm_vcpu *vcpu, int irq)
298 {
299 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
300 
301 	return vgic_bitmap_get_irq_val(&dist->irq_active, vcpu->vcpu_id, irq);
302 }
303 
304 static void vgic_irq_set_queued(struct kvm_vcpu *vcpu, int irq)
305 {
306 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
307 
308 	vgic_bitmap_set_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq, 1);
309 }
310 
311 static void vgic_irq_clear_queued(struct kvm_vcpu *vcpu, int irq)
312 {
313 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
314 
315 	vgic_bitmap_set_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq, 0);
316 }
317 
318 static void vgic_irq_set_active(struct kvm_vcpu *vcpu, int irq)
319 {
320 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
321 
322 	vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 1);
323 }
324 
325 static void vgic_irq_clear_active(struct kvm_vcpu *vcpu, int irq)
326 {
327 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
328 
329 	vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 0);
330 }
331 
332 static int vgic_dist_irq_get_level(struct kvm_vcpu *vcpu, int irq)
333 {
334 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
335 
336 	return vgic_bitmap_get_irq_val(&dist->irq_level, vcpu->vcpu_id, irq);
337 }
338 
339 static void vgic_dist_irq_set_level(struct kvm_vcpu *vcpu, int irq)
340 {
341 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
342 
343 	vgic_bitmap_set_irq_val(&dist->irq_level, vcpu->vcpu_id, irq, 1);
344 }
345 
346 static void vgic_dist_irq_clear_level(struct kvm_vcpu *vcpu, int irq)
347 {
348 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
349 
350 	vgic_bitmap_set_irq_val(&dist->irq_level, vcpu->vcpu_id, irq, 0);
351 }
352 
353 static int vgic_dist_irq_soft_pend(struct kvm_vcpu *vcpu, int irq)
354 {
355 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
356 
357 	return vgic_bitmap_get_irq_val(&dist->irq_soft_pend, vcpu->vcpu_id, irq);
358 }
359 
360 static void vgic_dist_irq_clear_soft_pend(struct kvm_vcpu *vcpu, int irq)
361 {
362 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
363 
364 	vgic_bitmap_set_irq_val(&dist->irq_soft_pend, vcpu->vcpu_id, irq, 0);
365 	if (!vgic_dist_irq_get_level(vcpu, irq)) {
366 		vgic_dist_irq_clear_pending(vcpu, irq);
367 		if (!compute_pending_for_cpu(vcpu))
368 			clear_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
369 	}
370 }
371 
372 static int vgic_dist_irq_is_pending(struct kvm_vcpu *vcpu, int irq)
373 {
374 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
375 
376 	return vgic_bitmap_get_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq);
377 }
378 
379 void vgic_dist_irq_set_pending(struct kvm_vcpu *vcpu, int irq)
380 {
381 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
382 
383 	vgic_bitmap_set_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq, 1);
384 }
385 
386 void vgic_dist_irq_clear_pending(struct kvm_vcpu *vcpu, int irq)
387 {
388 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
389 
390 	vgic_bitmap_set_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq, 0);
391 }
392 
393 static void vgic_cpu_irq_set(struct kvm_vcpu *vcpu, int irq)
394 {
395 	if (irq < VGIC_NR_PRIVATE_IRQS)
396 		set_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
397 	else
398 		set_bit(irq - VGIC_NR_PRIVATE_IRQS,
399 			vcpu->arch.vgic_cpu.pending_shared);
400 }
401 
402 void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq)
403 {
404 	if (irq < VGIC_NR_PRIVATE_IRQS)
405 		clear_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
406 	else
407 		clear_bit(irq - VGIC_NR_PRIVATE_IRQS,
408 			  vcpu->arch.vgic_cpu.pending_shared);
409 }
410 
411 static bool vgic_can_sample_irq(struct kvm_vcpu *vcpu, int irq)
412 {
413 	return !vgic_irq_is_queued(vcpu, irq);
414 }
415 
416 /**
417  * vgic_reg_access - access vgic register
418  * @mmio:   pointer to the data describing the mmio access
419  * @reg:    pointer to the virtual backing of vgic distributor data
420  * @offset: least significant 2 bits used for word offset
421  * @mode:   ACCESS_ mode (see defines above)
422  *
423  * Helper to make vgic register access easier using one of the access
424  * modes defined for vgic register access
425  * (read,raz,write-ignored,setbit,clearbit,write)
426  */
427 void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg,
428 		     phys_addr_t offset, int mode)
429 {
430 	int word_offset = (offset & 3) * 8;
431 	u32 mask = (1UL << (mmio->len * 8)) - 1;
432 	u32 regval;
433 
434 	/*
435 	 * Any alignment fault should have been delivered to the guest
436 	 * directly (ARM ARM B3.12.7 "Prioritization of aborts").
437 	 */
438 
439 	if (reg) {
440 		regval = *reg;
441 	} else {
442 		BUG_ON(mode != (ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED));
443 		regval = 0;
444 	}
445 
446 	if (mmio->is_write) {
447 		u32 data = mmio_data_read(mmio, mask) << word_offset;
448 		switch (ACCESS_WRITE_MASK(mode)) {
449 		case ACCESS_WRITE_IGNORED:
450 			return;
451 
452 		case ACCESS_WRITE_SETBIT:
453 			regval |= data;
454 			break;
455 
456 		case ACCESS_WRITE_CLEARBIT:
457 			regval &= ~data;
458 			break;
459 
460 		case ACCESS_WRITE_VALUE:
461 			regval = (regval & ~(mask << word_offset)) | data;
462 			break;
463 		}
464 		*reg = regval;
465 	} else {
466 		switch (ACCESS_READ_MASK(mode)) {
467 		case ACCESS_READ_RAZ:
468 			regval = 0;
469 			/* fall through */
470 
471 		case ACCESS_READ_VALUE:
472 			mmio_data_write(mmio, mask, regval >> word_offset);
473 		}
474 	}
475 }
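/*
 * Worked example (editor's note, values hypothetical): a 2-byte guest
 * write of 0x00f0 at offset 2 within a register yields
 * word_offset = 16 and mask = 0xffff, so under ACCESS_WRITE_SETBIT
 * the effect is *reg |= 0x00f00000: only the upper half-word of the
 * 32-bit register is touched.
 */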
476 
477 bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
478 			phys_addr_t offset)
479 {
480 	vgic_reg_access(mmio, NULL, offset,
481 			ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
482 	return false;
483 }
484 
485 bool vgic_handle_enable_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
486 			    phys_addr_t offset, int vcpu_id, int access)
487 {
488 	u32 *reg;
489 	int mode = ACCESS_READ_VALUE | access;
490 	struct kvm_vcpu *target_vcpu = kvm_get_vcpu(kvm, vcpu_id);
491 
492 	reg = vgic_bitmap_get_reg(&kvm->arch.vgic.irq_enabled, vcpu_id, offset);
493 	vgic_reg_access(mmio, reg, offset, mode);
494 	if (mmio->is_write) {
495 		if (access & ACCESS_WRITE_CLEARBIT) {
496 			if (offset < 4) /* Force SGI enabled */
497 				*reg |= 0xffff;
498 			vgic_retire_disabled_irqs(target_vcpu);
499 		}
500 		vgic_update_state(kvm);
501 		return true;
502 	}
503 
504 	return false;
505 }
506 
507 bool vgic_handle_set_pending_reg(struct kvm *kvm,
508 				 struct kvm_exit_mmio *mmio,
509 				 phys_addr_t offset, int vcpu_id)
510 {
511 	u32 *reg, orig;
512 	u32 level_mask;
513 	int mode = ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT;
514 	struct vgic_dist *dist = &kvm->arch.vgic;
515 
516 	reg = vgic_bitmap_get_reg(&dist->irq_cfg, vcpu_id, offset);
517 	level_mask = (~(*reg));
518 
519 	/* Mark both level and edge triggered irqs as pending */
520 	reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu_id, offset);
521 	orig = *reg;
522 	vgic_reg_access(mmio, reg, offset, mode);
523 
524 	if (mmio->is_write) {
525 		/* Set the soft-pending flag only for level-triggered irqs */
526 		reg = vgic_bitmap_get_reg(&dist->irq_soft_pend,
527 					  vcpu_id, offset);
528 		vgic_reg_access(mmio, reg, offset, mode);
529 		*reg &= level_mask;
530 
531 		/* Ignore writes to SGIs */
532 		if (offset < 2) {
533 			*reg &= ~0xffff;
534 			*reg |= orig & 0xffff;
535 		}
536 
537 		vgic_update_state(kvm);
538 		return true;
539 	}
540 
541 	return false;
542 }
543 
544 bool vgic_handle_clear_pending_reg(struct kvm *kvm,
545 				   struct kvm_exit_mmio *mmio,
546 				   phys_addr_t offset, int vcpu_id)
547 {
548 	u32 *level_active;
549 	u32 *reg, orig;
550 	int mode = ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT;
551 	struct vgic_dist *dist = &kvm->arch.vgic;
552 
553 	reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu_id, offset);
554 	orig = *reg;
555 	vgic_reg_access(mmio, reg, offset, mode);
556 	if (mmio->is_write) {
557 		/* Re-set level triggered level-active interrupts */
558 		level_active = vgic_bitmap_get_reg(&dist->irq_level,
559 					  vcpu_id, offset);
560 		reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu_id, offset);
561 		*reg |= *level_active;
562 
563 		/* Ignore writes to SGIs */
564 		if (offset < 2) {
565 			*reg &= ~0xffff;
566 			*reg |= orig & 0xffff;
567 		}
568 
569 		/* Clear soft-pending flags */
570 		reg = vgic_bitmap_get_reg(&dist->irq_soft_pend,
571 					  vcpu_id, offset);
572 		vgic_reg_access(mmio, reg, offset, mode);
573 
574 		vgic_update_state(kvm);
575 		return true;
576 	}
577 	return false;
578 }
579 
580 bool vgic_handle_set_active_reg(struct kvm *kvm,
581 				struct kvm_exit_mmio *mmio,
582 				phys_addr_t offset, int vcpu_id)
583 {
584 	u32 *reg;
585 	struct vgic_dist *dist = &kvm->arch.vgic;
586 
587 	reg = vgic_bitmap_get_reg(&dist->irq_active, vcpu_id, offset);
588 	vgic_reg_access(mmio, reg, offset,
589 			ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
590 
591 	if (mmio->is_write) {
592 		vgic_update_state(kvm);
593 		return true;
594 	}
595 
596 	return false;
597 }
598 
599 bool vgic_handle_clear_active_reg(struct kvm *kvm,
600 				  struct kvm_exit_mmio *mmio,
601 				  phys_addr_t offset, int vcpu_id)
602 {
603 	u32 *reg;
604 	struct vgic_dist *dist = &kvm->arch.vgic;
605 
606 	reg = vgic_bitmap_get_reg(&dist->irq_active, vcpu_id, offset);
607 	vgic_reg_access(mmio, reg, offset,
608 			ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
609 
610 	if (mmio->is_write) {
611 		vgic_update_state(kvm);
612 		return true;
613 	}
614 
615 	return false;
616 }
617 
618 static u32 vgic_cfg_expand(u16 val)
619 {
620 	u32 res = 0;
621 	int i;
622 
623 	/*
624 	 * Turn a 16bit value like abcd...mnop into a 32bit word
625 	 * a0b0c0d0...m0n0o0p0, which is what the HW cfg register is.
626 	 */
627 	for (i = 0; i < 16; i++)
628 		res |= ((val >> i) & VGIC_CFG_EDGE) << (2 * i + 1);
629 
630 	return res;
631 }
632 
633 static u16 vgic_cfg_compress(u32 val)
634 {
635 	u16 res = 0;
636 	int i;
637 
638 	/*
639 	 * Turn a 32bit word a0b0c0d0...m0n0o0p0 into 16bit value like
640 	 * abcd...mnop which is what we really care about.
641 	 */
642 	for (i = 0; i < 16; i++)
643 		res |= ((val >> (i * 2 + 1)) & VGIC_CFG_EDGE) << i;
644 
645 	return res;
646 }
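/*
 * Example (editor's illustration): vgic_cfg_expand(0x0003) yields
 * 0x0000000a (bits 1 and 3 set), i.e. IRQs 0 and 1 marked edge
 * triggered in the hardware's 2-bits-per-IRQ layout, and
 * vgic_cfg_compress(0x0000000a) gives back 0x0003.
 */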
647 
648 /*
649  * The distributor uses 2 bits per IRQ for the CFG register, but the
650  * LSB is always 0. As such, we only keep the upper bit, and use the
651  * two above functions to compress/expand the bits
652  */
653 bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio,
654 			 phys_addr_t offset)
655 {
656 	u32 val;
657 
658 	if (offset & 4)
659 		val = *reg >> 16;
660 	else
661 		val = *reg & 0xffff;
662 
663 	val = vgic_cfg_expand(val);
664 	vgic_reg_access(mmio, &val, offset,
665 			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
666 	if (mmio->is_write) {
667 		/* Ignore writes to read-only SGI and PPI bits */
668 		if (offset < 8)
669 			return false;
670 
671 		val = vgic_cfg_compress(val);
672 		if (offset & 4) {
673 			*reg &= 0xffff;
674 			*reg |= val << 16;
675 		} else {
676 			*reg &= 0xffff << 16;
677 			*reg |= val;
678 		}
679 	}
680 
681 	return false;
682 }
683 
684 /**
685  * vgic_unqueue_irqs - move pending/active IRQs from LRs to the distributor
686  * @vcpu: Pointer to the VCPU whose vgic_cpu struct holds the LRs
687  *
688  * Move any IRQs that have already been assigned to LRs back to the
689  * emulated distributor state so that the complete emulated state can be read
690  * from the main emulation structures without investigating the LRs.
691  */
692 void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
693 {
694 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
695 	u64 elrsr = vgic_get_elrsr(vcpu);
696 	unsigned long *elrsr_ptr = u64_to_bitmask(&elrsr);
697 	int i;
698 
699 	for_each_clear_bit(i, elrsr_ptr, vgic_cpu->nr_lr) {
700 		struct vgic_lr lr = vgic_get_lr(vcpu, i);
701 
702 		/*
703 		 * There are three options for the state bits:
704 		 *
705 		 * 01: pending
706 		 * 10: active
707 		 * 11: pending and active
708 		 */
709 		BUG_ON(!(lr.state & LR_STATE_MASK));
710 
711 		/* Reestablish SGI source for pending and active IRQs */
712 		if (lr.irq < VGIC_NR_SGIS)
713 			add_sgi_source(vcpu, lr.irq, lr.source);
714 
715 		/*
716 		 * If the LR holds an active (10) or a pending and active (11)
717 		 * interrupt then move the active state to the
718 		 * distributor tracking bit.
719 		 */
720 		if (lr.state & LR_STATE_ACTIVE)
721 			vgic_irq_set_active(vcpu, lr.irq);
722 
723 		/*
724 		 * Reestablish the pending state on the distributor and the
725 		 * CPU interface and mark the LR as free for other use.
726 		 */
727 		vgic_retire_lr(i, vcpu);
728 
729 		/* Finally update the VGIC state. */
730 		vgic_update_state(vcpu->kvm);
731 	}
732 }
733 
734 const
735 struct vgic_io_range *vgic_find_range(const struct vgic_io_range *ranges,
736 				      int len, gpa_t offset)
737 {
738 	while (ranges->len) {
739 		if (offset >= ranges->base &&
740 		    (offset + len) <= (ranges->base + ranges->len))
741 			return ranges;
742 		ranges++;
743 	}
744 
745 	return NULL;
746 }
747 
748 static bool vgic_validate_access(const struct vgic_dist *dist,
749 				 const struct vgic_io_range *range,
750 				 unsigned long offset)
751 {
752 	int irq;
753 
754 	if (!range->bits_per_irq)
755 		return true;	/* Not an irq-based access */
756 
757 	irq = offset * 8 / range->bits_per_irq;
758 	if (irq >= dist->nr_irqs)
759 		return false;
760 
761 	return true;
762 }
763 
764 /*
765  * Call the respective handler function for the given range.
766  * We split up any 64 bit accesses into two consecutive 32 bit
767  * handler calls and merge the result afterwards.
768  * We do this in a little endian fashion regardless of the host's
769  * or guest's endianness, because the GIC is always LE and the rest of
770  * the code (vgic_reg_access) also puts it in a LE fashion already.
771  * At this point we have already identified the handle function, so
772  * range points to that one entry and offset is relative to this.
773  */
774 static bool call_range_handler(struct kvm_vcpu *vcpu,
775 			       struct kvm_exit_mmio *mmio,
776 			       unsigned long offset,
777 			       const struct vgic_io_range *range)
778 {
779 	struct kvm_exit_mmio mmio32;
780 	bool ret;
781 
782 	if (likely(mmio->len <= 4))
783 		return range->handle_mmio(vcpu, mmio, offset);
784 
785 	/*
786 	 * Any access bigger than 4 bytes (that we currently handle in KVM)
787 	 * is actually 8 bytes long, caused by a 64-bit access
788 	 */
789 
790 	mmio32.len = 4;
791 	mmio32.is_write = mmio->is_write;
792 	mmio32.private = mmio->private;
793 
794 	mmio32.phys_addr = mmio->phys_addr + 4;
795 	mmio32.data = &((u32 *)mmio->data)[1];
796 	ret = range->handle_mmio(vcpu, &mmio32, offset + 4);
797 
798 	mmio32.phys_addr = mmio->phys_addr;
799 	mmio32.data = &((u32 *)mmio->data)[0];
800 	ret |= range->handle_mmio(vcpu, &mmio32, offset);
801 
802 	return ret;
803 }
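/*
 * Example (editor's note): on a LE host, an 8-byte guest write of
 * 0x1122334455667788 at offset 0 is dispatched as two 32-bit calls:
 * the handler first sees 0x11223344 at offset + 4, then 0x55667788
 * at offset, matching the always-LE layout of the GIC register file.
 */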
804 
805 /**
806  * vgic_handle_mmio_access - handle an in-kernel MMIO access
807  * This is called by the read/write KVM IO device wrappers below.
808  * @vcpu:	pointer to the vcpu performing the access
809  * @this:	pointer to the KVM IO device in charge
810  * @addr:	guest physical address of the access
811  * @len:	size of the access
812  * @val:	pointer to the data region
813  * @is_write:	read or write access
814  *
815  * returns true if the MMIO access could be performed
816  */
817 static int vgic_handle_mmio_access(struct kvm_vcpu *vcpu,
818 				   struct kvm_io_device *this, gpa_t addr,
819 				   int len, void *val, bool is_write)
820 {
821 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
822 	struct vgic_io_device *iodev = container_of(this,
823 						    struct vgic_io_device, dev);
824 	const struct vgic_io_range *range;
825 	struct kvm_exit_mmio mmio;
826 	bool updated_state;
827 	gpa_t offset;
828 
829 	offset = addr - iodev->addr;
830 	range = vgic_find_range(iodev->reg_ranges, len, offset);
831 	if (unlikely(!range || !range->handle_mmio)) {
832 		pr_warn("Unhandled access %d %08llx %d\n", is_write, addr, len);
833 		return -ENXIO;
834 	}
835 
836 	mmio.phys_addr = addr;
837 	mmio.len = len;
838 	mmio.is_write = is_write;
839 	mmio.data = val;
840 	mmio.private = iodev->redist_vcpu;
841 
842 	spin_lock(&dist->lock);
843 	offset -= range->base;
844 	if (vgic_validate_access(dist, range, offset)) {
845 		updated_state = call_range_handler(vcpu, &mmio, offset, range);
846 	} else {
847 		if (!is_write)
848 			memset(val, 0, len);
849 		updated_state = false;
850 	}
851 	spin_unlock(&dist->lock);
852 
853 	if (updated_state)
854 		vgic_kick_vcpus(vcpu->kvm);
855 
856 	return 0;
857 }
858 
859 static int vgic_handle_mmio_read(struct kvm_vcpu *vcpu,
860 				 struct kvm_io_device *this,
861 				 gpa_t addr, int len, void *val)
862 {
863 	return vgic_handle_mmio_access(vcpu, this, addr, len, val, false);
864 }
865 
866 static int vgic_handle_mmio_write(struct kvm_vcpu *vcpu,
867 				  struct kvm_io_device *this,
868 				  gpa_t addr, int len, const void *val)
869 {
870 	return vgic_handle_mmio_access(vcpu, this, addr, len, (void *)val,
871 				       true);
872 }
873 
874 struct kvm_io_device_ops vgic_io_ops = {
875 	.read	= vgic_handle_mmio_read,
876 	.write	= vgic_handle_mmio_write,
877 };
878 
879 /**
880  * vgic_register_kvm_io_dev - register VGIC register frame on the KVM I/O bus
881  * @kvm:            The VM structure pointer
882  * @base:           The (guest) base address for the register frame
883  * @len:            Length of the register frame window
884  * @ranges:         Describing the handler functions for each register
885  * @redist_vcpu_id: The VCPU ID to pass on to the handlers on call
886  * @iodev:          Points to memory to be passed on to the handler
887  *
888  * @iodev stores the parameters of this function so that they can be used by
889  * the handler and the dispatcher function (since the KVM I/O bus framework
890  * lacks an opaque parameter). Initialization is done in this function, but
891  * the reference must stay valid and unique for the whole VGIC lifetime.
892  * If the register frame is not mapped for a specific VCPU, pass -1 to
893  * @redist_vcpu_id.
894  */
895 int vgic_register_kvm_io_dev(struct kvm *kvm, gpa_t base, int len,
896 			     const struct vgic_io_range *ranges,
897 			     int redist_vcpu_id,
898 			     struct vgic_io_device *iodev)
899 {
900 	struct kvm_vcpu *vcpu = NULL;
901 	int ret;
902 
903 	if (redist_vcpu_id >= 0)
904 		vcpu = kvm_get_vcpu(kvm, redist_vcpu_id);
905 
906 	iodev->addr		= base;
907 	iodev->len		= len;
908 	iodev->reg_ranges	= ranges;
909 	iodev->redist_vcpu	= vcpu;
910 
911 	kvm_iodevice_init(&iodev->dev, &vgic_io_ops);
912 
913 	mutex_lock(&kvm->slots_lock);
914 
915 	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, base, len,
916 				      &iodev->dev);
917 	mutex_unlock(&kvm->slots_lock);
918 
919 	/* Mark the iodev as invalid if registration fails. */
920 	if (ret)
921 		iodev->dev.ops = NULL;
922 
923 	return ret;
924 }
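/*
 * Usage sketch (editor's illustration; the constants and field names
 * here are only meant as an example): a GICv2 emulation would
 * register its distributor frame along the lines of
 *
 *	ret = vgic_register_kvm_io_dev(kvm, dist->vgic_dist_base,
 *				       KVM_VGIC_V2_DIST_SIZE,
 *				       vgic_dist_ranges, -1,
 *				       &dist->dist_iodev);
 *
 * passing -1 as redist_vcpu_id because the distributor frame is not
 * tied to a particular VCPU.
 */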
925 
926 static int vgic_nr_shared_irqs(struct vgic_dist *dist)
927 {
928 	return dist->nr_irqs - VGIC_NR_PRIVATE_IRQS;
929 }
930 
931 static int compute_active_for_cpu(struct kvm_vcpu *vcpu)
932 {
933 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
934 	unsigned long *active, *enabled, *act_percpu, *act_shared;
935 	unsigned long active_private, active_shared;
936 	int nr_shared = vgic_nr_shared_irqs(dist);
937 	int vcpu_id;
938 
939 	vcpu_id = vcpu->vcpu_id;
940 	act_percpu = vcpu->arch.vgic_cpu.active_percpu;
941 	act_shared = vcpu->arch.vgic_cpu.active_shared;
942 
943 	active = vgic_bitmap_get_cpu_map(&dist->irq_active, vcpu_id);
944 	enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id);
945 	bitmap_and(act_percpu, active, enabled, VGIC_NR_PRIVATE_IRQS);
946 
947 	active = vgic_bitmap_get_shared_map(&dist->irq_active);
948 	enabled = vgic_bitmap_get_shared_map(&dist->irq_enabled);
949 	bitmap_and(act_shared, active, enabled, nr_shared);
950 	bitmap_and(act_shared, act_shared,
951 		   vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]),
952 		   nr_shared);
953 
954 	active_private = find_first_bit(act_percpu, VGIC_NR_PRIVATE_IRQS);
955 	active_shared = find_first_bit(act_shared, nr_shared);
956 
957 	return (active_private < VGIC_NR_PRIVATE_IRQS ||
958 		active_shared < nr_shared);
959 }
960 
961 static int compute_pending_for_cpu(struct kvm_vcpu *vcpu)
962 {
963 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
964 	unsigned long *pending, *enabled, *pend_percpu, *pend_shared;
965 	unsigned long pending_private, pending_shared;
966 	int nr_shared = vgic_nr_shared_irqs(dist);
967 	int vcpu_id;
968 
969 	vcpu_id = vcpu->vcpu_id;
970 	pend_percpu = vcpu->arch.vgic_cpu.pending_percpu;
971 	pend_shared = vcpu->arch.vgic_cpu.pending_shared;
972 
973 	if (!dist->enabled) {
974 		bitmap_zero(pend_percpu, VGIC_NR_PRIVATE_IRQS);
975 		bitmap_zero(pend_shared, nr_shared);
976 		return 0;
977 	}
978 
979 	pending = vgic_bitmap_get_cpu_map(&dist->irq_pending, vcpu_id);
980 	enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id);
981 	bitmap_and(pend_percpu, pending, enabled, VGIC_NR_PRIVATE_IRQS);
982 
983 	pending = vgic_bitmap_get_shared_map(&dist->irq_pending);
984 	enabled = vgic_bitmap_get_shared_map(&dist->irq_enabled);
985 	bitmap_and(pend_shared, pending, enabled, nr_shared);
986 	bitmap_and(pend_shared, pend_shared,
987 		   vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]),
988 		   nr_shared);
989 
990 	pending_private = find_first_bit(pend_percpu, VGIC_NR_PRIVATE_IRQS);
991 	pending_shared = find_first_bit(pend_shared, nr_shared);
992 	return (pending_private < VGIC_NR_PRIVATE_IRQS ||
993 		pending_shared < vgic_nr_shared_irqs(dist));
994 }
995 
996 /*
997  * Update the interrupt state and determine which CPUs have pending
998  * or active interrupts. Must be called with distributor lock held.
999  */
1000 void vgic_update_state(struct kvm *kvm)
1001 {
1002 	struct vgic_dist *dist = &kvm->arch.vgic;
1003 	struct kvm_vcpu *vcpu;
1004 	int c;
1005 
1006 	kvm_for_each_vcpu(c, vcpu, kvm) {
1007 		if (compute_pending_for_cpu(vcpu))
1008 			set_bit(c, dist->irq_pending_on_cpu);
1009 
1010 		if (compute_active_for_cpu(vcpu))
1011 			set_bit(c, dist->irq_active_on_cpu);
1012 		else
1013 			clear_bit(c, dist->irq_active_on_cpu);
1014 	}
1015 }
1016 
1017 static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr)
1018 {
1019 	return vgic_ops->get_lr(vcpu, lr);
1020 }
1021 
1022 static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr,
1023 			       struct vgic_lr vlr)
1024 {
1025 	vgic_ops->set_lr(vcpu, lr, vlr);
1026 }
1027 
1028 static inline u64 vgic_get_elrsr(struct kvm_vcpu *vcpu)
1029 {
1030 	return vgic_ops->get_elrsr(vcpu);
1031 }
1032 
1033 static inline u64 vgic_get_eisr(struct kvm_vcpu *vcpu)
1034 {
1035 	return vgic_ops->get_eisr(vcpu);
1036 }
1037 
1038 static inline void vgic_clear_eisr(struct kvm_vcpu *vcpu)
1039 {
1040 	vgic_ops->clear_eisr(vcpu);
1041 }
1042 
1043 static inline u32 vgic_get_interrupt_status(struct kvm_vcpu *vcpu)
1044 {
1045 	return vgic_ops->get_interrupt_status(vcpu);
1046 }
1047 
1048 static inline void vgic_enable_underflow(struct kvm_vcpu *vcpu)
1049 {
1050 	vgic_ops->enable_underflow(vcpu);
1051 }
1052 
1053 static inline void vgic_disable_underflow(struct kvm_vcpu *vcpu)
1054 {
1055 	vgic_ops->disable_underflow(vcpu);
1056 }
1057 
1058 void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
1059 {
1060 	vgic_ops->get_vmcr(vcpu, vmcr);
1061 }
1062 
1063 void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
1064 {
1065 	vgic_ops->set_vmcr(vcpu, vmcr);
1066 }
1067 
1068 static inline void vgic_enable(struct kvm_vcpu *vcpu)
1069 {
1070 	vgic_ops->enable(vcpu);
1071 }
1072 
1073 static void vgic_retire_lr(int lr_nr, struct kvm_vcpu *vcpu)
1074 {
1075 	struct vgic_lr vlr = vgic_get_lr(vcpu, lr_nr);
1076 
1077 	vgic_irq_clear_queued(vcpu, vlr.irq);
1078 
1079 	/*
1080 	 * We must transfer the pending state back to the distributor before
1081  * retiring the LR, otherwise we may lose edge-triggered interrupts.
1082 	 */
1083 	if (vlr.state & LR_STATE_PENDING) {
1084 		vgic_dist_irq_set_pending(vcpu, vlr.irq);
1085 		vlr.hwirq = 0;
1086 	}
1087 
1088 	vlr.state = 0;
1089 	vgic_set_lr(vcpu, lr_nr, vlr);
1090 }
1091 
1092 static bool dist_active_irq(struct kvm_vcpu *vcpu)
1093 {
1094 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
1095 
1096 	return test_bit(vcpu->vcpu_id, dist->irq_active_on_cpu);
1097 }
1098 
1099 bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, struct irq_phys_map *map)
1100 {
1101 	int i;
1102 
1103 	for (i = 0; i < vcpu->arch.vgic_cpu.nr_lr; i++) {
1104 		struct vgic_lr vlr = vgic_get_lr(vcpu, i);
1105 
1106 		if (vlr.irq == map->virt_irq && vlr.state & LR_STATE_ACTIVE)
1107 			return true;
1108 	}
1109 
1110 	return vgic_irq_is_active(vcpu, map->virt_irq);
1111 }
1112 
1113 /*
1114  * An interrupt may have been disabled after being made pending on the
1115  * CPU interface (the classic case is a timer running while we're
1116  * rebooting the guest - the interrupt would kick as soon as the CPU
1117  * interface gets enabled, with deadly consequences).
1118  *
1119  * The solution is to examine already active LRs, and check the
1120  * interrupt is still enabled. If not, just retire it.
1121  */
1122 static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
1123 {
1124 	u64 elrsr = vgic_get_elrsr(vcpu);
1125 	unsigned long *elrsr_ptr = u64_to_bitmask(&elrsr);
1126 	int lr;
1127 
1128 	for_each_clear_bit(lr, elrsr_ptr, vgic->nr_lr) {
1129 		struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
1130 
1131 		if (!vgic_irq_is_enabled(vcpu, vlr.irq))
1132 			vgic_retire_lr(lr, vcpu);
1133 	}
1134 }
1135 
1136 static void vgic_queue_irq_to_lr(struct kvm_vcpu *vcpu, int irq,
1137 				 int lr_nr, struct vgic_lr vlr)
1138 {
1139 	if (vgic_irq_is_active(vcpu, irq)) {
1140 		vlr.state |= LR_STATE_ACTIVE;
1141 		kvm_debug("Set active, clear distributor: 0x%x\n", vlr.state);
1142 		vgic_irq_clear_active(vcpu, irq);
1143 		vgic_update_state(vcpu->kvm);
1144 	} else {
1145 		WARN_ON(!vgic_dist_irq_is_pending(vcpu, irq));
1146 		vlr.state |= LR_STATE_PENDING;
1147 		kvm_debug("Set pending: 0x%x\n", vlr.state);
1148 	}
1149 
1150 	if (!vgic_irq_is_edge(vcpu, irq))
1151 		vlr.state |= LR_EOI_INT;
1152 
1153 	if (vlr.irq >= VGIC_NR_SGIS) {
1154 		struct irq_phys_map *map;
1155 		map = vgic_irq_map_search(vcpu, irq);
1156 
1157 		if (map) {
1158 			vlr.hwirq = map->phys_irq;
1159 			vlr.state |= LR_HW;
1160 			vlr.state &= ~LR_EOI_INT;
1161 
1162 			/*
1163 			 * Make sure we're not going to sample this
1164 			 * again, as a HW-backed interrupt cannot be
1165 			 * in the PENDING_ACTIVE stage.
1166 			 */
1167 			vgic_irq_set_queued(vcpu, irq);
1168 		}
1169 	}
1170 
1171 	vgic_set_lr(vcpu, lr_nr, vlr);
1172 }
1173 
1174 /*
1175  * Queue an interrupt to a CPU virtual interface. Return true on success,
1176  * or false if it wasn't possible to queue it.
1177  * sgi_source must be zero for any non-SGI interrupts.
1178  */
1179 bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
1180 {
1181 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
1182 	u64 elrsr = vgic_get_elrsr(vcpu);
1183 	unsigned long *elrsr_ptr = u64_to_bitmask(&elrsr);
1184 	struct vgic_lr vlr;
1185 	int lr;
1186 
1187 	/* Sanitize the input... */
1188 	BUG_ON(sgi_source_id & ~7);
1189 	BUG_ON(sgi_source_id && irq >= VGIC_NR_SGIS);
1190 	BUG_ON(irq >= dist->nr_irqs);
1191 
1192 	kvm_debug("Queue IRQ%d\n", irq);
1193 
1194 	/* Do we have an active interrupt for the same CPUID? */
1195 	for_each_clear_bit(lr, elrsr_ptr, vgic->nr_lr) {
1196 		vlr = vgic_get_lr(vcpu, lr);
1197 		if (vlr.irq == irq && vlr.source == sgi_source_id) {
1198 			kvm_debug("LR%d piggyback for IRQ%d\n", lr, vlr.irq);
1199 			vgic_queue_irq_to_lr(vcpu, irq, lr, vlr);
1200 			return true;
1201 		}
1202 	}
1203 
1204 	/* Try to use another LR for this interrupt */
1205 	lr = find_first_bit(elrsr_ptr, vgic->nr_lr);
1206 	if (lr >= vgic->nr_lr)
1207 		return false;
1208 
1209 	kvm_debug("LR%d allocated for IRQ%d %x\n", lr, irq, sgi_source_id);
1210 
1211 	vlr.irq = irq;
1212 	vlr.source = sgi_source_id;
1213 	vlr.state = 0;
1214 	vgic_queue_irq_to_lr(vcpu, irq, lr, vlr);
1215 
1216 	return true;
1217 }
1218 
1219 static bool vgic_queue_hwirq(struct kvm_vcpu *vcpu, int irq)
1220 {
1221 	if (!vgic_can_sample_irq(vcpu, irq))
1222 		return true; /* level interrupt, already queued */
1223 
1224 	if (vgic_queue_irq(vcpu, 0, irq)) {
1225 		if (vgic_irq_is_edge(vcpu, irq)) {
1226 			vgic_dist_irq_clear_pending(vcpu, irq);
1227 			vgic_cpu_irq_clear(vcpu, irq);
1228 		} else {
1229 			vgic_irq_set_queued(vcpu, irq);
1230 		}
1231 
1232 		return true;
1233 	}
1234 
1235 	return false;
1236 }
1237 
1238 /*
1239  * Fill the list registers with pending interrupts before running the
1240  * guest.
1241  */
1242 static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
1243 {
1244 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
1245 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
1246 	unsigned long *pa_percpu, *pa_shared;
1247 	int i, vcpu_id;
1248 	int overflow = 0;
1249 	int nr_shared = vgic_nr_shared_irqs(dist);
1250 
1251 	vcpu_id = vcpu->vcpu_id;
1252 
1253 	pa_percpu = vcpu->arch.vgic_cpu.pend_act_percpu;
1254 	pa_shared = vcpu->arch.vgic_cpu.pend_act_shared;
1255 
1256 	bitmap_or(pa_percpu, vgic_cpu->pending_percpu, vgic_cpu->active_percpu,
1257 		  VGIC_NR_PRIVATE_IRQS);
1258 	bitmap_or(pa_shared, vgic_cpu->pending_shared, vgic_cpu->active_shared,
1259 		  nr_shared);
1260 	/*
1261 	 * We may not have any pending interrupt, or the interrupts
1262 	 * may have been serviced from another vcpu. In all cases,
1263 	 * move along.
1264 	 */
1265 	if (!kvm_vgic_vcpu_pending_irq(vcpu) && !dist_active_irq(vcpu))
1266 		goto epilog;
1267 
1268 	/* SGIs */
1269 	for_each_set_bit(i, pa_percpu, VGIC_NR_SGIS) {
1270 		if (!queue_sgi(vcpu, i))
1271 			overflow = 1;
1272 	}
1273 
1274 	/* PPIs */
1275 	for_each_set_bit_from(i, pa_percpu, VGIC_NR_PRIVATE_IRQS) {
1276 		if (!vgic_queue_hwirq(vcpu, i))
1277 			overflow = 1;
1278 	}
1279 
1280 	/* SPIs */
1281 	for_each_set_bit(i, pa_shared, nr_shared) {
1282 		if (!vgic_queue_hwirq(vcpu, i + VGIC_NR_PRIVATE_IRQS))
1283 			overflow = 1;
1284 	}
1285 
1286 
1287 
1288 
1289 epilog:
1290 	if (overflow) {
1291 		vgic_enable_underflow(vcpu);
1292 	} else {
1293 		vgic_disable_underflow(vcpu);
1294 		/*
1295 		 * We're about to run this VCPU, and we've consumed
1296 		 * everything the distributor had in store for
1297 		 * us. Claim we don't have anything pending. We'll
1298 		 * adjust that if needed while exiting.
1299 		 */
1300 		clear_bit(vcpu_id, dist->irq_pending_on_cpu);
1301 	}
1302 }
1303 
1304 static int process_queued_irq(struct kvm_vcpu *vcpu,
1305 				   int lr, struct vgic_lr vlr)
1306 {
1307 	int pending = 0;
1308 
1309 	/*
1310 	 * If the IRQ was EOIed (called from vgic_process_maintenance) or it
1311 	 * went from active to non-active (called from vgic_sync_hwirq) it was
1312  * also ACKed and we therefore assume we can clear the soft pending
1313  * state (should it have been set) for this interrupt.
1314 	 *
1315 	 * Note: if the IRQ soft pending state was set after the IRQ was
1316 	 * acked, it actually shouldn't be cleared, but we have no way of
1317 	 * knowing that unless we start trapping ACKs when the soft-pending
1318 	 * state is set.
1319 	 */
1320 	vgic_dist_irq_clear_soft_pend(vcpu, vlr.irq);
1321 
1322 	/*
1323 	 * Tell the gic to start sampling this interrupt again.
1324 	 */
1325 	vgic_irq_clear_queued(vcpu, vlr.irq);
1326 
1327 	/* Any additional pending interrupt? */
1328 	if (vgic_irq_is_edge(vcpu, vlr.irq)) {
1329 		BUG_ON(!(vlr.state & LR_HW));
1330 		pending = vgic_dist_irq_is_pending(vcpu, vlr.irq);
1331 	} else {
1332 		if (vgic_dist_irq_get_level(vcpu, vlr.irq)) {
1333 			vgic_cpu_irq_set(vcpu, vlr.irq);
1334 			pending = 1;
1335 		} else {
1336 			vgic_dist_irq_clear_pending(vcpu, vlr.irq);
1337 			vgic_cpu_irq_clear(vcpu, vlr.irq);
1338 		}
1339 	}
1340 
1341 	/*
1342 	 * Despite being EOIed, the LR may not have
1343 	 * been marked as empty.
1344 	 */
1345 	vlr.state = 0;
1346 	vlr.hwirq = 0;
1347 	vgic_set_lr(vcpu, lr, vlr);
1348 
1349 	return pending;
1350 }
1351 
1352 static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
1353 {
1354 	u32 status = vgic_get_interrupt_status(vcpu);
1355 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
1356 	struct kvm *kvm = vcpu->kvm;
1357 	int level_pending = 0;
1358 
1359 	kvm_debug("STATUS = %08x\n", status);
1360 
1361 	if (status & INT_STATUS_EOI) {
1362 		/*
1363 		 * Some level interrupts have been EOIed. Clear their
1364 		 * active bit.
1365 		 */
1366 		u64 eisr = vgic_get_eisr(vcpu);
1367 		unsigned long *eisr_ptr = u64_to_bitmask(&eisr);
1368 		int lr;
1369 
1370 		for_each_set_bit(lr, eisr_ptr, vgic->nr_lr) {
1371 			struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
1372 
1373 			WARN_ON(vgic_irq_is_edge(vcpu, vlr.irq));
1374 			WARN_ON(vlr.state & LR_STATE_MASK);
1375 
1376 
1377 			/*
1378 			 * kvm_notify_acked_irq calls kvm_set_irq()
1379 			 * to reset the IRQ level, which grabs the dist->lock
1380 			 * so we call this before taking the dist->lock.
1381 			 */
1382 			kvm_notify_acked_irq(kvm, 0,
1383 					     vlr.irq - VGIC_NR_PRIVATE_IRQS);
1384 
1385 			spin_lock(&dist->lock);
1386 			level_pending |= process_queued_irq(vcpu, lr, vlr);
1387 			spin_unlock(&dist->lock);
1388 		}
1389 	}
1390 
1391 	if (status & INT_STATUS_UNDERFLOW)
1392 		vgic_disable_underflow(vcpu);
1393 
1394 	/*
1395 	 * In the next iterations of the vcpu loop, if we sync the vgic state
1396 	 * after flushing it, but before entering the guest (this happens for
1397 	 * pending signals and vmid rollovers), then make sure we don't pick
1398 	 * up any old maintenance interrupts here.
1399 	 */
1400 	vgic_clear_eisr(vcpu);
1401 
1402 	return level_pending;
1403 }
1404 
1405 /*
1406  * Save the physical active state, and reset it to inactive.
1407  *
1408  * Return true if there's a pending forwarded interrupt to queue.
1409  */
1410 static bool vgic_sync_hwirq(struct kvm_vcpu *vcpu, int lr, struct vgic_lr vlr)
1411 {
1412 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
1413 	bool level_pending;
1414 
1415 	if (!(vlr.state & LR_HW))
1416 		return false;
1417 
1418 	if (vlr.state & LR_STATE_ACTIVE)
1419 		return false;
1420 
1421 	spin_lock(&dist->lock);
1422 	level_pending = process_queued_irq(vcpu, lr, vlr);
1423 	spin_unlock(&dist->lock);
1424 	return level_pending;
1425 }
1426 
1427 /* Sync back the VGIC state after a guest run */
1428 static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
1429 {
1430 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
1431 	u64 elrsr;
1432 	unsigned long *elrsr_ptr;
1433 	int lr, pending;
1434 	bool level_pending;
1435 
1436 	level_pending = vgic_process_maintenance(vcpu);
1437 
1438 	/* Deal with HW interrupts, and clear mappings for empty LRs */
1439 	for (lr = 0; lr < vgic->nr_lr; lr++) {
1440 		struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
1441 
1442 		level_pending |= vgic_sync_hwirq(vcpu, lr, vlr);
1443 		BUG_ON(vlr.irq >= dist->nr_irqs);
1444 	}
1445 
1446 	/* Check if we still have something up our sleeve... */
1447 	elrsr = vgic_get_elrsr(vcpu);
1448 	elrsr_ptr = u64_to_bitmask(&elrsr);
1449 	pending = find_first_zero_bit(elrsr_ptr, vgic->nr_lr);
1450 	if (level_pending || pending < vgic->nr_lr)
1451 		set_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
1452 }
1453 
1454 void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
1455 {
1456 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
1457 
1458 	if (!irqchip_in_kernel(vcpu->kvm))
1459 		return;
1460 
1461 	spin_lock(&dist->lock);
1462 	__kvm_vgic_flush_hwstate(vcpu);
1463 	spin_unlock(&dist->lock);
1464 }
1465 
1466 void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
1467 {
1468 	if (!irqchip_in_kernel(vcpu->kvm))
1469 		return;
1470 
1471 	__kvm_vgic_sync_hwstate(vcpu);
1472 }
1473 
1474 int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
1475 {
1476 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
1477 
1478 	if (!irqchip_in_kernel(vcpu->kvm))
1479 		return 0;
1480 
1481 	return test_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
1482 }
1483 
1484 void vgic_kick_vcpus(struct kvm *kvm)
1485 {
1486 	struct kvm_vcpu *vcpu;
1487 	int c;
1488 
1489 	/*
1490 	 * We've injected an interrupt, time to find out who deserves
1491 	 * a good kick...
1492 	 */
1493 	kvm_for_each_vcpu(c, vcpu, kvm) {
1494 		if (kvm_vgic_vcpu_pending_irq(vcpu))
1495 			kvm_vcpu_kick(vcpu);
1496 	}
1497 }
1498 
1499 static int vgic_validate_injection(struct kvm_vcpu *vcpu, int irq, int level)
1500 {
1501 	int edge_triggered = vgic_irq_is_edge(vcpu, irq);
1502 
1503 	/*
1504 	 * Only inject an interrupt if:
1505 	 * - edge triggered and we have a rising edge
1506 	 * - level triggered and we change level
1507 	 */
1508 	if (edge_triggered) {
1509 		int state = vgic_dist_irq_is_pending(vcpu, irq);
1510 		return level > state;
1511 	} else {
1512 		int state = vgic_dist_irq_get_level(vcpu, irq);
1513 		return level != state;
1514 	}
1515 }
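/*
 * Editor's illustration of the rules above:
 *
 *	edge:  level=1, pending=0  -> inject (rising edge)
 *	       level=1, pending=1  -> ignore (no new edge)
 *	       level=0             -> ignore (falling edges don't inject)
 *	level: level != line state -> inject (line changed)
 *	       level == line state -> ignore (no change)
 */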
1516 
1517 static int vgic_update_irq_pending(struct kvm *kvm, int cpuid,
1518 				   struct irq_phys_map *map,
1519 				   unsigned int irq_num, bool level)
1520 {
1521 	struct vgic_dist *dist = &kvm->arch.vgic;
1522 	struct kvm_vcpu *vcpu;
1523 	int edge_triggered, level_triggered;
1524 	int enabled;
1525 	bool ret = true, can_inject = true;
1526 
1527 	trace_vgic_update_irq_pending(cpuid, irq_num, level);
1528 
1529 	if (irq_num >= min(kvm->arch.vgic.nr_irqs, 1020))
1530 		return -EINVAL;
1531 
1532 	spin_lock(&dist->lock);
1533 
1534 	vcpu = kvm_get_vcpu(kvm, cpuid);
1535 	edge_triggered = vgic_irq_is_edge(vcpu, irq_num);
1536 	level_triggered = !edge_triggered;
1537 
1538 	if (!vgic_validate_injection(vcpu, irq_num, level)) {
1539 		ret = false;
1540 		goto out;
1541 	}
1542 
1543 	if (irq_num >= VGIC_NR_PRIVATE_IRQS) {
1544 		cpuid = dist->irq_spi_cpu[irq_num - VGIC_NR_PRIVATE_IRQS];
1545 		if (cpuid == VCPU_NOT_ALLOCATED) {
1546 			/* Pretend we use CPU0, and prevent injection */
1547 			cpuid = 0;
1548 			can_inject = false;
1549 		}
1550 		vcpu = kvm_get_vcpu(kvm, cpuid);
1551 	}
1552 
1553 	kvm_debug("Inject IRQ%d level %d CPU%d\n", irq_num, level, cpuid);
1554 
1555 	if (level) {
1556 		if (level_triggered)
1557 			vgic_dist_irq_set_level(vcpu, irq_num);
1558 		vgic_dist_irq_set_pending(vcpu, irq_num);
1559 	} else {
1560 		if (level_triggered) {
1561 			vgic_dist_irq_clear_level(vcpu, irq_num);
1562 			if (!vgic_dist_irq_soft_pend(vcpu, irq_num)) {
1563 				vgic_dist_irq_clear_pending(vcpu, irq_num);
1564 				vgic_cpu_irq_clear(vcpu, irq_num);
1565 				if (!compute_pending_for_cpu(vcpu))
1566 					clear_bit(cpuid, dist->irq_pending_on_cpu);
1567 			}
1568 		}
1569 
1570 		ret = false;
1571 		goto out;
1572 	}
1573 
1574 	enabled = vgic_irq_is_enabled(vcpu, irq_num);
1575 
1576 	if (!enabled || !can_inject) {
1577 		ret = false;
1578 		goto out;
1579 	}
1580 
1581 	if (!vgic_can_sample_irq(vcpu, irq_num)) {
1582 		/*
1583 		 * Level interrupt in progress, will be picked up
1584 		 * when EOId.
1585 		 */
1586 		ret = false;
1587 		goto out;
1588 	}
1589 
1590 	if (level) {
1591 		vgic_cpu_irq_set(vcpu, irq_num);
1592 		set_bit(cpuid, dist->irq_pending_on_cpu);
1593 	}
1594 
1595 out:
1596 	spin_unlock(&dist->lock);
1597 
1598 	if (ret) {
1599 		/* kick the specified vcpu */
1600 		kvm_vcpu_kick(kvm_get_vcpu(kvm, cpuid));
1601 	}
1602 
1603 	return 0;
1604 }
1605 
1606 static int vgic_lazy_init(struct kvm *kvm)
1607 {
1608 	int ret = 0;
1609 
1610 	if (unlikely(!vgic_initialized(kvm))) {
1611 		/*
1612 		 * We only provide the automatic initialization of the VGIC
1613 		 * for the legacy case of a GICv2. Any other type must
1614 		 * be explicitly initialized once setup with the respective
1615 		 * KVM device call.
1616 		 */
1617 		if (kvm->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V2)
1618 			return -EBUSY;
1619 
1620 		mutex_lock(&kvm->lock);
1621 		ret = vgic_init(kvm);
1622 		mutex_unlock(&kvm->lock);
1623 	}
1624 
1625 	return ret;
1626 }
1627 
1628 /**
1629  * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
1630  * @kvm:     The VM structure pointer
1631  * @cpuid:   The CPU for PPIs
1632  * @irq_num: The IRQ number that is assigned to the device. This IRQ
1633  *           must not be mapped to a HW interrupt.
1634  * @level:   Edge-triggered:  true:  to trigger the interrupt
1635  *			      false: to ignore the call
1636  *	     Level-sensitive  true:  raise the input signal
1637  *			      false: lower the input signal
1638  *
1639  * The GIC is not concerned with devices being active-LOW or active-HIGH for
1640  * level-sensitive interrupts.  You can think of the level parameter as 1
1641  * being HIGH and 0 being LOW and all devices being active-HIGH.
1642  */
1643 int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
1644 			bool level)
1645 {
1646 	struct irq_phys_map *map;
1647 	int ret;
1648 
1649 	ret = vgic_lazy_init(kvm);
1650 	if (ret)
1651 		return ret;
1652 
1653 	map = vgic_irq_map_search(kvm_get_vcpu(kvm, cpuid), irq_num);
1654 	if (map)
1655 		return -EINVAL;
1656 
1657 	return vgic_update_irq_pending(kvm, cpuid, NULL, irq_num, level);
1658 }
1659 
1660 /**
1661  * kvm_vgic_inject_mapped_irq - Inject a physically mapped IRQ to the vgic
1662  * @kvm:     The VM structure pointer
1663  * @cpuid:   The CPU for PPIs
1664  * @map:     Pointer to an irq_phys_map structure describing the mapping
1665  * @level:   Edge-triggered:  true:  to trigger the interrupt
1666  *			      false: to ignore the call
1667  *	     Level-sensitive: true:  raise the input signal
1668  *			      false: lower the input signal
1669  *
1670  * The GIC is not concerned with devices being active-LOW or active-HIGH for
1671  * level-sensitive interrupts.  You can think of the level parameter as 1
1672  * being HIGH and 0 being LOW and all devices being active-HIGH.
1673  */
1674 int kvm_vgic_inject_mapped_irq(struct kvm *kvm, int cpuid,
1675 			       struct irq_phys_map *map, bool level)
1676 {
1677 	int ret;
1678 
1679 	ret = vgic_lazy_init(kvm);
1680 	if (ret)
1681 		return ret;
1682 
1683 	return vgic_update_irq_pending(kvm, cpuid, map, map->virt_irq, level);
1684 }
1685 
1686 static irqreturn_t vgic_maintenance_handler(int irq, void *data)
1687 {
1688 	/*
1689 	 * We cannot rely on the vgic maintenance interrupt to be
1690 	 * delivered synchronously. This means we can only use it to
1691 	 * exit the VM, and we perform the handling of EOIed
1692 	 * interrupts on the exit path (see vgic_process_maintenance).
1693 	 */
1694 	return IRQ_HANDLED;
1695 }
1696 
1697 static struct list_head *vgic_get_irq_phys_map_list(struct kvm_vcpu *vcpu,
1698 						    int virt_irq)
1699 {
1700 	if (virt_irq < VGIC_NR_PRIVATE_IRQS)
1701 		return &vcpu->arch.vgic_cpu.irq_phys_map_list;
1702 	else
1703 		return &vcpu->kvm->arch.vgic.irq_phys_map_list;
1704 }
1705 
1706 /**
1707  * kvm_vgic_map_phys_irq - map a virtual IRQ to a physical IRQ
1708  * @vcpu: The VCPU pointer
1709  * @virt_irq: The virtual irq number
1710  * @irq: The Linux IRQ number
1711  *
1712  * Establish a mapping between a guest-visible irq (@virt_irq) and a
1713  * Linux irq (@irq). On injection, @virt_irq will be associated with
1714  * the physical interrupt represented by @irq. This mapping can be
1715  * established multiple times as long as the parameters are the same.
1716  *
1717  * Returns a valid pointer on success, and an error pointer otherwise
1718  */
1719 struct irq_phys_map *kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu,
1720 					   int virt_irq, int irq)
1721 {
1722 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
1723 	struct list_head *root = vgic_get_irq_phys_map_list(vcpu, virt_irq);
1724 	struct irq_phys_map *map;
1725 	struct irq_phys_map_entry *entry;
1726 	struct irq_desc *desc;
1727 	struct irq_data *data;
1728 	int phys_irq;
1729 
1730 	desc = irq_to_desc(irq);
1731 	if (!desc) {
1732 		kvm_err("%s: no interrupt descriptor\n", __func__);
1733 		return ERR_PTR(-EINVAL);
1734 	}
1735 
1736 	data = irq_desc_get_irq_data(desc);
1737 	while (data->parent_data)
1738 		data = data->parent_data;
1739 
1740 	phys_irq = data->hwirq;
1741 
1742 	/* Create a new mapping */
1743 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1744 	if (!entry)
1745 		return ERR_PTR(-ENOMEM);
1746 
1747 	spin_lock(&dist->irq_phys_map_lock);
1748 
1749 	/* Try to match an existing mapping */
1750 	map = vgic_irq_map_search(vcpu, virt_irq);
1751 	if (map) {
1752 		/* Make sure this mapping matches */
1753 		if (map->phys_irq != phys_irq	||
1754 		    map->irq      != irq)
1755 			map = ERR_PTR(-EINVAL);
1756 
1757 		/* Found an existing, valid mapping */
1758 		goto out;
1759 	}
1760 
1761 	map           = &entry->map;
1762 	map->virt_irq = virt_irq;
1763 	map->phys_irq = phys_irq;
1764 	map->irq      = irq;
1765 
1766 	list_add_tail_rcu(&entry->entry, root);
1767 
1768 out:
1769 	spin_unlock(&dist->irq_phys_map_lock);
1770 	/* If we've found a hit in the existing list, free the useless
1771 	 * entry */
1772 	if (IS_ERR(map) || map != &entry->map)
1773 		kfree(entry);
1774 	return map;
1775 }
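
/*
 * Usage sketch (editor's illustration; 'host_irq' is a hypothetical
 * Linux irq number): how a caller such as the arch timer emulation
 * might pair the guest's virtual timer PPI (27 here) with the physical
 * timer interrupt, and later tear the mapping down. Establishing the
 * same mapping twice is allowed, as documented above.
 *
 *	struct irq_phys_map *map;
 *
 *	map = kvm_vgic_map_phys_irq(vcpu, 27, host_irq);
 *	if (IS_ERR(map))
 *		return PTR_ERR(map);
 *
 *	...
 *
 *	kvm_vgic_unmap_phys_irq(vcpu, map);
 */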
1776 
1777 static struct irq_phys_map *vgic_irq_map_search(struct kvm_vcpu *vcpu,
1778 						int virt_irq)
1779 {
1780 	struct list_head *root = vgic_get_irq_phys_map_list(vcpu, virt_irq);
1781 	struct irq_phys_map_entry *entry;
1782 	struct irq_phys_map *map;
1783 
1784 	rcu_read_lock();
1785 
1786 	list_for_each_entry_rcu(entry, root, entry) {
1787 		map = &entry->map;
1788 		if (map->virt_irq == virt_irq) {
1789 			rcu_read_unlock();
1790 			return map;
1791 		}
1792 	}
1793 
1794 	rcu_read_unlock();
1795 
1796 	return NULL;
1797 }
1798 
1799 static void vgic_free_phys_irq_map_rcu(struct rcu_head *rcu)
1800 {
1801 	struct irq_phys_map_entry *entry;
1802 
1803 	entry = container_of(rcu, struct irq_phys_map_entry, rcu);
1804 	kfree(entry);
1805 }
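
/*
 * Note on the locking scheme (editor's summary of the code above):
 * lookups walk the map lists under rcu_read_lock() only, while updaters
 * serialize against each other with dist->irq_phys_map_lock and defer
 * the actual kfree() past a grace period via call_rcu(), so a
 * concurrent reader can never observe freed memory.
 */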
1806 
1807 /**
1808  * kvm_vgic_unmap_phys_irq - Remove a virtual to physical IRQ mapping
1809  * @vcpu: The VCPU pointer
1810  * @map: The pointer to a mapping obtained through kvm_vgic_map_phys_irq
1811  *
1812  * Remove an existing mapping between virtual and physical interrupts.
1813  */
1814 int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, struct irq_phys_map *map)
1815 {
1816 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
1817 	struct irq_phys_map_entry *entry;
1818 	struct list_head *root;
1819 
1820 	if (!map)
1821 		return -EINVAL;
1822 
1823 	root = vgic_get_irq_phys_map_list(vcpu, map->virt_irq);
1824 
1825 	spin_lock(&dist->irq_phys_map_lock);
1826 
1827 	list_for_each_entry(entry, root, entry) {
1828 		if (&entry->map == map) {
1829 			list_del_rcu(&entry->entry);
1830 			call_rcu(&entry->rcu, vgic_free_phys_irq_map_rcu);
1831 			break;
1832 		}
1833 	}
1834 
1835 	spin_unlock(&dist->irq_phys_map_lock);
1836 
1837 	return 0;
1838 }
1839 
1840 static void vgic_destroy_irq_phys_map(struct kvm *kvm, struct list_head *root)
1841 {
1842 	struct vgic_dist *dist = &kvm->arch.vgic;
1843 	struct irq_phys_map_entry *entry;
1844 
1845 	spin_lock(&dist->irq_phys_map_lock);
1846 
1847 	list_for_each_entry(entry, root, entry) {
1848 		list_del_rcu(&entry->entry);
1849 		call_rcu(&entry->rcu, vgic_free_phys_irq_map_rcu);
1850 	}
1851 
1852 	spin_unlock(&dist->irq_phys_map_lock);
1853 }
1854 
1855 void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
1856 {
1857 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
1858 
1859 	kfree(vgic_cpu->pending_shared);
1860 	kfree(vgic_cpu->active_shared);
1861 	kfree(vgic_cpu->pend_act_shared);
1862 	vgic_destroy_irq_phys_map(vcpu->kvm, &vgic_cpu->irq_phys_map_list);
1863 	vgic_cpu->pending_shared = NULL;
1864 	vgic_cpu->active_shared = NULL;
1865 	vgic_cpu->pend_act_shared = NULL;
1866 }
1867 
1868 static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs)
1869 {
1870 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
1871 	int nr_longs = BITS_TO_LONGS(nr_irqs - VGIC_NR_PRIVATE_IRQS);
1872 	int sz = nr_longs * sizeof(unsigned long);
1873 	vgic_cpu->pending_shared = kzalloc(sz, GFP_KERNEL);
1874 	vgic_cpu->active_shared = kzalloc(sz, GFP_KERNEL);
1875 	vgic_cpu->pend_act_shared = kzalloc(sz, GFP_KERNEL);
1876 
1877 	if (!vgic_cpu->pending_shared
1878 		|| !vgic_cpu->active_shared
1879 		|| !vgic_cpu->pend_act_shared) {
1880 		kvm_vgic_vcpu_destroy(vcpu);
1881 		return -ENOMEM;
1882 	}
1883 
1884 	/*
1885 	 * Store the number of LRs per vcpu, so we don't have to go
1886 	 * all the way to the distributor structure to find out. Only
1887 	 * assembly code should use this one.
1888 	 */
1889 	vgic_cpu->nr_lr = vgic->nr_lr;
1890 
1891 	return 0;
1892 }
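
/*
 * Sizing example (editor's note): with nr_irqs = 256 there are 224
 * shared interrupts, so on a 64-bit host BITS_TO_LONGS(224) = 4 and
 * each of the three per-vcpu shadow bitmaps above is a 32-byte
 * allocation.
 */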
1893 
1894 /**
1895  * kvm_vgic_vcpu_early_init - Earliest possible per-vcpu vgic init stage
1896  *
1897  * No memory allocation should be performed here, only static init.
1898  */
1899 void kvm_vgic_vcpu_early_init(struct kvm_vcpu *vcpu)
1900 {
1901 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
1902 	INIT_LIST_HEAD(&vgic_cpu->irq_phys_map_list);
1903 }
1904 
1905 /**
1906  * kvm_vgic_get_max_vcpus - Get the maximum number of VCPUs allowed by HW
1907  *
1908  * The host's GIC naturally limits the maximum number of VCPUs a guest
1909  * can use.
1910  */
1911 int kvm_vgic_get_max_vcpus(void)
1912 {
1913 	return vgic->max_gic_vcpus;
1914 }
1915 
1916 void kvm_vgic_destroy(struct kvm *kvm)
1917 {
1918 	struct vgic_dist *dist = &kvm->arch.vgic;
1919 	struct kvm_vcpu *vcpu;
1920 	int i;
1921 
1922 	kvm_for_each_vcpu(i, vcpu, kvm)
1923 		kvm_vgic_vcpu_destroy(vcpu);
1924 
1925 	vgic_free_bitmap(&dist->irq_enabled);
1926 	vgic_free_bitmap(&dist->irq_level);
1927 	vgic_free_bitmap(&dist->irq_pending);
1928 	vgic_free_bitmap(&dist->irq_soft_pend);
1929 	vgic_free_bitmap(&dist->irq_queued);
	vgic_free_bitmap(&dist->irq_active);
1930 	vgic_free_bitmap(&dist->irq_cfg);
1931 	vgic_free_bytemap(&dist->irq_priority);
1932 	if (dist->irq_spi_target) {
1933 		for (i = 0; i < dist->nr_cpus; i++)
1934 			vgic_free_bitmap(&dist->irq_spi_target[i]);
1935 	}
1936 	kfree(dist->irq_sgi_sources);
1937 	kfree(dist->irq_spi_cpu);
1938 	kfree(dist->irq_spi_mpidr);
1939 	kfree(dist->irq_spi_target);
1940 	kfree(dist->irq_pending_on_cpu);
1941 	kfree(dist->irq_active_on_cpu);
1942 	vgic_destroy_irq_phys_map(kvm, &dist->irq_phys_map_list);
1943 	dist->irq_sgi_sources = NULL;
1944 	dist->irq_spi_cpu = NULL;
1945 	dist->irq_spi_target = NULL;
1946 	dist->irq_pending_on_cpu = NULL;
1947 	dist->irq_active_on_cpu = NULL;
1948 	dist->nr_cpus = 0;
1949 }
1950 
1951 /*
1952  * Allocate and initialize the various data structures. Must be called
1953  * with kvm->lock held!
1954  */
1955 int vgic_init(struct kvm *kvm)
1956 {
1957 	struct vgic_dist *dist = &kvm->arch.vgic;
1958 	struct kvm_vcpu *vcpu;
1959 	int nr_cpus, nr_irqs;
1960 	int ret, i, vcpu_id;
1961 
1962 	if (vgic_initialized(kvm))
1963 		return 0;
1964 
1965 	nr_cpus = dist->nr_cpus = atomic_read(&kvm->online_vcpus);
1966 	if (!nr_cpus)		/* No vcpus? Can't be good... */
1967 		return -ENODEV;
1968 
1969 	/*
1970 	 * If nobody configured the number of interrupts, use the
1971 	 * legacy default.
1972 	 */
1973 	if (!dist->nr_irqs)
1974 		dist->nr_irqs = VGIC_NR_IRQS_LEGACY;
1975 
1976 	nr_irqs = dist->nr_irqs;
1977 
1978 	ret  = vgic_init_bitmap(&dist->irq_enabled, nr_cpus, nr_irqs);
1979 	ret |= vgic_init_bitmap(&dist->irq_level, nr_cpus, nr_irqs);
1980 	ret |= vgic_init_bitmap(&dist->irq_pending, nr_cpus, nr_irqs);
1981 	ret |= vgic_init_bitmap(&dist->irq_soft_pend, nr_cpus, nr_irqs);
1982 	ret |= vgic_init_bitmap(&dist->irq_queued, nr_cpus, nr_irqs);
1983 	ret |= vgic_init_bitmap(&dist->irq_active, nr_cpus, nr_irqs);
1984 	ret |= vgic_init_bitmap(&dist->irq_cfg, nr_cpus, nr_irqs);
1985 	ret |= vgic_init_bytemap(&dist->irq_priority, nr_cpus, nr_irqs);
1986 
1987 	if (ret)
1988 		goto out;
1989 
1990 	dist->irq_sgi_sources = kzalloc(nr_cpus * VGIC_NR_SGIS, GFP_KERNEL);
1991 	dist->irq_spi_cpu = kzalloc(nr_irqs - VGIC_NR_PRIVATE_IRQS, GFP_KERNEL);
1992 	dist->irq_spi_target = kzalloc(sizeof(*dist->irq_spi_target) * nr_cpus,
1993 				       GFP_KERNEL);
1994 	dist->irq_pending_on_cpu = kzalloc(BITS_TO_LONGS(nr_cpus) * sizeof(long),
1995 					   GFP_KERNEL);
1996 	dist->irq_active_on_cpu = kzalloc(BITS_TO_LONGS(nr_cpus) * sizeof(long),
1997 					   GFP_KERNEL);
1998 	if (!dist->irq_sgi_sources ||
1999 	    !dist->irq_spi_cpu ||
2000 	    !dist->irq_spi_target ||
2001 	    !dist->irq_pending_on_cpu ||
2002 	    !dist->irq_active_on_cpu) {
2003 		ret = -ENOMEM;
2004 		goto out;
2005 	}
2006 
2007 	for (i = 0; i < nr_cpus; i++)
2008 		ret |= vgic_init_bitmap(&dist->irq_spi_target[i],
2009 					nr_cpus, nr_irqs);
2010 
2011 	if (ret)
2012 		goto out;
2013 
2014 	ret = kvm->arch.vgic.vm_ops.init_model(kvm);
2015 	if (ret)
2016 		goto out;
2017 
2018 	kvm_for_each_vcpu(vcpu_id, vcpu, kvm) {
2019 		ret = vgic_vcpu_init_maps(vcpu, nr_irqs);
2020 		if (ret) {
2021 			kvm_err("VGIC: Failed to allocate vcpu memory\n");
2022 			break;
2023 		}
2024 
2025 		/*
2026 		 * Enable and configure all SGIs to be edge-triggered and
2027 		 * configure all PPIs as level-triggered.
2028 		 */
2029 		for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
2030 			if (i < VGIC_NR_SGIS) {
2031 				/* SGIs */
2032 				vgic_bitmap_set_irq_val(&dist->irq_enabled,
2033 							vcpu->vcpu_id, i, 1);
2034 				vgic_bitmap_set_irq_val(&dist->irq_cfg,
2035 							vcpu->vcpu_id, i,
2036 							VGIC_CFG_EDGE);
2037 			} else if (i < VGIC_NR_PRIVATE_IRQS) {
2038 				/* PPIs */
2039 				vgic_bitmap_set_irq_val(&dist->irq_cfg,
2040 							vcpu->vcpu_id, i,
2041 							VGIC_CFG_LEVEL);
2042 			}
2043 		}
2044 
2045 		vgic_enable(vcpu);
2046 	}
2047 
2048 out:
2049 	if (ret)
2050 		kvm_vgic_destroy(kvm);
2051 
2052 	return ret;
2053 }
2054 
2055 static int init_vgic_model(struct kvm *kvm, int type)
2056 {
2057 	switch (type) {
2058 	case KVM_DEV_TYPE_ARM_VGIC_V2:
2059 		vgic_v2_init_emulation(kvm);
2060 		break;
2061 #ifdef CONFIG_KVM_ARM_VGIC_V3
2062 	case KVM_DEV_TYPE_ARM_VGIC_V3:
2063 		vgic_v3_init_emulation(kvm);
2064 		break;
2065 #endif
2066 	default:
2067 		return -ENODEV;
2068 	}
2069 
2070 	if (atomic_read(&kvm->online_vcpus) > kvm->arch.max_vcpus)
2071 		return -E2BIG;
2072 
2073 	return 0;
2074 }
2075 
2076 /**
2077  * kvm_vgic_early_init - Earliest possible vgic initialization stage
2078  *
2079  * No memory allocation should be performed here, only static init.
2080  */
2081 void kvm_vgic_early_init(struct kvm *kvm)
2082 {
2083 	spin_lock_init(&kvm->arch.vgic.lock);
2084 	spin_lock_init(&kvm->arch.vgic.irq_phys_map_lock);
2085 	INIT_LIST_HEAD(&kvm->arch.vgic.irq_phys_map_list);
2086 }
2087 
2088 int kvm_vgic_create(struct kvm *kvm, u32 type)
2089 {
2090 	int i, vcpu_lock_idx = -1, ret;
2091 	struct kvm_vcpu *vcpu;
2092 
2093 	mutex_lock(&kvm->lock);
2094 
2095 	if (irqchip_in_kernel(kvm)) {
2096 		ret = -EEXIST;
2097 		goto out;
2098 	}
2099 
2100 	/*
2101 	 * This function is also called by the KVM_CREATE_IRQCHIP handler,
2102 	 * which has not yet had a chance to check the availability of the GICv2
2103 	 * emulation. So check this here again. KVM_CREATE_DEVICE does
2104 	 * the proper checks already.
2105 	 */
2106 	if (type == KVM_DEV_TYPE_ARM_VGIC_V2 && !vgic->can_emulate_gicv2) {
2107 		ret = -ENODEV;
2108 		goto out;
2109 	}
2110 
2111 	/*
2112 	 * Any time a vcpu is run, vcpu_load is called which tries to grab the
2113 	 * vcpu->mutex.  By grabbing the vcpu->mutex of all VCPUs we ensure
2114 	 * that no other VCPUs are run while we create the vgic.
2115 	 */
2116 	ret = -EBUSY;
2117 	kvm_for_each_vcpu(i, vcpu, kvm) {
2118 		if (!mutex_trylock(&vcpu->mutex))
2119 			goto out_unlock;
2120 		vcpu_lock_idx = i;
2121 	}
2122 
2123 	kvm_for_each_vcpu(i, vcpu, kvm) {
2124 		if (vcpu->arch.has_run_once)
2125 			goto out_unlock;
2126 	}
2128 
2129 	ret = init_vgic_model(kvm, type);
2130 	if (ret)
2131 		goto out_unlock;
2132 
2133 	kvm->arch.vgic.in_kernel = true;
2134 	kvm->arch.vgic.vgic_model = type;
2135 	kvm->arch.vgic.vctrl_base = vgic->vctrl_base;
2136 	kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;
2137 	kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;
2138 	kvm->arch.vgic.vgic_redist_base = VGIC_ADDR_UNDEF;
2139 
2140 out_unlock:
2141 	for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
2142 		vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
2143 		mutex_unlock(&vcpu->mutex);
2144 	}
2145 
2146 out:
2147 	mutex_unlock(&kvm->lock);
2148 	return ret;
2149 }
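
/*
 * Userspace sketch (editor's illustration): this function is normally
 * reached through the KVM_CREATE_DEVICE ioctl on a VM file descriptor
 * ('vm_fd' below, assumed to come from KVM_CREATE_VM), or through the
 * legacy KVM_CREATE_IRQCHIP path mentioned above.
 *
 *	struct kvm_create_device cd = {
 *		.type = KVM_DEV_TYPE_ARM_VGIC_V2,
 *	};
 *
 *	if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd))
 *		err(1, "KVM_CREATE_DEVICE");
 *
 * On success, cd.fd refers to the new vgic device.
 */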
2150 
2151 static int vgic_ioaddr_overlap(struct kvm *kvm)
2152 {
2153 	phys_addr_t dist = kvm->arch.vgic.vgic_dist_base;
2154 	phys_addr_t cpu = kvm->arch.vgic.vgic_cpu_base;
2155 
2156 	if (IS_VGIC_ADDR_UNDEF(dist) || IS_VGIC_ADDR_UNDEF(cpu))
2157 		return 0;
2158 	if ((dist <= cpu && dist + KVM_VGIC_V2_DIST_SIZE > cpu) ||
2159 	    (cpu <= dist && cpu + KVM_VGIC_V2_CPU_SIZE > dist))
2160 		return -EBUSY;
2161 	return 0;
2162 }
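
/*
 * Worked example (editor's note, assuming the usual 4K GICv2
 * distributor window): with the distributor at 0x08000000 and the CPU
 * interface at 0x08001000, dist + KVM_VGIC_V2_DIST_SIZE == 0x08001000,
 * which is not strictly greater than cpu, so the two regions abut
 * without overlapping and the check returns 0.
 */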
2163 
2164 static int vgic_ioaddr_assign(struct kvm *kvm, phys_addr_t *ioaddr,
2165 			      phys_addr_t addr, phys_addr_t size)
2166 {
2167 	int ret;
2168 
2169 	if (addr & ~KVM_PHYS_MASK)
2170 		return -E2BIG;
2171 
2172 	if (addr & (SZ_4K - 1))
2173 		return -EINVAL;
2174 
2175 	if (!IS_VGIC_ADDR_UNDEF(*ioaddr))
2176 		return -EEXIST;
2177 	if (addr + size < addr)
2178 		return -EINVAL;
2179 
2180 	*ioaddr = addr;
2181 	ret = vgic_ioaddr_overlap(kvm);
2182 	if (ret)
2183 		*ioaddr = VGIC_ADDR_UNDEF;
2184 
2185 	return ret;
2186 }
2187 
2188 /**
2189  * kvm_vgic_addr - set or get vgic VM base addresses
2190  * @kvm:   pointer to the vm struct
2191  * @type:  the VGIC addr type, one of KVM_VGIC_V[23]_ADDR_TYPE_XXX
2192  * @addr:  pointer to address value
2193  * @write: if true set the address in the VM address space, if false read the
2194  *          address
2195  *
2196  * Set or get the vgic base addresses for the distributor and the virtual CPU
2197  * interface in the VM physical address space.  These addresses are properties
2198  * of the emulated core/SoC and therefore user space initially knows this
2199  * information.
2200  */
2201 int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
2202 {
2203 	int r = 0;
2204 	struct vgic_dist *vgic = &kvm->arch.vgic;
2205 	int type_needed;
2206 	phys_addr_t *addr_ptr, block_size;
2207 	phys_addr_t alignment;
2208 
2209 	mutex_lock(&kvm->lock);
2210 	switch (type) {
2211 	case KVM_VGIC_V2_ADDR_TYPE_DIST:
2212 		type_needed = KVM_DEV_TYPE_ARM_VGIC_V2;
2213 		addr_ptr = &vgic->vgic_dist_base;
2214 		block_size = KVM_VGIC_V2_DIST_SIZE;
2215 		alignment = SZ_4K;
2216 		break;
2217 	case KVM_VGIC_V2_ADDR_TYPE_CPU:
2218 		type_needed = KVM_DEV_TYPE_ARM_VGIC_V2;
2219 		addr_ptr = &vgic->vgic_cpu_base;
2220 		block_size = KVM_VGIC_V2_CPU_SIZE;
2221 		alignment = SZ_4K;
2222 		break;
2223 #ifdef CONFIG_KVM_ARM_VGIC_V3
2224 	case KVM_VGIC_V3_ADDR_TYPE_DIST:
2225 		type_needed = KVM_DEV_TYPE_ARM_VGIC_V3;
2226 		addr_ptr = &vgic->vgic_dist_base;
2227 		block_size = KVM_VGIC_V3_DIST_SIZE;
2228 		alignment = SZ_64K;
2229 		break;
2230 	case KVM_VGIC_V3_ADDR_TYPE_REDIST:
2231 		type_needed = KVM_DEV_TYPE_ARM_VGIC_V3;
2232 		addr_ptr = &vgic->vgic_redist_base;
2233 		block_size = KVM_VGIC_V3_REDIST_SIZE;
2234 		alignment = SZ_64K;
2235 		break;
2236 #endif
2237 	default:
2238 		r = -ENODEV;
2239 		goto out;
2240 	}
2241 
2242 	if (vgic->vgic_model != type_needed) {
2243 		r = -ENODEV;
2244 		goto out;
2245 	}
2246 
2247 	if (write) {
2248 		if (!IS_ALIGNED(*addr, alignment))
2249 			r = -EINVAL;
2250 		else
2251 			r = vgic_ioaddr_assign(kvm, addr_ptr, *addr,
2252 					       block_size);
2253 	} else {
2254 		*addr = *addr_ptr;
2255 	}
2256 
2257 out:
2258 	mutex_unlock(&kvm->lock);
2259 	return r;
2260 }
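
/*
 * Userspace sketch (editor's illustration): setting the GICv2
 * distributor base via the device attribute interface, which reaches
 * this function through vgic_set_common_attr() below. 'vgic_fd' is
 * assumed to come from KVM_CREATE_DEVICE; 0x08000000 is an arbitrary
 * 4K-aligned guest address.
 *
 *	__u64 dist_addr = 0x08000000;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_ARM_VGIC_GRP_ADDR,
 *		.attr  = KVM_VGIC_V2_ADDR_TYPE_DIST,
 *		.addr  = (__u64)(unsigned long)&dist_addr,
 *	};
 *
 *	if (ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &attr))
 *		err(1, "KVM_SET_DEVICE_ATTR");
 */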
2261 
2262 int vgic_set_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
2263 {
2264 	int r;
2265 
2266 	switch (attr->group) {
2267 	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
2268 		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
2269 		u64 addr;
2270 		unsigned long type = (unsigned long)attr->attr;
2271 
2272 		if (copy_from_user(&addr, uaddr, sizeof(addr)))
2273 			return -EFAULT;
2274 
2275 		r = kvm_vgic_addr(dev->kvm, type, &addr, true);
2276 		return (r == -ENODEV) ? -ENXIO : r;
2277 	}
2278 	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
2279 		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
2280 		u32 val;
2281 		int ret = 0;
2282 
2283 		if (get_user(val, uaddr))
2284 			return -EFAULT;
2285 
2286 		/*
2287 		 * We require:
2288 		 * - at least 32 SPIs on top of the 16 SGIs and 16 PPIs
2289 		 * - at most 1024 interrupts
2290 		 * - a multiple of 32 interrupts
2291 		 */
2292 		if (val < (VGIC_NR_PRIVATE_IRQS + 32) ||
2293 		    val > VGIC_MAX_IRQS ||
2294 		    (val & 31))
2295 			return -EINVAL;
2296 
2297 		mutex_lock(&dev->kvm->lock);
2298 
2299 		if (vgic_ready(dev->kvm) || dev->kvm->arch.vgic.nr_irqs)
2300 			ret = -EBUSY;
2301 		else
2302 			dev->kvm->arch.vgic.nr_irqs = val;
2303 
2304 		mutex_unlock(&dev->kvm->lock);
2305 
2306 		return ret;
2307 	}
2308 	case KVM_DEV_ARM_VGIC_GRP_CTRL: {
2309 		switch (attr->attr) {
2310 		case KVM_DEV_ARM_VGIC_CTRL_INIT:
2311 			r = vgic_init(dev->kvm);
2312 			return r;
2313 		}
2314 		break;
2315 	}
2316 	}
2317 
2318 	return -ENXIO;
2319 }
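
/*
 * Userspace sketch (editor's illustration) for the
 * KVM_DEV_ARM_VGIC_GRP_NR_IRQS case above: 128 satisfies all three
 * constraints, being a multiple of 32, at most 1024, and leaving 96
 * SPIs on top of the 32 private interrupts.
 *
 *	__u32 nr_irqs = 128;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_ARM_VGIC_GRP_NR_IRQS,
 *		.addr  = (__u64)(unsigned long)&nr_irqs,
 *	};
 *
 *	ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &attr);
 */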
2320 
2321 int vgic_get_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
2322 {
2323 	int r = -ENXIO;
2324 
2325 	switch (attr->group) {
2326 	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
2327 		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
2328 		u64 addr;
2329 		unsigned long type = (unsigned long)attr->attr;
2330 
2331 		r = kvm_vgic_addr(dev->kvm, type, &addr, false);
2332 		if (r)
2333 			return (r == -ENODEV) ? -ENXIO : r;
2334 
2335 		if (copy_to_user(uaddr, &addr, sizeof(addr)))
2336 			return -EFAULT;
2337 		break;
2338 	}
2339 	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
2340 		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
2341 
2342 		r = put_user(dev->kvm->arch.vgic.nr_irqs, uaddr);
2343 		break;
2344 	}
2345 
2346 	}
2347 
2348 	return r;
2349 }
2350 
2351 int vgic_has_attr_regs(const struct vgic_io_range *ranges, phys_addr_t offset)
2352 {
2353 	if (vgic_find_range(ranges, 4, offset))
2354 		return 0;
2355 	else
2356 		return -ENXIO;
2357 }
2358 
2359 static void vgic_init_maintenance_interrupt(void *info)
2360 {
2361 	enable_percpu_irq(vgic->maint_irq, 0);
2362 }
2363 
2364 static int vgic_cpu_notify(struct notifier_block *self,
2365 			   unsigned long action, void *cpu)
2366 {
2367 	switch (action) {
2368 	case CPU_STARTING:
2369 	case CPU_STARTING_FROZEN:
2370 		vgic_init_maintenance_interrupt(NULL);
2371 		break;
2372 	case CPU_DYING:
2373 	case CPU_DYING_FROZEN:
2374 		disable_percpu_irq(vgic->maint_irq);
2375 		break;
2376 	}
2377 
2378 	return NOTIFY_OK;
2379 }
2380 
2381 static struct notifier_block vgic_cpu_nb = {
2382 	.notifier_call = vgic_cpu_notify,
2383 };
2384 
2385 static const struct of_device_id vgic_ids[] = {
2386 	{ .compatible = "arm,cortex-a15-gic",	.data = vgic_v2_probe, },
2387 	{ .compatible = "arm,cortex-a7-gic",	.data = vgic_v2_probe, },
2388 	{ .compatible = "arm,gic-400",		.data = vgic_v2_probe, },
2389 	{ .compatible = "arm,gic-v3",		.data = vgic_v3_probe, },
2390 	{},
2391 };
2392 
2393 int kvm_vgic_hyp_init(void)
2394 {
2395 	const struct of_device_id *matched_id;
2396 	const int (*vgic_probe)(struct device_node *, const struct vgic_ops **,
2397 				const struct vgic_params **);
2398 	struct device_node *vgic_node;
2399 	int ret;
2400 
2401 	vgic_node = of_find_matching_node_and_match(NULL,
2402 						    vgic_ids, &matched_id);
2403 	if (!vgic_node) {
2404 		kvm_err("error: no compatible GIC node found\n");
2405 		return -ENODEV;
2406 	}
2407 
2408 	vgic_probe = matched_id->data;
2409 	ret = vgic_probe(vgic_node, &vgic_ops, &vgic);
2410 	if (ret)
2411 		return ret;
2412 
2413 	ret = request_percpu_irq(vgic->maint_irq, vgic_maintenance_handler,
2414 				 "vgic", kvm_get_running_vcpus());
2415 	if (ret) {
2416 		kvm_err("Cannot register interrupt %d\n", vgic->maint_irq);
2417 		return ret;
2418 	}
2419 
2420 	ret = __register_cpu_notifier(&vgic_cpu_nb);
2421 	if (ret) {
2422 		kvm_err("Cannot register vgic CPU notifier\n");
2423 		goto out_free_irq;
2424 	}
2425 
2426 	on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
2427 
2428 	return 0;
2429 
2430 out_free_irq:
2431 	free_percpu_irq(vgic->maint_irq, kvm_get_running_vcpus());
2432 	return ret;
2433 }
2434 
2435 int kvm_irq_map_gsi(struct kvm *kvm,
2436 		    struct kvm_kernel_irq_routing_entry *entries,
2437 		    int gsi)
2438 {
2439 	return 0;
2440 }
2441 
2442 int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
2443 {
2444 	return pin;
2445 }
2446 
2447 int kvm_set_irq(struct kvm *kvm, int irq_source_id,
2448 		u32 irq, int level, bool line_status)
2449 {
2450 	unsigned int spi = irq + VGIC_NR_PRIVATE_IRQS;
2451 
2452 	trace_kvm_set_irq(irq, level, irq_source_id);
2453 
2454 	BUG_ON(!vgic_initialized(kvm));
2455 
2456 	return kvm_vgic_inject_irq(kvm, 0, spi, level);
2457 }
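
/*
 * Example (editor's note): the gsi argument is zero-based in SPI space,
 * so a trigger for gsi 5 (e.g. via an irqfd) is injected as interrupt
 * number 37 (5 + VGIC_NR_PRIVATE_IRQS), i.e. an SPI on the distributor.
 */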
2458 
2459 /* MSI not implemented yet */
2460 int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
2461 		struct kvm *kvm, int irq_source_id,
2462 		int level, bool line_status)
2463 {
2464 	return 0;
2465 }
2466