/*
 * VGICv2 MMIO handling functions
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/irqchip/arm-gic.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/iodev.h>
#include <kvm/arm_vgic.h>

#include "vgic.h"
#include "vgic-mmio.h"

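/*
 * Handle the "misc" block at the start of the distributor: GICD_CTLR
 * reflects the distributor enable bit, GICD_TYPER encodes the number
 * of implemented interrupt lines and CPUs, and GICD_IIDR identifies
 * KVM as the implementation.
 */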
static unsigned long vgic_mmio_read_v2_misc(struct kvm_vcpu *vcpu,
					    gpa_t addr, unsigned int len)
{
	u32 value;

	switch (addr & 0x0c) {
	case GIC_DIST_CTRL:
		value = vcpu->kvm->arch.vgic.enabled ? GICD_ENABLE : 0;
		break;
	case GIC_DIST_CTR:
		value = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
		value = (value >> 5) - 1;
		value |= (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
		break;
	case GIC_DIST_IIDR:
		value = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
		break;
	default:
		return 0;
	}

	return value;
}

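/*
 * Only GICD_CTLR is writable here. Enabling the distributor kicks all
 * VCPUs so that previously pending interrupts get delivered; writes to
 * GICD_TYPER and GICD_IIDR are ignored.
 */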
static void vgic_mmio_write_v2_misc(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    unsigned long val)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	bool was_enabled = dist->enabled;

	switch (addr & 0x0c) {
	case GIC_DIST_CTRL:
		dist->enabled = val & GICD_ENABLE;
		if (!was_enabled && dist->enabled)
			vgic_kick_vcpus(vcpu->kvm);
		break;
	case GIC_DIST_CTR:
	case GIC_DIST_IIDR:
		/* Nothing to do */
		return;
	}
}

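/*
 * A write to GICD_SGIR injects an SGI: bits [3:0] hold the SGI number,
 * bits [23:16] the CPU target list and bits [25:24] the target filter
 * (use the list, all-but-self, or self only).
 */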
static void vgic_mmio_write_sgir(struct kvm_vcpu *source_vcpu,
				 gpa_t addr, unsigned int len,
				 unsigned long val)
{
	int nr_vcpus = atomic_read(&source_vcpu->kvm->online_vcpus);
	int intid = val & 0xf;
	int targets = (val >> 16) & 0xff;
	int mode = (val >> 24) & 0x03;
	int c;
	struct kvm_vcpu *vcpu;

	switch (mode) {
	case 0x0:		/* as specified by targets */
		break;
	case 0x1:
		targets = (1U << nr_vcpus) - 1;			/* all, ... */
		targets &= ~(1U << source_vcpu->vcpu_id);	/* but self */
		break;
	case 0x2:		/* this very vCPU only */
		targets = (1U << source_vcpu->vcpu_id);
		break;
	case 0x3:		/* reserved */
		return;
	}

	kvm_for_each_vcpu(c, vcpu, source_vcpu->kvm) {
		struct vgic_irq *irq;

		if (!(targets & (1U << c)))
			continue;

		irq = vgic_get_irq(source_vcpu->kvm, vcpu, intid);

		spin_lock(&irq->irq_lock);
		irq->pending = true;
		irq->source |= 1U << source_vcpu->vcpu_id;

		vgic_queue_irq_unlock(source_vcpu->kvm, irq);
		vgic_put_irq(source_vcpu->kvm, irq);
	}
}

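/* GICD_ITARGETSR holds one target byte per interrupt. */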
static unsigned long vgic_mmio_read_target(struct kvm_vcpu *vcpu,
					   gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	int i;
	u64 val = 0;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		val |= (u64)irq->targets << (i * 8);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return val;
}

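/*
 * A write to GICD_ITARGETSR updates the target mask of each covered
 * SPI and reroutes the interrupt to the lowest-numbered VCPU in the
 * new mask.
 */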
static void vgic_mmio_write_target(struct kvm_vcpu *vcpu,
				   gpa_t addr, unsigned int len,
				   unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	int i;

	/* GICD_ITARGETSR[0-7] are read-only */
	if (intid < VGIC_NR_PRIVATE_IRQS)
		return;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid + i);
		int target;

		spin_lock(&irq->irq_lock);

		irq->targets = (val >> (i * 8)) & 0xff;
		target = irq->targets ? __ffs(irq->targets) : 0;
		irq->target_vcpu = kvm_get_vcpu(vcpu->kvm, target);

		spin_unlock(&irq->irq_lock);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

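/*
 * GICD_CPENDSGIR and GICD_SPENDSGIR expose one byte per SGI, with one
 * bit per possible source VCPU.
 */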
static unsigned long vgic_mmio_read_sgipend(struct kvm_vcpu *vcpu,
					    gpa_t addr, unsigned int len)
{
	u32 intid = addr & 0x0f;
	int i;
	u64 val = 0;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		val |= (u64)irq->source << (i * 8);

		vgic_put_irq(vcpu->kvm, irq);
	}
	return val;
}

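/*
 * A write to GICD_CPENDSGIR clears the selected source bits; the SGI
 * stops being pending once no source is left.
 */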
static void vgic_mmio_write_sgipendc(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	u32 intid = addr & 0x0f;
	int i;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock(&irq->irq_lock);

		irq->source &= ~((val >> (i * 8)) & 0xff);
		if (!irq->source)
			irq->pending = false;

		spin_unlock(&irq->irq_lock);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

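/*
 * A write to GICD_SPENDSGIR sets the selected source bits and queues
 * the now-pending SGI on its target VCPU.
 */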
static void vgic_mmio_write_sgipends(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	u32 intid = addr & 0x0f;
	int i;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock(&irq->irq_lock);

		irq->source |= (val >> (i * 8)) & 0xff;

		if (irq->source) {
			irq->pending = true;
			vgic_queue_irq_unlock(vcpu->kvm, irq);
		} else {
			spin_unlock(&irq->irq_lock);
		}
		vgic_put_irq(vcpu->kvm, irq);
	}
}

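/* Dispatch VMCR accesses to the GICv2 or GICv3 specific backend. */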
static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_set_vmcr(vcpu, vmcr);
	else
		vgic_v3_set_vmcr(vcpu, vmcr);
}

static void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_get_vmcr(vcpu, vmcr);
	else
		vgic_v3_get_vmcr(vcpu, vmcr);
}

#define GICC_ARCH_VERSION_V2	0x2

/* These are for userland accesses only, there is no guest-facing emulation. */
static unsigned long vgic_mmio_read_vcpuif(struct kvm_vcpu *vcpu,
					   gpa_t addr, unsigned int len)
{
	struct vgic_vmcr vmcr;
	u32 val;

	vgic_get_vmcr(vcpu, &vmcr);

	switch (addr & 0xff) {
	case GIC_CPU_CTRL:
		val = vmcr.ctlr;
		break;
	case GIC_CPU_PRIMASK:
		val = vmcr.pmr;
		break;
	case GIC_CPU_BINPOINT:
		val = vmcr.bpr;
		break;
	case GIC_CPU_ALIAS_BINPOINT:
		val = vmcr.abpr;
		break;
	case GIC_CPU_IDENT:
		val = ((PRODUCT_ID_KVM << 20) |
		       (GICC_ARCH_VERSION_V2 << 16) |
		       IMPLEMENTER_ARM);
		break;
	default:
		return 0;
	}

	return val;
}

static void vgic_mmio_write_vcpuif(struct kvm_vcpu *vcpu,
				   gpa_t addr, unsigned int len,
				   unsigned long val)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);

	switch (addr & 0xff) {
	case GIC_CPU_CTRL:
		vmcr.ctlr = val;
		break;
	case GIC_CPU_PRIMASK:
		vmcr.pmr = val;
		break;
	case GIC_CPU_BINPOINT:
		vmcr.bpr = val;
		break;
	case GIC_CPU_ALIAS_BINPOINT:
		vmcr.abpr = val;
		break;
	}

	vgic_set_vmcr(vcpu, &vmcr);
}

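/*
 * The register maps below describe each region either with a fixed
 * length or with a number of bits per interrupt, in which case the
 * region's size scales with the number of implemented interrupts.
 */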
static const struct vgic_register_region vgic_v2_dist_registers[] = {
	REGISTER_DESC_WITH_LENGTH(GIC_DIST_CTRL,
		vgic_mmio_read_v2_misc, vgic_mmio_write_v2_misc, 12,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_IGROUP,
		vgic_mmio_read_rao, vgic_mmio_write_wi, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_SET,
		vgic_mmio_read_enable, vgic_mmio_write_senable, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_CLEAR,
		vgic_mmio_read_enable, vgic_mmio_write_cenable, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_SET,
		vgic_mmio_read_pending, vgic_mmio_write_spending, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_CLEAR,
		vgic_mmio_read_pending, vgic_mmio_write_cpending, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_SET,
		vgic_mmio_read_active, vgic_mmio_write_sactive, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_CLEAR,
		vgic_mmio_read_active, vgic_mmio_write_cactive, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PRI,
		vgic_mmio_read_priority, vgic_mmio_write_priority, 8,
		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_TARGET,
		vgic_mmio_read_target, vgic_mmio_write_target, 8,
		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_CONFIG,
		vgic_mmio_read_config, vgic_mmio_write_config, 2,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_DIST_SOFTINT,
		vgic_mmio_read_raz, vgic_mmio_write_sgir, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_DIST_SGI_PENDING_CLEAR,
		vgic_mmio_read_sgipend, vgic_mmio_write_sgipendc, 16,
		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_LENGTH(GIC_DIST_SGI_PENDING_SET,
		vgic_mmio_read_sgipend, vgic_mmio_write_sgipends, 16,
		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
};

static const struct vgic_register_region vgic_v2_cpu_registers[] = {
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_CTRL,
		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_PRIMASK,
		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_BINPOINT,
		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_ALIAS_BINPOINT,
		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_ACTIVEPRIO,
		vgic_mmio_read_raz, vgic_mmio_write_wi, 16,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_IDENT,
		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
		VGIC_ACCESS_32bit),
};

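/*
 * Wire up the distributor register map as a KVM I/O device and report
 * the size of the MMIO window it occupies.
 */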
unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev)
{
	dev->regions = vgic_v2_dist_registers;
	dev->nr_regions = ARRAY_SIZE(vgic_v2_dist_registers);

	kvm_iodevice_init(&dev->dev, &kvm_io_gic_ops);

	return SZ_4K;
}

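/*
 * Check whether a userspace-supplied attribute offset falls into one
 * of the regions we emulate, so device attribute queries can be
 * answered without performing an actual access.
 */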
int vgic_v2_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int nr_irqs = dev->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
	const struct vgic_register_region *regions;
	gpa_t addr;
	int nr_regions, i, len;

	addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		regions = vgic_v2_dist_registers;
		nr_regions = ARRAY_SIZE(vgic_v2_dist_registers);
		break;
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		regions = vgic_v2_cpu_registers;
		nr_regions = ARRAY_SIZE(vgic_v2_cpu_registers);
		break;
	default:
		return -ENXIO;
	}

	/* We only support aligned 32-bit accesses. */
	if (addr & 3)
		return -ENXIO;

	for (i = 0; i < nr_regions; i++) {
		if (regions[i].bits_per_irq)
			len = (regions[i].bits_per_irq * nr_irqs) / 8;
		else
			len = regions[i].len;

		if (regions[i].reg_offset <= addr &&
		    regions[i].reg_offset + len > addr)
			return 0;
	}

	return -ENXIO;
}

/*
 * When userland tries to access the VGIC register handlers, we need to
 * create a usable struct vgic_io_device to be passed to the handlers and we
 * have to set up a buffer similar to what would have happened if a guest MMIO
 * access occurred, including doing endian conversions on BE systems.
 */
static int vgic_uaccess(struct kvm_vcpu *vcpu, struct vgic_io_device *dev,
			bool is_write, int offset, u32 *val)
{
	unsigned int len = 4;
	u8 buf[4];
	int ret;

	if (is_write) {
		vgic_data_host_to_mmio_bus(buf, len, *val);
		ret = kvm_io_gic_ops.write(vcpu, &dev->dev, offset, len, buf);
	} else {
		ret = kvm_io_gic_ops.read(vcpu, &dev->dev, offset, len, buf);
		if (!ret)
			*val = vgic_data_mmio_bus_to_host(buf, len);
	}

	return ret;
}

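/*
 * Userspace accessors: wrap the CPU interface or distributor register
 * map in a temporary vgic_io_device and reuse the common helper above.
 */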
int vgic_v2_cpuif_uaccess(struct kvm_vcpu *vcpu, bool is_write,
			  int offset, u32 *val)
{
	struct vgic_io_device dev = {
		.regions = vgic_v2_cpu_registers,
		.nr_regions = ARRAY_SIZE(vgic_v2_cpu_registers),
		.iodev_type = IODEV_CPUIF,
	};

	return vgic_uaccess(vcpu, &dev, is_write, offset, val);
}

int vgic_v2_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
			 int offset, u32 *val)
{
	struct vgic_io_device dev = {
		.regions = vgic_v2_dist_registers,
		.nr_regions = ARRAY_SIZE(vgic_v2_dist_registers),
		.iodev_type = IODEV_DIST,
	};

	return vgic_uaccess(vcpu, &dev, is_write, offset, val);
}