/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/arm_vgic.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_asm.h>

#include "vgic.h"

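/*
 * Handle an EOI maintenance interrupt: for every list register flagged
 * in EISR, decode the interrupt ID according to the emulated GIC model
 * and forward the ack notification for SPIs. EISR is then cleared so
 * that stale maintenance state is not picked up on the next sync, and
 * the underflow notification (UIE) is switched off again.
 */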
void vgic_v3_process_maintenance(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;
	u32 model = vcpu->kvm->arch.vgic.vgic_model;

	if (cpuif->vgic_misr & ICH_MISR_EOI) {
		unsigned long eisr_bmap = cpuif->vgic_eisr;
		int lr;

		for_each_set_bit(lr, &eisr_bmap, kvm_vgic_global_state.nr_lr) {
			u32 intid;
			u64 val = cpuif->vgic_lr[lr];

			if (model == KVM_DEV_TYPE_ARM_VGIC_V3)
				intid = val & ICH_LR_VIRTUAL_ID_MASK;
			else
				intid = val & GICH_LR_VIRTUALID;

			WARN_ON(cpuif->vgic_lr[lr] & ICH_LR_STATE);

			/* Only SPIs require notification */
			if (vgic_valid_spi(vcpu->kvm, intid))
				kvm_notify_acked_irq(vcpu->kvm, 0,
						     intid - VGIC_NR_PRIVATE_IRQS);
		}

		/*
		 * In the next iterations of the vcpu loop, if we sync
		 * the vgic state after flushing it, but before
		 * entering the guest (this happens for pending
		 * signals and vmid rollovers), then make sure we
		 * don't pick up any old maintenance interrupts here.
		 */
		cpuif->vgic_eisr = 0;
	}

	cpuif->vgic_hcr &= ~ICH_HCR_UIE;
}

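/*
 * Request an underflow maintenance interrupt (ICH_HCR_EL2.UIE), so that
 * we get notified when list registers free up and further pending
 * interrupts can be injected.
 */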
void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;

	cpuif->vgic_hcr |= ICH_HCR_UIE;
}

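/*
 * Transfer the state of the list registers after a guest exit back into
 * the software model: reread the active bit, re-latch edge interrupts
 * that are still pending (remembering the source CPU for GICv2 SGIs),
 * and recompute the pending state of level interrupts from line_level
 * and soft_pending.
 */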
void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	int lr;

	for (lr = 0; lr < vcpu->arch.vgic_cpu.used_lrs; lr++) {
		u64 val = cpuif->vgic_lr[lr];
		u32 intid;
		struct vgic_irq *irq;

		if (model == KVM_DEV_TYPE_ARM_VGIC_V3)
			intid = val & ICH_LR_VIRTUAL_ID_MASK;
		else
			intid = val & GICH_LR_VIRTUALID;
		irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
		if (!irq)	/* An LPI could have been unmapped. */
			continue;

		spin_lock(&irq->irq_lock);

		/* Always preserve the active bit */
		irq->active = !!(val & ICH_LR_ACTIVE_BIT);

		/* Edge is the only case where we preserve the pending bit */
		if (irq->config == VGIC_CONFIG_EDGE &&
		    (val & ICH_LR_PENDING_BIT)) {
			irq->pending = true;

			if (vgic_irq_is_sgi(intid) &&
			    model == KVM_DEV_TYPE_ARM_VGIC_V2) {
				u32 cpuid = val & GICH_LR_PHYSID_CPUID;

				cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;
				irq->source |= (1 << cpuid);
			}
		}

		/*
		 * Clear soft pending state when level irqs have been acked.
		 * Always regenerate the pending state.
		 */
		if (irq->config == VGIC_CONFIG_LEVEL) {
			if (!(val & ICH_LR_PENDING_BIT))
				irq->soft_pending = false;

			irq->pending = irq->line_level || irq->soft_pending;
		}

		spin_unlock(&irq->irq_lock);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

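/*
 * Translate the state of a struct vgic_irq into its list register
 * representation and install it at index @lr, ready for injection on
 * the next guest entry.
 */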
/* Requires the irq to be locked already */
void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
{
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	u64 val = irq->intid;

	if (irq->pending) {
		val |= ICH_LR_PENDING_BIT;

		if (irq->config == VGIC_CONFIG_EDGE)
			irq->pending = false;

		if (vgic_irq_is_sgi(irq->intid) &&
		    model == KVM_DEV_TYPE_ARM_VGIC_V2) {
			u32 src = ffs(irq->source);

			BUG_ON(!src);
			val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
			irq->source &= ~(1 << (src - 1));
			if (irq->source)
				irq->pending = true;
		}
	}

	if (irq->active)
		val |= ICH_LR_ACTIVE_BIT;

	if (irq->hw) {
		val |= ICH_LR_HW;
		val |= ((u64)irq->hwintid) << ICH_LR_PHYS_ID_SHIFT;
		/*
		 * Never set pending+active on a HW interrupt, as the
		 * pending state is kept at the physical distributor
		 * level.
		 */
		if (irq->active && irq->pending)
			val &= ~ICH_LR_PENDING_BIT;
	} else {
		if (irq->config == VGIC_CONFIG_LEVEL)
			val |= ICH_LR_EOI;
	}

	/*
	 * We currently only support Group1 interrupts, which is a
	 * known defect. This needs to be addressed at some point.
	 */
	if (model == KVM_DEV_TYPE_ARM_VGIC_V3)
		val |= ICH_LR_GROUP;

	val |= (u64)irq->priority << ICH_LR_PRIORITY_SHIFT;

	vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = val;
}

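/* Reset the given list register to an empty (invalid) state. */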
void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr)
{
	vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = 0;
}

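/*
 * Pack the software view of the CPU interface configuration (CTLR,
 * binary points and priority mask) into the ICH_VMCR_EL2 layout.
 */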
void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
	u32 vmcr;

	vmcr  = (vmcrp->ctlr << ICH_VMCR_CTLR_SHIFT) & ICH_VMCR_CTLR_MASK;
	vmcr |= (vmcrp->abpr << ICH_VMCR_BPR1_SHIFT) & ICH_VMCR_BPR1_MASK;
	vmcr |= (vmcrp->bpr << ICH_VMCR_BPR0_SHIFT) & ICH_VMCR_BPR0_MASK;
	vmcr |= (vmcrp->pmr << ICH_VMCR_PMR_SHIFT) & ICH_VMCR_PMR_MASK;

	vcpu->arch.vgic_cpu.vgic_v3.vgic_vmcr = vmcr;
}

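/* Unpack ICH_VMCR_EL2 back into the software struct vgic_vmcr view. */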
void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
	u32 vmcr = vcpu->arch.vgic_cpu.vgic_v3.vgic_vmcr;

	vmcrp->ctlr = (vmcr & ICH_VMCR_CTLR_MASK) >> ICH_VMCR_CTLR_SHIFT;
	vmcrp->abpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
	vmcrp->bpr  = (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
	vmcrp->pmr  = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
}

#define INITIAL_PENDBASER_VALUE						  \
	(GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWb)		| \
	GIC_BASER_CACHEABILITY(GICR_PENDBASER, OUTER, SameAsInner)	| \
	GIC_BASER_SHAREABILITY(GICR_PENDBASER, InnerShareable))

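/*
 * Bring the per-vcpu CPU interface into its reset state: clear VMCR,
 * mark all list registers as unused, set SRE and PENDBASER according to
 * the emulated GIC model, and enable the virtual interface.
 */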
void vgic_v3_enable(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *vgic_v3 = &vcpu->arch.vgic_cpu.vgic_v3;

	/*
	 * By forcing VMCR to zero, the GIC will restore the binary
	 * points to their reset values. Anything else resets to zero
	 * anyway.
	 */
	vgic_v3->vgic_vmcr = 0;
	vgic_v3->vgic_elrsr = ~0;

	/*
	 * If we are emulating a GICv3, we do it in a non-GICv2-compatible
	 * way, so we force SRE to 1 to demonstrate this to the guest.
	 * This goes with the spec allowing the value to be RAO/WI.
	 */
	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
		vgic_v3->vgic_sre = ICC_SRE_EL1_SRE;
		vcpu->arch.vgic_cpu.pendbaser = INITIAL_PENDBASER_VALUE;
	} else {
		vgic_v3->vgic_sre = 0;
	}

	/* Get the show on the road... */
	vgic_v3->vgic_hcr = ICH_HCR_EN;
}

/* check for overlapping regions and for regions crossing the end of memory */
static bool vgic_v3_check_base(struct kvm *kvm)
{
	struct vgic_dist *d = &kvm->arch.vgic;
	gpa_t redist_size = KVM_VGIC_V3_REDIST_SIZE;

	redist_size *= atomic_read(&kvm->online_vcpus);

	if (d->vgic_dist_base + KVM_VGIC_V3_DIST_SIZE < d->vgic_dist_base)
		return false;
	if (d->vgic_redist_base + redist_size < d->vgic_redist_base)
		return false;

	if (d->vgic_dist_base + KVM_VGIC_V3_DIST_SIZE <= d->vgic_redist_base)
		return true;
	if (d->vgic_redist_base + redist_size <= d->vgic_dist_base)
		return true;

	return false;
}

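/*
 * Register the distributor, redistributor and (if configured) ITS MMIO
 * regions with the kernel, once userspace has set the base addresses
 * and initialized the vgic. Marks the vgic as ready on success.
 */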
int vgic_v3_map_resources(struct kvm *kvm)
{
	int ret = 0;
	struct vgic_dist *dist = &kvm->arch.vgic;

	if (vgic_ready(kvm))
		goto out;

	if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
	    IS_VGIC_ADDR_UNDEF(dist->vgic_redist_base)) {
		kvm_err("Need to set vgic distributor addresses first\n");
		ret = -ENXIO;
		goto out;
	}

	if (!vgic_v3_check_base(kvm)) {
		kvm_err("VGIC redist and dist frames overlap\n");
		ret = -EINVAL;
		goto out;
	}

	/*
	 * For a VGICv3 we require the userland to explicitly initialize
	 * the VGIC before we need to use it.
	 */
	if (!vgic_initialized(kvm)) {
		ret = -EBUSY;
		goto out;
	}

	ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V3);
	if (ret) {
		kvm_err("Unable to register VGICv3 dist MMIO regions\n");
		goto out;
	}

	ret = vgic_register_redist_iodevs(kvm, dist->vgic_redist_base);
	if (ret) {
		kvm_err("Unable to register VGICv3 redist MMIO regions\n");
		goto out;
	}

	if (vgic_has_its(kvm)) {
		ret = vgic_register_its_iodevs(kvm);
		if (ret) {
			kvm_err("Unable to register VGIC ITS MMIO regions\n");
			goto out;
		}
	}

	dist->ready = true;

out:
	return ret;
}

/**
 * vgic_v3_probe - probe for a GICv3 compatible interrupt controller
 * @info:	pointer to the GIC description
 *
 * Returns 0 if a GICv3 has been found, an error code otherwise
 */
int vgic_v3_probe(const struct gic_kvm_info *info)
{
	u32 ich_vtr_el2 = kvm_call_hyp(__vgic_v3_get_ich_vtr_el2);
	int ret;

	/*
	 * The ListRegs field is 5 bits, but there is an architectural
	 * maximum of 16 list registers. Just ignore bit 4...
	 */
	kvm_vgic_global_state.nr_lr = (ich_vtr_el2 & 0xf) + 1;
	kvm_vgic_global_state.can_emulate_gicv2 = false;

	if (!info->vcpu.start) {
		kvm_info("GICv3: no GICV resource entry\n");
		kvm_vgic_global_state.vcpu_base = 0;
	} else if (!PAGE_ALIGNED(info->vcpu.start)) {
		pr_warn("GICV physical address 0x%llx not page aligned\n",
			(unsigned long long)info->vcpu.start);
		kvm_vgic_global_state.vcpu_base = 0;
	} else if (!PAGE_ALIGNED(resource_size(&info->vcpu))) {
		pr_warn("GICV size 0x%llx not a multiple of page size 0x%lx\n",
			(unsigned long long)resource_size(&info->vcpu),
			PAGE_SIZE);
		kvm_vgic_global_state.vcpu_base = 0;
	} else {
		kvm_vgic_global_state.vcpu_base = info->vcpu.start;
		kvm_vgic_global_state.can_emulate_gicv2 = true;
		ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2);
		if (ret) {
			kvm_err("Cannot register GICv2 KVM device.\n");
			return ret;
		}
		kvm_info("vgic-v2@%llx\n", info->vcpu.start);
	}
	ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V3);
	if (ret) {
		kvm_err("Cannot register GICv3 KVM device.\n");
		kvm_unregister_device_ops(KVM_DEV_TYPE_ARM_VGIC_V2);
		return ret;
	}

	if (kvm_vgic_global_state.vcpu_base == 0)
		kvm_info("disabling GICv2 emulation\n");

	kvm_vgic_global_state.vctrl_base = NULL;
	kvm_vgic_global_state.type = VGIC_V3;
	kvm_vgic_global_state.max_gic_vcpus = VGIC_V3_MAX_CPUS;

	return 0;
}