/*
 * Copyright (C) 2015, 2016 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/irqchip/arm-gic.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/arm_vgic.h>
#include <asm/kvm_mmu.h>

#include "vgic.h"

/*
 * Call this function to convert a u64 value to an unsigned long * bitmask
 * in a way that works on both 32-bit and 64-bit LE and BE platforms.
 *
 * Warning: Calling this function may modify *val.
 */
static unsigned long *u64_to_bitmask(u64 *val)
{
#if defined(CONFIG_CPU_BIG_ENDIAN) && BITS_PER_LONG == 32
	*val = (*val >> 32) | (*val << 32);
#endif
	return (unsigned long *)val;
}
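
/*
 * For example, on a 32-bit big-endian system bit 32 of the u64
 * 0x0000000100000000 lives in the first (most significant) word, but the
 * generic bitmap helpers expect it in the second unsigned long. Swapping
 * the two 32-bit halves up front lets for_each_set_bit() walk the value
 * correctly.
 */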

void vgic_v2_process_maintenance(struct kvm_vcpu *vcpu)
{
	struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;

	if (cpuif->vgic_misr & GICH_MISR_EOI) {
		u64 eisr = cpuif->vgic_eisr;
		unsigned long *eisr_bmap = u64_to_bitmask(&eisr);
		int lr;

		for_each_set_bit(lr, eisr_bmap, kvm_vgic_global_state.nr_lr) {
			u32 intid = cpuif->vgic_lr[lr] & GICH_LR_VIRTUALID;

			WARN_ON(cpuif->vgic_lr[lr] & GICH_LR_STATE);

			/* Only SPIs require notification */
			if (vgic_valid_spi(vcpu->kvm, intid))
				kvm_notify_acked_irq(vcpu->kvm, 0,
						     intid - VGIC_NR_PRIVATE_IRQS);
		}
	}

	/* Disable the underflow maintenance interrupt */
	cpuif->vgic_hcr &= ~GICH_HCR_UIE;

	/*
	 * In the next iterations of the vcpu loop, if we sync the
	 * vgic state after flushing it, but before entering the guest
	 * (this happens for pending signals and vmid rollovers), then
	 * make sure we don't pick up any old maintenance interrupts
	 * here.
	 */
	cpuif->vgic_eisr = 0;
}

void vgic_v2_set_underflow(struct kvm_vcpu *vcpu)
{
	struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;

	cpuif->vgic_hcr |= GICH_HCR_UIE;
}

/*
 * Transfer the contents of the LRs back into the corresponding ap_list:
 * - the active bit is transferred as is
 * - the pending bit is
 *   - transferred as is for edge-sensitive IRQs
 *   - set to the line level (resample time) for level-sensitive IRQs
 */
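/*
 * For example, a level-sensitive IRQ that the guest has EOIed while its
 * input line is still asserted leaves the LR with the pending bit clear;
 * folding then recomputes pending from line_level, so the interrupt gets
 * queued again. An edge-sensitive IRQ in the same situation stays idle
 * until a new edge arrives.
 */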
void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
{
	struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
	int lr;

	for (lr = 0; lr < vcpu->arch.vgic_cpu.used_lrs; lr++) {
		u32 val = cpuif->vgic_lr[lr];
		u32 intid = val & GICH_LR_VIRTUALID;
		struct vgic_irq *irq;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid);

		spin_lock(&irq->irq_lock);

		/* Always preserve the active bit */
		irq->active = !!(val & GICH_LR_ACTIVE_BIT);

		/* Edge is the only case where we preserve the pending bit */
		if (irq->config == VGIC_CONFIG_EDGE &&
		    (val & GICH_LR_PENDING_BIT)) {
			irq->pending = true;

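			/*
			 * For an SGI, also record which source CPU the
			 * pending state came from, so that it can later
			 * be replayed on a per-source basis.
			 */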
			if (vgic_irq_is_sgi(intid)) {
				u32 cpuid = val & GICH_LR_PHYSID_CPUID;

				cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;
				irq->source |= (1 << cpuid);
			}
		}

		/*
		 * Clear soft pending state when level irqs have been acked.
		 * Always regenerate the pending state.
		 */
		if (irq->config == VGIC_CONFIG_LEVEL) {
			if (!(val & GICH_LR_PENDING_BIT))
				irq->soft_pending = false;

			irq->pending = irq->line_level || irq->soft_pending;
		}

		spin_unlock(&irq->irq_lock);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

/*
 * Populate the given LR with the state of a given IRQ:
 * - for an edge-sensitive IRQ the pending state is cleared in struct vgic_irq
 * - for a level-sensitive IRQ the pending state value is unchanged;
 *   it is dictated directly by the input level
 *
 * If @irq describes an SGI with multiple sources, we choose the
 * lowest-numbered source VCPU and clear that bit in the source bitmap.
 *
 * The irq_lock must be held by the caller.
 */
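/*
 * For example, with a source bitmap of 0b0101 (SGIs pending from VCPUs 0
 * and 2), the LR is populated with source CPU 0, bit 0 is cleared in the
 * bitmap, and the pending flag is raised again so the SGI from VCPU 2
 * gets its own LR on a later pass.
 */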
void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
{
	u32 val = irq->intid;

	if (irq->pending) {
		val |= GICH_LR_PENDING_BIT;

		if (irq->config == VGIC_CONFIG_EDGE)
			irq->pending = false;

		if (vgic_irq_is_sgi(irq->intid)) {
			u32 src = ffs(irq->source);

			BUG_ON(!src);
			val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
			irq->source &= ~(1 << (src - 1));
			if (irq->source)
				irq->pending = true;
		}
	}

	if (irq->active)
		val |= GICH_LR_ACTIVE_BIT;

	if (irq->hw) {
		val |= GICH_LR_HW;
		val |= irq->hwintid << GICH_LR_PHYSID_CPUID_SHIFT;
		/*
		 * Never set pending+active on a HW interrupt, as the
		 * pending state is kept at the physical distributor
		 * level.
		 */
		if (irq->active && irq->pending)
			val &= ~GICH_LR_PENDING_BIT;
	} else {
		if (irq->config == VGIC_CONFIG_LEVEL)
			val |= GICH_LR_EOI;
	}

	/* The GICv2 LR only holds five bits of priority. */
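	/*
	 * For example, a guest priority of 0xa0 is stored as 0x14 in the
	 * LR: bits [7:3] are kept, bits [2:0] are dropped.
	 */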
	val |= (irq->priority >> 3) << GICH_LR_PRIORITY_SHIFT;

	vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = val;
}

void vgic_v2_clear_lr(struct kvm_vcpu *vcpu, int lr)
{
	vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = 0;
}

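/*
 * GICH_VMCR shadows the guest's view of the GICv2 CPU interface
 * (GICC_CTLR, GICC_BPR, GICC_ABPR and GICC_PMR); these helpers pack and
 * unpack the emulated fields into that hardware layout.
 */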
void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
	u32 vmcr;

	vmcr  = (vmcrp->ctlr << GICH_VMCR_CTRL_SHIFT) & GICH_VMCR_CTRL_MASK;
	vmcr |= (vmcrp->abpr << GICH_VMCR_ALIAS_BINPOINT_SHIFT) &
		GICH_VMCR_ALIAS_BINPOINT_MASK;
	vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) &
		GICH_VMCR_BINPOINT_MASK;
	vmcr |= (vmcrp->pmr << GICH_VMCR_PRIMASK_SHIFT) &
		GICH_VMCR_PRIMASK_MASK;

	vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = vmcr;
}

void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
	u32 vmcr = vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr;

	vmcrp->ctlr = (vmcr & GICH_VMCR_CTRL_MASK) >>
			GICH_VMCR_CTRL_SHIFT;
	vmcrp->abpr = (vmcr & GICH_VMCR_ALIAS_BINPOINT_MASK) >>
			GICH_VMCR_ALIAS_BINPOINT_SHIFT;
	vmcrp->bpr  = (vmcr & GICH_VMCR_BINPOINT_MASK) >>
			GICH_VMCR_BINPOINT_SHIFT;
	vmcrp->pmr  = (vmcr & GICH_VMCR_PRIMASK_MASK) >>
			GICH_VMCR_PRIMASK_SHIFT;
}

void vgic_v2_enable(struct kvm_vcpu *vcpu)
{
	/*
	 * By forcing VMCR to zero, the GIC will restore the binary
	 * points to their reset values. Anything else resets to zero
	 * anyway.
	 */
	vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = 0;
	vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr = ~0;

	/* Get the show on the road... */
	vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr = GICH_HCR_EN;
}

/* check for overlapping regions and for regions crossing the end of memory */
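/*
 * For example (assuming the usual 4K distributor and 8K CPU interface
 * frame sizes): dist_base 0x08000000 with cpu_base 0x08000800 is
 * rejected, since the distributor frame ends at 0x08001000 and thus
 * overlaps the CPU interface frame; cpu_base 0x08001000 is accepted.
 */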
static bool vgic_v2_check_base(gpa_t dist_base, gpa_t cpu_base)
{
	if (dist_base + KVM_VGIC_V2_DIST_SIZE < dist_base)
		return false;
	if (cpu_base + KVM_VGIC_V2_CPU_SIZE < cpu_base)
		return false;

	if (dist_base + KVM_VGIC_V2_DIST_SIZE <= cpu_base)
		return true;
	if (cpu_base + KVM_VGIC_V2_CPU_SIZE <= dist_base)
		return true;

	return false;
}

int vgic_v2_map_resources(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	int ret = 0;

	if (vgic_ready(kvm))
		goto out;

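	/*
	 * Both base addresses must have been supplied by userspace
	 * beforehand, typically via the KVM_DEV_ARM_VGIC_GRP_ADDR
	 * device attribute group.
	 */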
	if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
	    IS_VGIC_ADDR_UNDEF(dist->vgic_cpu_base)) {
		kvm_err("Need to set vgic cpu and dist addresses first\n");
		ret = -ENXIO;
		goto out;
	}

	if (!vgic_v2_check_base(dist->vgic_dist_base, dist->vgic_cpu_base)) {
		kvm_err("VGIC CPU and dist frames overlap\n");
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Initialize the vgic if this hasn't already been done on demand by
	 * accessing the vgic state from userspace.
	 */
	ret = vgic_init(kvm);
	if (ret) {
		kvm_err("Unable to initialize VGIC dynamic data structures\n");
		goto out;
	}

	ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V2);
	if (ret) {
		kvm_err("Unable to register VGIC MMIO regions\n");
		goto out;
	}

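	/*
	 * Unless CPU interface accesses are being trapped, map the
	 * hardware GICV frame into the guest at the requested CPU
	 * interface base, so that the guest talks to the hardware
	 * directly.
	 */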
	if (!static_branch_unlikely(&vgic_v2_cpuif_trap)) {
		ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base,
					    kvm_vgic_global_state.vcpu_base,
					    KVM_VGIC_V2_CPU_SIZE, true);
		if (ret) {
			kvm_err("Unable to remap VGIC CPU to VCPU\n");
			goto out;
		}
	}

	dist->ready = true;

out:
	return ret;
}

DEFINE_STATIC_KEY_FALSE(vgic_v2_cpuif_trap);

/**
 * vgic_v2_probe - probe for a GICv2 compatible interrupt controller
 * @info:	pointer to the GIC description
 *
 * Returns 0 if a GICv2 has been found, an error code otherwise
 */
int vgic_v2_probe(const struct gic_kvm_info *info)
{
	int ret;
	u32 vtr;

	if (!info->vctrl.start) {
		kvm_err("GICH not present in the firmware table\n");
		return -ENXIO;
	}

	if (!PAGE_ALIGNED(info->vcpu.start) ||
	    !PAGE_ALIGNED(resource_size(&info->vcpu))) {
		kvm_info("GICV region size/alignment is unsafe, using trapping (reduced performance)\n");
		kvm_vgic_global_state.vcpu_base_va = ioremap(info->vcpu.start,
							     resource_size(&info->vcpu));
		if (!kvm_vgic_global_state.vcpu_base_va) {
			kvm_err("Cannot ioremap GICV\n");
			return -ENOMEM;
		}

		ret = create_hyp_io_mappings(kvm_vgic_global_state.vcpu_base_va,
					     kvm_vgic_global_state.vcpu_base_va + resource_size(&info->vcpu),
					     info->vcpu.start);
		if (ret) {
			kvm_err("Cannot map GICV into hyp\n");
			goto out;
		}

		static_branch_enable(&vgic_v2_cpuif_trap);
	}

	kvm_vgic_global_state.vctrl_base = ioremap(info->vctrl.start,
						   resource_size(&info->vctrl));
	if (!kvm_vgic_global_state.vctrl_base) {
		kvm_err("Cannot ioremap GICH\n");
		ret = -ENOMEM;
		goto out;
	}

	vtr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VTR);
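	/* GICH_VTR[5:0] holds the number of implemented LRs minus one. */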
	kvm_vgic_global_state.nr_lr = (vtr & 0x3f) + 1;

	ret = create_hyp_io_mappings(kvm_vgic_global_state.vctrl_base,
				     kvm_vgic_global_state.vctrl_base +
					 resource_size(&info->vctrl),
				     info->vctrl.start);
	if (ret) {
		kvm_err("Cannot map VCTRL into hyp\n");
		goto out;
	}

	ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2);
	if (ret) {
		kvm_err("Cannot register GICv2 KVM device\n");
		goto out;
	}

	kvm_vgic_global_state.can_emulate_gicv2 = true;
	kvm_vgic_global_state.vcpu_base = info->vcpu.start;
	kvm_vgic_global_state.type = VGIC_V2;
	kvm_vgic_global_state.max_gic_vcpus = VGIC_V2_MAX_CPUS;

	kvm_info("vgic-v2@%llx\n", info->vctrl.start);

	return 0;
out:
	if (kvm_vgic_global_state.vctrl_base)
		iounmap(kvm_vgic_global_state.vctrl_base);
	if (kvm_vgic_global_state.vcpu_base_va)
		iounmap(kvm_vgic_global_state.vcpu_base_va);

	return ret;
}