// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012-2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/compiler.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm_host.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>

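/*
 * Decode ICH_VTR_EL2: the low bits give the index of the last
 * implemented list register, PREbits[28:26] encodes the number of
 * preemption bits minus one, and since each ICH_APxRn_EL2 register
 * tracks 32 priority levels, 5/6/7 preemption bits translate into
 * 1/2/4 active priority registers per group.
 */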
#define vtr_to_max_lr_idx(v)		((v) & 0xf)
#define vtr_to_nr_pre_bits(v)		((((u32)(v) >> 26) & 7) + 1)
#define vtr_to_nr_apr_regs(v)		(1 << (vtr_to_nr_pre_bits(v) - 5))

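/*
 * The ICH_LRn_EL2 registers are individually named system registers
 * and cannot be indexed at runtime, so these accessors expand the
 * index into the matching register access with a switch.
 */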
static u64 __hyp_text __gic_v3_get_lr(unsigned int lr)
{
	switch (lr & 0xf) {
	case 0:
		return read_gicreg(ICH_LR0_EL2);
	case 1:
		return read_gicreg(ICH_LR1_EL2);
	case 2:
		return read_gicreg(ICH_LR2_EL2);
	case 3:
		return read_gicreg(ICH_LR3_EL2);
	case 4:
		return read_gicreg(ICH_LR4_EL2);
	case 5:
		return read_gicreg(ICH_LR5_EL2);
	case 6:
		return read_gicreg(ICH_LR6_EL2);
	case 7:
		return read_gicreg(ICH_LR7_EL2);
	case 8:
		return read_gicreg(ICH_LR8_EL2);
	case 9:
		return read_gicreg(ICH_LR9_EL2);
	case 10:
		return read_gicreg(ICH_LR10_EL2);
	case 11:
		return read_gicreg(ICH_LR11_EL2);
	case 12:
		return read_gicreg(ICH_LR12_EL2);
	case 13:
		return read_gicreg(ICH_LR13_EL2);
	case 14:
		return read_gicreg(ICH_LR14_EL2);
	case 15:
		return read_gicreg(ICH_LR15_EL2);
	}

	unreachable();
}

static void __hyp_text __gic_v3_set_lr(u64 val, int lr)
{
	switch (lr & 0xf) {
	case 0:
		write_gicreg(val, ICH_LR0_EL2);
		break;
	case 1:
		write_gicreg(val, ICH_LR1_EL2);
		break;
	case 2:
		write_gicreg(val, ICH_LR2_EL2);
		break;
	case 3:
		write_gicreg(val, ICH_LR3_EL2);
		break;
	case 4:
		write_gicreg(val, ICH_LR4_EL2);
		break;
	case 5:
		write_gicreg(val, ICH_LR5_EL2);
		break;
	case 6:
		write_gicreg(val, ICH_LR6_EL2);
		break;
	case 7:
		write_gicreg(val, ICH_LR7_EL2);
		break;
	case 8:
		write_gicreg(val, ICH_LR8_EL2);
		break;
	case 9:
		write_gicreg(val, ICH_LR9_EL2);
		break;
	case 10:
		write_gicreg(val, ICH_LR10_EL2);
		break;
	case 11:
		write_gicreg(val, ICH_LR11_EL2);
		break;
	case 12:
		write_gicreg(val, ICH_LR12_EL2);
		break;
	case 13:
		write_gicreg(val, ICH_LR13_EL2);
		break;
	case 14:
		write_gicreg(val, ICH_LR14_EL2);
		break;
	case 15:
		write_gicreg(val, ICH_LR15_EL2);
		break;
	}
}

static void __hyp_text __vgic_v3_write_ap0rn(u32 val, int n)
{
	switch (n) {
	case 0:
		write_gicreg(val, ICH_AP0R0_EL2);
		break;
	case 1:
		write_gicreg(val, ICH_AP0R1_EL2);
		break;
	case 2:
		write_gicreg(val, ICH_AP0R2_EL2);
		break;
	case 3:
		write_gicreg(val, ICH_AP0R3_EL2);
		break;
	}
}

static void __hyp_text __vgic_v3_write_ap1rn(u32 val, int n)
{
	switch (n) {
	case 0:
		write_gicreg(val, ICH_AP1R0_EL2);
		break;
	case 1:
		write_gicreg(val, ICH_AP1R1_EL2);
		break;
	case 2:
		write_gicreg(val, ICH_AP1R2_EL2);
		break;
	case 3:
		write_gicreg(val, ICH_AP1R3_EL2);
		break;
	}
}

static u32 __hyp_text __vgic_v3_read_ap0rn(int n)
{
	u32 val;

	switch (n) {
	case 0:
		val = read_gicreg(ICH_AP0R0_EL2);
		break;
	case 1:
		val = read_gicreg(ICH_AP0R1_EL2);
		break;
	case 2:
		val = read_gicreg(ICH_AP0R2_EL2);
		break;
	case 3:
		val = read_gicreg(ICH_AP0R3_EL2);
		break;
	default:
		unreachable();
	}

	return val;
}

static u32 __hyp_text __vgic_v3_read_ap1rn(int n)
{
	u32 val;

	switch (n) {
	case 0:
		val = read_gicreg(ICH_AP1R0_EL2);
		break;
	case 1:
		val = read_gicreg(ICH_AP1R1_EL2);
		break;
	case 2:
		val = read_gicreg(ICH_AP1R2_EL2);
		break;
	case 3:
		val = read_gicreg(ICH_AP1R3_EL2);
		break;
	default:
		unreachable();
	}

	return val;
}

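/*
 * Save the vCPU interface state on guest exit: snapshot ELRSR so that
 * LRs the guest has emptied only get their state bits cleared, disable
 * the interface, then stash and zero every LR that was in use.
 */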
void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;

	/*
	 * Make sure stores to the GIC via the memory mapped interface
	 * are now visible to the system register interface when reading the
	 * LRs, and when reading back the VMCR on non-VHE systems.
	 */
	if (used_lrs || !has_vhe()) {
		if (!cpu_if->vgic_sre) {
			dsb(sy);
			isb();
		}
	}

	if (used_lrs || cpu_if->its_vpe.its_vm) {
		int i;
		u32 elrsr;

		elrsr = read_gicreg(ICH_ELRSR_EL2);

		write_gicreg(cpu_if->vgic_hcr & ~ICH_HCR_EN, ICH_HCR_EL2);

		for (i = 0; i < used_lrs; i++) {
			if (elrsr & (1 << i))
				cpu_if->vgic_lr[i] &= ~ICH_LR_STATE;
			else
				cpu_if->vgic_lr[i] = __gic_v3_get_lr(i);

			__gic_v3_set_lr(0, i);
		}
	}
}

void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
	int i;

	if (used_lrs || cpu_if->its_vpe.its_vm) {
		write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);

		for (i = 0; i < used_lrs; i++)
			__gic_v3_set_lr(cpu_if->vgic_lr[i], i);
	}

	/*
	 * Ensure that the writes to the LRs, and on non-VHE systems the
	 * write to the VMCR in __vgic_v3_activate_traps(), have reached
	 * the (re)distributors. This ensures the guest will read the
	 * correct values from the memory-mapped interface.
	 */
	if (used_lrs || !has_vhe()) {
		if (!cpu_if->vgic_sre) {
			isb();
			dsb(sy);
		}
	}
}

void __hyp_text __vgic_v3_activate_traps(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

	/*
	 * VFIQEn is RES1 if ICC_SRE_EL1.SRE is 1. This causes a
	 * Group0 interrupt (as generated in GICv2 mode) to be
	 * delivered as a FIQ to the guest, with potentially fatal
	 * consequences. So we must make sure that ICC_SRE_EL1 has
	 * actually been programmed with the value we want before
	 * starting to mess with the rest of the GIC, and VMCR_EL2 in
	 * particular. This logic must be called before
	 * __vgic_v3_restore_state().
	 */
	if (!cpu_if->vgic_sre) {
		write_gicreg(0, ICC_SRE_EL1);
		isb();
		write_gicreg(cpu_if->vgic_vmcr, ICH_VMCR_EL2);

		if (has_vhe()) {
			/*
			 * Ensure that the write to the VMCR will have reached
			 * the (re)distributors. This ensures the guest will
			 * read the correct values from the memory-mapped
			 * interface.
			 */
			isb();
			dsb(sy);
		}
	}

	/*
	 * Prevent the guest from touching the GIC system registers if
	 * SRE isn't enabled for GICv3 emulation.
	 */
	write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
		     ICC_SRE_EL2);

	/*
	 * If we need to trap system registers, we must write
	 * ICH_HCR_EL2 anyway, even if no interrupts are being
	 * injected.
	 */
	if (static_branch_unlikely(&vgic_v3_cpuif_trap) ||
	    cpu_if->its_vpe.its_vm)
		write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
}

void __hyp_text __vgic_v3_deactivate_traps(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u64 val;

	if (!cpu_if->vgic_sre) {
		cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2);
	}

	val = read_gicreg(ICC_SRE_EL2);
	write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2);

	if (!cpu_if->vgic_sre) {
		/* Make sure ENABLE is set at EL2 before setting SRE at EL1 */
		isb();
		write_gicreg(1, ICC_SRE_EL1);
	}

	/*
	 * If we were trapping system registers, we enabled the VGIC even if
	 * no interrupts were being injected, and we disable it again here.
	 */
	if (static_branch_unlikely(&vgic_v3_cpuif_trap) ||
	    cpu_if->its_vpe.its_vm)
		write_gicreg(0, ICH_HCR_EL2);
}

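/*
 * Only the implemented active priority registers can be accessed: with
 * 7 preemption bits all four ICH_APxRn_EL2 registers exist, with 6
 * only the first two, and with 5 just APxR0, hence the deliberate
 * fall-through in the switches below.
 */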
void __hyp_text __vgic_v3_save_aprs(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if;
	u64 val;
	u32 nr_pre_bits;

	vcpu = kern_hyp_va(vcpu);
	cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

	val = read_gicreg(ICH_VTR_EL2);
	nr_pre_bits = vtr_to_nr_pre_bits(val);

	switch (nr_pre_bits) {
	case 7:
		cpu_if->vgic_ap0r[3] = __vgic_v3_read_ap0rn(3);
		cpu_if->vgic_ap0r[2] = __vgic_v3_read_ap0rn(2);
		/* Fall through */
	case 6:
		cpu_if->vgic_ap0r[1] = __vgic_v3_read_ap0rn(1);
		/* Fall through */
	default:
		cpu_if->vgic_ap0r[0] = __vgic_v3_read_ap0rn(0);
	}

	switch (nr_pre_bits) {
	case 7:
		cpu_if->vgic_ap1r[3] = __vgic_v3_read_ap1rn(3);
		cpu_if->vgic_ap1r[2] = __vgic_v3_read_ap1rn(2);
		/* Fall through */
	case 6:
		cpu_if->vgic_ap1r[1] = __vgic_v3_read_ap1rn(1);
		/* Fall through */
	default:
		cpu_if->vgic_ap1r[0] = __vgic_v3_read_ap1rn(0);
	}
}

void __hyp_text __vgic_v3_restore_aprs(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if;
	u64 val;
	u32 nr_pre_bits;

	vcpu = kern_hyp_va(vcpu);
	cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

	val = read_gicreg(ICH_VTR_EL2);
	nr_pre_bits = vtr_to_nr_pre_bits(val);

	switch (nr_pre_bits) {
	case 7:
		__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[3], 3);
		__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[2], 2);
		/* Fall through */
	case 6:
		__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[1], 1);
		/* Fall through */
	default:
		__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[0], 0);
	}

	switch (nr_pre_bits) {
	case 7:
		__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[3], 3);
		__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[2], 2);
		/* Fall through */
	case 6:
		__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[1], 1);
		/* Fall through */
	default:
		__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[0], 0);
	}
}

void __hyp_text __vgic_v3_init_lrs(void)
{
	int max_lr_idx = vtr_to_max_lr_idx(read_gicreg(ICH_VTR_EL2));
	int i;

	for (i = 0; i <= max_lr_idx; i++)
		__gic_v3_set_lr(0, i);
}

u64 __hyp_text __vgic_v3_get_ich_vtr_el2(void)
{
	return read_gicreg(ICH_VTR_EL2);
}

u64 __hyp_text __vgic_v3_read_vmcr(void)
{
	return read_gicreg(ICH_VMCR_EL2);
}

void __hyp_text __vgic_v3_write_vmcr(u32 vmcr)
{
	write_gicreg(vmcr, ICH_VMCR_EL2);
}

#ifdef CONFIG_ARM64

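/*
 * Smallest binary point value the guest can use for group 1, derived
 * from the number of implemented preemption bits; group 0 allows one
 * less (see __vgic_v3_write_bpr0()).
 */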
static int __hyp_text __vgic_v3_bpr_min(void)
{
	/* See Pseudocode for VPriorityGroup */
	return 8 - vtr_to_nr_pre_bits(read_gicreg(ICH_VTR_EL2));
}

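/*
 * Tell a group-0 access apart from a group-1 one: the group-0 forms of
 * the trapped accessors (IAR0, EOIR0, HPPIR0, AP0Rn) all encode
 * CRm == 8, so anything else that lands here is a group-1 accessor.
 */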
static int __hyp_text __vgic_v3_get_group(struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_hsr(vcpu);
	u8 crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;

	return crm != 8;
}

#define GICv3_IDLE_PRIORITY	0xff

static int __hyp_text __vgic_v3_highest_priority_lr(struct kvm_vcpu *vcpu,
						    u32 vmcr,
						    u64 *lr_val)
{
	unsigned int used_lrs = vcpu->arch.vgic_cpu.used_lrs;
	u8 priority = GICv3_IDLE_PRIORITY;
	int i, lr = -1;

	for (i = 0; i < used_lrs; i++) {
		u64 val = __gic_v3_get_lr(i);
		u8 lr_prio = (val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;

		/* Not pending in the state? */
		if ((val & ICH_LR_STATE) != ICH_LR_PENDING_BIT)
			continue;

		/* Group-0 interrupt, but Group-0 disabled? */
		if (!(val & ICH_LR_GROUP) && !(vmcr & ICH_VMCR_ENG0_MASK))
			continue;

		/* Group-1 interrupt, but Group-1 disabled? */
		if ((val & ICH_LR_GROUP) && !(vmcr & ICH_VMCR_ENG1_MASK))
			continue;

		/* Not the highest priority? */
		if (lr_prio >= priority)
			continue;

		/* This is a candidate */
		priority = lr_prio;
		*lr_val = val;
		lr = i;
	}

	if (lr == -1)
		*lr_val = ICC_IAR1_EL1_SPURIOUS;

	return lr;
}

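/*
 * Scan the in-use LRs for one holding the given INTID in the active
 * state; returns its index, or -1 (with *lr_val set to the spurious
 * INTID) if there is none.
 */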
static int __hyp_text __vgic_v3_find_active_lr(struct kvm_vcpu *vcpu,
					       int intid, u64 *lr_val)
{
	unsigned int used_lrs = vcpu->arch.vgic_cpu.used_lrs;
	int i;

	for (i = 0; i < used_lrs; i++) {
		u64 val = __gic_v3_get_lr(i);

		if ((val & ICH_LR_VIRTUAL_ID_MASK) == intid &&
		    (val & ICH_LR_ACTIVE_BIT)) {
			*lr_val = val;
			return i;
		}
	}

	*lr_val = ICC_IAR1_EL1_SPURIOUS;
	return -1;
}

static int __hyp_text __vgic_v3_get_highest_active_priority(void)
{
	u8 nr_apr_regs = vtr_to_nr_apr_regs(read_gicreg(ICH_VTR_EL2));
	u32 hap = 0;
	int i;

	for (i = 0; i < nr_apr_regs; i++) {
		u32 val;

		/*
		 * The ICH_AP0Rn_EL2 and ICH_AP1Rn_EL2 registers
		 * contain the active priority levels for this VCPU
		 * for the maximum number of supported priority
		 * levels. We return the full priority level only if
		 * the BPR is programmed to its minimum; otherwise we
		 * return a combination of the priority level and
		 * subpriority, as determined by the setting of the
		 * BPR, but without the full subpriority.
		 */
		val  = __vgic_v3_read_ap0rn(i);
		val |= __vgic_v3_read_ap1rn(i);
		if (!val) {
			hap += 32;
			continue;
		}

		return (hap + __ffs(val)) << __vgic_v3_bpr_min();
	}

	return GICv3_IDLE_PRIORITY;
}

static unsigned int __hyp_text __vgic_v3_get_bpr0(u32 vmcr)
{
	return (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
}

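/*
 * With ICC_CTLR_EL1.CBPR set, group-1 interrupts use BPR0 plus one
 * (saturating at 7) rather than their own binary point, which is what
 * the CBPR bit cached in ICH_VMCR_EL2 selects here.
 */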
static unsigned int __hyp_text __vgic_v3_get_bpr1(u32 vmcr)
{
	unsigned int bpr;

	if (vmcr & ICH_VMCR_CBPR_MASK) {
		bpr = __vgic_v3_get_bpr0(vmcr);
		if (bpr < 7)
			bpr++;
	} else {
		bpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
	}

	return bpr;
}

/*
 * Convert a priority to a preemption level, taking the relevant BPR
 * into account by zeroing the sub-priority bits.
 */
static u8 __hyp_text __vgic_v3_pri_to_pre(u8 pri, u32 vmcr, int grp)
{
	unsigned int bpr;

	if (!grp)
		bpr = __vgic_v3_get_bpr0(vmcr) + 1;
	else
		bpr = __vgic_v3_get_bpr1(vmcr);

	return pri & (GENMASK(7, 0) << bpr);
}

/*
 * The priority value is independent of any of the BPR values, so we
 * normalize it using the minimal BPR value. This guarantees that no
 * matter what the guest does with its BPR, we can always set/get the
 * same value of a priority.
 */
static void __hyp_text __vgic_v3_set_active_priority(u8 pri, u32 vmcr, int grp)
{
	u8 pre, ap;
	u32 val;
	int apr;

	pre = __vgic_v3_pri_to_pre(pri, vmcr, grp);
	ap = pre >> __vgic_v3_bpr_min();
	apr = ap / 32;

	if (!grp) {
		val = __vgic_v3_read_ap0rn(apr);
		__vgic_v3_write_ap0rn(val | BIT(ap % 32), apr);
	} else {
		val = __vgic_v3_read_ap1rn(apr);
		__vgic_v3_write_ap1rn(val | BIT(ap % 32), apr);
	}
}

static int __hyp_text __vgic_v3_clear_highest_active_priority(void)
{
	u8 nr_apr_regs = vtr_to_nr_apr_regs(read_gicreg(ICH_VTR_EL2));
	u32 hap = 0;
	int i;

	for (i = 0; i < nr_apr_regs; i++) {
		u32 ap0, ap1;
		int c0, c1;

		ap0 = __vgic_v3_read_ap0rn(i);
		ap1 = __vgic_v3_read_ap1rn(i);
		if (!ap0 && !ap1) {
			hap += 32;
			continue;
		}

		c0 = ap0 ? __ffs(ap0) : 32;
		c1 = ap1 ? __ffs(ap1) : 32;

		/* Always clear the LSB, which is the highest priority */
		if (c0 < c1) {
			ap0 &= ~BIT(c0);
			__vgic_v3_write_ap0rn(ap0, i);
			hap += c0;
		} else {
			ap1 &= ~BIT(c1);
			__vgic_v3_write_ap1rn(ap1, i);
			hap += c1;
		}

		/* Rescale to 8 bits of priority */
		return hap << __vgic_v3_bpr_min();
	}

	return GICv3_IDLE_PRIORITY;
}

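/*
 * Emulate an interrupt acknowledge: pick the highest-priority pending
 * LR, check that its group is enabled and that it is not masked by the
 * PMR or by the current preemption level, then mark it active (except
 * for LPIs, which have no active state), record the new active
 * priority and return the INTID.
 */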
static void __hyp_text __vgic_v3_read_iar(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 lr_val;
	u8 lr_prio, pmr;
	int lr, grp;

	grp = __vgic_v3_get_group(vcpu);

	lr = __vgic_v3_highest_priority_lr(vcpu, vmcr, &lr_val);
	if (lr < 0)
		goto spurious;

	if (grp != !!(lr_val & ICH_LR_GROUP))
		goto spurious;

	pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
	lr_prio = (lr_val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;
	if (pmr <= lr_prio)
		goto spurious;

	if (__vgic_v3_get_highest_active_priority() <= __vgic_v3_pri_to_pre(lr_prio, vmcr, grp))
		goto spurious;

	lr_val &= ~ICH_LR_STATE;
	/* No active state for LPIs */
	if ((lr_val & ICH_LR_VIRTUAL_ID_MASK) <= VGIC_MAX_SPI)
		lr_val |= ICH_LR_ACTIVE_BIT;
	__gic_v3_set_lr(lr_val, lr);
	__vgic_v3_set_active_priority(lr_prio, vmcr, grp);
	vcpu_set_reg(vcpu, rt, lr_val & ICH_LR_VIRTUAL_ID_MASK);
	return;

spurious:
	vcpu_set_reg(vcpu, rt, ICC_IAR1_EL1_SPURIOUS);
}

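/*
 * Drop the active state from an LR; for a hardware-mapped interrupt
 * (ICH_LR_HW), also deactivate the physical interrupt by writing its
 * physical ID to ICC_DIR_EL1.
 */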
static void __hyp_text __vgic_v3_clear_active_lr(int lr, u64 lr_val)
{
	lr_val &= ~ICH_LR_ACTIVE_BIT;
	if (lr_val & ICH_LR_HW) {
		u32 pid;

		pid = (lr_val & ICH_LR_PHYS_ID_MASK) >> ICH_LR_PHYS_ID_SHIFT;
		gic_write_dir(pid);
	}

	__gic_v3_set_lr(lr_val, lr);
}

static void __hyp_text __vgic_v3_bump_eoicount(void)
{
	u32 hcr;

	hcr = read_gicreg(ICH_HCR_EL2);
	hcr += 1 << ICH_HCR_EOIcount_SHIFT;
	write_gicreg(hcr, ICH_HCR_EL2);
}

static void __hyp_text __vgic_v3_write_dir(struct kvm_vcpu *vcpu,
					   u32 vmcr, int rt)
{
	u32 vid = vcpu_get_reg(vcpu, rt);
	u64 lr_val;
	int lr;

	/* EOImode == 0, nothing to be done here */
	if (!(vmcr & ICH_VMCR_EOIM_MASK))
		return;

	/* No deactivate to be performed on an LPI */
	if (vid >= VGIC_MIN_LPI)
		return;

	lr = __vgic_v3_find_active_lr(vcpu, vid, &lr_val);
	if (lr == -1) {
		__vgic_v3_bump_eoicount();
		return;
	}

	__vgic_v3_clear_active_lr(lr, lr_val);
}

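/*
 * Emulate an EOI: the priority drop always happens, but the LR is only
 * deactivated here when EOImode == 0 and the interrupt is not an LPI.
 * An EOI with no matching active LR is accounted for by bumping
 * ICH_HCR_EL2.EOIcount.
 */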
static void __hyp_text __vgic_v3_write_eoir(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u32 vid = vcpu_get_reg(vcpu, rt);
	u64 lr_val;
	u8 lr_prio, act_prio;
	int lr, grp;

	grp = __vgic_v3_get_group(vcpu);

	/* Drop priority in any case */
	act_prio = __vgic_v3_clear_highest_active_priority();

	/* If EOIing an LPI, no deactivate to be performed */
	if (vid >= VGIC_MIN_LPI)
		return;

	/* EOImode == 1, nothing to be done here */
	if (vmcr & ICH_VMCR_EOIM_MASK)
		return;

	lr = __vgic_v3_find_active_lr(vcpu, vid, &lr_val);
	if (lr == -1) {
		__vgic_v3_bump_eoicount();
		return;
	}

	lr_prio = (lr_val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;

	/* If priorities or group do not match, the guest has fscked-up. */
	if (grp != !!(lr_val & ICH_LR_GROUP) ||
	    __vgic_v3_pri_to_pre(lr_prio, vmcr, grp) != act_prio)
		return;

	/* Let's now perform the deactivation */
	__vgic_v3_clear_active_lr(lr, lr_val);
}

static void __hyp_text __vgic_v3_read_igrpen0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, !!(vmcr & ICH_VMCR_ENG0_MASK));
}

static void __hyp_text __vgic_v3_read_igrpen1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, !!(vmcr & ICH_VMCR_ENG1_MASK));
}

static void __hyp_text __vgic_v3_write_igrpen0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 val = vcpu_get_reg(vcpu, rt);

	if (val & 1)
		vmcr |= ICH_VMCR_ENG0_MASK;
	else
		vmcr &= ~ICH_VMCR_ENG0_MASK;

	__vgic_v3_write_vmcr(vmcr);
}

static void __hyp_text __vgic_v3_write_igrpen1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 val = vcpu_get_reg(vcpu, rt);

	if (val & 1)
		vmcr |= ICH_VMCR_ENG1_MASK;
	else
		vmcr &= ~ICH_VMCR_ENG1_MASK;

	__vgic_v3_write_vmcr(vmcr);
}

static void __hyp_text __vgic_v3_read_bpr0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, __vgic_v3_get_bpr0(vmcr));
}

static void __hyp_text __vgic_v3_read_bpr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, __vgic_v3_get_bpr1(vmcr));
}

static void __hyp_text __vgic_v3_write_bpr0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 val = vcpu_get_reg(vcpu, rt);
	u8 bpr_min = __vgic_v3_bpr_min() - 1;

	/* Enforce BPR limiting */
	if (val < bpr_min)
		val = bpr_min;

	val <<= ICH_VMCR_BPR0_SHIFT;
	val &= ICH_VMCR_BPR0_MASK;
	vmcr &= ~ICH_VMCR_BPR0_MASK;
	vmcr |= val;

	__vgic_v3_write_vmcr(vmcr);
}

static void __hyp_text __vgic_v3_write_bpr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 val = vcpu_get_reg(vcpu, rt);
	u8 bpr_min = __vgic_v3_bpr_min();

	if (vmcr & ICH_VMCR_CBPR_MASK)
		return;

	/* Enforce BPR limiting */
	if (val < bpr_min)
		val = bpr_min;

	val <<= ICH_VMCR_BPR1_SHIFT;
	val &= ICH_VMCR_BPR1_MASK;
	vmcr &= ~ICH_VMCR_BPR1_MASK;
	vmcr |= val;

	__vgic_v3_write_vmcr(vmcr);
}

static void __hyp_text __vgic_v3_read_apxrn(struct kvm_vcpu *vcpu, int rt, int n)
{
	u32 val;

	if (!__vgic_v3_get_group(vcpu))
		val = __vgic_v3_read_ap0rn(n);
	else
		val = __vgic_v3_read_ap1rn(n);

	vcpu_set_reg(vcpu, rt, val);
}

static void __hyp_text __vgic_v3_write_apxrn(struct kvm_vcpu *vcpu, int rt, int n)
{
	u32 val = vcpu_get_reg(vcpu, rt);

	if (!__vgic_v3_get_group(vcpu))
		__vgic_v3_write_ap0rn(val, n);
	else
		__vgic_v3_write_ap1rn(val, n);
}

static void __hyp_text __vgic_v3_read_apxr0(struct kvm_vcpu *vcpu,
					    u32 vmcr, int rt)
{
	__vgic_v3_read_apxrn(vcpu, rt, 0);
}

static void __hyp_text __vgic_v3_read_apxr1(struct kvm_vcpu *vcpu,
					    u32 vmcr, int rt)
{
	__vgic_v3_read_apxrn(vcpu, rt, 1);
}

static void __hyp_text __vgic_v3_read_apxr2(struct kvm_vcpu *vcpu,
					    u32 vmcr, int rt)
{
	__vgic_v3_read_apxrn(vcpu, rt, 2);
}

static void __hyp_text __vgic_v3_read_apxr3(struct kvm_vcpu *vcpu,
					    u32 vmcr, int rt)
{
	__vgic_v3_read_apxrn(vcpu, rt, 3);
}

static void __hyp_text __vgic_v3_write_apxr0(struct kvm_vcpu *vcpu,
					     u32 vmcr, int rt)
{
	__vgic_v3_write_apxrn(vcpu, rt, 0);
}

static void __hyp_text __vgic_v3_write_apxr1(struct kvm_vcpu *vcpu,
					     u32 vmcr, int rt)
{
	__vgic_v3_write_apxrn(vcpu, rt, 1);
}

static void __hyp_text __vgic_v3_write_apxr2(struct kvm_vcpu *vcpu,
					     u32 vmcr, int rt)
{
	__vgic_v3_write_apxrn(vcpu, rt, 2);
}

static void __hyp_text __vgic_v3_write_apxr3(struct kvm_vcpu *vcpu,
					     u32 vmcr, int rt)
{
	__vgic_v3_write_apxrn(vcpu, rt, 3);
}

static void __hyp_text __vgic_v3_read_hppir(struct kvm_vcpu *vcpu,
					    u32 vmcr, int rt)
{
	u64 lr_val;
	int lr, lr_grp, grp;

	grp = __vgic_v3_get_group(vcpu);

	lr = __vgic_v3_highest_priority_lr(vcpu, vmcr, &lr_val);
	if (lr == -1)
		goto spurious;

	lr_grp = !!(lr_val & ICH_LR_GROUP);
	if (lr_grp != grp)
		lr_val = ICC_IAR1_EL1_SPURIOUS;

spurious:
	vcpu_set_reg(vcpu, rt, lr_val & ICH_LR_VIRTUAL_ID_MASK);
}

static void __hyp_text __vgic_v3_read_pmr(struct kvm_vcpu *vcpu,
					  u32 vmcr, int rt)
{
	vmcr &= ICH_VMCR_PMR_MASK;
	vmcr >>= ICH_VMCR_PMR_SHIFT;
	vcpu_set_reg(vcpu, rt, vmcr);
}

static void __hyp_text __vgic_v3_write_pmr(struct kvm_vcpu *vcpu,
					   u32 vmcr, int rt)
{
	u32 val = vcpu_get_reg(vcpu, rt);

	val <<= ICH_VMCR_PMR_SHIFT;
	val &= ICH_VMCR_PMR_MASK;
	vmcr &= ~ICH_VMCR_PMR_MASK;
	vmcr |= val;

	write_gicreg(vmcr, ICH_VMCR_EL2);
}

static void __hyp_text __vgic_v3_read_rpr(struct kvm_vcpu *vcpu,
					  u32 vmcr, int rt)
{
	u32 val = __vgic_v3_get_highest_active_priority();
	vcpu_set_reg(vcpu, rt, val);
}

static void __hyp_text __vgic_v3_read_ctlr(struct kvm_vcpu *vcpu,
					   u32 vmcr, int rt)
{
	u32 vtr, val;

	vtr = read_gicreg(ICH_VTR_EL2);
	/* PRIbits */
	val = ((vtr >> 29) & 7) << ICC_CTLR_EL1_PRI_BITS_SHIFT;
	/* IDbits */
	val |= ((vtr >> 23) & 7) << ICC_CTLR_EL1_ID_BITS_SHIFT;
	/* SEIS */
	val |= ((vtr >> 22) & 1) << ICC_CTLR_EL1_SEIS_SHIFT;
	/* A3V */
	val |= ((vtr >> 21) & 1) << ICC_CTLR_EL1_A3V_SHIFT;
	/* EOImode */
	val |= ((vmcr & ICH_VMCR_EOIM_MASK) >> ICH_VMCR_EOIM_SHIFT) << ICC_CTLR_EL1_EOImode_SHIFT;
	/* CBPR */
	val |= (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT;

	vcpu_set_reg(vcpu, rt, val);
}

static void __hyp_text __vgic_v3_write_ctlr(struct kvm_vcpu *vcpu,
					    u32 vmcr, int rt)
{
	u32 val = vcpu_get_reg(vcpu, rt);

	if (val & ICC_CTLR_EL1_CBPR_MASK)
		vmcr |= ICH_VMCR_CBPR_MASK;
	else
		vmcr &= ~ICH_VMCR_CBPR_MASK;

	if (val & ICC_CTLR_EL1_EOImode_MASK)
		vmcr |= ICH_VMCR_EOIM_MASK;
	else
		vmcr &= ~ICH_VMCR_EOIM_MASK;

	write_gicreg(vmcr, ICH_VMCR_EL2);
}

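/*
 * Main dispatcher for trapped GIC CPU interface accesses: decode the
 * trapped system register and access direction from ESR, run the
 * matching emulation handler and skip the trapped instruction.
 * Returns 1 if the access was handled here, 0 to let the rest of the
 * KVM exit path deal with it.
 */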
int __hyp_text __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu)
{
	int rt;
	u32 esr;
	u32 vmcr;
	void (*fn)(struct kvm_vcpu *, u32, int);
	bool is_read;
	u32 sysreg;

	esr = kvm_vcpu_get_hsr(vcpu);
	if (vcpu_mode_is_32bit(vcpu)) {
		if (!kvm_condition_valid(vcpu)) {
			__kvm_skip_instr(vcpu);
			return 1;
		}

		sysreg = esr_cp15_to_sysreg(esr);
	} else {
		sysreg = esr_sys64_to_sysreg(esr);
	}

	is_read = (esr & ESR_ELx_SYS64_ISS_DIR_MASK) == ESR_ELx_SYS64_ISS_DIR_READ;

	switch (sysreg) {
	case SYS_ICC_IAR0_EL1:
	case SYS_ICC_IAR1_EL1:
		if (unlikely(!is_read))
			return 0;
		fn = __vgic_v3_read_iar;
		break;
	case SYS_ICC_EOIR0_EL1:
	case SYS_ICC_EOIR1_EL1:
		if (unlikely(is_read))
			return 0;
		fn = __vgic_v3_write_eoir;
		break;
	case SYS_ICC_IGRPEN1_EL1:
		if (is_read)
			fn = __vgic_v3_read_igrpen1;
		else
			fn = __vgic_v3_write_igrpen1;
		break;
	case SYS_ICC_BPR1_EL1:
		if (is_read)
			fn = __vgic_v3_read_bpr1;
		else
			fn = __vgic_v3_write_bpr1;
		break;
	case SYS_ICC_AP0Rn_EL1(0):
	case SYS_ICC_AP1Rn_EL1(0):
		if (is_read)
			fn = __vgic_v3_read_apxr0;
		else
			fn = __vgic_v3_write_apxr0;
		break;
	case SYS_ICC_AP0Rn_EL1(1):
	case SYS_ICC_AP1Rn_EL1(1):
		if (is_read)
			fn = __vgic_v3_read_apxr1;
		else
			fn = __vgic_v3_write_apxr1;
		break;
	case SYS_ICC_AP0Rn_EL1(2):
	case SYS_ICC_AP1Rn_EL1(2):
		if (is_read)
			fn = __vgic_v3_read_apxr2;
		else
			fn = __vgic_v3_write_apxr2;
		break;
	case SYS_ICC_AP0Rn_EL1(3):
	case SYS_ICC_AP1Rn_EL1(3):
		if (is_read)
			fn = __vgic_v3_read_apxr3;
		else
			fn = __vgic_v3_write_apxr3;
		break;
	case SYS_ICC_HPPIR0_EL1:
	case SYS_ICC_HPPIR1_EL1:
		if (unlikely(!is_read))
			return 0;
		fn = __vgic_v3_read_hppir;
		break;
	case SYS_ICC_IGRPEN0_EL1:
		if (is_read)
			fn = __vgic_v3_read_igrpen0;
		else
			fn = __vgic_v3_write_igrpen0;
		break;
	case SYS_ICC_BPR0_EL1:
		if (is_read)
			fn = __vgic_v3_read_bpr0;
		else
			fn = __vgic_v3_write_bpr0;
		break;
	case SYS_ICC_DIR_EL1:
		if (unlikely(is_read))
			return 0;
		fn = __vgic_v3_write_dir;
		break;
	case SYS_ICC_RPR_EL1:
		if (unlikely(!is_read))
			return 0;
		fn = __vgic_v3_read_rpr;
		break;
	case SYS_ICC_CTLR_EL1:
		if (is_read)
			fn = __vgic_v3_read_ctlr;
		else
			fn = __vgic_v3_write_ctlr;
		break;
	case SYS_ICC_PMR_EL1:
		if (is_read)
			fn = __vgic_v3_read_pmr;
		else
			fn = __vgic_v3_write_pmr;
		break;
	default:
		return 0;
	}

	vmcr = __vgic_v3_read_vmcr();
	rt = kvm_vcpu_sys_get_rt(vcpu);
	fn(vcpu, vmcr, rt);

	__kvm_skip_instr(vcpu);

	return 1;
}

#endif