// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu, <yu.liu@freescale.com>
 *
 * Description:
 * This file is derived from arch/powerpc/kvm/44x_emulate.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 */

#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/dbell.h>
#include <asm/reg_booke.h>

#include "booke.h"
#include "e500.h"

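/*
 * Extended opcodes (under primary opcode 31) of the privileged e500
 * instructions that are emulated in this file.
 */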
#define XOP_DCBTLS  166
#define XOP_MSGSND  206
#define XOP_MSGCLR  238
#define XOP_MFTMR   366
#define XOP_TLBIVAX 786
#define XOP_TLBSX   914
#define XOP_TLBRE   946
#define XOP_TLBWE   978
#define XOP_TLBILX  18
#define XOP_EHPRIV  270

#ifdef CONFIG_KVM_E500MC
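/*
 * Map the doorbell type encoded in an msgsnd/msgclr parameter to the
 * corresponding BookE interrupt priority, or -1 if the type is not one
 * we emulate (only regular and critical doorbells are handled).
 */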
static int dbell2prio(ulong param)
{
	int msg = param & PPC_DBELL_TYPE_MASK;
	int prio = -1;

	switch (msg) {
	case PPC_DBELL_TYPE(PPC_DBELL):
		prio = BOOKE_IRQPRIO_DBELL;
		break;
	case PPC_DBELL_TYPE(PPC_DBELL_CRIT):
		prio = BOOKE_IRQPRIO_DBELL_CRIT;
		break;
	default:
		break;
	}

	return prio;
}

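/*
 * Emulate msgclr: clear any pending doorbell of the requested type on
 * the executing vcpu by dropping it from the pending_exceptions bitmap.
 */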
static int kvmppc_e500_emul_msgclr(struct kvm_vcpu *vcpu, int rb)
{
	ulong param = vcpu->arch.regs.gpr[rb];
	int prio = dbell2prio(param);

	if (prio < 0)
		return EMULATE_FAIL;

	clear_bit(prio, &vcpu->arch.pending_exceptions);
	return EMULATE_DONE;
}

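/*
 * Emulate msgsnd: post a doorbell to every vcpu whose PIR matches the
 * tag in the parameter (or to all vcpus on a broadcast message) and
 * kick the targets so they notice the newly pending exception.
 */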
static int kvmppc_e500_emul_msgsnd(struct kvm_vcpu *vcpu, int rb)
{
	ulong param = vcpu->arch.regs.gpr[rb];
	int prio = dbell2prio(param);
	int pir = param & PPC_DBELL_PIR_MASK;
	int i;
	struct kvm_vcpu *cvcpu;

	if (prio < 0)
		return EMULATE_FAIL;

	kvm_for_each_vcpu(i, cvcpu, vcpu->kvm) {
		int cpir = cvcpu->arch.shared->pir;
		if ((param & PPC_DBELL_MSG_BRDCAST) || (cpir == pir)) {
			set_bit(prio, &cvcpu->arch.pending_exceptions);
			kvm_vcpu_kick(cvcpu);
		}
	}

	return EMULATE_DONE;
}
#endif

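/*
 * Emulate ehpriv: the debug variant exits to userspace as a
 * KVM_EXIT_DEBUG event, leaving the NIP on the instruction itself
 * (*advance = 0); any other variant fails emulation.
 */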
static int kvmppc_e500_emul_ehpriv(struct kvm_run *run, struct kvm_vcpu *vcpu,
				   unsigned int inst, int *advance)
{
	int emulated = EMULATE_DONE;

	switch (get_oc(inst)) {
	case EHPRIV_OC_DEBUG:
		run->exit_reason = KVM_EXIT_DEBUG;
		run->debug.arch.address = vcpu->arch.regs.nip;
		run->debug.arch.status = 0;
		kvmppc_account_exit(vcpu, DEBUG_EXITS);
		emulated = EMULATE_EXIT_USER;
		*advance = 0;
		break;
	default:
		emulated = EMULATE_FAIL;
	}
	return emulated;
}

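/*
 * Emulate dcbtls (data cache block touch and lock set): cache locking
 * is not virtualized, so report the attempt as unable to lock.
 */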
static int kvmppc_e500_emul_dcbtls(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	/* Always fail to lock the cache */
	vcpu_e500->l1csr0 |= L1CSR0_CUL;
	return EMULATE_DONE;
}

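/*
 * Emulate mftmr: only TMCFG0 is handled, reporting one thread and one
 * active thread per vcpu; any other thread management register fails.
 */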
static int kvmppc_e500_emul_mftmr(struct kvm_vcpu *vcpu, unsigned int inst,
				  int rt)
{
	/* Expose one thread per vcpu */
	if (get_tmrn(inst) == TMRN_TMCFG0) {
		kvmppc_set_gpr(vcpu, rt,
			       1 | (1 << TMRN_TMCFG0_NATHRD_SHIFT));
		return EMULATE_DONE;
	}

	return EMULATE_FAIL;
}

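/*
 * Top-level instruction emulation for e500: decode the primary opcode
 * (only 31 is handled here) and dispatch on the extended opcode.
 * Anything unhandled falls back to the generic BookE emulator.
 */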
int kvmppc_core_emulate_op_e500(struct kvm_run *run, struct kvm_vcpu *vcpu,
				unsigned int inst, int *advance)
{
	int emulated = EMULATE_DONE;
	int ra = get_ra(inst);
	int rb = get_rb(inst);
	int rt = get_rt(inst);
	gva_t ea;

	switch (get_op(inst)) {
	case 31:
		switch (get_xop(inst)) {

		case XOP_DCBTLS:
			emulated = kvmppc_e500_emul_dcbtls(vcpu);
			break;

#ifdef CONFIG_KVM_E500MC
		case XOP_MSGSND:
			emulated = kvmppc_e500_emul_msgsnd(vcpu, rb);
			break;

		case XOP_MSGCLR:
			emulated = kvmppc_e500_emul_msgclr(vcpu, rb);
			break;
#endif

		case XOP_TLBRE:
			emulated = kvmppc_e500_emul_tlbre(vcpu);
			break;

		case XOP_TLBWE:
			emulated = kvmppc_e500_emul_tlbwe(vcpu);
			break;

		case XOP_TLBSX:
			ea = kvmppc_get_ea_indexed(vcpu, ra, rb);
			emulated = kvmppc_e500_emul_tlbsx(vcpu, ea);
			break;

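		/* tlbilx encodes the invalidation type (T) in the low RT bits */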
		case XOP_TLBILX: {
			int type = rt & 0x3;
			ea = kvmppc_get_ea_indexed(vcpu, ra, rb);
			emulated = kvmppc_e500_emul_tlbilx(vcpu, type, ea);
			break;
		}

		case XOP_TLBIVAX:
			ea = kvmppc_get_ea_indexed(vcpu, ra, rb);
			emulated = kvmppc_e500_emul_tlbivax(vcpu, ea);
			break;

		case XOP_MFTMR:
			emulated = kvmppc_e500_emul_mftmr(vcpu, inst, rt);
			break;

		case XOP_EHPRIV:
			emulated = kvmppc_e500_emul_ehpriv(run, vcpu, inst,
							   advance);
			break;

		default:
			emulated = EMULATE_FAIL;
		}

		break;

	default:
		emulated = EMULATE_FAIL;
	}

	if (emulated == EMULATE_FAIL)
		emulated = kvmppc_booke_emulate_op(run, vcpu, inst, advance);

	return emulated;
}

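/*
 * Emulate mtspr for the e500-specific SPRs; everything else is passed
 * on to the generic BookE mtspr emulation.
 */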
int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int emulated = EMULATE_DONE;

	switch (sprn) {
#ifndef CONFIG_KVM_BOOKE_HV
	case SPRN_PID:
		kvmppc_set_pid(vcpu, spr_val);
		break;
	case SPRN_PID1:
		if (spr_val != 0)
			return EMULATE_FAIL;
		vcpu_e500->pid[1] = spr_val;
		break;
	case SPRN_PID2:
		if (spr_val != 0)
			return EMULATE_FAIL;
		vcpu_e500->pid[2] = spr_val;
		break;
	case SPRN_MAS0:
		vcpu->arch.shared->mas0 = spr_val;
		break;
	case SPRN_MAS1:
		vcpu->arch.shared->mas1 = spr_val;
		break;
	case SPRN_MAS2:
		vcpu->arch.shared->mas2 = spr_val;
		break;
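	/* MAS3 and MAS7 are kept as the combined 64-bit mas7_3 image */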
	case SPRN_MAS3:
		vcpu->arch.shared->mas7_3 &= ~(u64)0xffffffff;
		vcpu->arch.shared->mas7_3 |= spr_val;
		break;
	case SPRN_MAS4:
		vcpu->arch.shared->mas4 = spr_val;
		break;
	case SPRN_MAS6:
		vcpu->arch.shared->mas6 = spr_val;
		break;
	case SPRN_MAS7:
		vcpu->arch.shared->mas7_3 &= (u64)0xffffffff;
		vcpu->arch.shared->mas7_3 |= (u64)spr_val << 32;
		break;
#endif
	case SPRN_L1CSR0:
		vcpu_e500->l1csr0 = spr_val;
		vcpu_e500->l1csr0 &= ~(L1CSR0_DCFI | L1CSR0_CLFC);
		break;
	case SPRN_L1CSR1:
		vcpu_e500->l1csr1 = spr_val;
		vcpu_e500->l1csr1 &= ~(L1CSR1_ICFI | L1CSR1_ICLFR);
		break;
	case SPRN_HID0:
		vcpu_e500->hid0 = spr_val;
		break;
	case SPRN_HID1:
		vcpu_e500->hid1 = spr_val;
		break;

	case SPRN_MMUCSR0:
		emulated = kvmppc_e500_emul_mt_mmucsr0(vcpu_e500,
				spr_val);
		break;

	case SPRN_PWRMGTCR0:
		/*
		 * Guest relies on host power management configurations.
		 * Treat the request as a general store.
		 */
		vcpu->arch.pwrmgtcr0 = spr_val;
		break;

	case SPRN_BUCSR:
		/*
		 * If we are here, it means that we have already flushed the
		 * branch predictor, so just return to guest.
		 */
		break;

	/* extra exceptions */
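	/*
	 * SPE and AltiVec cannot both be built for these cores, so the
	 * IVOR32/33 case labels below resolve to at most one block each.
	 */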
#ifdef CONFIG_SPE_POSSIBLE
	case SPRN_IVOR32:
		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] = spr_val;
		break;
	case SPRN_IVOR33:
		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA] = spr_val;
		break;
	case SPRN_IVOR34:
		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND] = spr_val;
		break;
#endif
#ifdef CONFIG_ALTIVEC
	case SPRN_IVOR32:
		vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_UNAVAIL] = spr_val;
		break;
	case SPRN_IVOR33:
		vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_ASSIST] = spr_val;
		break;
#endif
	case SPRN_IVOR35:
		vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] = spr_val;
		break;
#ifdef CONFIG_KVM_BOOKE_HV
	case SPRN_IVOR36:
		vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL] = spr_val;
		break;
	case SPRN_IVOR37:
		vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT] = spr_val;
		break;
#endif
	default:
		emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, spr_val);
	}

	return emulated;
}

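/*
 * Emulate mfspr for the e500-specific SPRs; everything else is passed
 * on to the generic BookE mfspr emulation.
 */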
int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int emulated = EMULATE_DONE;

	switch (sprn) {
#ifndef CONFIG_KVM_BOOKE_HV
	case SPRN_PID:
		*spr_val = vcpu_e500->pid[0];
		break;
	case SPRN_PID1:
		*spr_val = vcpu_e500->pid[1];
		break;
	case SPRN_PID2:
		*spr_val = vcpu_e500->pid[2];
		break;
	case SPRN_MAS0:
		*spr_val = vcpu->arch.shared->mas0;
		break;
	case SPRN_MAS1:
		*spr_val = vcpu->arch.shared->mas1;
		break;
	case SPRN_MAS2:
		*spr_val = vcpu->arch.shared->mas2;
		break;
	case SPRN_MAS3:
		*spr_val = (u32)vcpu->arch.shared->mas7_3;
		break;
	case SPRN_MAS4:
		*spr_val = vcpu->arch.shared->mas4;
		break;
	case SPRN_MAS6:
		*spr_val = vcpu->arch.shared->mas6;
		break;
	case SPRN_MAS7:
		*spr_val = vcpu->arch.shared->mas7_3 >> 32;
		break;
#endif
	case SPRN_DECAR:
		*spr_val = vcpu->arch.decar;
		break;
	case SPRN_TLB0CFG:
		*spr_val = vcpu->arch.tlbcfg[0];
		break;
	case SPRN_TLB1CFG:
		*spr_val = vcpu->arch.tlbcfg[1];
		break;
	case SPRN_TLB0PS:
		if (!has_feature(vcpu, VCPU_FTR_MMU_V2))
			return EMULATE_FAIL;
		*spr_val = vcpu->arch.tlbps[0];
		break;
	case SPRN_TLB1PS:
		if (!has_feature(vcpu, VCPU_FTR_MMU_V2))
			return EMULATE_FAIL;
		*spr_val = vcpu->arch.tlbps[1];
		break;
	case SPRN_L1CSR0:
		*spr_val = vcpu_e500->l1csr0;
		break;
	case SPRN_L1CSR1:
		*spr_val = vcpu_e500->l1csr1;
		break;
	case SPRN_HID0:
		*spr_val = vcpu_e500->hid0;
		break;
	case SPRN_HID1:
		*spr_val = vcpu_e500->hid1;
		break;
	case SPRN_SVR:
		*spr_val = vcpu_e500->svr;
		break;

	case SPRN_MMUCSR0:
		*spr_val = 0;
		break;

	case SPRN_MMUCFG:
		*spr_val = vcpu->arch.mmucfg;
		break;
	case SPRN_EPTCFG:
		if (!has_feature(vcpu, VCPU_FTR_MMU_V2))
			return EMULATE_FAIL;
		/*
		 * Legacy Linux guests access the EPTCFG register even if the
		 * E.PT category is disabled in the VM. Give them a chance to
		 * live.
		 */
		*spr_val = vcpu->arch.eptcfg;
		break;

	case SPRN_PWRMGTCR0:
		*spr_val = vcpu->arch.pwrmgtcr0;
		break;

	/* extra exceptions */
#ifdef CONFIG_SPE_POSSIBLE
	case SPRN_IVOR32:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL];
		break;
	case SPRN_IVOR33:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA];
		break;
	case SPRN_IVOR34:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND];
		break;
#endif
#ifdef CONFIG_ALTIVEC
	case SPRN_IVOR32:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_UNAVAIL];
		break;
	case SPRN_IVOR33:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_ASSIST];
		break;
#endif
	case SPRN_IVOR35:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR];
		break;
#ifdef CONFIG_KVM_BOOKE_HV
	case SPRN_IVOR36:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL];
		break;
	case SPRN_IVOR37:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT];
		break;
#endif
	default:
		emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, spr_val);
	}

	return emulated;
}