// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright IBM Corp. 2007
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 *          Scott Wood <scottwood@freescale.com>
 *          Varun Sethi <varun.sethi@freescale.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>

#include <asm/cputable.h>
#include <linux/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/cacheflush.h>
#include <asm/dbell.h>
#include <asm/hw_irq.h>
#include <asm/irq.h>
#include <asm/time.h>

#include "timing.h"
#include "booke.h"

#define CREATE_TRACE_POINTS
#include "trace_booke.h"

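/* Host address of the booke exception handler code that guest
 * interrupts are routed through. */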
unsigned long kvmppc_booke_handlers;

struct kvm_stats_debugfs_item debugfs_entries[] = {
	VCPU_STAT("mmio", mmio_exits),
	VCPU_STAT("sig", signal_exits),
	VCPU_STAT("itlb_r", itlb_real_miss_exits),
	VCPU_STAT("itlb_v", itlb_virt_miss_exits),
	VCPU_STAT("dtlb_r", dtlb_real_miss_exits),
	VCPU_STAT("dtlb_v", dtlb_virt_miss_exits),
	VCPU_STAT("sysc", syscall_exits),
	VCPU_STAT("isi", isi_exits),
	VCPU_STAT("dsi", dsi_exits),
	VCPU_STAT("inst_emu", emulated_inst_exits),
	VCPU_STAT("dec", dec_exits),
	VCPU_STAT("ext_intr", ext_intr_exits),
	VCPU_STAT("halt_successful_poll", halt_successful_poll),
	VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
	VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
	VCPU_STAT("halt_wakeup", halt_wakeup),
	VCPU_STAT("doorbell", dbell_exits),
	VCPU_STAT("guest doorbell", gdbell_exits),
	VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
	VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
	VM_STAT("remote_tlb_flush", remote_tlb_flush),
	{ NULL }
};

/* TODO: use vcpu_printf() */
void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
{
	int i;

	printk("pc:   %08lx msr:  %08llx\n", vcpu->arch.regs.nip,
			vcpu->arch.shared->msr);
	printk("lr:   %08lx ctr:  %08lx\n", vcpu->arch.regs.link,
			vcpu->arch.regs.ctr);
	printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0,
					    vcpu->arch.shared->srr1);

	printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
		       kvmppc_get_gpr(vcpu, i),
		       kvmppc_get_gpr(vcpu, i+1),
		       kvmppc_get_gpr(vcpu, i+2),
		       kvmppc_get_gpr(vcpu, i+3));
	}
}

#ifdef CONFIG_SPE
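/*
 * Flush the guest's SPE state back into the vcpu and clear MSR_SPE in
 * the shadow MSR, so further guest SPE use traps to the host again.
 */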
void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	enable_kernel_spe();
	kvmppc_save_guest_spe(vcpu);
	disable_kernel_spe();
	vcpu->arch.shadow_msr &= ~MSR_SPE;
	preempt_enable();
}

static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	enable_kernel_spe();
	kvmppc_load_guest_spe(vcpu);
	disable_kernel_spe();
	vcpu->arch.shadow_msr |= MSR_SPE;
	preempt_enable();
}

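/* Make the shadow MSR_SPE bit track the guest's MSR_SPE. */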
static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.shared->msr & MSR_SPE) {
		if (!(vcpu->arch.shadow_msr & MSR_SPE))
			kvmppc_vcpu_enable_spe(vcpu);
	} else if (vcpu->arch.shadow_msr & MSR_SPE) {
		kvmppc_vcpu_disable_spe(vcpu);
	}
}
#else
static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
}
#endif

/*
 * Load the guest vcpu's FP state if it is needed.
 * This also sets MSR_FP in the thread struct so the host knows we hold
 * the FPU, letting the host save the guest vcpu's FP state if another
 * thread needs the FPU.
 * This simulates an FP unavailable fault.
 *
 * Must be called with preemption disabled.
 */
static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_FPU
	if (!(current->thread.regs->msr & MSR_FP)) {
		enable_kernel_fp();
		load_fp_state(&vcpu->arch.fp);
		disable_kernel_fp();
		current->thread.fp_save_area = &vcpu->arch.fp;
		current->thread.regs->msr |= MSR_FP;
	}
#endif
}

/*
 * Save the guest vcpu's FP state back into the thread.
 * Must be called with preemption disabled.
 */
static inline void kvmppc_save_guest_fp(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_FPU
	if (current->thread.regs->msr & MSR_FP)
		giveup_fpu(current);
	current->thread.fp_save_area = NULL;
#endif
}

static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_FPU) && !defined(CONFIG_KVM_BOOKE_HV)
	/* We always treat the FP bit as enabled from the host
	   perspective, so we only need to adjust the shadow MSR */
	vcpu->arch.shadow_msr &= ~MSR_FP;
	vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_FP;
#endif
}

/*
 * Simulate an AltiVec unavailable fault to load the guest state
 * from the thread into the AltiVec unit.
 * Must be called with preemption disabled.
 */
static inline void kvmppc_load_guest_altivec(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
		if (!(current->thread.regs->msr & MSR_VEC)) {
			enable_kernel_altivec();
			load_vr_state(&vcpu->arch.vr);
			disable_kernel_altivec();
			current->thread.vr_save_area = &vcpu->arch.vr;
			current->thread.regs->msr |= MSR_VEC;
		}
	}
#endif
}

/*
 * Save the guest vcpu's AltiVec state back into the thread.
 * Must be called with preemption disabled.
 */
static inline void kvmppc_save_guest_altivec(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
		if (current->thread.regs->msr & MSR_VEC)
			giveup_altivec(current);
		current->thread.vr_save_area = NULL;
	}
#endif
}

static void kvmppc_vcpu_sync_debug(struct kvm_vcpu *vcpu)
{
	/* Synchronize guest's desire to get debug interrupts into shadow MSR */
#ifndef CONFIG_KVM_BOOKE_HV
	vcpu->arch.shadow_msr &= ~MSR_DE;
	vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_DE;
#endif

	/* Force enable debug interrupts when user space wants to debug */
	if (vcpu->guest_debug) {
#ifdef CONFIG_KVM_BOOKE_HV
		/*
		 * Since there is no shadow MSR, sync MSR_DE into the guest
		 * visible MSR.
		 */
		vcpu->arch.shared->msr |= MSR_DE;
#else
		vcpu->arch.shadow_msr |= MSR_DE;
		vcpu->arch.shared->msr &= ~MSR_DE;
#endif
	}
}

/*
 * Helper function for "full" MSR writes.  No need to call this if only
 * EE/CE/ME/DE/RI are changing.
 */
void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
{
	u32 old_msr = vcpu->arch.shared->msr;

#ifdef CONFIG_KVM_BOOKE_HV
	new_msr |= MSR_GS;
#endif

	vcpu->arch.shared->msr = new_msr;

	kvmppc_mmu_msr_notify(vcpu, old_msr);
	kvmppc_vcpu_sync_spe(vcpu);
	kvmppc_vcpu_sync_fpu(vcpu);
	kvmppc_vcpu_sync_debug(vcpu);
}

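/*
 * Mark an interrupt of the given priority as pending; actual delivery
 * is deferred to kvmppc_booke_irqprio_deliver().
 */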
static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
                                       unsigned int priority)
{
	trace_kvm_booke_queue_irqprio(vcpu, priority);
	set_bit(priority, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
				 ulong dear_flags, ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
}

void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
				    ulong dear_flags, ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
}

void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
}

void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong esr_flags)
{
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
}

static void kvmppc_core_queue_alignment(struct kvm_vcpu *vcpu, ulong dear_flags,
					ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALIGNMENT);
}

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags)
{
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
}

void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
}

#ifdef CONFIG_ALTIVEC
void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_UNAVAIL);
}
#endif

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
}

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                struct kvm_interrupt *irq)
{
	unsigned int prio = BOOKE_IRQPRIO_EXTERNAL;

	if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
		prio = BOOKE_IRQPRIO_EXTERNAL_LEVEL;

	kvmppc_booke_queue_irqprio(vcpu, prio);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
	clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
}

static void kvmppc_core_queue_watchdog(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_WATCHDOG);
}

static void kvmppc_core_dequeue_watchdog(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_WATCHDOG, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_debug(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DEBUG);
}

void kvmppc_core_dequeue_debug(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_DEBUG, &vcpu->arch.pending_exceptions);
}

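/*
 * Helpers to stash the interrupted PC and MSR into the save/restore
 * register pair appropriate to each interrupt class.
 */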
static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	kvmppc_set_srr0(vcpu, srr0);
	kvmppc_set_srr1(vcpu, srr1);
}

static void set_guest_csrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	vcpu->arch.csrr0 = srr0;
	vcpu->arch.csrr1 = srr1;
}

static void set_guest_dsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC)) {
		vcpu->arch.dsrr0 = srr0;
		vcpu->arch.dsrr1 = srr1;
	} else {
		set_guest_csrr(vcpu, srr0, srr1);
	}
}

static void set_guest_mcsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	vcpu->arch.mcsrr0 = srr0;
	vcpu->arch.mcsrr1 = srr1;
}

/* Deliver the interrupt of the corresponding priority, if possible. */
static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
                                        unsigned int priority)
{
	int allowed = 0;
	ulong msr_mask = 0;
	bool update_esr = false, update_dear = false, update_epr = false;
	ulong crit_raw = vcpu->arch.shared->critical;
	ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
	bool crit;
	bool keep_irq = false;
	enum int_class int_class;
	ulong new_msr = vcpu->arch.shared->msr;

	/* Truncate crit indicators in 32 bit mode */
	if (!(vcpu->arch.shared->msr & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(vcpu->arch.shared->msr & MSR_PR);

	if (priority == BOOKE_IRQPRIO_EXTERNAL_LEVEL) {
		priority = BOOKE_IRQPRIO_EXTERNAL;
		keep_irq = true;
	}

	if ((priority == BOOKE_IRQPRIO_EXTERNAL) && vcpu->arch.epr_flags)
		update_epr = true;

	switch (priority) {
	case BOOKE_IRQPRIO_DTLB_MISS:
	case BOOKE_IRQPRIO_DATA_STORAGE:
	case BOOKE_IRQPRIO_ALIGNMENT:
		update_dear = true;
		fallthrough;
	case BOOKE_IRQPRIO_INST_STORAGE:
	case BOOKE_IRQPRIO_PROGRAM:
		update_esr = true;
		fallthrough;
	case BOOKE_IRQPRIO_ITLB_MISS:
	case BOOKE_IRQPRIO_SYSCALL:
	case BOOKE_IRQPRIO_FP_UNAVAIL:
#ifdef CONFIG_SPE_POSSIBLE
	case BOOKE_IRQPRIO_SPE_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_FP_DATA:
	case BOOKE_IRQPRIO_SPE_FP_ROUND:
#endif
#ifdef CONFIG_ALTIVEC
	case BOOKE_IRQPRIO_ALTIVEC_UNAVAIL:
	case BOOKE_IRQPRIO_ALTIVEC_ASSIST:
#endif
	case BOOKE_IRQPRIO_AP_UNAVAIL:
		allowed = 1;
		msr_mask = MSR_CE | MSR_ME | MSR_DE;
		int_class = INT_CLASS_NONCRIT;
		break;
	case BOOKE_IRQPRIO_WATCHDOG:
	case BOOKE_IRQPRIO_CRITICAL:
	case BOOKE_IRQPRIO_DBELL_CRIT:
		allowed = vcpu->arch.shared->msr & MSR_CE;
		allowed = allowed && !crit;
		msr_mask = MSR_ME;
		int_class = INT_CLASS_CRIT;
		break;
	case BOOKE_IRQPRIO_MACHINE_CHECK:
		allowed = vcpu->arch.shared->msr & MSR_ME;
		allowed = allowed && !crit;
		int_class = INT_CLASS_MC;
		break;
	case BOOKE_IRQPRIO_DECREMENTER:
	case BOOKE_IRQPRIO_FIT:
		keep_irq = true;
		fallthrough;
	case BOOKE_IRQPRIO_EXTERNAL:
	case BOOKE_IRQPRIO_DBELL:
		allowed = vcpu->arch.shared->msr & MSR_EE;
		allowed = allowed && !crit;
		msr_mask = MSR_CE | MSR_ME | MSR_DE;
		int_class = INT_CLASS_NONCRIT;
		break;
	case BOOKE_IRQPRIO_DEBUG:
		allowed = vcpu->arch.shared->msr & MSR_DE;
		allowed = allowed && !crit;
		msr_mask = MSR_ME;
		if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC))
			int_class = INT_CLASS_DBG;
		else
			int_class = INT_CLASS_CRIT;

		break;
	}

	if (allowed) {
		switch (int_class) {
		case INT_CLASS_NONCRIT:
			set_guest_srr(vcpu, vcpu->arch.regs.nip,
				      vcpu->arch.shared->msr);
			break;
		case INT_CLASS_CRIT:
			set_guest_csrr(vcpu, vcpu->arch.regs.nip,
				       vcpu->arch.shared->msr);
			break;
		case INT_CLASS_DBG:
			set_guest_dsrr(vcpu, vcpu->arch.regs.nip,
				       vcpu->arch.shared->msr);
			break;
		case INT_CLASS_MC:
			set_guest_mcsrr(vcpu, vcpu->arch.regs.nip,
					vcpu->arch.shared->msr);
			break;
		}

		vcpu->arch.regs.nip = vcpu->arch.ivpr |
					vcpu->arch.ivor[priority];
		if (update_esr == true)
			kvmppc_set_esr(vcpu, vcpu->arch.queued_esr);
		if (update_dear == true)
			kvmppc_set_dar(vcpu, vcpu->arch.queued_dear);
		if (update_epr == true) {
			if (vcpu->arch.epr_flags & KVMPPC_EPR_USER)
				kvm_make_request(KVM_REQ_EPR_EXIT, vcpu);
			else if (vcpu->arch.epr_flags & KVMPPC_EPR_KERNEL) {
				BUG_ON(vcpu->arch.irq_type != KVMPPC_IRQ_MPIC);
				kvmppc_mpic_set_epr(vcpu);
			}
		}

		new_msr &= msr_mask;
#if defined(CONFIG_64BIT)
		if (vcpu->arch.epcr & SPRN_EPCR_ICM)
			new_msr |= MSR_CM;
#endif
		kvmppc_set_msr(vcpu, new_msr);

		if (!keep_irq)
			clear_bit(priority, &vcpu->arch.pending_exceptions);
	}

#ifdef CONFIG_KVM_BOOKE_HV
	/*
	 * If an interrupt is pending but masked, raise a guest doorbell
	 * so that we are notified when the guest enables the relevant
	 * MSR bit.
	 */
	if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_EE)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_NONCRIT);
	if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_CE)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_CRIT);
	if (vcpu->arch.pending_exceptions & BOOKE_IRQPRIO_MACHINE_CHECK)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_MC);
#endif

	return allowed;
}

/*
 * Return the number of jiffies until the next timeout.  If the timeout is
 * longer than the NEXT_TIMER_MAX_DELTA, then return NEXT_TIMER_MAX_DELTA
 * because the larger value can break the timer APIs.
 */
static unsigned long watchdog_next_timeout(struct kvm_vcpu *vcpu)
{
	u64 tb, wdt_tb, wdt_ticks = 0;
	u64 nr_jiffies = 0;
	u32 period = TCR_GET_WP(vcpu->arch.tcr);

	wdt_tb = 1ULL << (63 - period);
	tb = get_tb();
	/*
	 * The watchdog timeout will happen when the TB bit corresponding
	 * to the watchdog period toggles from 0 to 1.
	 */
	if (tb & wdt_tb)
		wdt_ticks = wdt_tb;

	wdt_ticks += wdt_tb - (tb & (wdt_tb - 1));

	/* Convert timebase ticks to jiffies */
	nr_jiffies = wdt_ticks;

	if (do_div(nr_jiffies, tb_ticks_per_jiffy))
		nr_jiffies++;

	return min_t(unsigned long long, nr_jiffies, NEXT_TIMER_MAX_DELTA);
}

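/*
 * (Re)arm the host timer backing the guest watchdog, or stop it when
 * the next timeout is too far out for the timer API.
 */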
static void arm_next_watchdog(struct kvm_vcpu *vcpu)
{
	unsigned long nr_jiffies;
	unsigned long flags;

	/*
	 * If TSR_ENW and TSR_WIS are not set then no need to exit to
	 * userspace, so clear the KVM_REQ_WATCHDOG request.
	 */
	if ((vcpu->arch.tsr & (TSR_ENW | TSR_WIS)) != (TSR_ENW | TSR_WIS))
		kvm_clear_request(KVM_REQ_WATCHDOG, vcpu);

	spin_lock_irqsave(&vcpu->arch.wdt_lock, flags);
	nr_jiffies = watchdog_next_timeout(vcpu);
	/*
	 * If the number of jiffies of watchdog timer >= NEXT_TIMER_MAX_DELTA
	 * then do not run the watchdog timer as this can break timer APIs.
	 */
	if (nr_jiffies < NEXT_TIMER_MAX_DELTA)
		mod_timer(&vcpu->arch.wdt_timer, jiffies + nr_jiffies);
	else
		del_timer(&vcpu->arch.wdt_timer);
	spin_unlock_irqrestore(&vcpu->arch.wdt_lock, flags);
}

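/*
 * Host timer callback emulating the guest watchdog: step TSR from ENW
 * to WIS to final expiry, kicking the vcpu so pending timer state and,
 * on final expiry, a userspace exit can be processed.
 */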
void kvmppc_watchdog_func(struct timer_list *t)
{
	struct kvm_vcpu *vcpu = from_timer(vcpu, t, arch.wdt_timer);
	u32 tsr, new_tsr;
	int final;

	do {
		new_tsr = tsr = vcpu->arch.tsr;
		final = 0;

		/* Time out event */
		if (tsr & TSR_ENW) {
			if (tsr & TSR_WIS)
				final = 1;
			else
				new_tsr = tsr | TSR_WIS;
		} else {
			new_tsr = tsr | TSR_ENW;
		}
	} while (cmpxchg(&vcpu->arch.tsr, tsr, new_tsr) != tsr);

	if (new_tsr & TSR_WIS) {
		smp_wmb();
		kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
		kvm_vcpu_kick(vcpu);
	}

	/*
	 * If this is final watchdog expiry and some action is required
	 * then exit to userspace.
	 */
	if (final && (vcpu->arch.tcr & TCR_WRC_MASK) &&
	    vcpu->arch.watchdog_enabled) {
		smp_wmb();
		kvm_make_request(KVM_REQ_WATCHDOG, vcpu);
		kvm_vcpu_kick(vcpu);
	}

	/*
	 * Stop running the watchdog timer after final expiration to
	 * prevent the host from being flooded with timers if the
	 * guest sets a short period.
	 * Timers will resume when TSR/TCR is updated next time.
	 */
	if (!final)
		arm_next_watchdog(vcpu);
}

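/*
 * Reconcile pending decrementer/watchdog interrupts with the current
 * TCR enable bits and TSR status bits.
 */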
static void update_timer_ints(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.tcr & TCR_DIE) && (vcpu->arch.tsr & TSR_DIS))
		kvmppc_core_queue_dec(vcpu);
	else
		kvmppc_core_dequeue_dec(vcpu);

	if ((vcpu->arch.tcr & TCR_WIE) && (vcpu->arch.tsr & TSR_WIS))
		kvmppc_core_queue_watchdog(vcpu);
	else
		kvmppc_core_dequeue_watchdog(vcpu);
}

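/*
 * Walk the pending exceptions in priority order and deliver the first
 * one that can currently be delivered.
 */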
static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned int priority;

	priority = __ffs(*pending);
	while (priority < BOOKE_IRQPRIO_MAX) {
		if (kvmppc_booke_irqprio_deliver(vcpu, priority))
			break;

		priority = find_next_bit(pending,
		                         BITS_PER_BYTE * sizeof(*pending),
		                         priority + 1);
	}

	/* Tell the guest about our interrupt status */
	vcpu->arch.shared->int_pending = !!*pending;
}

/* Check pending exceptions and deliver one, if possible. */
int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r = 0;
	WARN_ON_ONCE(!irqs_disabled());

	kvmppc_core_check_exceptions(vcpu);

	if (kvm_request_pending(vcpu)) {
		/* Exception delivery raised request; start over */
		return 1;
	}

	if (vcpu->arch.shared->msr & MSR_WE) {
		local_irq_enable();
		kvm_vcpu_block(vcpu);
		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
		hard_irq_disable();

		kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
		r = 1;
	}

	return r;
}

int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
	int r = 1; /* Indicate we want to get back into the guest */

	if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu))
		update_timer_ints(vcpu);
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
		kvmppc_core_flush_tlb(vcpu);
#endif

	if (kvm_check_request(KVM_REQ_WATCHDOG, vcpu)) {
		vcpu->run->exit_reason = KVM_EXIT_WATCHDOG;
		r = 0;
	}

	if (kvm_check_request(KVM_REQ_EPR_EXIT, vcpu)) {
		vcpu->run->epr.epr = 0;
		vcpu->arch.epr_needed = true;
		vcpu->run->exit_reason = KVM_EXIT_EPR;
		r = 0;
	}

	return r;
}

int kvmppc_vcpu_run(struct kvm_vcpu *vcpu)
{
	int ret, s;
	struct debug_reg debug;

	if (!vcpu->arch.sane) {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return -EINVAL;
	}

	s = kvmppc_prepare_to_enter(vcpu);
	if (s <= 0) {
		ret = s;
		goto out;
	}
	/* interrupts now hard-disabled */

#ifdef CONFIG_PPC_FPU
	/* Save userspace FPU state in stack */
	enable_kernel_fp();

	/*
	 * Since we can't trap on MSR_FP in GS-mode, we consider the guest
	 * as always using the FPU.
	 */
	kvmppc_load_guest_fp(vcpu);
#endif

#ifdef CONFIG_ALTIVEC
	/* Save userspace AltiVec state in stack */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		enable_kernel_altivec();
	/*
	 * Since we can't trap on MSR_VEC in GS-mode, we consider the guest
	 * as always using the AltiVec.
	 */
	kvmppc_load_guest_altivec(vcpu);
#endif

	/* Switch to guest debug context */
	debug = vcpu->arch.dbg_reg;
	switch_booke_debug_regs(&debug);
	debug = current->thread.debug;
	current->thread.debug = vcpu->arch.dbg_reg;

	vcpu->arch.pgdir = vcpu->kvm->mm->pgd;
	kvmppc_fix_ee_before_entry();

	ret = __kvmppc_vcpu_run(vcpu);

	/* No need for guest_exit. It's done in handle_exit.
	   We also get here with interrupts enabled. */

	/* Switch back to user space debug context */
	switch_booke_debug_regs(&debug);
	current->thread.debug = debug;

#ifdef CONFIG_PPC_FPU
	kvmppc_save_guest_fp(vcpu);
#endif

#ifdef CONFIG_ALTIVEC
	kvmppc_save_guest_altivec(vcpu);
#endif

out:
	vcpu->mode = OUTSIDE_GUEST_MODE;
	return ret;
}

static int emulation_exit(struct kvm_vcpu *vcpu)
{
	enum emulation_result er;

	er = kvmppc_emulate_instruction(vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* don't overwrite subtypes, just account kvm_stats */
		kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
		/* Future optimization: only reload non-volatiles if
		 * they were actually modified by emulation. */
		return RESUME_GUEST_NV;

	case EMULATE_AGAIN:
		return RESUME_GUEST;

	case EMULATE_FAIL:
		printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
		       __func__, vcpu->arch.regs.nip, vcpu->arch.last_inst);
		/* For debugging, encode the failing instruction and
		 * report it to userspace. */
		vcpu->run->hw.hardware_exit_reason = ~0ULL << 32;
		vcpu->run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
		kvmppc_core_queue_program(vcpu, ESR_PIL);
		return RESUME_HOST;

	case EMULATE_EXIT_USER:
		return RESUME_HOST;

	default:
		BUG();
	}
}

static int kvmppc_handle_debug(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	struct debug_reg *dbg_reg = &(vcpu->arch.dbg_reg);
	u32 dbsr = vcpu->arch.dbsr;

	if (vcpu->guest_debug == 0) {
		/*
		 * The debug resources belong to the guest.
		 * Imprecise debug events are not injected.
		 */
		if (dbsr & DBSR_IDE) {
			dbsr &= ~DBSR_IDE;
			if (!dbsr)
				return RESUME_GUEST;
		}

		if (dbsr && (vcpu->arch.shared->msr & MSR_DE) &&
			    (vcpu->arch.dbg_reg.dbcr0 & DBCR0_IDM))
			kvmppc_core_queue_debug(vcpu);

		/* Inject a program interrupt if trap debug is not allowed */
		if ((dbsr & DBSR_TIE) && !(vcpu->arch.shared->msr & MSR_DE))
			kvmppc_core_queue_program(vcpu, ESR_PTR);

		return RESUME_GUEST;
	}

	/*
	 * Debug resource owned by userspace.
	 * Clear guest dbsr (vcpu->arch.dbsr)
	 */
	vcpu->arch.dbsr = 0;
	run->debug.arch.status = 0;
	run->debug.arch.address = vcpu->arch.regs.nip;

	if (dbsr & (DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4)) {
		run->debug.arch.status |= KVMPPC_DEBUG_BREAKPOINT;
	} else {
		if (dbsr & (DBSR_DAC1W | DBSR_DAC2W))
			run->debug.arch.status |= KVMPPC_DEBUG_WATCH_WRITE;
		else if (dbsr & (DBSR_DAC1R | DBSR_DAC2R))
			run->debug.arch.status |= KVMPPC_DEBUG_WATCH_READ;
		if (dbsr & (DBSR_DAC1R | DBSR_DAC1W))
			run->debug.arch.address = dbg_reg->dac1;
		else if (dbsr & (DBSR_DAC2R | DBSR_DAC2W))
			run->debug.arch.address = dbg_reg->dac2;
	}

	return RESUME_HOST;
}

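/*
 * Synthesize a pt_regs snapshot of the current host context (stack
 * pointer, return address, MSR, LR) so host interrupt handlers, which
 * expect a pt_regs argument, can be called from the exit path.
 */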
static void kvmppc_fill_pt_regs(struct pt_regs *regs)
{
	ulong r1, ip, msr, lr;

	asm("mr %0, 1" : "=r"(r1));
	asm("mflr %0" : "=r"(lr));
	asm("mfmsr %0" : "=r"(msr));
	asm("bl 1f; 1: mflr %0" : "=r"(ip));

	memset(regs, 0, sizeof(*regs));
	regs->gpr[1] = r1;
	regs->nip = ip;
	regs->msr = msr;
	regs->link = lr;
}

/*
 * For interrupts that need to be handled by host interrupt handlers,
 * the corresponding host handler is called from here in a similar way
 * (but not exactly) as it would be called from the low level handler
 * (such as from arch/powerpc/kernel/head_fsl_booke.S).
 */
static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
				     unsigned int exit_nr)
{
	struct pt_regs regs;

	switch (exit_nr) {
	case BOOKE_INTERRUPT_EXTERNAL:
		kvmppc_fill_pt_regs(&regs);
		do_IRQ(&regs);
		break;
	case BOOKE_INTERRUPT_DECREMENTER:
		kvmppc_fill_pt_regs(&regs);
		timer_interrupt(&regs);
		break;
#if defined(CONFIG_PPC_DOORBELL)
	case BOOKE_INTERRUPT_DOORBELL:
		kvmppc_fill_pt_regs(&regs);
		doorbell_exception(&regs);
		break;
#endif
	case BOOKE_INTERRUPT_MACHINE_CHECK:
		/* FIXME */
		break;
	case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
		kvmppc_fill_pt_regs(&regs);
		performance_monitor_exception(&regs);
		break;
	case BOOKE_INTERRUPT_WATCHDOG:
		kvmppc_fill_pt_regs(&regs);
#ifdef CONFIG_BOOKE_WDT
		WatchdogException(&regs);
#else
		unknown_exception(&regs);
#endif
		break;
	case BOOKE_INTERRUPT_CRITICAL:
		kvmppc_fill_pt_regs(&regs);
		unknown_exception(&regs);
		break;
	case BOOKE_INTERRUPT_DEBUG:
		/* Save DBSR before preemption is enabled */
		vcpu->arch.dbsr = mfspr(SPRN_DBSR);
		kvmppc_clear_dbsr();
		break;
	}
}

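/*
 * Decide how to resume after a failed or deferred guest instruction
 * fetch (the result of kvmppc_get_last_inst()).
 */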
static int kvmppc_resume_inst_load(struct kvm_vcpu *vcpu,
				  enum emulation_result emulated, u32 last_inst)
{
	switch (emulated) {
	case EMULATE_AGAIN:
		return RESUME_GUEST;

	case EMULATE_FAIL:
		pr_debug("%s: load instruction from guest address %lx failed\n",
		       __func__, vcpu->arch.regs.nip);
		/* For debugging, encode the failing instruction and
		 * report it to userspace. */
		vcpu->run->hw.hardware_exit_reason = ~0ULL << 32;
		vcpu->run->hw.hardware_exit_reason |= last_inst;
		kvmppc_core_queue_program(vcpu, ESR_PIL);
		return RESUME_HOST;

	default:
		BUG();
	}
}

/**
 * kvmppc_handle_exit
 *
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvmppc_handle_exit(struct kvm_vcpu *vcpu, unsigned int exit_nr)
{
	struct kvm_run *run = vcpu->run;
	int r = RESUME_HOST;
	int s;
	int idx;
	u32 last_inst = KVM_INST_FETCH_FAILED;
	enum emulation_result emulated = EMULATE_DONE;

	/* update before a new last_exit_type is rewritten */
	kvmppc_update_timing_stats(vcpu);

	/* restart interrupts if they were meant for the host */
	kvmppc_restart_interrupt(vcpu, exit_nr);

	/*
	 * get last instruction before being preempted
	 * TODO: for e6500 check also BOOKE_INTERRUPT_LRAT_ERROR & ESR_DATA
	 */
	switch (exit_nr) {
	case BOOKE_INTERRUPT_DATA_STORAGE:
	case BOOKE_INTERRUPT_DTLB_MISS:
	case BOOKE_INTERRUPT_HV_PRIV:
		emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
		break;
	case BOOKE_INTERRUPT_PROGRAM:
		/* SW breakpoints arrive as illegal instructions on HV */
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
			emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
		break;
	default:
		break;
	}

	trace_kvm_exit(exit_nr, vcpu);

	context_tracking_guest_exit();
	if (!vtime_accounting_enabled_this_cpu()) {
		local_irq_enable();
		/*
		 * Service IRQs here before vtime_account_guest_exit() so any
		 * ticks that occurred while running the guest are accounted to
		 * the guest. If vtime accounting is enabled, accounting uses
		 * TB rather than ticks, so it can be done without enabling
		 * interrupts here, which has the problem that it accounts
		 * interrupt processing overhead to the host.
		 */
		local_irq_disable();
	}
	vtime_account_guest_exit();

	local_irq_enable();

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	if (emulated != EMULATE_DONE) {
		r = kvmppc_resume_inst_load(vcpu, emulated, last_inst);
		goto out;
	}

	switch (exit_nr) {
	case BOOKE_INTERRUPT_MACHINE_CHECK:
		printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
		kvmppc_dump_vcpu(vcpu);
		/* For debugging, send invalid exit reason to user space */
		run->hw.hardware_exit_reason = ~1ULL << 32;
		run->hw.hardware_exit_reason |= mfspr(SPRN_MCSR);
		r = RESUME_HOST;
		break;

	case BOOKE_INTERRUPT_EXTERNAL:
		kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DECREMENTER:
		kvmppc_account_exit(vcpu, DEC_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_WATCHDOG:
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DOORBELL:
		kvmppc_account_exit(vcpu, DBELL_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_GUEST_DBELL_CRIT:
		kvmppc_account_exit(vcpu, GDBELL_EXITS);

		/*
		 * We are here because there is a pending guest interrupt
		 * which could not be delivered as MSR_CE or MSR_ME was not
		 * set.  Once we break from here we will retry delivery.
		 */
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_GUEST_DBELL:
		kvmppc_account_exit(vcpu, GDBELL_EXITS);

		/*
		 * We are here because there is a pending guest interrupt
		 * which could not be delivered as MSR_EE was not set.  Once
		 * we break from here we will retry delivery.
		 */
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_HV_PRIV:
		r = emulation_exit(vcpu);
		break;

	case BOOKE_INTERRUPT_PROGRAM:
		if ((vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) &&
			(last_inst == KVMPPC_INST_SW_BREAKPOINT)) {
			/*
			 * We are here because of an SW breakpoint instruction,
			 * so let's return to the host to handle it.
			 */
			r = kvmppc_handle_debug(vcpu);
			run->exit_reason = KVM_EXIT_DEBUG;
			kvmppc_account_exit(vcpu, DEBUG_EXITS);
			break;
		}

		if (vcpu->arch.shared->msr & (MSR_PR | MSR_GS)) {
			/*
			 * Program traps generated by user-level software must
			 * be handled by the guest kernel.
			 *
			 * In GS mode, hypervisor privileged instructions trap
			 * on BOOKE_INTERRUPT_HV_PRIV, not here, so these are
			 * actual program interrupts, handled by the guest.
			 */
			kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
			r = RESUME_GUEST;
			kvmppc_account_exit(vcpu, USR_PR_INST);
			break;
		}

		r = emulation_exit(vcpu);
		break;

	case BOOKE_INTERRUPT_FP_UNAVAIL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
		kvmppc_account_exit(vcpu, FP_UNAVAIL);
		r = RESUME_GUEST;
		break;

#ifdef CONFIG_SPE
	case BOOKE_INTERRUPT_SPE_UNAVAIL: {
		if (vcpu->arch.shared->msr & MSR_SPE)
			kvmppc_vcpu_enable_spe(vcpu);
		else
			kvmppc_booke_queue_irqprio(vcpu,
						   BOOKE_IRQPRIO_SPE_UNAVAIL);
		r = RESUME_GUEST;
		break;
	}

	case BOOKE_INTERRUPT_SPE_FP_DATA:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SPE_FP_ROUND:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
		r = RESUME_GUEST;
		break;
#elif defined(CONFIG_SPE_POSSIBLE)
	case BOOKE_INTERRUPT_SPE_UNAVAIL:
		/*
		 * Guest wants SPE, but host kernel doesn't support it.  Send
		 * an "unimplemented operation" program check to the guest.
		 */
		kvmppc_core_queue_program(vcpu, ESR_PUO | ESR_SPV);
		r = RESUME_GUEST;
		break;

	/*
	 * These really should never happen without CONFIG_SPE,
	 * as we should never enable the real MSR[SPE] in the guest.
	 */
	case BOOKE_INTERRUPT_SPE_FP_DATA:
	case BOOKE_INTERRUPT_SPE_FP_ROUND:
		printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n",
		       __func__, exit_nr, vcpu->arch.regs.nip);
		run->hw.hardware_exit_reason = exit_nr;
		r = RESUME_HOST;
		break;
#endif /* CONFIG_SPE_POSSIBLE */

/*
 * On cores with Vector category, KVM is loaded only if CONFIG_ALTIVEC,
 * see kvmppc_core_check_processor_compat().
 */
#ifdef CONFIG_ALTIVEC
	case BOOKE_INTERRUPT_ALTIVEC_UNAVAIL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_UNAVAIL);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_ALTIVEC_ASSIST:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_ASSIST);
		r = RESUME_GUEST;
		break;
#endif

	case BOOKE_INTERRUPT_DATA_STORAGE:
		kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
		                               vcpu->arch.fault_esr);
		kvmppc_account_exit(vcpu, DSI_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_INST_STORAGE:
		kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr);
		kvmppc_account_exit(vcpu, ISI_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_ALIGNMENT:
		kvmppc_core_queue_alignment(vcpu, vcpu->arch.fault_dear,
		                            vcpu->arch.fault_esr);
		r = RESUME_GUEST;
		break;

#ifdef CONFIG_KVM_BOOKE_HV
	case BOOKE_INTERRUPT_HV_SYSCALL:
		if (!(vcpu->arch.shared->msr & MSR_PR)) {
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
		} else {
			/*
			 * hcall from guest userspace -- send privileged
			 * instruction program check.
			 */
			kvmppc_core_queue_program(vcpu, ESR_PPR);
		}

		r = RESUME_GUEST;
		break;
#else
	case BOOKE_INTERRUPT_SYSCALL:
		if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
			/* KVM PV hypercalls */
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
			r = RESUME_GUEST;
		} else {
			/* Guest syscalls */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
		}
		kvmppc_account_exit(vcpu, SYSCALL_EXITS);
		r = RESUME_GUEST;
		break;
#endif

	case BOOKE_INTERRUPT_DTLB_MISS: {
		unsigned long eaddr = vcpu->arch.fault_dear;
		int gtlb_index;
		gpa_t gpaddr;
		gfn_t gfn;

#ifdef CONFIG_KVM_E500V2
		if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
			kvmppc_map_magic(vcpu);
			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
			r = RESUME_GUEST;

			break;
		}
#endif

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_core_queue_dtlb_miss(vcpu,
			                            vcpu->arch.fault_dear,
			                            vcpu->arch.fault_esr);
			kvmppc_mmu_dtlb_miss(vcpu);
			kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS);
			r = RESUME_GUEST;
			break;
		}

		idx = srcu_read_lock(&vcpu->kvm->srcu);

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't, and it is RAM. This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
			r = RESUME_GUEST;
		} else {
			/* Guest has mapped and accessed a page which is not
			 * actually RAM. */
			vcpu->arch.paddr_accessed = gpaddr;
			vcpu->arch.vaddr_accessed = eaddr;
			r = kvmppc_emulate_mmio(vcpu);
			kvmppc_account_exit(vcpu, MMIO_EXITS);
		}

		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	}

	case BOOKE_INTERRUPT_ITLB_MISS: {
		unsigned long eaddr = vcpu->arch.regs.nip;
		gpa_t gpaddr;
		gfn_t gfn;
		int gtlb_index;

		r = RESUME_GUEST;

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
			kvmppc_mmu_itlb_miss(vcpu);
			kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS);
			break;
		}

		kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);

		idx = srcu_read_lock(&vcpu->kvm->srcu);

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't. This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
		} else {
			/* Guest mapped and leaped at non-RAM! */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
		}

		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	}

	case BOOKE_INTERRUPT_DEBUG: {
		r = kvmppc_handle_debug(vcpu);
		if (r == RESUME_HOST)
			run->exit_reason = KVM_EXIT_DEBUG;
		kvmppc_account_exit(vcpu, DEBUG_EXITS);
		break;
	}

	default:
		printk(KERN_EMERG "exit_nr %d\n", exit_nr);
		BUG();
	}

out:
	/*
	 * To avoid clobbering exit_reason, only check for signals if we
	 * aren't already exiting to userspace for some other reason.
	 */
	if (!(r & RESUME_HOST)) {
		s = kvmppc_prepare_to_enter(vcpu);
		if (s <= 0)
			r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
		else {
			/* interrupts now hard-disabled */
			kvmppc_fix_ee_before_entry();
			kvmppc_load_guest_fp(vcpu);
			kvmppc_load_guest_altivec(vcpu);
		}
	}

	return r;
}

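/*
 * Replace the whole TSR; rearm the watchdog if its enable/status bits
 * changed, then recompute pending timer interrupts.
 */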
static void kvmppc_set_tsr(struct kvm_vcpu *vcpu, u32 new_tsr)
{
	u32 old_tsr = vcpu->arch.tsr;

	vcpu->arch.tsr = new_tsr;

	if ((old_tsr ^ vcpu->arch.tsr) & (TSR_ENW | TSR_WIS))
		arm_next_watchdog(vcpu);

	update_timer_ints(vcpu);
}

int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
{
	/* setup watchdog timer once */
	spin_lock_init(&vcpu->arch.wdt_lock);
	timer_setup(&vcpu->arch.wdt_timer, kvmppc_watchdog_func, 0);

	/*
	 * Clear DBSR.MRR to avoid a guest debug interrupt, as this bit
	 * is of host interest only.
	 */
	mtspr(SPRN_DBSR, DBSR_MRR);
	return 0;
}

void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	del_timer_sync(&vcpu->arch.wdt_timer);
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	vcpu_load(vcpu);

	regs->pc = vcpu->arch.regs.nip;
	regs->cr = kvmppc_get_cr(vcpu);
	regs->ctr = vcpu->arch.regs.ctr;
	regs->lr = vcpu->arch.regs.link;
	regs->xer = kvmppc_get_xer(vcpu);
	regs->msr = vcpu->arch.shared->msr;
	regs->srr0 = kvmppc_get_srr0(vcpu);
	regs->srr1 = kvmppc_get_srr1(vcpu);
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = kvmppc_get_sprg0(vcpu);
	regs->sprg1 = kvmppc_get_sprg1(vcpu);
	regs->sprg2 = kvmppc_get_sprg2(vcpu);
	regs->sprg3 = kvmppc_get_sprg3(vcpu);
	regs->sprg4 = kvmppc_get_sprg4(vcpu);
	regs->sprg5 = kvmppc_get_sprg5(vcpu);
	regs->sprg6 = kvmppc_get_sprg6(vcpu);
	regs->sprg7 = kvmppc_get_sprg7(vcpu);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	vcpu_load(vcpu);

	vcpu->arch.regs.nip = regs->pc;
	kvmppc_set_cr(vcpu, regs->cr);
	vcpu->arch.regs.ctr = regs->ctr;
	vcpu->arch.regs.link = regs->lr;
	kvmppc_set_xer(vcpu, regs->xer);
	kvmppc_set_msr(vcpu, regs->msr);
	kvmppc_set_srr0(vcpu, regs->srr0);
	kvmppc_set_srr1(vcpu, regs->srr1);
	kvmppc_set_pid(vcpu, regs->pid);
	kvmppc_set_sprg0(vcpu, regs->sprg0);
	kvmppc_set_sprg1(vcpu, regs->sprg1);
	kvmppc_set_sprg2(vcpu, regs->sprg2);
	kvmppc_set_sprg3(vcpu, regs->sprg3);
	kvmppc_set_sprg4(vcpu, regs->sprg4);
	kvmppc_set_sprg5(vcpu, regs->sprg5);
	kvmppc_set_sprg6(vcpu, regs->sprg6);
	kvmppc_set_sprg7(vcpu, regs->sprg7);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

	vcpu_put(vcpu);
	return 0;
}

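/*
 * The sregs interface is feature-flagged: each get helper below sets
 * the KVM_SREGS_E_* bit covering the registers it fills in, and each
 * set helper only acts when userspace passed that bit back.
 */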
static void get_sregs_base(struct kvm_vcpu *vcpu,
                           struct kvm_sregs *sregs)
{
	u64 tb = get_tb();

	sregs->u.e.features |= KVM_SREGS_E_BASE;

	sregs->u.e.csrr0 = vcpu->arch.csrr0;
	sregs->u.e.csrr1 = vcpu->arch.csrr1;
	sregs->u.e.mcsr = vcpu->arch.mcsr;
	sregs->u.e.esr = kvmppc_get_esr(vcpu);
	sregs->u.e.dear = kvmppc_get_dar(vcpu);
	sregs->u.e.tsr = vcpu->arch.tsr;
	sregs->u.e.tcr = vcpu->arch.tcr;
	sregs->u.e.dec = kvmppc_get_dec(vcpu, tb);
	sregs->u.e.tb = tb;
	sregs->u.e.vrsave = vcpu->arch.vrsave;
}

static int set_sregs_base(struct kvm_vcpu *vcpu,
                          struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_BASE))
		return 0;

	vcpu->arch.csrr0 = sregs->u.e.csrr0;
	vcpu->arch.csrr1 = sregs->u.e.csrr1;
	vcpu->arch.mcsr = sregs->u.e.mcsr;
	kvmppc_set_esr(vcpu, sregs->u.e.esr);
	kvmppc_set_dar(vcpu, sregs->u.e.dear);
	vcpu->arch.vrsave = sregs->u.e.vrsave;
	kvmppc_set_tcr(vcpu, sregs->u.e.tcr);

	if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_DEC) {
		vcpu->arch.dec = sregs->u.e.dec;
		kvmppc_emulate_dec(vcpu);
	}

	if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR)
		kvmppc_set_tsr(vcpu, sregs->u.e.tsr);

	return 0;
}

static void get_sregs_arch206(struct kvm_vcpu *vcpu,
                              struct kvm_sregs *sregs)
{
	sregs->u.e.features |= KVM_SREGS_E_ARCH206;

	sregs->u.e.pir = vcpu->vcpu_id;
	sregs->u.e.mcsrr0 = vcpu->arch.mcsrr0;
	sregs->u.e.mcsrr1 = vcpu->arch.mcsrr1;
	sregs->u.e.decar = vcpu->arch.decar;
	sregs->u.e.ivpr = vcpu->arch.ivpr;
}

static int set_sregs_arch206(struct kvm_vcpu *vcpu,
                             struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_ARCH206))
		return 0;

	if (sregs->u.e.pir != vcpu->vcpu_id)
		return -EINVAL;

	vcpu->arch.mcsrr0 = sregs->u.e.mcsrr0;
	vcpu->arch.mcsrr1 = sregs->u.e.mcsrr1;
	vcpu->arch.decar = sregs->u.e.decar;
	vcpu->arch.ivpr = sregs->u.e.ivpr;

	return 0;
}

int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	sregs->u.e.features |= KVM_SREGS_E_IVOR;

	sregs->u.e.ivor_low[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL];
	sregs->u.e.ivor_low[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK];
	sregs->u.e.ivor_low[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE];
	sregs->u.e.ivor_low[3] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE];
	sregs->u.e.ivor_low[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL];
	sregs->u.e.ivor_low[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT];
	sregs->u.e.ivor_low[6] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM];
	sregs->u.e.ivor_low[7] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL];
	sregs->u.e.ivor_low[8] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL];
	sregs->u.e.ivor_low[9] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL];
	sregs->u.e.ivor_low[10] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER];
	sregs->u.e.ivor_low[11] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT];
	sregs->u.e.ivor_low[12] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG];
	sregs->u.e.ivor_low[13] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
	sregs->u.e.ivor_low[14] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
	sregs->u.e.ivor_low[15] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
	return 0;
}

int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
		return 0;

	vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = sregs->u.e.ivor_low[0];
	vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = sregs->u.e.ivor_low[1];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = sregs->u.e.ivor_low[2];
	vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = sregs->u.e.ivor_low[3];
	vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = sregs->u.e.ivor_low[4];
	vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = sregs->u.e.ivor_low[5];
	vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = sregs->u.e.ivor_low[6];
	vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = sregs->u.e.ivor_low[7];
	vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = sregs->u.e.ivor_low[8];
	vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = sregs->u.e.ivor_low[9];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = sregs->u.e.ivor_low[10];
	vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = sregs->u.e.ivor_low[11];
	vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = sregs->u.e.ivor_low[12];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = sregs->u.e.ivor_low[13];
	vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = sregs->u.e.ivor_low[14];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = sregs->u.e.ivor_low[15];

	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
	int ret;

	vcpu_load(vcpu);

	sregs->pvr = vcpu->arch.pvr;

	get_sregs_base(vcpu, sregs);
	get_sregs_arch206(vcpu, sregs);
	ret = vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);

	vcpu_put(vcpu);
	return ret;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
	int ret = -EINVAL;

	vcpu_load(vcpu);
	if (vcpu->arch.pvr != sregs->pvr)
		goto out;

	ret = set_sregs_base(vcpu, sregs);
	if (ret < 0)
		goto out;

	ret = set_sregs_arch206(vcpu, sregs);
	if (ret < 0)
		goto out;

	ret = vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);

out:
	vcpu_put(vcpu);
	return ret;
}

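/*
 * ONE_REG accessors for the booke-specific registers; anything not
 * handled here is forwarded to the core-specific kvm_ops.
 */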
kvmppc_get_one_reg(struct kvm_vcpu * vcpu,u64 id,union kvmppc_one_reg * val)1637 int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
1638 			union kvmppc_one_reg *val)
1639 {
1640 	int r = 0;
1641 
1642 	switch (id) {
1643 	case KVM_REG_PPC_IAC1:
1644 		*val = get_reg_val(id, vcpu->arch.dbg_reg.iac1);
1645 		break;
1646 	case KVM_REG_PPC_IAC2:
1647 		*val = get_reg_val(id, vcpu->arch.dbg_reg.iac2);
1648 		break;
1649 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
1650 	case KVM_REG_PPC_IAC3:
1651 		*val = get_reg_val(id, vcpu->arch.dbg_reg.iac3);
1652 		break;
1653 	case KVM_REG_PPC_IAC4:
1654 		*val = get_reg_val(id, vcpu->arch.dbg_reg.iac4);
1655 		break;
1656 #endif
1657 	case KVM_REG_PPC_DAC1:
1658 		*val = get_reg_val(id, vcpu->arch.dbg_reg.dac1);
1659 		break;
1660 	case KVM_REG_PPC_DAC2:
1661 		*val = get_reg_val(id, vcpu->arch.dbg_reg.dac2);
1662 		break;
1663 	case KVM_REG_PPC_EPR: {
1664 		u32 epr = kvmppc_get_epr(vcpu);
1665 		*val = get_reg_val(id, epr);
1666 		break;
1667 	}
1668 #if defined(CONFIG_64BIT)
1669 	case KVM_REG_PPC_EPCR:
1670 		*val = get_reg_val(id, vcpu->arch.epcr);
1671 		break;
1672 #endif
1673 	case KVM_REG_PPC_TCR:
1674 		*val = get_reg_val(id, vcpu->arch.tcr);
1675 		break;
1676 	case KVM_REG_PPC_TSR:
1677 		*val = get_reg_val(id, vcpu->arch.tsr);
1678 		break;
1679 	case KVM_REG_PPC_DEBUG_INST:
1680 		*val = get_reg_val(id, KVMPPC_INST_SW_BREAKPOINT);
1681 		break;
1682 	case KVM_REG_PPC_VRSAVE:
1683 		*val = get_reg_val(id, vcpu->arch.vrsave);
1684 		break;
1685 	default:
1686 		r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, id, val);
1687 		break;
1688 	}
1689 
1690 	return r;
1691 }
1692 
int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
		       union kvmppc_one_reg *val)
{
	int r = 0;

	switch (id) {
	case KVM_REG_PPC_IAC1:
		vcpu->arch.dbg_reg.iac1 = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_IAC2:
		vcpu->arch.dbg_reg.iac2 = set_reg_val(id, *val);
		break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	case KVM_REG_PPC_IAC3:
		vcpu->arch.dbg_reg.iac3 = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_IAC4:
		vcpu->arch.dbg_reg.iac4 = set_reg_val(id, *val);
		break;
#endif
	case KVM_REG_PPC_DAC1:
		vcpu->arch.dbg_reg.dac1 = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_DAC2:
		vcpu->arch.dbg_reg.dac2 = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_EPR: {
		u32 new_epr = set_reg_val(id, *val);
		kvmppc_set_epr(vcpu, new_epr);
		break;
	}
#if defined(CONFIG_64BIT)
	case KVM_REG_PPC_EPCR: {
		u32 new_epcr = set_reg_val(id, *val);
		kvmppc_set_epcr(vcpu, new_epcr);
		break;
	}
#endif
	case KVM_REG_PPC_OR_TSR: {
		u32 tsr_bits = set_reg_val(id, *val);
		kvmppc_set_tsr_bits(vcpu, tsr_bits);
		break;
	}
	case KVM_REG_PPC_CLEAR_TSR: {
		u32 tsr_bits = set_reg_val(id, *val);
		kvmppc_clr_tsr_bits(vcpu, tsr_bits);
		break;
	}
	case KVM_REG_PPC_TSR: {
		u32 tsr = set_reg_val(id, *val);
		kvmppc_set_tsr(vcpu, tsr);
		break;
	}
	case KVM_REG_PPC_TCR: {
		u32 tcr = set_reg_val(id, *val);
		kvmppc_set_tcr(vcpu, tcr);
		break;
	}
	case KVM_REG_PPC_VRSAVE:
		vcpu->arch.vrsave = set_reg_val(id, *val);
		break;
	default:
		r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, id, val);
		break;
	}

	return r;
}

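/*
 * Neither KVM_GET_FPU nor KVM_SET_FPU is implemented for booke; both
 * ioctls report -EOPNOTSUPP.
 */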
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EOPNOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EOPNOTSUPP;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
	int r;

	vcpu_load(vcpu);
	r = kvmppc_core_vcpu_translate(vcpu, tr);
	vcpu_put(vcpu);
	return r;
}

void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return -EOPNOTSUPP;
}

void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				      struct kvm_memory_slot *memslot,
				      const struct kvm_userspace_memory_region *mem,
				      enum kvm_mr_change change)
{
	return 0;
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change)
{
}

void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
}

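/*
 * EPCR governs 64-bit behaviour of the guest. On BookE-HV the
 * guest-visible value is mirrored into the shadow EPCR: GICM (guest
 * interrupt computation mode) tracks the guest's ICM bit, so guest
 * interrupts are taken in the addressing mode the guest configured.
 */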
void kvmppc_set_epcr(struct kvm_vcpu *vcpu, u32 new_epcr)
{
#if defined(CONFIG_64BIT)
	vcpu->arch.epcr = new_epcr;
#ifdef CONFIG_KVM_BOOKE_HV
	vcpu->arch.shadow_epcr &= ~SPRN_EPCR_GICM;
	if (vcpu->arch.epcr & SPRN_EPCR_ICM)
		vcpu->arch.shadow_epcr |= SPRN_EPCR_GICM;
#endif
#endif
}

void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr)
{
	vcpu->arch.tcr = new_tcr;
	arm_next_watchdog(vcpu);
	update_timer_ints(vcpu);
}

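/*
 * Setting TSR bits can make a timer interrupt deliverable, so after the
 * update we raise KVM_REQ_PENDING_TIMER and kick the vcpu out of the
 * guest; the smp_wmb() orders the TSR update ahead of the request.
 */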
void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
{
	set_bits(tsr_bits, &vcpu->arch.tsr);
	smp_wmb();
	kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
	kvm_vcpu_kick(vcpu);
}

void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
{
	clear_bits(tsr_bits, &vcpu->arch.tsr);

	/*
	 * We may have stopped the watchdog due to
	 * being stuck on final expiration.
	 */
	if (tsr_bits & (TSR_ENW | TSR_WIS))
		arm_next_watchdog(vcpu);

	update_timer_ints(vcpu);
}

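/*
 * Decrementer expiry: with TCR[ARE] set, the decrementer auto-reloads
 * from DECAR and keeps ticking; either way the expiry is reported to
 * the guest by setting TSR[DIS].
 */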
void kvmppc_decrementer_func(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.tcr & TCR_ARE) {
		vcpu->arch.dec = vcpu->arch.decar;
		kvmppc_emulate_dec(vcpu);
	}

	kvmppc_set_tsr_bits(vcpu, TSR_DIS);
}

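/*
 * Program one hardware instruction breakpoint: route the address into
 * IAC1..IAC4 by slot index and enable the matching DBCR0_IACn event
 * bit. DBCR0[IDM] (internal debug mode) is set so the events actually
 * fire.
 */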
static int kvmppc_booke_add_breakpoint(struct debug_reg *dbg_reg,
				       uint64_t addr, int index)
{
	switch (index) {
	case 0:
		dbg_reg->dbcr0 |= DBCR0_IAC1;
		dbg_reg->iac1 = addr;
		break;
	case 1:
		dbg_reg->dbcr0 |= DBCR0_IAC2;
		dbg_reg->iac2 = addr;
		break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	case 2:
		dbg_reg->dbcr0 |= DBCR0_IAC3;
		dbg_reg->iac3 = addr;
		break;
	case 3:
		dbg_reg->dbcr0 |= DBCR0_IAC4;
		dbg_reg->iac4 = addr;
		break;
#endif
	default:
		return -EINVAL;
	}

	dbg_reg->dbcr0 |= DBCR0_IDM;
	return 0;
}

static int kvmppc_booke_add_watchpoint(struct debug_reg *dbg_reg, uint64_t addr,
				       int type, int index)
{
	switch (index) {
	case 0:
		if (type & KVMPPC_DEBUG_WATCH_READ)
			dbg_reg->dbcr0 |= DBCR0_DAC1R;
		if (type & KVMPPC_DEBUG_WATCH_WRITE)
			dbg_reg->dbcr0 |= DBCR0_DAC1W;
		dbg_reg->dac1 = addr;
		break;
	case 1:
		if (type & KVMPPC_DEBUG_WATCH_READ)
			dbg_reg->dbcr0 |= DBCR0_DAC2R;
		if (type & KVMPPC_DEBUG_WATCH_WRITE)
			dbg_reg->dbcr0 |= DBCR0_DAC2W;
		dbg_reg->dac2 = addr;
		break;
	default:
		return -EINVAL;
	}

	dbg_reg->dbcr0 |= DBCR0_IDM;
	return 0;
}

void kvm_guest_protect_msr(struct kvm_vcpu *vcpu, ulong prot_bitmap, bool set)
{
	/* XXX: Add similar MSR protection for BookE-PR */
#ifdef CONFIG_KVM_BOOKE_HV
	BUG_ON(prot_bitmap & ~(MSRP_UCLEP | MSRP_DEP | MSRP_PMMP));
	if (set) {
		if (prot_bitmap & MSR_UCLE)
			vcpu->arch.shadow_msrp |= MSRP_UCLEP;
		if (prot_bitmap & MSR_DE)
			vcpu->arch.shadow_msrp |= MSRP_DEP;
		if (prot_bitmap & MSR_PMM)
			vcpu->arch.shadow_msrp |= MSRP_PMMP;
	} else {
		if (prot_bitmap & MSR_UCLE)
			vcpu->arch.shadow_msrp &= ~MSRP_UCLEP;
		if (prot_bitmap & MSR_DE)
			vcpu->arch.shadow_msrp &= ~MSRP_DEP;
		if (prot_bitmap & MSR_PMM)
			vcpu->arch.shadow_msrp &= ~MSRP_PMMP;
	}
#endif
}

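/*
 * Translate a guest effective address by walking the guest TLB (ITLB or
 * DTLB depending on xlid). Note the XXX below: permissions are not yet
 * read back from the matching guest TLB entry, so the pte is reported
 * as fully accessible.
 */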
int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid,
		 enum xlate_readwrite xlrw, struct kvmppc_pte *pte)
{
	int gtlb_index;
	gpa_t gpaddr;

#ifdef CONFIG_KVM_E500V2
	if (!(vcpu->arch.shared->msr & MSR_PR) &&
	    (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
		pte->eaddr = eaddr;
		pte->raddr = (vcpu->arch.magic_page_pa & PAGE_MASK) |
			     (eaddr & ~PAGE_MASK);
		pte->vpage = eaddr >> PAGE_SHIFT;
		pte->may_read = true;
		pte->may_write = true;
		pte->may_execute = true;

		return 0;
	}
#endif

	/* Check the guest TLB. */
	switch (xlid) {
	case XLATE_INST:
		gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
		break;
	case XLATE_DATA:
		gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
		break;
	default:
		BUG();
	}

	/* Do we have a TLB entry at all? */
	if (gtlb_index < 0)
		return -ENOENT;

	gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);

	pte->eaddr = eaddr;
	pte->raddr = (gpaddr & PAGE_MASK) | (eaddr & ~PAGE_MASK);
	pte->vpage = eaddr >> PAGE_SHIFT;

	/* XXX read permissions from the guest TLB */
	pte->may_read = true;
	pte->may_write = true;
	pte->may_execute = true;

	return 0;
}

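/*
 * KVM_SET_GUEST_DEBUG: single-step uses DBCR0[IC], hardware break- and
 * watchpoints are mapped onto the IAC/DAC slots above, and MSR.DE is
 * protected while debugging is active so the guest cannot disable the
 * events.
 *
 * Illustrative userspace sketch (not part of this file; vcpu_fd is a
 * placeholder name):
 *
 *	struct kvm_guest_debug dbg = {
 *		.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
 *	};
 *	ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
 */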
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	struct debug_reg *dbg_reg;
	int n, b = 0, w = 0;
	int ret = 0;

	vcpu_load(vcpu);

	if (!(dbg->control & KVM_GUESTDBG_ENABLE)) {
		vcpu->arch.dbg_reg.dbcr0 = 0;
		vcpu->guest_debug = 0;
		kvm_guest_protect_msr(vcpu, MSR_DE, false);
		goto out;
	}

	kvm_guest_protect_msr(vcpu, MSR_DE, true);
	vcpu->guest_debug = dbg->control;
	vcpu->arch.dbg_reg.dbcr0 = 0;

	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
		vcpu->arch.dbg_reg.dbcr0 |= DBCR0_IDM | DBCR0_IC;

	/* Code below handles only HW breakpoints */
	dbg_reg = &(vcpu->arch.dbg_reg);

#ifdef CONFIG_KVM_BOOKE_HV
	/*
	 * On BookE-HV (e500mc) the guest is always executed with MSR.GS=1.
	 * DBCR1 and DBCR2 are set to trigger debug events when MSR.PR is 0.
	 */
	dbg_reg->dbcr1 = 0;
	dbg_reg->dbcr2 = 0;
#else
	/*
	 * On BookE-PR (e500v2) the guest is always executed with MSR.PR=1.
	 * We set DBCR1 and DBCR2 to only trigger debug events when MSR.PR
	 * is set.
	 */
	dbg_reg->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US | DBCR1_IAC3US |
			 DBCR1_IAC4US;
	dbg_reg->dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
#endif

	if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
		goto out;

	ret = -EINVAL;
	for (n = 0; n < (KVMPPC_BOOKE_IAC_NUM + KVMPPC_BOOKE_DAC_NUM); n++) {
		uint64_t addr = dbg->arch.bp[n].addr;
		uint32_t type = dbg->arch.bp[n].type;

		if (type == KVMPPC_DEBUG_NONE)
			continue;

		if (type & ~(KVMPPC_DEBUG_WATCH_READ |
			     KVMPPC_DEBUG_WATCH_WRITE |
			     KVMPPC_DEBUG_BREAKPOINT))
			goto out;

		if (type & KVMPPC_DEBUG_BREAKPOINT) {
			/* Setting H/W breakpoint */
			if (kvmppc_booke_add_breakpoint(dbg_reg, addr, b++))
				goto out;
		} else {
			/* Setting H/W watchpoint */
			if (kvmppc_booke_add_watchpoint(dbg_reg, addr,
							type, w++))
				goto out;
		}
	}

	ret = 0;
out:
	vcpu_put(vcpu);
	return ret;
}

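/*
 * Booke-common load/put helpers: remember which physical CPU the vcpu
 * runs on and which vcpu owns this thread, and drop any stale debug
 * event state (DBSR) when the vcpu is scheduled out.
 */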
void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->cpu = smp_processor_id();
	current->thread.kvm_vcpu = vcpu;
}

void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu)
{
	current->thread.kvm_vcpu = NULL;
	vcpu->cpu = -1;

	/* Clear pending debug event in DBSR */
	kvmppc_clear_dbsr();
}

int kvmppc_core_init_vm(struct kvm *kvm)
{
	return kvm->arch.kvm_ops->init_vm(kvm);
}

int kvmppc_core_vcpu_create(struct kvm_vcpu *vcpu)
{
	int i;
	int r;

	r = vcpu->kvm->arch.kvm_ops->vcpu_create(vcpu);
	if (r)
		return r;

	/* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
	vcpu->arch.regs.nip = 0;
	vcpu->arch.shared->pir = vcpu->vcpu_id;
	kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
	kvmppc_set_msr(vcpu, 0);

#ifndef CONFIG_KVM_BOOKE_HV
	vcpu->arch.shadow_msr = MSR_USER | MSR_IS | MSR_DS;
	vcpu->arch.shadow_pid = 1;
	vcpu->arch.shared->msr = 0;
#endif

	/* Eye-catching numbers so we know if the guest takes an interrupt
	 * before it's programmed its own IVPR/IVORs. */
	vcpu->arch.ivpr = 0x55550000;
	for (i = 0; i < BOOKE_IRQPRIO_MAX; i++)
		vcpu->arch.ivor[i] = 0x7700 | i * 4;

	kvmppc_init_timing_stats(vcpu);

	r = kvmppc_core_vcpu_setup(vcpu);
	if (r)
		vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
	kvmppc_sanity_check(vcpu);
	return r;
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
{
	kvm->arch.kvm_ops->destroy_vm(kvm);
}

void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
}

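/*
 * Module init for the non-HV case: allocate a 64KB block for our
 * exception handler trampolines and lay them out at the same offsets
 * as the host's IVORs, so entering the guest only requires switching
 * IVPR, never the individual IVORs.
 */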
int __init kvmppc_booke_init(void)
{
#ifndef CONFIG_KVM_BOOKE_HV
	unsigned long ivor[16];
	unsigned long *handler = kvmppc_booke_handler_addr;
	unsigned long max_ivor = 0;
	unsigned long handler_len;
	int i;

	/* We install our own exception handlers by hijacking IVPR. IVPR only
	 * holds the top 16 bits of the handler base address, so we need a
	 * 64KB-aligned allocation. */
	kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
	                                         VCPU_SIZE_ORDER);
	if (!kvmppc_booke_handlers)
		return -ENOMEM;

	/* XXX make sure our handlers are smaller than Linux's */

	/* Copy our interrupt handlers to match host IVORs. That way we don't
	 * have to swap the IVORs on every guest/host transition. */
	ivor[0] = mfspr(SPRN_IVOR0);
	ivor[1] = mfspr(SPRN_IVOR1);
	ivor[2] = mfspr(SPRN_IVOR2);
	ivor[3] = mfspr(SPRN_IVOR3);
	ivor[4] = mfspr(SPRN_IVOR4);
	ivor[5] = mfspr(SPRN_IVOR5);
	ivor[6] = mfspr(SPRN_IVOR6);
	ivor[7] = mfspr(SPRN_IVOR7);
	ivor[8] = mfspr(SPRN_IVOR8);
	ivor[9] = mfspr(SPRN_IVOR9);
	ivor[10] = mfspr(SPRN_IVOR10);
	ivor[11] = mfspr(SPRN_IVOR11);
	ivor[12] = mfspr(SPRN_IVOR12);
	ivor[13] = mfspr(SPRN_IVOR13);
	ivor[14] = mfspr(SPRN_IVOR14);
	ivor[15] = mfspr(SPRN_IVOR15);

	for (i = 0; i < 16; i++) {
		/* Track the *index* of the handler with the highest offset;
		 * max_ivor indexes both ivor[] and handler[] below, so
		 * comparing the raw offset against the index would be wrong. */
		if (ivor[i] > ivor[max_ivor])
			max_ivor = i;

		handler_len = handler[i + 1] - handler[i];
		memcpy((void *)kvmppc_booke_handlers + ivor[i],
		       (void *)handler[i], handler_len);
	}

	handler_len = handler[max_ivor + 1] - handler[max_ivor];
	flush_icache_range(kvmppc_booke_handlers, kvmppc_booke_handlers +
			   ivor[max_ivor] + handler_len);
#endif /* !BOOKE_HV */
	return 0;
}

void __exit kvmppc_booke_exit(void)
{
	free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER);
	kvm_exit();
}