/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *
 * Description:
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/miscdevice.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>

#include "book3s.h"
#include "trace.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

/* #define EXIT_DEBUG */

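/* Per-vcpu exit and halt-polling counters, exported through debugfs. */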
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "exits",       VCPU_STAT(sum_exits) },
	{ "mmio",        VCPU_STAT(mmio_exits) },
	{ "sig",         VCPU_STAT(signal_exits) },
	{ "sysc",        VCPU_STAT(syscall_exits) },
	{ "inst_emu",    VCPU_STAT(emulated_inst_exits) },
	{ "dec",         VCPU_STAT(dec_exits) },
	{ "ext_intr",    VCPU_STAT(ext_intr_exits) },
	{ "queue_intr",  VCPU_STAT(queue_intr) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll), },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "pf_storage",  VCPU_STAT(pf_storage) },
	{ "sp_storage",  VCPU_STAT(sp_storage) },
	{ "pf_instruc",  VCPU_STAT(pf_instruc) },
	{ "sp_instruc",  VCPU_STAT(sp_instruc) },
	{ "ld",          VCPU_STAT(ld) },
	{ "ld_slow",     VCPU_STAT(ld_slow) },
	{ "st",          VCPU_STAT(st) },
	{ "st_slow",     VCPU_STAT(st_slow) },
	{ NULL }
};

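/*
 * Undo the split real mode address fixup: if the guest PC or LR still
 * points into the SPLIT_HACK_OFFS window, strip the offset and drop
 * the BOOK3S_HFLAG_SPLIT_HACK flag.
 */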
void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) {
		ulong pc = kvmppc_get_pc(vcpu);
		ulong lr = kvmppc_get_lr(vcpu);
		if ((pc & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)
			kvmppc_set_pc(vcpu, pc & ~SPLIT_HACK_MASK);
		if ((lr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)
			kvmppc_set_lr(vcpu, lr & ~SPLIT_HACK_MASK);
		vcpu->arch.hflags &= ~BOOK3S_HFLAG_SPLIT_HACK;
	}
}
EXPORT_SYMBOL_GPL(kvmppc_unfixup_split_real);

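/*
 * PR KVM delivers guest interrupts relative to the guest's HIOR;
 * with HV no extra offset is applied here.
 */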
static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
{
	if (!is_kvmppc_hv_enabled(vcpu->kvm))
		return to_book3s(vcpu)->hior;
	return 0;
}

static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
			unsigned long pending_now, unsigned long old_pending)
{
	if (is_kvmppc_hv_enabled(vcpu->kvm))
		return;
	if (pending_now)
		kvmppc_set_int_pending(vcpu, 1);
	else if (old_pending)
		kvmppc_set_int_pending(vcpu, 0);
}

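/*
 * The guest marks a critical section by making the shared "critical"
 * value equal to r1 while in supervisor mode; delivery of maskable
 * interrupts (decrementer, external) is held off while that holds.
 */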
static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
{
	ulong crit_raw;
	ulong crit_r1;
	bool crit;

	if (is_kvmppc_hv_enabled(vcpu->kvm))
		return false;

	crit_raw = kvmppc_get_critical(vcpu);
	crit_r1 = kvmppc_get_gpr(vcpu, 1);

	/* Truncate crit indicators in 32 bit mode */
	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(kvmppc_get_msr(vcpu) & MSR_PR);

	return crit;
}

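/*
 * Deliver an interrupt to the guest right now: save PC and MSR into
 * SRR0/SRR1, branch to the vector (plus the PR interrupt offset) and
 * let the MMU backend compute the new guest MSR.
 */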
void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
{
	kvmppc_unfixup_split_real(vcpu);
	kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu));
	kvmppc_set_srr1(vcpu, kvmppc_get_msr(vcpu) | flags);
	kvmppc_set_pc(vcpu, kvmppc_interrupt_offset(vcpu) + vec);
	vcpu->arch.mmu.reset_msr(vcpu);
}

static int kvmppc_book3s_vec2irqprio(unsigned int vec)
{
	unsigned int prio;

	switch (vec) {
	case 0x100: prio = BOOK3S_IRQPRIO_SYSTEM_RESET;		break;
	case 0x200: prio = BOOK3S_IRQPRIO_MACHINE_CHECK;	break;
	case 0x300: prio = BOOK3S_IRQPRIO_DATA_STORAGE;		break;
	case 0x380: prio = BOOK3S_IRQPRIO_DATA_SEGMENT;		break;
	case 0x400: prio = BOOK3S_IRQPRIO_INST_STORAGE;		break;
	case 0x480: prio = BOOK3S_IRQPRIO_INST_SEGMENT;		break;
	case 0x500: prio = BOOK3S_IRQPRIO_EXTERNAL;		break;
	case 0x501: prio = BOOK3S_IRQPRIO_EXTERNAL_LEVEL;	break;
	case 0x600: prio = BOOK3S_IRQPRIO_ALIGNMENT;		break;
	case 0x700: prio = BOOK3S_IRQPRIO_PROGRAM;		break;
	case 0x800: prio = BOOK3S_IRQPRIO_FP_UNAVAIL;		break;
	case 0x900: prio = BOOK3S_IRQPRIO_DECREMENTER;		break;
	case 0xc00: prio = BOOK3S_IRQPRIO_SYSCALL;		break;
	case 0xd00: prio = BOOK3S_IRQPRIO_DEBUG;		break;
	case 0xf20: prio = BOOK3S_IRQPRIO_ALTIVEC;		break;
	case 0xf40: prio = BOOK3S_IRQPRIO_VSX;			break;
	case 0xf60: prio = BOOK3S_IRQPRIO_FAC_UNAVAIL;		break;
	default:    prio = BOOK3S_IRQPRIO_MAX;			break;
	}

	return prio;
}

void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
					  unsigned int vec)
{
	unsigned long old_pending = vcpu->arch.pending_exceptions;

	clear_bit(kvmppc_book3s_vec2irqprio(vec),
		  &vcpu->arch.pending_exceptions);

	kvmppc_update_int_pending(vcpu, vcpu->arch.pending_exceptions,
				  old_pending);
}

void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
{
	vcpu->stat.queue_intr++;

	set_bit(kvmppc_book3s_vec2irqprio(vec),
		&vcpu->arch.pending_exceptions);
#ifdef EXIT_DEBUG
	printk(KERN_INFO "Queueing interrupt %x\n", vec);
#endif
}
EXPORT_SYMBOL_GPL(kvmppc_book3s_queue_irqprio);

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, flags);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_program);

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_dec);

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOK3S_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}
EXPORT_SYMBOL_GPL(kvmppc_core_pending_dec);

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_dequeue_dec);

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                struct kvm_interrupt *irq)
{
	unsigned int vec = BOOK3S_INTERRUPT_EXTERNAL;

	if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
		vec = BOOK3S_INTERRUPT_EXTERNAL_LEVEL;

	kvmppc_book3s_queue_irqprio(vcpu, vec);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
}

void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, ulong dar,
				    ulong flags)
{
	kvmppc_set_dar(vcpu, dar);
	kvmppc_set_dsisr(vcpu, flags);
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE);
}

void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong flags)
{
	u64 msr = kvmppc_get_msr(vcpu);
	msr &= ~(SRR1_ISI_NOPT | SRR1_ISI_N_OR_G | SRR1_ISI_PROT);
	msr |= flags & (SRR1_ISI_NOPT | SRR1_ISI_N_OR_G | SRR1_ISI_PROT);
	kvmppc_set_msr_fast(vcpu, msr);
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
}

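/*
 * Try to deliver the interrupt for one pending priority.  Decrementer
 * and external interrupts are only delivered when MSR_EE is set and the
 * guest is not in a critical section; everything else is delivered
 * unconditionally.  Returns 1 if an interrupt was injected.
 */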
static int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu,
					 unsigned int priority)
{
	int deliver = 1;
	int vec = 0;
	bool crit = kvmppc_critical_section(vcpu);

	switch (priority) {
	case BOOK3S_IRQPRIO_DECREMENTER:
		deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
		vec = BOOK3S_INTERRUPT_DECREMENTER;
		break;
	case BOOK3S_IRQPRIO_EXTERNAL:
	case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
		deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
		vec = BOOK3S_INTERRUPT_EXTERNAL;
		break;
	case BOOK3S_IRQPRIO_SYSTEM_RESET:
		vec = BOOK3S_INTERRUPT_SYSTEM_RESET;
		break;
	case BOOK3S_IRQPRIO_MACHINE_CHECK:
		vec = BOOK3S_INTERRUPT_MACHINE_CHECK;
		break;
	case BOOK3S_IRQPRIO_DATA_STORAGE:
		vec = BOOK3S_INTERRUPT_DATA_STORAGE;
		break;
	case BOOK3S_IRQPRIO_INST_STORAGE:
		vec = BOOK3S_INTERRUPT_INST_STORAGE;
		break;
	case BOOK3S_IRQPRIO_DATA_SEGMENT:
		vec = BOOK3S_INTERRUPT_DATA_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_INST_SEGMENT:
		vec = BOOK3S_INTERRUPT_INST_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_ALIGNMENT:
		vec = BOOK3S_INTERRUPT_ALIGNMENT;
		break;
	case BOOK3S_IRQPRIO_PROGRAM:
		vec = BOOK3S_INTERRUPT_PROGRAM;
		break;
	case BOOK3S_IRQPRIO_VSX:
		vec = BOOK3S_INTERRUPT_VSX;
		break;
	case BOOK3S_IRQPRIO_ALTIVEC:
		vec = BOOK3S_INTERRUPT_ALTIVEC;
		break;
	case BOOK3S_IRQPRIO_FP_UNAVAIL:
		vec = BOOK3S_INTERRUPT_FP_UNAVAIL;
		break;
	case BOOK3S_IRQPRIO_SYSCALL:
		vec = BOOK3S_INTERRUPT_SYSCALL;
		break;
	case BOOK3S_IRQPRIO_DEBUG:
		vec = BOOK3S_INTERRUPT_TRACE;
		break;
	case BOOK3S_IRQPRIO_PERFORMANCE_MONITOR:
		vec = BOOK3S_INTERRUPT_PERFMON;
		break;
	case BOOK3S_IRQPRIO_FAC_UNAVAIL:
		vec = BOOK3S_INTERRUPT_FAC_UNAVAIL;
		break;
	default:
		deliver = 0;
		printk(KERN_ERR "KVM: Unknown interrupt: 0x%x\n", priority);
		break;
	}

#if 0
	printk(KERN_INFO "Deliver interrupt 0x%x? %x\n", vec, deliver);
#endif

	if (deliver)
		kvmppc_inject_interrupt(vcpu, vec, 0);

	return deliver;
}

/*
 * This function determines if an irqprio should be cleared once issued.
 */
static bool clear_irqprio(struct kvm_vcpu *vcpu, unsigned int priority)
{
	switch (priority) {
		case BOOK3S_IRQPRIO_DECREMENTER:
			/* DEC interrupts get cleared by mtdec */
			return false;
		case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
			/* External interrupts get cleared by userspace */
			return false;
	}

	return true;
}

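/*
 * Walk pending_exceptions in priority order and inject the first
 * deliverable interrupt.  Priorities that clear_irqprio() keeps pending
 * (decrementer, level-triggered external) are left set in the bitmap.
 */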
int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned long old_pending = vcpu->arch.pending_exceptions;
	unsigned int priority;

#ifdef EXIT_DEBUG
	if (vcpu->arch.pending_exceptions)
		printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions);
#endif
	priority = __ffs(*pending);
	while (priority < BOOK3S_IRQPRIO_MAX) {
		if (kvmppc_book3s_irqprio_deliver(vcpu, priority) &&
		    clear_irqprio(vcpu, priority)) {
			clear_bit(priority, &vcpu->arch.pending_exceptions);
			break;
		}

		priority = find_next_bit(pending,
					 BITS_PER_BYTE * sizeof(*pending),
					 priority + 1);
	}

	/* Tell the guest about our interrupt status */
	kvmppc_update_int_pending(vcpu, *pending, old_pending);

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_core_prepare_to_enter);

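/*
 * Translate a guest physical address to a host pfn.  The magic
 * (paravirt shared) page is special-cased: it is backed by the kernel
 * mapping of vcpu->arch.shared rather than by guest memory.
 */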
pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa, bool writing,
			bool *writable)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM;
	gfn_t gfn = gpa >> PAGE_SHIFT;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF))
		mp_pa = (uint32_t)mp_pa;

	/* Magic page override */
	gpa &= ~0xFFFULL;
	if (unlikely(mp_pa) && unlikely((gpa & KVM_PAM) == mp_pa)) {
		ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
		pfn_t pfn;

		pfn = (pfn_t)virt_to_phys((void*)shared_page) >> PAGE_SHIFT;
		get_page(pfn_to_page(pfn));
		if (writable)
			*writable = true;
		return pfn;
	}

	return gfn_to_pfn_prot(vcpu->kvm, gfn, writing, writable);
}
EXPORT_SYMBOL_GPL(kvmppc_gpa_to_pfn);

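/*
 * Translate an effective address.  With relocation on (MSR_IR/MSR_DR)
 * the guest MMU backend does the walk; otherwise the address maps 1:1
 * within KVM_PAM, with the split real mode offset stripped from
 * instruction fetches.
 */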
int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid,
		 enum xlate_readwrite xlrw, struct kvmppc_pte *pte)
{
	bool data = (xlid == XLATE_DATA);
	bool iswrite = (xlrw == XLATE_WRITE);
	int relocated = (kvmppc_get_msr(vcpu) & (data ? MSR_DR : MSR_IR));
	int r;

	if (relocated) {
		r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data, iswrite);
	} else {
		pte->eaddr = eaddr;
		pte->raddr = eaddr & KVM_PAM;
		pte->vpage = VSID_REAL | eaddr >> 12;
		pte->may_read = true;
		pte->may_write = true;
		pte->may_execute = true;
		r = 0;

		if ((kvmppc_get_msr(vcpu) & (MSR_IR | MSR_DR)) == MSR_DR &&
		    !data) {
			if ((vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
			    ((eaddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS))
				pte->raddr &= ~SPLIT_HACK_MASK;
		}
	}

	return r;
}

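/*
 * Re-read the guest instruction at the current PC (or, for a system
 * call, the sc instruction just before it) from guest memory.
 */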
int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type,
					 u32 *inst)
{
	ulong pc = kvmppc_get_pc(vcpu);
	int r;

	if (type == INST_SC)
		pc -= 4;

	r = kvmppc_ld(vcpu, &pc, sizeof(u32), inst, false);
	if (r == EMULATE_DONE)
		return r;
	else
		return EMULATE_AGAIN;
}
EXPORT_SYMBOL_GPL(kvmppc_load_last_inst);

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return 0;
}

int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	regs->pc = kvmppc_get_pc(vcpu);
	regs->cr = kvmppc_get_cr(vcpu);
	regs->ctr = kvmppc_get_ctr(vcpu);
	regs->lr = kvmppc_get_lr(vcpu);
	regs->xer = kvmppc_get_xer(vcpu);
	regs->msr = kvmppc_get_msr(vcpu);
	regs->srr0 = kvmppc_get_srr0(vcpu);
	regs->srr1 = kvmppc_get_srr1(vcpu);
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = kvmppc_get_sprg0(vcpu);
	regs->sprg1 = kvmppc_get_sprg1(vcpu);
	regs->sprg2 = kvmppc_get_sprg2(vcpu);
	regs->sprg3 = kvmppc_get_sprg3(vcpu);
	regs->sprg4 = kvmppc_get_sprg4(vcpu);
	regs->sprg5 = kvmppc_get_sprg5(vcpu);
	regs->sprg6 = kvmppc_get_sprg6(vcpu);
	regs->sprg7 = kvmppc_get_sprg7(vcpu);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	kvmppc_set_pc(vcpu, regs->pc);
	kvmppc_set_cr(vcpu, regs->cr);
	kvmppc_set_ctr(vcpu, regs->ctr);
	kvmppc_set_lr(vcpu, regs->lr);
	kvmppc_set_xer(vcpu, regs->xer);
	kvmppc_set_msr(vcpu, regs->msr);
	kvmppc_set_srr0(vcpu, regs->srr0);
	kvmppc_set_srr1(vcpu, regs->srr1);
	kvmppc_set_sprg0(vcpu, regs->sprg0);
	kvmppc_set_sprg1(vcpu, regs->sprg1);
	kvmppc_set_sprg2(vcpu, regs->sprg2);
	kvmppc_set_sprg3(vcpu, regs->sprg3);
	kvmppc_set_sprg4(vcpu, regs->sprg4);
	kvmppc_set_sprg5(vcpu, regs->sprg5);
	kvmppc_set_sprg6(vcpu, regs->sprg6);
	kvmppc_set_sprg7(vcpu, regs->sprg7);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
			union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;

	r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, id, val);
	if (r == -EINVAL) {
		r = 0;
		switch (id) {
		case KVM_REG_PPC_DAR:
			*val = get_reg_val(id, kvmppc_get_dar(vcpu));
			break;
		case KVM_REG_PPC_DSISR:
			*val = get_reg_val(id, kvmppc_get_dsisr(vcpu));
			break;
		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
			i = id - KVM_REG_PPC_FPR0;
			*val = get_reg_val(id, VCPU_FPR(vcpu, i));
			break;
		case KVM_REG_PPC_FPSCR:
			*val = get_reg_val(id, vcpu->arch.fp.fpscr);
			break;
#ifdef CONFIG_VSX
		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
			if (cpu_has_feature(CPU_FTR_VSX)) {
				i = id - KVM_REG_PPC_VSR0;
				val->vsxval[0] = vcpu->arch.fp.fpr[i][0];
				val->vsxval[1] = vcpu->arch.fp.fpr[i][1];
			} else {
				r = -ENXIO;
			}
			break;
#endif /* CONFIG_VSX */
		case KVM_REG_PPC_DEBUG_INST:
			*val = get_reg_val(id, INS_TW);
			break;
#ifdef CONFIG_KVM_XICS
		case KVM_REG_PPC_ICP_STATE:
			if (!vcpu->arch.icp) {
				r = -ENXIO;
				break;
			}
			*val = get_reg_val(id, kvmppc_xics_get_icp(vcpu));
			break;
#endif /* CONFIG_KVM_XICS */
		case KVM_REG_PPC_FSCR:
			*val = get_reg_val(id, vcpu->arch.fscr);
			break;
		case KVM_REG_PPC_TAR:
			*val = get_reg_val(id, vcpu->arch.tar);
			break;
		case KVM_REG_PPC_EBBHR:
			*val = get_reg_val(id, vcpu->arch.ebbhr);
			break;
		case KVM_REG_PPC_EBBRR:
			*val = get_reg_val(id, vcpu->arch.ebbrr);
			break;
		case KVM_REG_PPC_BESCR:
			*val = get_reg_val(id, vcpu->arch.bescr);
			break;
		case KVM_REG_PPC_VTB:
			*val = get_reg_val(id, vcpu->arch.vtb);
			break;
		case KVM_REG_PPC_IC:
			*val = get_reg_val(id, vcpu->arch.ic);
			break;
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}

int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
			union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;

	r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, id, val);
	if (r == -EINVAL) {
		r = 0;
		switch (id) {
		case KVM_REG_PPC_DAR:
			kvmppc_set_dar(vcpu, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_DSISR:
			kvmppc_set_dsisr(vcpu, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
			i = id - KVM_REG_PPC_FPR0;
			VCPU_FPR(vcpu, i) = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_FPSCR:
			vcpu->arch.fp.fpscr = set_reg_val(id, *val);
			break;
#ifdef CONFIG_VSX
		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
			if (cpu_has_feature(CPU_FTR_VSX)) {
				i = id - KVM_REG_PPC_VSR0;
				vcpu->arch.fp.fpr[i][0] = val->vsxval[0];
				vcpu->arch.fp.fpr[i][1] = val->vsxval[1];
			} else {
				r = -ENXIO;
			}
			break;
#endif /* CONFIG_VSX */
#ifdef CONFIG_KVM_XICS
		case KVM_REG_PPC_ICP_STATE:
			if (!vcpu->arch.icp) {
				r = -ENXIO;
				break;
			}
			r = kvmppc_xics_set_icp(vcpu,
						set_reg_val(id, *val));
			break;
#endif /* CONFIG_KVM_XICS */
		case KVM_REG_PPC_FSCR:
			vcpu->arch.fscr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_TAR:
			vcpu->arch.tar = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_EBBHR:
			vcpu->arch.ebbhr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_EBBRR:
			vcpu->arch.ebbrr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_BESCR:
			vcpu->arch.bescr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_VTB:
			vcpu->arch.vtb = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_IC:
			vcpu->arch.ic = set_reg_val(id, *val);
			break;
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}

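/*
 * Most of the entry points below simply forward to the backend in
 * kvm->arch.kvm_ops, i.e. either the HV or the PR implementation of
 * book3s KVM.
 */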
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
}

void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
	vcpu->kvm->arch.kvm_ops->set_msr(vcpu, msr);
}
EXPORT_SYMBOL_GPL(kvmppc_set_msr);

int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->vcpu_run(kvm_run, vcpu);
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
	return 0;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	vcpu->guest_debug = dbg->control;
	return 0;
}

void kvmppc_decrementer_func(struct kvm_vcpu *vcpu)
{
	kvmppc_core_queue_dec(vcpu);
	kvm_vcpu_kick(vcpu);
}

struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
	return kvm->arch.kvm_ops->vcpu_create(kvm, id);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
}

int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->check_requests(vcpu);
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return kvm->arch.kvm_ops->get_dirty_log(kvm, log);
}

void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			      struct kvm_memory_slot *dont)
{
	kvm->arch.kvm_ops->free_memslot(free, dont);
}

int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			       unsigned long npages)
{
	return kvm->arch.kvm_ops->create_memslot(slot, npages);
}

void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
	kvm->arch.kvm_ops->flush_memslot(kvm, memslot);
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				const struct kvm_userspace_memory_region *mem)
{
	return kvm->arch.kvm_ops->prepare_memory_region(kvm, memslot, mem);
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new)
{
	kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new);
}

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm->arch.kvm_ops->unmap_hva(kvm, hva);
}
EXPORT_SYMBOL_GPL(kvm_unmap_hva);

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
	return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end);
}

int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
{
	return kvm->arch.kvm_ops->age_hva(kvm, start, end);
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm->arch.kvm_ops->test_age_hva(kvm, hva);
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	kvm->arch.kvm_ops->set_spte_hva(kvm, hva, pte);
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
}

int kvmppc_core_init_vm(struct kvm *kvm)
{

#ifdef CONFIG_PPC64
	INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
	INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
	mutex_init(&kvm->arch.rtas_token_lock);
#endif

	return kvm->arch.kvm_ops->init_vm(kvm);
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
{
	kvm->arch.kvm_ops->destroy_vm(kvm);

#ifdef CONFIG_PPC64
	kvmppc_rtas_tokens_free(kvm);
	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif
}

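/*
 * H_LOGICAL_CI_LOAD: emulate a cache-inhibited load by reading from the
 * KVM MMIO bus and returning the big-endian value, converted to host
 * order, in r4.  Unsupported sizes and bus read failures return
 * H_TOO_HARD.
 */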
int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu)
{
	unsigned long size = kvmppc_get_gpr(vcpu, 4);
	unsigned long addr = kvmppc_get_gpr(vcpu, 5);
	u64 buf;
	int srcu_idx;
	int ret;

	if (!is_power_of_2(size) || (size > sizeof(buf)))
		return H_TOO_HARD;

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, size, &buf);
	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
	if (ret != 0)
		return H_TOO_HARD;

	switch (size) {
	case 1:
		kvmppc_set_gpr(vcpu, 4, *(u8 *)&buf);
		break;

	case 2:
		kvmppc_set_gpr(vcpu, 4, be16_to_cpu(*(__be16 *)&buf));
		break;

	case 4:
		kvmppc_set_gpr(vcpu, 4, be32_to_cpu(*(__be32 *)&buf));
		break;

	case 8:
		kvmppc_set_gpr(vcpu, 4, be64_to_cpu(*(__be64 *)&buf));
		break;

	default:
		BUG();
	}

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_logical_ci_load);

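/*
 * H_LOGICAL_CI_STORE: the store-side counterpart, writing the value in
 * r6 to the KVM MMIO bus in big-endian byte order.
 */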
int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu)
{
	unsigned long size = kvmppc_get_gpr(vcpu, 4);
	unsigned long addr = kvmppc_get_gpr(vcpu, 5);
	unsigned long val = kvmppc_get_gpr(vcpu, 6);
	u64 buf;
	int srcu_idx;
	int ret;

	switch (size) {
	case 1:
		*(u8 *)&buf = val;
		break;

	case 2:
		*(__be16 *)&buf = cpu_to_be16(val);
		break;

	case 4:
		*(__be32 *)&buf = cpu_to_be32(val);
		break;

	case 8:
		*(__be64 *)&buf = cpu_to_be64(val);
		break;

	default:
		return H_TOO_HARD;
	}

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, size, &buf);
	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
	if (ret != 0)
		return H_TOO_HARD;

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_logical_ci_store);

int kvmppc_core_check_processor_compat(void)
{
	/*
	 * We always return 0 for book3s. We check
	 * for compatibility while loading the HV
	 * or PR module
	 */
	return 0;
}

int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hcall)
{
	return kvm->arch.kvm_ops->hcall_implemented(hcall);
}

static int kvmppc_book3s_init(void)
{
	int r;

	r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (r)
		return r;
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	r = kvmppc_book3s_init_pr();
#endif
	return r;

}

static void kvmppc_book3s_exit(void)
{
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	kvmppc_book3s_exit_pr();
#endif
	kvm_exit();
}

module_init(kvmppc_book3s_init);
module_exit(kvmppc_book3s_exit);

/* On 32bit this is our one and only kernel module */
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");
#endif
958