/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *
 * Description:
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/miscdevice.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>

#include "book3s.h"
#include "trace.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

/* #define EXIT_DEBUG */

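/* vcpu statistics exposed through the kvm debugfs interface */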
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "exits",       VCPU_STAT(sum_exits) },
	{ "mmio",        VCPU_STAT(mmio_exits) },
	{ "sig",         VCPU_STAT(signal_exits) },
	{ "sysc",        VCPU_STAT(syscall_exits) },
	{ "inst_emu",    VCPU_STAT(emulated_inst_exits) },
	{ "dec",         VCPU_STAT(dec_exits) },
	{ "ext_intr",    VCPU_STAT(ext_intr_exits) },
	{ "queue_intr",  VCPU_STAT(queue_intr) },
	{ "halt_poll_success_ns",	VCPU_STAT(halt_poll_success_ns) },
	{ "halt_poll_fail_ns",		VCPU_STAT(halt_poll_fail_ns) },
	{ "halt_wait_ns",		VCPU_STAT(halt_wait_ns) },
	{ "halt_successful_poll",	VCPU_STAT(halt_successful_poll) },
	{ "halt_attempted_poll",	VCPU_STAT(halt_attempted_poll) },
	{ "halt_successful_wait",	VCPU_STAT(halt_successful_wait) },
	{ "halt_poll_invalid",		VCPU_STAT(halt_poll_invalid) },
	{ "halt_wakeup",		VCPU_STAT(halt_wakeup) },
	{ "pf_storage",  VCPU_STAT(pf_storage) },
	{ "sp_storage",  VCPU_STAT(sp_storage) },
	{ "pf_instruc",  VCPU_STAT(pf_instruc) },
	{ "sp_instruc",  VCPU_STAT(sp_instruc) },
	{ "ld",          VCPU_STAT(ld) },
	{ "ld_slow",     VCPU_STAT(ld_slow) },
	{ "st",          VCPU_STAT(st) },
	{ "st_slow",     VCPU_STAT(st_slow) },
	{ "pthru_all",   VCPU_STAT(pthru_all) },
	{ "pthru_host",  VCPU_STAT(pthru_host) },
	{ "pthru_bad_aff",	VCPU_STAT(pthru_bad_aff) },
	{ NULL }
};

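/*
 * Undo the PC fix-up applied while a guest runs in split real mode: if
 * the split hack is active and the PC still carries SPLIT_HACK_OFFS,
 * strip the offset and clear the hflag.
 */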
void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) {
		ulong pc = kvmppc_get_pc(vcpu);
		if ((pc & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)
			kvmppc_set_pc(vcpu, pc & ~SPLIT_HACK_MASK);
		vcpu->arch.hflags &= ~BOOK3S_HFLAG_SPLIT_HACK;
	}
}
EXPORT_SYMBOL_GPL(kvmppc_unfixup_split_real);

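/*
 * PR guests may relocate their interrupt vectors through HIOR; HV
 * guests take interrupts at the architected offsets, so use 0.
 */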
static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
{
	if (!is_kvmppc_hv_enabled(vcpu->kvm))
		return to_book3s(vcpu)->hior;
	return 0;
}

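/*
 * Mirror the pending-interrupt state into the guest-visible flag via
 * kvmppc_set_int_pending(); only meaningful for PR guests, so HV
 * returns early.
 */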
static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
			unsigned long pending_now, unsigned long old_pending)
{
	if (is_kvmppc_hv_enabled(vcpu->kvm))
		return;
	if (pending_now)
		kvmppc_set_int_pending(vcpu, 1);
	else if (old_pending)
		kvmppc_set_int_pending(vcpu, 0);
}

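/*
 * A PR guest marks a critical section by writing its current r1 into
 * the field read by kvmppc_get_critical(); while the two match and the
 * guest runs in supervisor mode, maskable interrupt delivery is held
 * off (see kvmppc_book3s_irqprio_deliver()).
 */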
static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
{
	ulong crit_raw;
	ulong crit_r1;
	bool crit;

	if (is_kvmppc_hv_enabled(vcpu->kvm))
		return false;

	crit_raw = kvmppc_get_critical(vcpu);
	crit_r1 = kvmppc_get_gpr(vcpu, 1);

	/* Truncate crit indicators in 32 bit mode */
	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(kvmppc_get_msr(vcpu) & MSR_PR);

	return crit;
}

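/*
 * Deliver an interrupt immediately: save PC/MSR into SRR0/SRR1, branch
 * to the vector (offset by HIOR for PR guests) and let the MMU backend
 * compute the interrupt-time MSR.
 */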
void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
{
	kvmppc_unfixup_split_real(vcpu);
	kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu));
	kvmppc_set_srr1(vcpu, kvmppc_get_msr(vcpu) | flags);
	kvmppc_set_pc(vcpu, kvmppc_interrupt_offset(vcpu) + vec);
	vcpu->arch.mmu.reset_msr(vcpu);
}

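/*
 * Map an interrupt vector to its delivery priority; unknown vectors
 * fall through to BOOK3S_IRQPRIO_MAX and are never delivered.
 */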
static int kvmppc_book3s_vec2irqprio(unsigned int vec)
{
	unsigned int prio;

	switch (vec) {
	case 0x100: prio = BOOK3S_IRQPRIO_SYSTEM_RESET;		break;
	case 0x200: prio = BOOK3S_IRQPRIO_MACHINE_CHECK;	break;
	case 0x300: prio = BOOK3S_IRQPRIO_DATA_STORAGE;		break;
	case 0x380: prio = BOOK3S_IRQPRIO_DATA_SEGMENT;		break;
	case 0x400: prio = BOOK3S_IRQPRIO_INST_STORAGE;		break;
	case 0x480: prio = BOOK3S_IRQPRIO_INST_SEGMENT;		break;
	case 0x500: prio = BOOK3S_IRQPRIO_EXTERNAL;		break;
	case 0x501: prio = BOOK3S_IRQPRIO_EXTERNAL_LEVEL;	break;
	case 0x600: prio = BOOK3S_IRQPRIO_ALIGNMENT;		break;
	case 0x700: prio = BOOK3S_IRQPRIO_PROGRAM;		break;
	case 0x800: prio = BOOK3S_IRQPRIO_FP_UNAVAIL;		break;
	case 0x900: prio = BOOK3S_IRQPRIO_DECREMENTER;		break;
	case 0xc00: prio = BOOK3S_IRQPRIO_SYSCALL;		break;
	case 0xd00: prio = BOOK3S_IRQPRIO_DEBUG;		break;
	case 0xf20: prio = BOOK3S_IRQPRIO_ALTIVEC;		break;
	case 0xf40: prio = BOOK3S_IRQPRIO_VSX;			break;
	case 0xf60: prio = BOOK3S_IRQPRIO_FAC_UNAVAIL;		break;
	default:    prio = BOOK3S_IRQPRIO_MAX;			break;
	}

	return prio;
}

void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
					  unsigned int vec)
{
	unsigned long old_pending = vcpu->arch.pending_exceptions;

	clear_bit(kvmppc_book3s_vec2irqprio(vec),
		  &vcpu->arch.pending_exceptions);

	kvmppc_update_int_pending(vcpu, vcpu->arch.pending_exceptions,
				  old_pending);
}

void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
{
	vcpu->stat.queue_intr++;

	set_bit(kvmppc_book3s_vec2irqprio(vec),
		&vcpu->arch.pending_exceptions);
#ifdef EXIT_DEBUG
	printk(KERN_INFO "Queueing interrupt %x\n", vec);
#endif
}
EXPORT_SYMBOL_GPL(kvmppc_book3s_queue_irqprio);

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, flags);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_program);

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_dec);

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOK3S_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}
EXPORT_SYMBOL_GPL(kvmppc_core_pending_dec);

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_dequeue_dec);

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                struct kvm_interrupt *irq)
{
	unsigned int vec = BOOK3S_INTERRUPT_EXTERNAL;

	if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
		vec = BOOK3S_INTERRUPT_EXTERNAL_LEVEL;

	kvmppc_book3s_queue_irqprio(vcpu, vec);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
}

void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, ulong dar,
				    ulong flags)
{
	kvmppc_set_dar(vcpu, dar);
	kvmppc_set_dsisr(vcpu, flags);
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE);
}

void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong flags)
{
	u64 msr = kvmppc_get_msr(vcpu);
	msr &= ~(SRR1_ISI_NOPT | SRR1_ISI_N_OR_G | SRR1_ISI_PROT);
	msr |= flags & (SRR1_ISI_NOPT | SRR1_ISI_N_OR_G | SRR1_ISI_PROT);
	kvmppc_set_msr_fast(vcpu, msr);
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
}

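/*
 * Try to deliver the exception with the given priority. Decrementer
 * and external interrupts are gated on MSR_EE and on the guest not
 * being in a critical section; the rest are delivered unconditionally.
 * Returns 1 if the interrupt was injected.
 */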
static int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu,
					 unsigned int priority)
{
	int deliver = 1;
	int vec = 0;
	bool crit = kvmppc_critical_section(vcpu);

	switch (priority) {
	case BOOK3S_IRQPRIO_DECREMENTER:
		deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
		vec = BOOK3S_INTERRUPT_DECREMENTER;
		break;
	case BOOK3S_IRQPRIO_EXTERNAL:
	case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
		deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
		vec = BOOK3S_INTERRUPT_EXTERNAL;
		break;
	case BOOK3S_IRQPRIO_SYSTEM_RESET:
		vec = BOOK3S_INTERRUPT_SYSTEM_RESET;
		break;
	case BOOK3S_IRQPRIO_MACHINE_CHECK:
		vec = BOOK3S_INTERRUPT_MACHINE_CHECK;
		break;
	case BOOK3S_IRQPRIO_DATA_STORAGE:
		vec = BOOK3S_INTERRUPT_DATA_STORAGE;
		break;
	case BOOK3S_IRQPRIO_INST_STORAGE:
		vec = BOOK3S_INTERRUPT_INST_STORAGE;
		break;
	case BOOK3S_IRQPRIO_DATA_SEGMENT:
		vec = BOOK3S_INTERRUPT_DATA_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_INST_SEGMENT:
		vec = BOOK3S_INTERRUPT_INST_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_ALIGNMENT:
		vec = BOOK3S_INTERRUPT_ALIGNMENT;
		break;
	case BOOK3S_IRQPRIO_PROGRAM:
		vec = BOOK3S_INTERRUPT_PROGRAM;
		break;
	case BOOK3S_IRQPRIO_VSX:
		vec = BOOK3S_INTERRUPT_VSX;
		break;
	case BOOK3S_IRQPRIO_ALTIVEC:
		vec = BOOK3S_INTERRUPT_ALTIVEC;
		break;
	case BOOK3S_IRQPRIO_FP_UNAVAIL:
		vec = BOOK3S_INTERRUPT_FP_UNAVAIL;
		break;
	case BOOK3S_IRQPRIO_SYSCALL:
		vec = BOOK3S_INTERRUPT_SYSCALL;
		break;
	case BOOK3S_IRQPRIO_DEBUG:
		vec = BOOK3S_INTERRUPT_TRACE;
		break;
	case BOOK3S_IRQPRIO_PERFORMANCE_MONITOR:
		vec = BOOK3S_INTERRUPT_PERFMON;
		break;
	case BOOK3S_IRQPRIO_FAC_UNAVAIL:
		vec = BOOK3S_INTERRUPT_FAC_UNAVAIL;
		break;
	default:
		deliver = 0;
		printk(KERN_ERR "KVM: Unknown interrupt: 0x%x\n", priority);
		break;
	}

#if 0
	printk(KERN_INFO "Deliver interrupt 0x%x? %x\n", vec, deliver);
#endif

	if (deliver)
		kvmppc_inject_interrupt(vcpu, vec, 0);

	return deliver;
}

/*
 * This function determines if an irqprio should be cleared once issued.
 */
static bool clear_irqprio(struct kvm_vcpu *vcpu, unsigned int priority)
{
	switch (priority) {
	case BOOK3S_IRQPRIO_DECREMENTER:
		/* DEC interrupts get cleared by mtdec */
		return false;
	case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
		/* External interrupts get cleared by userspace */
		return false;
	}

	return true;
}

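/*
 * Walk pending_exceptions from the highest priority (lowest set bit)
 * upwards, injecting exceptions that are deliverable right now. Most
 * priorities are cleared once issued; DEC and level-triggered external
 * interrupts stay pending (see clear_irqprio above).
 */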
int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned long old_pending = vcpu->arch.pending_exceptions;
	unsigned int priority;

#ifdef EXIT_DEBUG
	if (vcpu->arch.pending_exceptions)
		printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions);
#endif
	priority = __ffs(*pending);
	while (priority < BOOK3S_IRQPRIO_MAX) {
		if (kvmppc_book3s_irqprio_deliver(vcpu, priority) &&
		    clear_irqprio(vcpu, priority)) {
			clear_bit(priority, &vcpu->arch.pending_exceptions);
			break;
		}

		priority = find_next_bit(pending,
					 BITS_PER_BYTE * sizeof(*pending),
					 priority + 1);
	}

	/* Tell the guest about our interrupt status */
	kvmppc_update_int_pending(vcpu, *pending, old_pending);

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_core_prepare_to_enter);

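/*
 * Translate a guest physical address to a host pfn, honouring the
 * magic page override: accesses to the magic page get the pfn of the
 * kernel-side shared page instead of going through the memslots.
 */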
kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa, bool writing,
			bool *writable)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM;
	gfn_t gfn = gpa >> PAGE_SHIFT;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF))
		mp_pa = (uint32_t)mp_pa;

	/* Magic page override */
	gpa &= ~0xFFFULL;
	if (unlikely(mp_pa) && unlikely((gpa & KVM_PAM) == mp_pa)) {
		ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
		kvm_pfn_t pfn;

		pfn = (kvm_pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
		get_page(pfn_to_page(pfn));
		if (writable)
			*writable = true;
		return pfn;
	}

	return gfn_to_pfn_prot(vcpu->kvm, gfn, writing, writable);
}
EXPORT_SYMBOL_GPL(kvmppc_gpa_to_pfn);

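/*
 * Translate a guest effective address. With relocation on, defer to
 * the guest MMU backend; in real mode, use an identity mapping clamped
 * to KVM_PAM with full access rights, undoing the split real mode
 * offset for instruction fetches where needed.
 */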
int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid,
		 enum xlate_readwrite xlrw, struct kvmppc_pte *pte)
{
	bool data = (xlid == XLATE_DATA);
	bool iswrite = (xlrw == XLATE_WRITE);
	int relocated = (kvmppc_get_msr(vcpu) & (data ? MSR_DR : MSR_IR));
	int r;

	if (relocated) {
		r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data, iswrite);
	} else {
		pte->eaddr = eaddr;
		pte->raddr = eaddr & KVM_PAM;
		pte->vpage = VSID_REAL | eaddr >> 12;
		pte->may_read = true;
		pte->may_write = true;
		pte->may_execute = true;
		r = 0;

		if ((kvmppc_get_msr(vcpu) & (MSR_IR | MSR_DR)) == MSR_DR &&
		    !data) {
			if ((vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
			    ((eaddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS))
				pte->raddr &= ~SPLIT_HACK_MASK;
		}
	}

	return r;
}

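/*
 * Fetch the instruction the guest last trapped on. For a system call
 * SRR0 points past the sc instruction, so back up 4 bytes first.
 * Returns EMULATE_DONE on success, EMULATE_AGAIN if the load failed.
 */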
int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type,
					 u32 *inst)
{
	ulong pc = kvmppc_get_pc(vcpu);
	int r;

	if (type == INST_SC)
		pc -= 4;

	r = kvmppc_ld(vcpu, &pc, sizeof(u32), inst, false);
	if (r == EMULATE_DONE)
		return r;
	else
		return EMULATE_AGAIN;
}
EXPORT_SYMBOL_GPL(kvmppc_load_last_inst);

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return 0;
}

int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	regs->pc = kvmppc_get_pc(vcpu);
	regs->cr = kvmppc_get_cr(vcpu);
	regs->ctr = kvmppc_get_ctr(vcpu);
	regs->lr = kvmppc_get_lr(vcpu);
	regs->xer = kvmppc_get_xer(vcpu);
	regs->msr = kvmppc_get_msr(vcpu);
	regs->srr0 = kvmppc_get_srr0(vcpu);
	regs->srr1 = kvmppc_get_srr1(vcpu);
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = kvmppc_get_sprg0(vcpu);
	regs->sprg1 = kvmppc_get_sprg1(vcpu);
	regs->sprg2 = kvmppc_get_sprg2(vcpu);
	regs->sprg3 = kvmppc_get_sprg3(vcpu);
	regs->sprg4 = kvmppc_get_sprg4(vcpu);
	regs->sprg5 = kvmppc_get_sprg5(vcpu);
	regs->sprg6 = kvmppc_get_sprg6(vcpu);
	regs->sprg7 = kvmppc_get_sprg7(vcpu);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	kvmppc_set_pc(vcpu, regs->pc);
	kvmppc_set_cr(vcpu, regs->cr);
	kvmppc_set_ctr(vcpu, regs->ctr);
	kvmppc_set_lr(vcpu, regs->lr);
	kvmppc_set_xer(vcpu, regs->xer);
	kvmppc_set_msr(vcpu, regs->msr);
	kvmppc_set_srr0(vcpu, regs->srr0);
	kvmppc_set_srr1(vcpu, regs->srr1);
	kvmppc_set_sprg0(vcpu, regs->sprg0);
	kvmppc_set_sprg1(vcpu, regs->sprg1);
	kvmppc_set_sprg2(vcpu, regs->sprg2);
	kvmppc_set_sprg3(vcpu, regs->sprg3);
	kvmppc_set_sprg4(vcpu, regs->sprg4);
	kvmppc_set_sprg5(vcpu, regs->sprg5);
	kvmppc_set_sprg6(vcpu, regs->sprg6);
	kvmppc_set_sprg7(vcpu, regs->sprg7);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

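/*
 * ONE_REG access is offered to the HV/PR backend first; a return value
 * of -EINVAL means the register is not backend-specific and is handled
 * by the common code below.
 */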
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
			union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;

	r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, id, val);
	if (r == -EINVAL) {
		r = 0;
		switch (id) {
		case KVM_REG_PPC_DAR:
			*val = get_reg_val(id, kvmppc_get_dar(vcpu));
			break;
		case KVM_REG_PPC_DSISR:
			*val = get_reg_val(id, kvmppc_get_dsisr(vcpu));
			break;
		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
			i = id - KVM_REG_PPC_FPR0;
			*val = get_reg_val(id, VCPU_FPR(vcpu, i));
			break;
		case KVM_REG_PPC_FPSCR:
			*val = get_reg_val(id, vcpu->arch.fp.fpscr);
			break;
#ifdef CONFIG_VSX
		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
			if (cpu_has_feature(CPU_FTR_VSX)) {
				i = id - KVM_REG_PPC_VSR0;
				val->vsxval[0] = vcpu->arch.fp.fpr[i][0];
				val->vsxval[1] = vcpu->arch.fp.fpr[i][1];
			} else {
				r = -ENXIO;
			}
			break;
#endif /* CONFIG_VSX */
		case KVM_REG_PPC_DEBUG_INST:
			*val = get_reg_val(id, INS_TW);
			break;
#ifdef CONFIG_KVM_XICS
		case KVM_REG_PPC_ICP_STATE:
			if (!vcpu->arch.icp) {
				r = -ENXIO;
				break;
			}
			*val = get_reg_val(id, kvmppc_xics_get_icp(vcpu));
			break;
#endif /* CONFIG_KVM_XICS */
		case KVM_REG_PPC_FSCR:
			*val = get_reg_val(id, vcpu->arch.fscr);
			break;
		case KVM_REG_PPC_TAR:
			*val = get_reg_val(id, vcpu->arch.tar);
			break;
		case KVM_REG_PPC_EBBHR:
			*val = get_reg_val(id, vcpu->arch.ebbhr);
			break;
		case KVM_REG_PPC_EBBRR:
			*val = get_reg_val(id, vcpu->arch.ebbrr);
			break;
		case KVM_REG_PPC_BESCR:
			*val = get_reg_val(id, vcpu->arch.bescr);
			break;
		case KVM_REG_PPC_IC:
			*val = get_reg_val(id, vcpu->arch.ic);
			break;
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}

int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
			union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;

	r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, id, val);
	if (r == -EINVAL) {
		r = 0;
		switch (id) {
		case KVM_REG_PPC_DAR:
			kvmppc_set_dar(vcpu, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_DSISR:
			kvmppc_set_dsisr(vcpu, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
			i = id - KVM_REG_PPC_FPR0;
			VCPU_FPR(vcpu, i) = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_FPSCR:
			vcpu->arch.fp.fpscr = set_reg_val(id, *val);
			break;
#ifdef CONFIG_VSX
		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
			if (cpu_has_feature(CPU_FTR_VSX)) {
				i = id - KVM_REG_PPC_VSR0;
				vcpu->arch.fp.fpr[i][0] = val->vsxval[0];
				vcpu->arch.fp.fpr[i][1] = val->vsxval[1];
			} else {
				r = -ENXIO;
			}
			break;
#endif /* CONFIG_VSX */
#ifdef CONFIG_KVM_XICS
		case KVM_REG_PPC_ICP_STATE:
			if (!vcpu->arch.icp) {
				r = -ENXIO;
				break;
			}
			r = kvmppc_xics_set_icp(vcpu,
						set_reg_val(id, *val));
			break;
#endif /* CONFIG_KVM_XICS */
		case KVM_REG_PPC_FSCR:
			vcpu->arch.fscr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_TAR:
			vcpu->arch.tar = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_EBBHR:
			vcpu->arch.ebbhr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_EBBRR:
			vcpu->arch.ebbrr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_BESCR:
			vcpu->arch.bescr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_IC:
			vcpu->arch.ic = set_reg_val(id, *val);
			break;
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}

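/*
 * The routines below are mostly thin wrappers that dispatch into the
 * PR or HV implementation through the per-VM kvm_ops vtable.
 */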
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
}

void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
	vcpu->kvm->arch.kvm_ops->set_msr(vcpu, msr);
}
EXPORT_SYMBOL_GPL(kvmppc_set_msr);

int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->vcpu_run(kvm_run, vcpu);
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
	return 0;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	vcpu->guest_debug = dbg->control;
	return 0;
}

void kvmppc_decrementer_func(struct kvm_vcpu *vcpu)
{
	kvmppc_core_queue_dec(vcpu);
	kvm_vcpu_kick(vcpu);
}

struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
	return kvm->arch.kvm_ops->vcpu_create(kvm, id);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
}

int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->check_requests(vcpu);
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return kvm->arch.kvm_ops->get_dirty_log(kvm, log);
}

void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			      struct kvm_memory_slot *dont)
{
	kvm->arch.kvm_ops->free_memslot(free, dont);
}

int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			       unsigned long npages)
{
	return kvm->arch.kvm_ops->create_memslot(slot, npages);
}

void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
	kvm->arch.kvm_ops->flush_memslot(kvm, memslot);
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				const struct kvm_userspace_memory_region *mem)
{
	return kvm->arch.kvm_ops->prepare_memory_region(kvm, memslot, mem);
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new)
{
	kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new);
}

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm->arch.kvm_ops->unmap_hva(kvm, hva);
}
EXPORT_SYMBOL_GPL(kvm_unmap_hva);

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
	return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end);
}

int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
{
	return kvm->arch.kvm_ops->age_hva(kvm, start, end);
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm->arch.kvm_ops->test_age_hva(kvm, hva);
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	kvm->arch.kvm_ops->set_spte_hva(kvm, hva, pte);
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
}

int kvmppc_core_init_vm(struct kvm *kvm)
{
#ifdef CONFIG_PPC64
	INIT_LIST_HEAD_RCU(&kvm->arch.spapr_tce_tables);
	INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
#endif

	return kvm->arch.kvm_ops->init_vm(kvm);
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
{
	kvm->arch.kvm_ops->destroy_vm(kvm);

#ifdef CONFIG_PPC64
	kvmppc_rtas_tokens_free(kvm);
	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif
}

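/*
 * H_LOGICAL_CI_LOAD: load from the cache-inhibited logical address in
 * r5, with the access size (a power of two, at most 8 bytes) in r4;
 * the value read off the in-kernel MMIO bus is returned in r4.
 * Anything the bus cannot satisfy is punted back with H_TOO_HARD.
 */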
int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu)
{
	unsigned long size = kvmppc_get_gpr(vcpu, 4);
	unsigned long addr = kvmppc_get_gpr(vcpu, 5);
	u64 buf;
	int srcu_idx;
	int ret;

	if (!is_power_of_2(size) || (size > sizeof(buf)))
		return H_TOO_HARD;

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, size, &buf);
	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
	if (ret != 0)
		return H_TOO_HARD;

	switch (size) {
	case 1:
		kvmppc_set_gpr(vcpu, 4, *(u8 *)&buf);
		break;

	case 2:
		kvmppc_set_gpr(vcpu, 4, be16_to_cpu(*(__be16 *)&buf));
		break;

	case 4:
		kvmppc_set_gpr(vcpu, 4, be32_to_cpu(*(__be32 *)&buf));
		break;

	case 8:
		kvmppc_set_gpr(vcpu, 4, be64_to_cpu(*(__be64 *)&buf));
		break;

	default:
		BUG();
	}

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_logical_ci_load);

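/*
 * H_LOGICAL_CI_STORE: the store-side counterpart, with the value to
 * write taken from r6.
 */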
int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu)
{
	unsigned long size = kvmppc_get_gpr(vcpu, 4);
	unsigned long addr = kvmppc_get_gpr(vcpu, 5);
	unsigned long val = kvmppc_get_gpr(vcpu, 6);
	u64 buf;
	int srcu_idx;
	int ret;

	switch (size) {
	case 1:
		*(u8 *)&buf = val;
		break;

	case 2:
		*(__be16 *)&buf = cpu_to_be16(val);
		break;

	case 4:
		*(__be32 *)&buf = cpu_to_be32(val);
		break;

	case 8:
		*(__be64 *)&buf = cpu_to_be64(val);
		break;

	default:
		return H_TOO_HARD;
	}

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, size, &buf);
	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
	if (ret != 0)
		return H_TOO_HARD;

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_logical_ci_store);

int kvmppc_core_check_processor_compat(void)
{
	/*
	 * We always return 0 for book3s. We check
	 * for compatibility while loading the HV
	 * or PR module.
	 */
	return 0;
}

int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hcall)
{
	return kvm->arch.kvm_ops->hcall_implemented(hcall);
}

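/*
 * Common module init/exit. The HV and PR backends register themselves
 * separately; when the 32-bit handler is built in, PR is part of this
 * module and is initialized and torn down here as well.
 */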
static int kvmppc_book3s_init(void)
{
	int r;

	r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (r)
		return r;
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	r = kvmppc_book3s_init_pr();
#endif
	return r;
}

static void kvmppc_book3s_exit(void)
{
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	kvmppc_book3s_exit_pr();
#endif
	kvm_exit();
}

module_init(kvmppc_book3s_init);
module_exit(kvmppc_book3s_exit);

/* On 32bit this is our one and only kernel module */
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");
#endif
956