/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/module.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include "timing.h"
#include "irq.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

struct kvmppc_ops *kvmppc_hv_ops;
EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
struct kvmppc_ops *kvmppc_pr_ops;
EXPORT_SYMBOL_GPL(kvmppc_pr_ops);

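/*
 * A vcpu is runnable when it has pending exceptions to deliver or any
 * request bit set; generic KVM code uses this to decide when a blocked
 * vcpu should be woken.
 */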
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !!(v->arch.pending_exceptions) ||
	       v->requests;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

/*
 * Common checks before entering the guest world.  Call with interrupts
 * enabled.
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r;

	WARN_ON(irqs_disabled());
	hard_irq_disable();

	while (true) {
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			hard_irq_disable();
			continue;
		}

		if (signal_pending(current)) {
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
			break;
		}

		vcpu->mode = IN_GUEST_MODE;

		/*
		 * Reading vcpu->requests must happen after setting vcpu->mode,
		 * so we don't miss a request because the requester sees
		 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
		 * before next entering the guest (and thus doesn't IPI).
		 */
		smp_mb();

		if (vcpu->requests) {
			/* Make sure we process requests preemptibly */
			local_irq_enable();
			trace_kvm_check_requests(vcpu);
			r = kvmppc_core_check_requests(vcpu);
			hard_irq_disable();
			if (r > 0)
				continue;
			break;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			/* interrupts got enabled in between, so we
			   are back at square 1 */
			continue;
		}

		kvm_guest_enter();
		return 1;
	}

	/* return to host */
	local_irq_enable();
	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);

#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
	int i;

	shared->sprg0 = swab64(shared->sprg0);
	shared->sprg1 = swab64(shared->sprg1);
	shared->sprg2 = swab64(shared->sprg2);
	shared->sprg3 = swab64(shared->sprg3);
	shared->srr0 = swab64(shared->srr0);
	shared->srr1 = swab64(shared->srr1);
	shared->dar = swab64(shared->dar);
	shared->msr = swab64(shared->msr);
	shared->dsisr = swab32(shared->dsisr);
	shared->int_pending = swab32(shared->int_pending);
	for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
		shared->sr[i] = swab32(shared->sr[i]);
}
#endif

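/*
 * Handle a KVM paravirtual hypercall from the guest.  The hypercall
 * number is taken from r11 and up to four arguments from r3-r6 (the
 * low 32 bits only when the guest runs in 32-bit mode); the second
 * return value is placed in r4 and the status is handed back to the
 * caller.
 */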
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
	{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
		/* Book3S can be little endian, find it out here */
		int shared_big_endian = true;
		if (vcpu->arch.intr_msr & MSR_LE)
			shared_big_endian = false;
		if (shared_big_endian != vcpu->arch.shared_big_endian)
			kvmppc_swab_shared(vcpu);
		vcpu->arch.shared_big_endian = shared_big_endian;
#endif

		if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
			/*
			 * Older versions of the Linux magic page code had
			 * a bug where they would map their trampoline code
			 * NX. If that's the case, remove !PR NX capability.
			 */
			vcpu->arch.disable_kernel_nx = true;
			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		}

		vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
		vcpu->arch.magic_page_ea = param2 & ~0xfffULL;

#ifdef CONFIG_PPC_64K_PAGES
		/*
		 * Make sure our 4k magic page is in the same window of a 64k
		 * page within the guest and within the host's page.
		 */
		if ((vcpu->arch.magic_page_pa & 0xf000) !=
		    ((ulong)vcpu->arch.shared & 0xf000)) {
			void *old_shared = vcpu->arch.shared;
			ulong shared = (ulong)vcpu->arch.shared;
			void *new_shared;

			shared &= PAGE_MASK;
			shared |= vcpu->arch.magic_page_pa & 0xf000;
			new_shared = (void *)shared;
			memcpy(new_shared, old_shared, 0x1000);
			vcpu->arch.shared = new_shared;
		}
#endif

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

		r = EV_SUCCESS;
		break;
	}
	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
		r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

		/* Second return value is in r4 */
		break;
	case EV_HCALL_TOKEN(EV_IDLE):
		r = EV_SUCCESS;
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		break;
	default:
		r = EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);

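/*
 * Check that the vcpu configuration makes sense before it is allowed
 * to run; the verdict is cached in vcpu->arch.sane.
 */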
int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
	int r = false;

	/* We have to know what CPU to virtualize */
	if (!vcpu->arch.pvr)
		goto out;

	/* PAPR only works with book3s_64 */
	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
		goto out;

	/* HV KVM can only do PAPR mode for now */
	if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
		goto out;

#ifdef CONFIG_KVM_BOOKE_HV
	if (!cpu_has_feature(CPU_FTR_EMB_HV))
		goto out;
#endif

	r = true;

out:
	vcpu->arch.sane = r;
	return r ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(kvmppc_sanity_check);

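/*
 * Run the load/store emulator and map its result onto a RESUME_* code
 * telling the exit handler whether to re-enter the guest or return to
 * the host, e.g. to complete MMIO in userspace.
 */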
int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_loadstore(vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_AGAIN:
		r = RESUME_GUEST;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
	{
		u32 last_inst;

		kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
		/* XXX Deliver Program interrupt to guest. */
		pr_emerg("%s: emulation failed (%08x)\n", __func__, last_inst);
		r = RESUME_HOST;
		break;
	}
	default:
		WARN_ON(1);
		r = RESUME_GUEST;
	}

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);

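/*
 * Store 'size' bytes from 'ptr' at guest effective address *eaddr:
 * translate the address, check write permission, serve magic-page hits
 * from the shared page, and otherwise write through kvm_write_guest().
 * EMULATE_DO_MMIO is returned when the address has no memslot backing.
 */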
int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int r;

	vcpu->stat.st++;

	r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			 XLATE_WRITE, &pte);
	if (r < 0)
		return r;

	*eaddr = pte.raddr;

	if (!pte.may_write)
		return -EPERM;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(magic, ptr, size);
		return EMULATE_DONE;
	}

	if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_st);

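/*
 * Load 'size' bytes into 'ptr' from guest effective address *eaddr;
 * the counterpart to kvmppc_st() above, with an additional execute
 * permission check for instruction fetches.
 */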
int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int rc;

	vcpu->stat.ld++;

	rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			  XLATE_READ, &pte);
	if (rc)
		return rc;

	*eaddr = pte.raddr;

	if (!pte.may_read)
		return -EPERM;

	if (!data && !pte.may_execute)
		return -ENOEXEC;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(ptr, magic, size);
		return EMULATE_DONE;
	}

	if (kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_ld);

int kvm_arch_hardware_enable(void)
{
	return 0;
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = kvmppc_core_check_processor_compat();
}

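/*
 * Bind a new VM to the HV or PR backend: an explicit type selects the
 * matching backend, while type 0 prefers HV when it is available.
 */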
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	struct kvmppc_ops *kvm_ops = NULL;
	/*
	 * if we have both HV and PR enabled, default is HV
	 */
	if (type == 0) {
		if (kvmppc_hv_ops)
			kvm_ops = kvmppc_hv_ops;
		else
			kvm_ops = kvmppc_pr_ops;
		if (!kvm_ops)
			goto err_out;
	} else if (type == KVM_VM_PPC_HV) {
		if (!kvmppc_hv_ops)
			goto err_out;
		kvm_ops = kvmppc_hv_ops;
	} else if (type == KVM_VM_PPC_PR) {
		if (!kvmppc_pr_ops)
			goto err_out;
		kvm_ops = kvmppc_pr_ops;
	} else
		goto err_out;

	if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
		return -ENOENT;

	kvm->arch.kvm_ops = kvm_ops;
	return kvmppc_core_init_vm(kvm);
err_out:
	return -EINVAL;
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	kvmppc_core_destroy_vm(kvm);

	mutex_unlock(&kvm->lock);

	/* drop the module reference */
	module_put(kvm->arch.kvm_ops->owner);
}

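/*
 * Report which capabilities are available.  Several differ between HV
 * and PR; without a VM to inspect we have to guess, assuming HV mode
 * whenever the HV module is loaded.
 */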
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;
	/* Assume we're using HV mode when the HV module is loaded */
	int hv_enabled = kvmppc_hv_ops ? 1 : 0;

	if (kvm) {
		/*
		 * Hooray - we know which VM type we're running on. Depend on
		 * that rather than the guess above.
		 */
		hv_enabled = is_kvmppc_hv_enabled(kvm);
	}

	switch (ext) {
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_SREGS:
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
	case KVM_CAP_PPC_EPR:
#else
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_HIOR:
	case KVM_CAP_PPC_PAPR:
#endif
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
		r = 1;
		break;
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB:
#endif
		/* We support this only for PR */
		r = !hv_enabled;
		break;
#ifdef CONFIG_KVM_MMIO
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC:
		r = 1;
		break;
#endif

#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_SPAPR_TCE:
	case KVM_CAP_PPC_ALLOC_HTAB:
	case KVM_CAP_PPC_RTAS:
	case KVM_CAP_PPC_FIXUP_HCALL:
	case KVM_CAP_PPC_ENABLE_HCALL:
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS:
#endif
		r = 1;
		break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_SMT:
		if (hv_enabled)
			r = threads_per_subcore;
		else
			r = 0;
		break;
	case KVM_CAP_PPC_RMA:
		r = hv_enabled;
		/* PPC970 requires an RMA */
		if (r && cpu_has_feature(CPU_FTR_ARCH_201))
			r = 2;
		break;
#endif
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
		if (hv_enabled)
			r = cpu_has_feature(CPU_FTR_ARCH_206) ? 1 : 0;
		else
			r = 0;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
		r = 1;
#else
		r = 0;
#endif
		break;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_HTAB_FD:
		r = hv_enabled;
		break;
#endif
	case KVM_CAP_NR_VCPUS:
		/*
		 * Recommending a number of CPUs is somewhat arbitrary; we
		 * return the number of present CPUs for -HV (since a host
		 * will have secondary threads "offline"), and for other KVM
		 * implementations just count online CPUs.
		 */
		if (hv_enabled)
			r = num_present_cpus();
		else
			r = num_online_cpus();
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_PPC_GET_SMMU_INFO:
		r = 1;
		break;
#endif
	default:
		r = 0;
		break;
	}
	return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
	kvmppc_core_free_memslot(kvm, free, dont);
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return kvmppc_core_create_memslot(kvm, slot, npages);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   enum kvm_mr_change change)
{
	kvmppc_core_commit_memory_region(kvm, mem, old);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	kvmppc_core_flush_memslot(kvm, slot);
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;
	vcpu = kvmppc_core_vcpu_create(kvm, id);
	if (!IS_ERR(vcpu)) {
		vcpu->arch.wqp = &vcpu->wq;
		kvmppc_create_vcpu_debugfs(vcpu, id);
	}
	return vcpu;
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);

	kvmppc_remove_vcpu_debugfs(vcpu);

	switch (vcpu->arch.irq_type) {
	case KVMPPC_IRQ_MPIC:
		kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
		break;
	case KVMPPC_IRQ_XICS:
		kvmppc_xics_free_icp(vcpu);
		break;
	}

	kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}

enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	kvmppc_decrementer_func(vcpu);

	return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	int ret;

	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
	vcpu->arch.dec_expires = ~(u64)0;

#ifdef CONFIG_KVM_EXIT_TIMING
	mutex_init(&vcpu->arch.exit_timing_lock);
#endif
	ret = kvmppc_subarch_vcpu_init(vcpu);
	return ret;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_destroy(vcpu);
	kvmppc_subarch_vcpu_uninit(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
	/*
	 * vrsave (formerly usprg0) isn't used by Linux, but may
	 * be used by the guest.
	 *
	 * On non-booke this is associated with Altivec and
	 * is handled by code in book3s.c.
	 */
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
	kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
}

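/*
 * Finish a pending MMIO load: take the bytes userspace (or the
 * in-kernel MMIO bus) left in run->mmio.data, byteswap and sign-extend
 * them as recorded at emulation time, and write the result to the
 * target register (GPR, FPR or QPR).
 */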
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
                                      struct kvm_run *run)
{
	u64 uninitialized_var(gpr);

	if (run->mmio.len > sizeof(gpr)) {
		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
		return;
	}

	if (vcpu->arch.mmio_is_bigendian) {
		switch (run->mmio.len) {
		case 8: gpr = *(u64 *)run->mmio.data; break;
		case 4: gpr = *(u32 *)run->mmio.data; break;
		case 2: gpr = *(u16 *)run->mmio.data; break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		/* Convert BE data from userland back to LE. */
		switch (run->mmio.len) {
		case 4: gpr = ld_le32((u32 *)run->mmio.data); break;
		case 2: gpr = ld_le16((u16 *)run->mmio.data); break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	}

	if (vcpu->arch.mmio_sign_extend) {
		switch (run->mmio.len) {
#ifdef CONFIG_PPC64
		case 4:
			gpr = (s64)(s32)gpr;
			break;
#endif
		case 2:
			gpr = (s64)(s16)gpr;
			break;
		case 1:
			gpr = (s64)(s8)gpr;
			break;
		}
	}

	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);

	switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
	case KVM_MMIO_REG_GPR:
		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
		break;
	case KVM_MMIO_REG_FPR:
		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		break;
#ifdef CONFIG_PPC_BOOK3S
	case KVM_MMIO_REG_QPR:
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
	case KVM_MMIO_REG_FQPR:
		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#endif
	default:
		BUG();
	}
}

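/*
 * Begin emulation of an MMIO load.  If the access hits an in-kernel
 * device on the MMIO bus it completes immediately; otherwise the
 * request is handed to userspace through the kvm_run MMIO block.
 */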
int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int rt, unsigned int bytes,
		       int is_default_endian)
{
	int idx, ret;
	int is_bigendian;

	if (kvmppc_need_byteswap(vcpu)) {
		/* Default endianness is "little endian". */
		is_bigendian = !is_default_endian;
	} else {
		/* Default endianness is "big endian". */
		is_bigendian = is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       bytes);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_is_bigendian = is_bigendian;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;
	vcpu->arch.mmio_sign_extend = 0;

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, run->mmio.phys_addr,
			      bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_load);

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
			unsigned int rt, unsigned int bytes,
			int is_default_endian)
{
	int r;

	vcpu->arch.mmio_sign_extend = 1;
	r = kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian);

	return r;
}

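/*
 * Begin emulation of an MMIO store: stage the value in run->mmio.data
 * with the endianness the guest expects, then try the in-kernel MMIO
 * bus before deferring to userspace.
 */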
int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			u64 val, unsigned int bytes, int is_default_endian)
{
	void *data = run->mmio.data;
	int idx, ret;
	int is_bigendian;

	if (kvmppc_need_byteswap(vcpu)) {
		/* Default endianness is "little endian". */
		is_bigendian = !is_default_endian;
	} else {
		/* Default endianness is "big endian". */
		is_bigendian = is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       bytes);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	/* Store the value at the lowest bytes in 'data'. */
	if (is_bigendian) {
		switch (bytes) {
		case 8: *(u64 *)data = val; break;
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8  *)data = val; break;
		}
	} else {
		/* Store LE value into 'data'. */
		switch (bytes) {
		case 4: st_le32(data, val); break;
		case 2: st_le16(data, val); break;
		case 1: *(u8 *)data = val; break;
		}
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, run->mmio.phys_addr,
			       bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_store);

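/*
 * The ONE_REG interface transfers a single register, identified by
 * reg->id, between the guest and userspace.  Registers the core
 * backend does not handle itself are dealt with here; currently that
 * is the Altivec state.
 */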
int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r = 0;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	r = kvmppc_get_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
			break;
		case KVM_REG_PPC_VRSAVE:
			val = get_reg_val(reg->id, vcpu->arch.vrsave);
			break;
#endif /* CONFIG_ALTIVEC */
		default:
			r = -EINVAL;
			break;
		}
	}

	if (r)
		return r;

	if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
		r = -EFAULT;

	return r;
}

int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
		return -EFAULT;

	r = kvmppc_set_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_VRSAVE:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vrsave = set_reg_val(reg->id, val);
			break;
#endif /* CONFIG_ALTIVEC */
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}

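/*
 * Main entry point for running a vcpu.  Before (re)entering the guest,
 * complete whatever the last exit left pending: an MMIO load result,
 * OSI or PAPR hypercall return values, or an EPR update.
 */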
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
	} else if (vcpu->arch.osi_needed) {
		u64 *gprs = run->osi.gprs;
		int i;

		for (i = 0; i < 32; i++)
			kvmppc_set_gpr(vcpu, i, gprs[i]);
		vcpu->arch.osi_needed = 0;
	} else if (vcpu->arch.hcall_needed) {
		int i;

		kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
		for (i = 0; i < 9; ++i)
			kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
		vcpu->arch.hcall_needed = 0;
#ifdef CONFIG_BOOKE
	} else if (vcpu->arch.epr_needed) {
		kvmppc_set_epr(vcpu, run->epr.epr);
		vcpu->arch.epr_needed = 0;
#endif
	}

	r = kvmppc_vcpu_run(run, vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return r;
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	if (irq->irq == KVM_INTERRUPT_UNSET) {
		kvmppc_core_dequeue_external(vcpu);
		return 0;
	}

	kvmppc_core_queue_external(vcpu, irq);

	kvm_vcpu_kick(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_PPC_OSI:
		r = 0;
		vcpu->arch.osi_enabled = true;
		break;
	case KVM_CAP_PPC_PAPR:
		r = 0;
		vcpu->arch.papr_enabled = true;
		break;
	case KVM_CAP_PPC_EPR:
		r = 0;
		if (cap->args[0])
			vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
		else
			vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
		break;
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
		r = 0;
		vcpu->arch.watchdog_enabled = true;
		break;
#endif
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB: {
		struct kvm_config_tlb cfg;
		void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

		r = -EFAULT;
		if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
			break;

		r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
		break;
	}
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);

		fdput(f);
		break;
	}
#endif
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);

		fdput(f);
		break;
	}
#endif /* CONFIG_KVM_XICS */
	default:
		r = -EINVAL;
		break;
	}

	if (!r)
		r = kvmppc_sanity_check(vcpu);

	return r;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;
		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof(irq)))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		goto out;
	}

	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}

	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG:
	{
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			goto out;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}

#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_DIRTY_TLB: {
		struct kvm_dirty_tlb dirty;
		r = -EFAULT;
		if (copy_from_user(&dirty, argp, sizeof(dirty)))
			goto out;
		r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
		break;
	}
#endif
	default:
		r = -EINVAL;
	}

out:
	return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

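/*
 * Fill in the hypercall instruction template and feature flags that
 * userspace retrieves via KVM_PPC_GET_PVINFO and typically exposes to
 * the guest through the device tree.
 */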
static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
	u32 inst_nop = 0x60000000;
#ifdef CONFIG_KVM_BOOKE_HV
	u32 inst_sc1 = 0x44000022;
	pvinfo->hcall[0] = cpu_to_be32(inst_sc1);
	pvinfo->hcall[1] = cpu_to_be32(inst_nop);
	pvinfo->hcall[2] = cpu_to_be32(inst_nop);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#else
	u32 inst_lis = 0x3c000000;
	u32 inst_ori = 0x60000000;
	u32 inst_sc = 0x44000002;
	u32 inst_imm_mask = 0xffff;

	/*
	 * The hypercall to get into KVM from within guest context is as
	 * follows:
	 *
	 *    lis r0, KVM_SC_MAGIC_R0@h
	 *    ori r0, r0, KVM_SC_MAGIC_R0@l
	 *    sc
	 *    nop
	 */
	pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask));
	pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask));
	pvinfo->hcall[2] = cpu_to_be32(inst_sc);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#endif

	pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;

	return 0;
}

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
			  bool line_status)
{
	if (!irqchip_in_kernel(kvm))
		return -ENXIO;

	irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
					irq_event->irq, irq_event->level,
					line_status);
	return 0;
}

static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
				   struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	case KVM_CAP_PPC_ENABLE_HCALL: {
		unsigned long hcall = cap->args[0];

		r = -EINVAL;
		if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
		    cap->args[1] > 1)
			break;
		if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
			break;
		if (cap->args[1])
			set_bit(hcall / 4, kvm->arch.enabled_hcalls);
		else
			clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
		r = 0;
		break;
	}
#endif
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm __maybe_unused = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_PPC_GET_PVINFO: {
		struct kvm_ppc_pvinfo pvinfo;
		memset(&pvinfo, 0, sizeof(pvinfo));
		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
			r = -EFAULT;
			goto out;
		}

		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CREATE_SPAPR_TCE: {
		struct kvm_create_spapr_tce create_tce;

		r = -EFAULT;
		if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
			goto out;
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce);
		goto out;
	}
	case KVM_PPC_GET_SMMU_INFO: {
		struct kvm_ppc_smmu_info info;
		struct kvm *kvm = filp->private_data;

		memset(&info, 0, sizeof(info));
		r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_RTAS_DEFINE_TOKEN: {
		struct kvm *kvm = filp->private_data;

		r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
		break;
	}
	default: {
		struct kvm *kvm = filp->private_data;
		r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
	}
#else /* CONFIG_PPC_BOOK3S_64 */
	default:
		r = -ENOTTY;
#endif
	}
out:
	return r;
}

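/*
 * A simple bitmap allocator for logical partition IDs.  The usable
 * range is the smaller of KVMPPC_NR_LPIDS and the limit the subarch
 * passes to kvmppc_init_lpid().
 */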
static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
static unsigned long nr_lpids;

long kvmppc_alloc_lpid(void)
{
	long lpid;

	do {
		lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
		if (lpid >= nr_lpids) {
			pr_err("%s: No LPIDs free\n", __func__);
			return -ENOMEM;
		}
	} while (test_and_set_bit(lpid, lpid_inuse));

	return lpid;
}
EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);

void kvmppc_claim_lpid(long lpid)
{
	set_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_claim_lpid);

void kvmppc_free_lpid(long lpid)
{
	clear_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_free_lpid);

void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
	nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
	memset(lpid_inuse, 0, sizeof(lpid_inuse));
}
EXPORT_SYMBOL_GPL(kvmppc_init_lpid);

int kvm_arch_init(void *opaque)
{
	return 0;
}

EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr);