/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *    Paul Mackerras <paulus@samba.org>
 *
 * Description:
 * Functions relating to running KVM on Book 3S processors where
 * we don't have access to hypervisor mode, and we run the guest
 * in problem state (user mode).
 *
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/slab.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/switch_to.h>
#include <asm/firmware.h>
#include <asm/hvcall.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/miscdevice.h>

#include "book3s.h"

#define CREATE_TRACE_POINTS
#include "trace_pr.h"

/* #define EXIT_DEBUG */
/* #define DEBUG_EXT */

static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
			     ulong msr);
static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);

/* Some compatibility defines */
#ifdef CONFIG_PPC_BOOK3S_32
#define MSR_USER32 MSR_USER
#define MSR_USER64 MSR_USER
#define HW_PAGE_SIZE PAGE_SIZE
#endif

static bool kvmppc_is_split_real(struct kvm_vcpu *vcpu)
{
	ulong msr = kvmppc_get_msr(vcpu);
	return (msr & (MSR_IR|MSR_DR)) == MSR_DR;
}

static void kvmppc_fixup_split_real(struct kvm_vcpu *vcpu)
{
	ulong msr = kvmppc_get_msr(vcpu);
	ulong pc = kvmppc_get_pc(vcpu);

	/* We are in DR only split real mode */
	if ((msr & (MSR_IR|MSR_DR)) != MSR_DR)
		return;

	/* We have not fixed up the guest already */
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK)
		return;

	/* The code is in fixupable address space */
	if (pc & SPLIT_HACK_MASK)
		return;

	vcpu->arch.hflags |= BOOK3S_HFLAG_SPLIT_HACK;
	kvmppc_set_pc(vcpu, pc | SPLIT_HACK_OFFS);
}

void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu);

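/*
 * Scheduled in on a host CPU: restore the shadow SLB, disable Address
 * Translation for Interrupts (AIL) so interrupts land in our real-mode
 * handlers, and re-apply the split real mode fixup if needed.
 */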
static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
	svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
	svcpu->in_use = 0;
	svcpu_put(svcpu);
#endif

	/* Disable AIL if supported */
	if (cpu_has_feature(CPU_FTR_HVMODE) &&
	    cpu_has_feature(CPU_FTR_ARCH_207S))
		mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~LPCR_AIL);

	vcpu->cpu = smp_processor_id();
#ifdef CONFIG_PPC_BOOK3S_32
	current->thread.kvm_shadow_vcpu = vcpu->arch.shadow_vcpu;
#endif

	if (kvmppc_is_split_real(vcpu))
		kvmppc_fixup_split_real(vcpu);
}

static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	if (svcpu->in_use) {
		kvmppc_copy_from_svcpu(vcpu, svcpu);
	}
	memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
	to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
	svcpu_put(svcpu);
#endif

	if (kvmppc_is_split_real(vcpu))
		kvmppc_unfixup_split_real(vcpu);

	kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
	kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);

	/* Enable AIL if supported */
	if (cpu_has_feature(CPU_FTR_HVMODE) &&
	    cpu_has_feature(CPU_FTR_ARCH_207S))
		mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_AIL_3);

	vcpu->cpu = -1;
}

/* Copy data needed by real-mode code from vcpu to shadow vcpu */
void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
			  struct kvm_vcpu *vcpu)
{
	svcpu->gpr[0] = vcpu->arch.gpr[0];
	svcpu->gpr[1] = vcpu->arch.gpr[1];
	svcpu->gpr[2] = vcpu->arch.gpr[2];
	svcpu->gpr[3] = vcpu->arch.gpr[3];
	svcpu->gpr[4] = vcpu->arch.gpr[4];
	svcpu->gpr[5] = vcpu->arch.gpr[5];
	svcpu->gpr[6] = vcpu->arch.gpr[6];
	svcpu->gpr[7] = vcpu->arch.gpr[7];
	svcpu->gpr[8] = vcpu->arch.gpr[8];
	svcpu->gpr[9] = vcpu->arch.gpr[9];
	svcpu->gpr[10] = vcpu->arch.gpr[10];
	svcpu->gpr[11] = vcpu->arch.gpr[11];
	svcpu->gpr[12] = vcpu->arch.gpr[12];
	svcpu->gpr[13] = vcpu->arch.gpr[13];
	svcpu->cr = vcpu->arch.cr;
	svcpu->xer = vcpu->arch.xer;
	svcpu->ctr = vcpu->arch.ctr;
	svcpu->lr = vcpu->arch.lr;
	svcpu->pc = vcpu->arch.pc;
#ifdef CONFIG_PPC_BOOK3S_64
	svcpu->shadow_fscr = vcpu->arch.shadow_fscr;
#endif
	/*
	 * Now also save the current time base value. We use this
	 * to find the guest purr and spurr value.
	 */
	vcpu->arch.entry_tb = get_tb();
	vcpu->arch.entry_vtb = get_vtb();
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		vcpu->arch.entry_ic = mfspr(SPRN_IC);
	svcpu->in_use = true;
}

/* Copy data touched by real-mode code from shadow vcpu back to vcpu */
void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
			    struct kvmppc_book3s_shadow_vcpu *svcpu)
{
	/*
	 * vcpu_put would just call us again because in_use hasn't
	 * been updated yet.
	 */
	preempt_disable();

	/*
	 * Maybe we were already preempted and synced the svcpu from
	 * our preempt notifiers. Don't bother touching this svcpu then.
	 */
	if (!svcpu->in_use)
		goto out;

	vcpu->arch.gpr[0] = svcpu->gpr[0];
	vcpu->arch.gpr[1] = svcpu->gpr[1];
	vcpu->arch.gpr[2] = svcpu->gpr[2];
	vcpu->arch.gpr[3] = svcpu->gpr[3];
	vcpu->arch.gpr[4] = svcpu->gpr[4];
	vcpu->arch.gpr[5] = svcpu->gpr[5];
	vcpu->arch.gpr[6] = svcpu->gpr[6];
	vcpu->arch.gpr[7] = svcpu->gpr[7];
	vcpu->arch.gpr[8] = svcpu->gpr[8];
	vcpu->arch.gpr[9] = svcpu->gpr[9];
	vcpu->arch.gpr[10] = svcpu->gpr[10];
	vcpu->arch.gpr[11] = svcpu->gpr[11];
	vcpu->arch.gpr[12] = svcpu->gpr[12];
	vcpu->arch.gpr[13] = svcpu->gpr[13];
	vcpu->arch.cr = svcpu->cr;
	vcpu->arch.xer = svcpu->xer;
	vcpu->arch.ctr = svcpu->ctr;
	vcpu->arch.lr = svcpu->lr;
	vcpu->arch.pc = svcpu->pc;
	vcpu->arch.shadow_srr1 = svcpu->shadow_srr1;
	vcpu->arch.fault_dar = svcpu->fault_dar;
	vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
	vcpu->arch.last_inst = svcpu->last_inst;
#ifdef CONFIG_PPC_BOOK3S_64
	vcpu->arch.shadow_fscr = svcpu->shadow_fscr;
#endif
	/*
	 * Update purr and spurr using time base on exit.
	 */
	vcpu->arch.purr += get_tb() - vcpu->arch.entry_tb;
	vcpu->arch.spurr += get_tb() - vcpu->arch.entry_tb;
	vcpu->arch.vtb += get_vtb() - vcpu->arch.entry_vtb;
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		vcpu->arch.ic += mfspr(SPRN_IC) - vcpu->arch.entry_ic;
	svcpu->in_use = false;

out:
	preempt_enable();
}

static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
{
	int r = 1; /* Indicate we want to get back into the guest */

	/* We misuse TLB_FLUSH to indicate that we want to clear
	   all shadow cache entries */
	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
		kvmppc_mmu_pte_flush(vcpu, 0, 0);

	return r;
}

/************* MMU Notifiers *************/
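/*
 * Flush the shadow PTEs for all guest pages whose host virtual address
 * falls in [start, end), across all memslots and vcpus.
 */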
static void do_kvm_unmap_hva(struct kvm *kvm, unsigned long start,
			     unsigned long end)
{
	long i;
	struct kvm_vcpu *vcpu;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots) {
		unsigned long hva_start, hva_end;
		gfn_t gfn, gfn_end;

		hva_start = max(start, memslot->userspace_addr);
		hva_end = min(end, memslot->userspace_addr +
					(memslot->npages << PAGE_SHIFT));
		if (hva_start >= hva_end)
			continue;
		/*
		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
		 * {gfn, gfn+1, ..., gfn_end-1}.
		 */
		gfn = hva_to_gfn_memslot(hva_start, memslot);
		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
		kvm_for_each_vcpu(i, vcpu, kvm)
			kvmppc_mmu_pte_pflush(vcpu, gfn << PAGE_SHIFT,
					      gfn_end << PAGE_SHIFT);
	}
}

static int kvm_unmap_hva_pr(struct kvm *kvm, unsigned long hva)
{
	trace_kvm_unmap_hva(hva);

	do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);

	return 0;
}

static int kvm_unmap_hva_range_pr(struct kvm *kvm, unsigned long start,
				  unsigned long end)
{
	do_kvm_unmap_hva(kvm, start, end);

	return 0;
}

static int kvm_age_hva_pr(struct kvm *kvm, unsigned long start,
			  unsigned long end)
{
	/* XXX could be more clever ;) */
	return 0;
}

static int kvm_test_age_hva_pr(struct kvm *kvm, unsigned long hva)
{
	/* XXX could be more clever ;) */
	return 0;
}

static void kvm_set_spte_hva_pr(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	/* The page will get remapped properly on its next fault */
	do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);
}

/*****************************************/

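/*
 * Recompute the MSR the shadow (hardware) context really runs with:
 * keep the guest's mode bits, force on the bits the host needs
 * (PR, IR/DR, EE, ...) and pass through any FP/VMX/VSX bits the
 * guest currently owns.
 */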
static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
{
	ulong guest_msr = kvmppc_get_msr(vcpu);
	ulong smsr = guest_msr;

	/* Guest MSR values */
	smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE;
	/* Process MSR values */
	smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
	/* External providers the guest reserved */
	smsr |= (guest_msr & vcpu->arch.guest_owned_ext);
	/* 64-bit Process MSR values */
#ifdef CONFIG_PPC_BOOK3S_64
	smsr |= MSR_ISF | MSR_HV;
#endif
	vcpu->arch.shadow_msr = smsr;
}

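/*
 * Guest writes to the MSR funnel through here: mask off unsupported
 * bits, honour MSR_POW by blocking the vcpu, and flush/remap segments
 * whenever the translation mode (PR/IR/DR) changes.
 */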
static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
{
	ulong old_msr = kvmppc_get_msr(vcpu);

#ifdef EXIT_DEBUG
	printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
#endif

	msr &= to_book3s(vcpu)->msr_mask;
	kvmppc_set_msr_fast(vcpu, msr);
	kvmppc_recalc_shadow_msr(vcpu);

	if (msr & MSR_POW) {
		if (!vcpu->arch.pending_exceptions) {
			kvm_vcpu_block(vcpu);
			clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
			vcpu->stat.halt_wakeup++;

			/* Unset POW bit after we woke up */
			msr &= ~MSR_POW;
			kvmppc_set_msr_fast(vcpu, msr);
		}
	}

	if (kvmppc_is_split_real(vcpu))
		kvmppc_fixup_split_real(vcpu);
	else
		kvmppc_unfixup_split_real(vcpu);

	if ((kvmppc_get_msr(vcpu) & (MSR_PR|MSR_IR|MSR_DR)) !=
	    (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
		kvmppc_mmu_flush_segments(vcpu);
		kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));

		/* Preload magic page segment when in kernel mode */
		if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) {
			struct kvm_vcpu_arch *a = &vcpu->arch;

			if (msr & MSR_DR)
				kvmppc_mmu_map_segment(vcpu, a->magic_page_ea);
			else
				kvmppc_mmu_map_segment(vcpu, a->magic_page_pa);
		}
	}

	/*
	 * When switching from 32 to 64-bit, we may have a stale 32-bit
	 * magic page around, and we need to flush it. Typically the 32-bit
	 * magic page will be instantiated when calling into RTAS. Note: we
	 * assume that such a transition only happens while in kernel mode,
	 * ie, we never transition from user 32-bit to kernel 64-bit with
	 * a 32-bit magic page around.
	 */
	if (vcpu->arch.magic_page_pa &&
	    !(old_msr & MSR_PR) && !(old_msr & MSR_SF) && (msr & MSR_SF)) {
		/* going from RTAS to normal kernel code */
		kvmppc_mmu_pte_flush(vcpu, (uint32_t)vcpu->arch.magic_page_pa,
				     ~0xFFFUL);
	}

	/* Preload FPU if it's enabled */
	if (kvmppc_get_msr(vcpu) & MSR_FP)
		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
}

void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr)
{
	u32 host_pvr;

	vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
	vcpu->arch.pvr = pvr;
#ifdef CONFIG_PPC_BOOK3S_64
	if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
		kvmppc_mmu_book3s_64_init(vcpu);
		if (!to_book3s(vcpu)->hior_explicit)
			to_book3s(vcpu)->hior = 0xfff00000;
		to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
		vcpu->arch.cpu_type = KVM_CPU_3S_64;
	} else
#endif
	{
		kvmppc_mmu_book3s_32_init(vcpu);
		if (!to_book3s(vcpu)->hior_explicit)
			to_book3s(vcpu)->hior = 0;
		to_book3s(vcpu)->msr_mask = 0xffffffffULL;
		vcpu->arch.cpu_type = KVM_CPU_3S_32;
	}

	kvmppc_sanity_check(vcpu);

	/* If we are in hypervisor level on 970, we can tell the CPU to
	 * treat DCBZ as a 32-byte store */
	vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32;
	if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) &&
	    !strcmp(cur_cpu_spec->platform, "ppc970"))
		vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;

	/* Cell performs badly if MSR_FEx are set. So let's hope nobody
	   really needs them in a VM on Cell and force disable them. */
	if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be"))
		to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);

	/*
	 * If they're asking for POWER6 or later, set the flag
	 * indicating that we can do multiple large page sizes
	 * and 1TB segments.
	 * Also set the flag that indicates that tlbie has the large
	 * page bit in the RB operand instead of the instruction.
	 */
	switch (PVR_VER(pvr)) {
	case PVR_POWER6:
	case PVR_POWER7:
	case PVR_POWER7p:
	case PVR_POWER8:
		vcpu->arch.hflags |= BOOK3S_HFLAG_MULTI_PGSIZE |
			BOOK3S_HFLAG_NEW_TLBIE;
		break;
	}

#ifdef CONFIG_PPC_BOOK3S_32
	/* 32 bit Book3S always has 32 byte dcbz */
	vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
#endif

	/* On some CPUs we can execute paired single operations natively */
	asm ( "mfpvr %0" : "=r"(host_pvr));
	switch (host_pvr) {
	case 0x00080200:	/* lonestar 2.0 */
	case 0x00088202:	/* lonestar 2.2 */
	case 0x70000100:	/* gekko 1.0 */
	case 0x00080100:	/* gekko 2.0 */
	case 0x00083203:	/* gekko 2.3a */
	case 0x00083213:	/* gekko 2.3b */
	case 0x00083204:	/* gekko 2.4 */
	case 0x00083214:	/* gekko 2.4e (8SE) - retail HW2 */
	case 0x00087200:	/* broadway */
		vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS;
		/* Enable HID2.PSE - in case we need it later */
		mtspr(SPRN_HID2_GEKKO, mfspr(SPRN_HID2_GEKKO) | (1 << 29));
	}
}

/* Book3s_32 CPUs always have a 32-byte cache line size, which Linux assumes. To
 * make Book3s_32 Linux work on Book3s_64, we have to make sure we trap dcbz to
 * emulate a 32-byte dcbz length.
 *
 * The Book3s_64 inventors also realized this case and implemented a special bit
 * in the HID5 register, which is a hypervisor resource. Thus we can't use it.
 *
 * My approach here is to patch the dcbz instruction on executing pages.
 */
static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
	struct page *hpage;
	u64 hpage_offset;
	u32 *page;
	int i;

	hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
	if (is_error_page(hpage))
		return;

	hpage_offset = pte->raddr & ~PAGE_MASK;
	hpage_offset &= ~0xFFFULL;
	hpage_offset /= 4;

	get_page(hpage);
	page = kmap_atomic(hpage);

	/* patch dcbz into reserved instruction, so we trap */
	for (i = hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
		if ((be32_to_cpu(page[i]) & 0xff0007ff) == INS_DCBZ)
			page[i] &= cpu_to_be32(0xfffffff7);

	kunmap_atomic(page);
	put_page(hpage);
}

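/*
 * Does the guest physical address hit RAM we know about, i.e. a memslot
 * or the magic page?  If not, the access must be treated as MMIO.
 */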
static int kvmppc_visible_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	ulong mp_pa = vcpu->arch.magic_page_pa;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF))
		mp_pa = (uint32_t)mp_pa;

	gpa &= ~0xFFFULL;
	if (unlikely(mp_pa) && unlikely((mp_pa & KVM_PAM) == (gpa & KVM_PAM))) {
		return 1;
	}

	return kvm_is_visible_gfn(vcpu->kvm, gpa >> PAGE_SHIFT);
}

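/*
 * Handle a guest instruction/data storage fault: translate through the
 * guest MMU (or fabricate a 1:1 mapping in real mode), then either
 * reflect the fault into the guest, map the page on the host, or fall
 * back to MMIO emulation.
 */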
int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
			    ulong eaddr, int vec)
{
	bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
	bool iswrite = false;
	int r = RESUME_GUEST;
	int relocated;
	int page_found = 0;
	struct kvmppc_pte pte;
	bool is_mmio = false;
	bool dr = (kvmppc_get_msr(vcpu) & MSR_DR) ? true : false;
	bool ir = (kvmppc_get_msr(vcpu) & MSR_IR) ? true : false;
	u64 vsid;

	relocated = data ? dr : ir;
	if (data && (vcpu->arch.fault_dsisr & DSISR_ISSTORE))
		iswrite = true;

	/* Resolve real address if translation turned on */
	if (relocated) {
		page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data, iswrite);
	} else {
		pte.may_execute = true;
		pte.may_read = true;
		pte.may_write = true;
		pte.raddr = eaddr & KVM_PAM;
		pte.eaddr = eaddr;
		pte.vpage = eaddr >> 12;
		pte.page_size = MMU_PAGE_64K;
	}

	switch (kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) {
	case 0:
		pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
		break;
	case MSR_DR:
		if (!data &&
		    (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
		    ((pte.raddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS))
			pte.raddr &= ~SPLIT_HACK_MASK;
		/* fall through */
	case MSR_IR:
		vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);

		if ((kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) == MSR_DR)
			pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
		else
			pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
		pte.vpage |= vsid;

		if (vsid == -1)
			page_found = -EINVAL;
		break;
	}

	if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
	    (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
		/*
		 * If we do the dcbz hack, we have to NX on every execution,
		 * so we can patch the executing code. This renders our guest
		 * NX-less.
		 */
		pte.may_execute = !data;
	}

	if (page_found == -ENOENT) {
		/* Page not found in guest PTE entries */
		u64 ssrr1 = vcpu->arch.shadow_srr1;
		u64 msr = kvmppc_get_msr(vcpu);
		kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
		kvmppc_set_dsisr(vcpu, vcpu->arch.fault_dsisr);
		kvmppc_set_msr_fast(vcpu, msr | (ssrr1 & 0xf8000000ULL));
		kvmppc_book3s_queue_irqprio(vcpu, vec);
	} else if (page_found == -EPERM) {
		/* Storage protection */
		u32 dsisr = vcpu->arch.fault_dsisr;
		u64 ssrr1 = vcpu->arch.shadow_srr1;
		u64 msr = kvmppc_get_msr(vcpu);
		kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
		dsisr = (dsisr & ~DSISR_NOHPTE) | DSISR_PROTFAULT;
		kvmppc_set_dsisr(vcpu, dsisr);
		kvmppc_set_msr_fast(vcpu, msr | (ssrr1 & 0xf8000000ULL));
		kvmppc_book3s_queue_irqprio(vcpu, vec);
	} else if (page_found == -EINVAL) {
		/* Page not found in guest SLB */
		kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
		kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
	} else if (!is_mmio &&
		   kvmppc_visible_gpa(vcpu, pte.raddr)) {
		if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) {
			/*
			 * There is already a host HPTE there, presumably
			 * a read-only one for a page the guest thinks
			 * is writable, so get rid of it first.
			 */
			kvmppc_mmu_unmap_page(vcpu, &pte);
		}
		/* The guest's PTE is not mapped yet. Map on the host */
		if (kvmppc_mmu_map_page(vcpu, &pte, iswrite) == -EIO) {
			/* Exit KVM if mapping failed */
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			return RESUME_HOST;
		}
		if (data)
			vcpu->stat.sp_storage++;
		else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
			kvmppc_patch_dcbz(vcpu, &pte);
	} else {
		/* MMIO */
		vcpu->stat.mmio_exits++;
		vcpu->arch.paddr_accessed = pte.raddr;
		vcpu->arch.vaddr_accessed = pte.eaddr;
		r = kvmppc_emulate_mmio(run, vcpu);
		if (r == RESUME_HOST_NV)
			r = RESUME_HOST;
	}

	return r;
}

/* Give up external provider (FPU, Altivec, VSX) */
void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
{
	struct thread_struct *t = &current->thread;

	/*
	 * VSX instructions can access FP and vector registers, so if
	 * we are giving up VSX, make sure we give up FP and VMX as well.
	 */
	if (msr & MSR_VSX)
		msr |= MSR_FP | MSR_VEC;

	msr &= vcpu->arch.guest_owned_ext;
	if (!msr)
		return;

#ifdef DEBUG_EXT
	printk(KERN_INFO "Giving up ext 0x%lx\n", msr);
#endif

	if (msr & MSR_FP) {
		/*
		 * Note that on CPUs with VSX, giveup_fpu stores
		 * both the traditional FP registers and the added VSX
		 * registers into thread.fp_state.fpr[].
		 */
		if (t->regs->msr & MSR_FP)
			giveup_fpu(current);
		t->fp_save_area = NULL;
	}

#ifdef CONFIG_ALTIVEC
	if (msr & MSR_VEC) {
		if (current->thread.regs->msr & MSR_VEC)
			giveup_altivec(current);
		t->vr_save_area = NULL;
	}
#endif

	vcpu->arch.guest_owned_ext &= ~(msr | MSR_VSX);
	kvmppc_recalc_shadow_msr(vcpu);
}

/* Give up facility (TAR / EBB / DSCR) */
static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac)
{
#ifdef CONFIG_PPC_BOOK3S_64
	if (!(vcpu->arch.shadow_fscr & (1ULL << fac))) {
		/* Facility not available to the guest, ignore giveup request */
		return;
	}

	switch (fac) {
	case FSCR_TAR_LG:
		vcpu->arch.tar = mfspr(SPRN_TAR);
		mtspr(SPRN_TAR, current->thread.tar);
		vcpu->arch.shadow_fscr &= ~FSCR_TAR;
		break;
	}
#endif
}

/* Handle external providers (FPU, Altivec, VSX) */
static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
			     ulong msr)
{
	struct thread_struct *t = &current->thread;

	/* When we have paired singles, we emulate in software */
	if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
		return RESUME_GUEST;

	if (!(kvmppc_get_msr(vcpu) & msr)) {
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		return RESUME_GUEST;
	}

	if (msr == MSR_VSX) {
		/* No VSX?  Give an illegal instruction interrupt */
#ifdef CONFIG_VSX
		if (!cpu_has_feature(CPU_FTR_VSX))
#endif
		{
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			return RESUME_GUEST;
		}

		/*
		 * We have to load up all the FP and VMX registers before
		 * we can let the guest use VSX instructions.
		 */
		msr = MSR_FP | MSR_VEC | MSR_VSX;
	}

	/* See if we already own all the ext(s) needed */
	msr &= ~vcpu->arch.guest_owned_ext;
	if (!msr)
		return RESUME_GUEST;

#ifdef DEBUG_EXT
	printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
#endif

	if (msr & MSR_FP) {
		preempt_disable();
		enable_kernel_fp();
		load_fp_state(&vcpu->arch.fp);
		t->fp_save_area = &vcpu->arch.fp;
		preempt_enable();
	}

	if (msr & MSR_VEC) {
#ifdef CONFIG_ALTIVEC
		preempt_disable();
		enable_kernel_altivec();
		load_vr_state(&vcpu->arch.vr);
		t->vr_save_area = &vcpu->arch.vr;
		preempt_enable();
#endif
	}

	t->regs->msr |= msr;
	vcpu->arch.guest_owned_ext |= msr;
	kvmppc_recalc_shadow_msr(vcpu);

	return RESUME_GUEST;
}

/*
 * Kernel code using FP or VMX could have flushed guest state to
 * the thread_struct; if so, get it back now.
 */
static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
{
	unsigned long lost_ext;

	lost_ext = vcpu->arch.guest_owned_ext & ~current->thread.regs->msr;
	if (!lost_ext)
		return;

	if (lost_ext & MSR_FP) {
		preempt_disable();
		enable_kernel_fp();
		load_fp_state(&vcpu->arch.fp);
		preempt_enable();
	}
#ifdef CONFIG_ALTIVEC
	if (lost_ext & MSR_VEC) {
		preempt_disable();
		enable_kernel_altivec();
		load_vr_state(&vcpu->arch.vr);
		preempt_enable();
	}
#endif
	current->thread.regs->msr |= lost_ext;
}

#ifdef CONFIG_PPC_BOOK3S_64

static void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac)
{
	/* Inject the Interrupt Cause field and trigger a guest interrupt */
	vcpu->arch.fscr &= ~(0xffULL << 56);
	vcpu->arch.fscr |= (fac << 56);
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FAC_UNAVAIL);
}

static void kvmppc_emulate_fac(struct kvm_vcpu *vcpu, ulong fac)
{
	enum emulation_result er = EMULATE_FAIL;

	if (!(kvmppc_get_msr(vcpu) & MSR_PR))
		er = kvmppc_emulate_instruction(vcpu->run, vcpu);

	if ((er != EMULATE_DONE) && (er != EMULATE_AGAIN)) {
		/* Couldn't emulate, trigger interrupt in guest */
		kvmppc_trigger_fac_interrupt(vcpu, fac);
	}
}

/* Enable facilities (TAR, EBB, DSCR) for the guest */
static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac)
{
	bool guest_fac_enabled;
	BUG_ON(!cpu_has_feature(CPU_FTR_ARCH_207S));

	/*
	 * Not every facility is enabled by FSCR bits, check whether the
	 * guest has this facility enabled at all.
	 */
	switch (fac) {
	case FSCR_TAR_LG:
	case FSCR_EBB_LG:
		guest_fac_enabled = (vcpu->arch.fscr & (1ULL << fac));
		break;
	case FSCR_TM_LG:
		guest_fac_enabled = kvmppc_get_msr(vcpu) & MSR_TM;
		break;
	default:
		guest_fac_enabled = false;
		break;
	}

	if (!guest_fac_enabled) {
		/* Facility not enabled by the guest */
		kvmppc_trigger_fac_interrupt(vcpu, fac);
		return RESUME_GUEST;
	}

	switch (fac) {
	case FSCR_TAR_LG:
		/* TAR switching isn't lazy in Linux yet */
		current->thread.tar = mfspr(SPRN_TAR);
		mtspr(SPRN_TAR, vcpu->arch.tar);
		vcpu->arch.shadow_fscr |= FSCR_TAR;
		break;
	default:
		kvmppc_emulate_fac(vcpu, fac);
		break;
	}

	return RESUME_GUEST;
}

void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr)
{
	if ((vcpu->arch.fscr & FSCR_TAR) && !(fscr & FSCR_TAR)) {
		/* TAR got dropped, drop it in shadow too */
		kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
	}
	vcpu->arch.fscr = fscr;
}
#endif

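/*
 * Main exit dispatcher: for each interrupt vector, decide whether the
 * exit can be resolved here (page faults, lazy FP/VMX/VSX and facility
 * loads, instruction emulation, hypercalls) or must be reflected into
 * the guest or passed up to userspace.
 */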
int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
			  unsigned int exit_nr)
{
	int r = RESUME_HOST;
	int s;

	vcpu->stat.sum_exits++;

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	/* We get here with MSR.EE=1 */

	trace_kvm_exit(exit_nr, vcpu);
	kvm_guest_exit();

	switch (exit_nr) {
	case BOOK3S_INTERRUPT_INST_STORAGE:
	{
		ulong shadow_srr1 = vcpu->arch.shadow_srr1;
		vcpu->stat.pf_instruc++;

		if (kvmppc_is_split_real(vcpu))
			kvmppc_fixup_split_real(vcpu);

#ifdef CONFIG_PPC_BOOK3S_32
		/* We set segments as unused segments when invalidating them. So
		 * treat the respective fault as segment fault. */
		{
			struct kvmppc_book3s_shadow_vcpu *svcpu;
			u32 sr;

			svcpu = svcpu_get(vcpu);
			sr = svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT];
			svcpu_put(svcpu);
			if (sr == SR_INVALID) {
				kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
				r = RESUME_GUEST;
				break;
			}
		}
#endif

		/* only care about PTEG not found errors, but leave NX alone */
		if (shadow_srr1 & 0x40000000) {
			int idx = srcu_read_lock(&vcpu->kvm->srcu);
			r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
			srcu_read_unlock(&vcpu->kvm->srcu, idx);
			vcpu->stat.sp_instruc++;
		} else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			  (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
			/*
			 * XXX If we do the dcbz hack we use the NX bit to flush&patch the page,
			 *     so we can't use the NX bit inside the guest. Let's cross our fingers,
			 *     that no guest that needs the dcbz hack does NX.
			 */
			kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
			r = RESUME_GUEST;
		} else {
			u64 msr = kvmppc_get_msr(vcpu);
			msr |= shadow_srr1 & 0x58000000;
			kvmppc_set_msr_fast(vcpu, msr);
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	}
	case BOOK3S_INTERRUPT_DATA_STORAGE:
	{
		ulong dar = kvmppc_get_fault_dar(vcpu);
		u32 fault_dsisr = vcpu->arch.fault_dsisr;
		vcpu->stat.pf_storage++;

#ifdef CONFIG_PPC_BOOK3S_32
		/* We set segments as unused segments when invalidating them. So
		 * treat the respective fault as segment fault. */
		{
			struct kvmppc_book3s_shadow_vcpu *svcpu;
			u32 sr;

			svcpu = svcpu_get(vcpu);
			sr = svcpu->sr[dar >> SID_SHIFT];
			svcpu_put(svcpu);
			if (sr == SR_INVALID) {
				kvmppc_mmu_map_segment(vcpu, dar);
				r = RESUME_GUEST;
				break;
			}
		}
#endif

		/*
		 * We need to handle missing shadow PTEs, and
		 * protection faults due to us mapping a page read-only
		 * when the guest thinks it is writable.
		 */
		if (fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT)) {
			int idx = srcu_read_lock(&vcpu->kvm->srcu);
			r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
			srcu_read_unlock(&vcpu->kvm->srcu, idx);
		} else {
			kvmppc_set_dar(vcpu, dar);
			kvmppc_set_dsisr(vcpu, fault_dsisr);
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	}
	case BOOK3S_INTERRUPT_DATA_SEGMENT:
		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
			kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_DATA_SEGMENT);
		}
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_INST_SEGMENT:
		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) {
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_INST_SEGMENT);
		}
		r = RESUME_GUEST;
		break;
	/* We're good on these - the host merely wanted to get our attention */
	case BOOK3S_INTERRUPT_DECREMENTER:
	case BOOK3S_INTERRUPT_HV_DECREMENTER:
	case BOOK3S_INTERRUPT_DOORBELL:
	case BOOK3S_INTERRUPT_H_DOORBELL:
		vcpu->stat.dec_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_EXTERNAL:
	case BOOK3S_INTERRUPT_EXTERNAL_LEVEL:
	case BOOK3S_INTERRUPT_EXTERNAL_HV:
		vcpu->stat.ext_intr_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PERFMON:
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PROGRAM:
	case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
	{
		enum emulation_result er;
		ulong flags;
		u32 last_inst;
		int emul;

program_interrupt:
		flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;

		emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
		if (emul != EMULATE_DONE) {
			r = RESUME_GUEST;
			break;
		}

		if (kvmppc_get_msr(vcpu) & MSR_PR) {
#ifdef EXIT_DEBUG
			pr_info("Userspace triggered 0x700 exception at\n 0x%lx (0x%x)\n",
				kvmppc_get_pc(vcpu), last_inst);
#endif
			if ((last_inst & 0xff0007ff) !=
			    (INS_DCBZ & 0xfffffff7)) {
				kvmppc_core_queue_program(vcpu, flags);
				r = RESUME_GUEST;
				break;
			}
		}

		vcpu->stat.emulated_inst_exits++;
		er = kvmppc_emulate_instruction(run, vcpu);
		switch (er) {
		case EMULATE_DONE:
			r = RESUME_GUEST_NV;
			break;
		case EMULATE_AGAIN:
			r = RESUME_GUEST;
			break;
		case EMULATE_FAIL:
			printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
			       __func__, kvmppc_get_pc(vcpu), last_inst);
			kvmppc_core_queue_program(vcpu, flags);
			r = RESUME_GUEST;
			break;
		case EMULATE_DO_MMIO:
			run->exit_reason = KVM_EXIT_MMIO;
			r = RESUME_HOST_NV;
			break;
		case EMULATE_EXIT_USER:
			r = RESUME_HOST_NV;
			break;
		default:
			BUG();
		}
		break;
	}
	case BOOK3S_INTERRUPT_SYSCALL:
	{
		u32 last_sc;
		int emul;

		/* Get last sc for papr */
		if (vcpu->arch.papr_enabled) {
			/* The sc instruction points SRR0 to the next inst */
			emul = kvmppc_get_last_inst(vcpu, INST_SC, &last_sc);
			if (emul != EMULATE_DONE) {
				kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) - 4);
				r = RESUME_GUEST;
				break;
			}
		}

		if (vcpu->arch.papr_enabled &&
		    (last_sc == 0x44000022) &&
		    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
			/* SC 1 papr hypercalls */
			ulong cmd = kvmppc_get_gpr(vcpu, 3);
			int i;

#ifdef CONFIG_PPC_BOOK3S_64
			if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) {
				r = RESUME_GUEST;
				break;
			}
#endif

			run->papr_hcall.nr = cmd;
			for (i = 0; i < 9; ++i) {
				ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
				run->papr_hcall.args[i] = gpr;
			}
			run->exit_reason = KVM_EXIT_PAPR_HCALL;
			vcpu->arch.hcall_needed = 1;
			r = RESUME_HOST;
		} else if (vcpu->arch.osi_enabled &&
		    (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) &&
		    (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) {
			/* MOL hypercalls */
			u64 *gprs = run->osi.gprs;
			int i;

			run->exit_reason = KVM_EXIT_OSI;
			for (i = 0; i < 32; i++)
				gprs[i] = kvmppc_get_gpr(vcpu, i);
			vcpu->arch.osi_needed = 1;
			r = RESUME_HOST_NV;
		} else if (!(kvmppc_get_msr(vcpu) & MSR_PR) &&
		    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
			/* KVM PV hypercalls */
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
			r = RESUME_GUEST;
		} else {
			/* Guest syscalls */
			vcpu->stat.syscall_exits++;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	}
	case BOOK3S_INTERRUPT_FP_UNAVAIL:
	case BOOK3S_INTERRUPT_ALTIVEC:
	case BOOK3S_INTERRUPT_VSX:
	{
		int ext_msr = 0;
		int emul;
		u32 last_inst;

		if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE) {
			/* Do paired single instruction emulation */
			emul = kvmppc_get_last_inst(vcpu, INST_GENERIC,
						    &last_inst);
			if (emul == EMULATE_DONE)
				goto program_interrupt;
			else
				r = RESUME_GUEST;

			break;
		}

		/* Enable external provider */
		switch (exit_nr) {
		case BOOK3S_INTERRUPT_FP_UNAVAIL:
			ext_msr = MSR_FP;
			break;

		case BOOK3S_INTERRUPT_ALTIVEC:
			ext_msr = MSR_VEC;
			break;

		case BOOK3S_INTERRUPT_VSX:
			ext_msr = MSR_VSX;
			break;
		}

		r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
		break;
	}
	case BOOK3S_INTERRUPT_ALIGNMENT:
	{
		u32 last_inst;
		int emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);

		if (emul == EMULATE_DONE) {
			u32 dsisr;
			u64 dar;

			dsisr = kvmppc_alignment_dsisr(vcpu, last_inst);
			dar = kvmppc_alignment_dar(vcpu, last_inst);

			kvmppc_set_dsisr(vcpu, dsisr);
			kvmppc_set_dar(vcpu, dar);

			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		}
		r = RESUME_GUEST;
		break;
	}
#ifdef CONFIG_PPC_BOOK3S_64
	case BOOK3S_INTERRUPT_FAC_UNAVAIL:
		kvmppc_handle_fac(vcpu, vcpu->arch.shadow_fscr >> 56);
		r = RESUME_GUEST;
		break;
#endif
	case BOOK3S_INTERRUPT_MACHINE_CHECK:
	case BOOK3S_INTERRUPT_TRACE:
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		r = RESUME_GUEST;
		break;
	default:
	{
		ulong shadow_srr1 = vcpu->arch.shadow_srr1;
		/* Ugh - bork here! What did we get? */
		printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
			exit_nr, kvmppc_get_pc(vcpu), shadow_srr1);
		r = RESUME_HOST;
		BUG();
		break;
	}
	}

	if (!(r & RESUME_HOST)) {
		/* To avoid clobbering exit_reason, only check for signals if
		 * we aren't already exiting to userspace for some other
		 * reason. */

		/*
		 * Interrupts could be timers for the guest which we have to
		 * inject again, so let's postpone them until we're in the guest
		 * and if we really did time things so badly, then we just exit
		 * again due to a host external interrupt.
		 */
		s = kvmppc_prepare_to_enter(vcpu);
		if (s <= 0)
			r = s;
		else {
			/* interrupts now hard-disabled */
			kvmppc_fix_ee_before_entry();
		}

		kvmppc_handle_lost_ext(vcpu);
	}

	trace_kvm_book3s_reenter(r, vcpu);

	return r;
}

static int kvm_arch_vcpu_ioctl_get_sregs_pr(struct kvm_vcpu *vcpu,
					    struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int i;

	sregs->pvr = vcpu->arch.pvr;

	sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
		for (i = 0; i < 64; i++) {
			sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige | i;
			sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
		}
	} else {
		for (i = 0; i < 16; i++)
			sregs->u.s.ppc32.sr[i] = kvmppc_get_sr(vcpu, i);

		for (i = 0; i < 8; i++) {
			sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
			sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
		}
	}

	return 0;
}

static int kvm_arch_vcpu_ioctl_set_sregs_pr(struct kvm_vcpu *vcpu,
					    struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int i;

	kvmppc_set_pvr_pr(vcpu, sregs->pvr);

	vcpu3s->sdr1 = sregs->u.s.sdr1;
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
		for (i = 0; i < 64; i++) {
			vcpu->arch.mmu.slbmte(vcpu, sregs->u.s.ppc64.slb[i].slbv,
					      sregs->u.s.ppc64.slb[i].slbe);
		}
	} else {
		for (i = 0; i < 16; i++) {
			vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]);
		}
		for (i = 0; i < 8; i++) {
			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false,
				       (u32)sregs->u.s.ppc32.ibat[i]);
			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true,
				       (u32)(sregs->u.s.ppc32.ibat[i] >> 32));
			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false,
				       (u32)sregs->u.s.ppc32.dbat[i]);
			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true,
				       (u32)(sregs->u.s.ppc32.dbat[i] >> 32));
		}
	}

	/* Flush the MMU after messing with the segments */
	kvmppc_mmu_pte_flush(vcpu, 0, 0);

	return 0;
}

static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
				 union kvmppc_one_reg *val)
{
	int r = 0;

	switch (id) {
	case KVM_REG_PPC_DEBUG_INST:
		*val = get_reg_val(id, KVMPPC_INST_SW_BREAKPOINT);
		break;
	case KVM_REG_PPC_HIOR:
		*val = get_reg_val(id, to_book3s(vcpu)->hior);
		break;
	case KVM_REG_PPC_LPCR:
	case KVM_REG_PPC_LPCR_64:
		/*
		 * We are only interested in the LPCR_ILE bit
		 */
		if (vcpu->arch.intr_msr & MSR_LE)
			*val = get_reg_val(id, LPCR_ILE);
		else
			*val = get_reg_val(id, 0);
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

static void kvmppc_set_lpcr_pr(struct kvm_vcpu *vcpu, u64 new_lpcr)
{
	if (new_lpcr & LPCR_ILE)
		vcpu->arch.intr_msr |= MSR_LE;
	else
		vcpu->arch.intr_msr &= ~MSR_LE;
}

static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
				 union kvmppc_one_reg *val)
{
	int r = 0;

	switch (id) {
	case KVM_REG_PPC_HIOR:
		to_book3s(vcpu)->hior = set_reg_val(id, *val);
		to_book3s(vcpu)->hior_explicit = true;
		break;
	case KVM_REG_PPC_LPCR:
	case KVM_REG_PPC_LPCR_64:
		kvmppc_set_lpcr_pr(vcpu, set_reg_val(id, *val));
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

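/*
 * Allocate and set up a PR vcpu: the book3s state, the (32-bit only)
 * shadow vcpu, the shared page used to communicate with the guest,
 * a default PVR, and the shadow MMU.
 */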
static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
						   unsigned int id)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s;
	struct kvm_vcpu *vcpu;
	int err = -ENOMEM;
	unsigned long p;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s));
	if (!vcpu_book3s)
		goto free_vcpu;
	vcpu->arch.book3s = vcpu_book3s;

#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	vcpu->arch.shadow_vcpu =
		kzalloc(sizeof(*vcpu->arch.shadow_vcpu), GFP_KERNEL);
	if (!vcpu->arch.shadow_vcpu)
		goto free_vcpu3s;
#endif

	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		goto free_shadow_vcpu;

	err = -ENOMEM;
	p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
	if (!p)
		goto uninit_vcpu;
	vcpu->arch.shared = (void *)p;
#ifdef CONFIG_PPC_BOOK3S_64
	/* Always start the shared struct in native endian mode */
#ifdef __BIG_ENDIAN__
	vcpu->arch.shared_big_endian = true;
#else
	vcpu->arch.shared_big_endian = false;
#endif

	/*
	 * Default to the same as the host if we're on a sufficiently
	 * recent machine that we have 1TB segments;
	 * otherwise default to PPC970FX.
	 */
	vcpu->arch.pvr = 0x3C0301;
	if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
		vcpu->arch.pvr = mfspr(SPRN_PVR);
	vcpu->arch.intr_msr = MSR_SF;
#else
	/* default to book3s_32 (750) */
	vcpu->arch.pvr = 0x84202;
#endif
	kvmppc_set_pvr_pr(vcpu, vcpu->arch.pvr);
	vcpu->arch.slb_nr = 64;

	vcpu->arch.shadow_msr = MSR_USER64 & ~MSR_LE;

	err = kvmppc_mmu_init(vcpu);
	if (err < 0)
		goto free_shared_page;

	return vcpu;

free_shared_page:
	free_page((unsigned long)vcpu->arch.shared);
uninit_vcpu:
	kvm_vcpu_uninit(vcpu);
free_shadow_vcpu:
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	kfree(vcpu->arch.shadow_vcpu);
free_vcpu3s:
#endif
	vfree(vcpu_book3s);
free_vcpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(err);
}

static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);

	free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
	kvm_vcpu_uninit(vcpu);
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	kfree(vcpu->arch.shadow_vcpu);
#endif
	vfree(vcpu_book3s);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int ret;
#ifdef CONFIG_ALTIVEC
	unsigned long uninitialized_var(vrsave);
#endif

	/* Check if we can run the vcpu at all */
	if (!vcpu->arch.sane) {
		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Interrupts could be timers for the guest which we have to inject
	 * again, so let's postpone them until we're in the guest and if we
	 * really did time things so badly, then we just exit again due to
	 * a host external interrupt.
	 */
	ret = kvmppc_prepare_to_enter(vcpu);
	if (ret <= 0)
		goto out;
	/* interrupts now hard-disabled */

	/* Save FPU state in thread_struct */
	if (current->thread.regs->msr & MSR_FP)
		giveup_fpu(current);

#ifdef CONFIG_ALTIVEC
	/* Save Altivec state in thread_struct */
	if (current->thread.regs->msr & MSR_VEC)
		giveup_altivec(current);
#endif

#ifdef CONFIG_VSX
	/* Save VSX state in thread_struct */
	if (current->thread.regs->msr & MSR_VSX)
		__giveup_vsx(current);
#endif

	/* Preload FPU if it's enabled */
	if (kvmppc_get_msr(vcpu) & MSR_FP)
		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);

	kvmppc_fix_ee_before_entry();

	ret = __kvmppc_vcpu_run(kvm_run, vcpu);

	/* No need for kvm_guest_exit. It's done in handle_exit.
	   We also get here with interrupts enabled. */

	/* Make sure we save the guest FPU/Altivec/VSX state */
	kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);

	/* Make sure we save the guest TAR/EBB/DSCR state */
	kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);

out:
	vcpu->mode = OUTSIDE_GUEST_MODE;
	return ret;
}

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
static int kvm_vm_ioctl_get_dirty_log_pr(struct kvm *kvm,
					 struct kvm_dirty_log *log)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	struct kvm_vcpu *vcpu;
	ulong ga, ga_end;
	int is_dirty = 0;
	int r;
	unsigned long n;

	mutex_lock(&kvm->slots_lock);

	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* If nothing is dirty, don't bother messing with page tables. */
	if (is_dirty) {
		slots = kvm_memslots(kvm);
		memslot = id_to_memslot(slots, log->slot);

		ga = memslot->base_gfn << PAGE_SHIFT;
		ga_end = ga + (memslot->npages << PAGE_SHIFT);

		kvm_for_each_vcpu(n, vcpu, kvm)
			kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);

		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}

	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

static void kvmppc_core_flush_memslot_pr(struct kvm *kvm,
					 struct kvm_memory_slot *memslot)
{
	return;
}

static int kvmppc_core_prepare_memory_region_pr(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				const struct kvm_userspace_memory_region *mem)
{
	return 0;
}

static void kvmppc_core_commit_memory_region_pr(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new)
{
	return;
}

static void kvmppc_core_free_memslot_pr(struct kvm_memory_slot *free,
					struct kvm_memory_slot *dont)
{
	return;
}

static int kvmppc_core_create_memslot_pr(struct kvm_memory_slot *slot,
					 unsigned long npages)
{
	return 0;
}


#ifdef CONFIG_PPC64
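/*
 * Fill in the MMU geometry (SLB size plus the supported segment and
 * actual page size encodings) reported via KVM_PPC_GET_SMMU_INFO.
 */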
static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
					 struct kvm_ppc_smmu_info *info)
{
	long int i;
	struct kvm_vcpu *vcpu;

	info->flags = 0;

	/* SLB is always 64 entries */
	info->slb_size = 64;

	/* Standard 4k base page size segment */
	info->sps[0].page_shift = 12;
	info->sps[0].slb_enc = 0;
	info->sps[0].enc[0].page_shift = 12;
	info->sps[0].enc[0].pte_enc = 0;

	/*
	 * 64k large page size.
	 * We only want to put this in if the CPUs we're emulating
	 * support it, but unfortunately we don't have a vcpu easily
	 * to hand here to test. Just pick the first vcpu, and if
	 * that doesn't exist yet, report the minimum capability,
	 * i.e., no 64k pages.
	 * 1T segment support goes along with 64k pages.
	 */
	i = 1;
	vcpu = kvm_get_vcpu(kvm, 0);
	if (vcpu && (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) {
		info->flags = KVM_PPC_1T_SEGMENTS;
		info->sps[i].page_shift = 16;
		info->sps[i].slb_enc = SLB_VSID_L | SLB_VSID_LP_01;
		info->sps[i].enc[0].page_shift = 16;
		info->sps[i].enc[0].pte_enc = 1;
		++i;
	}

	/* Standard 16M large page size segment */
	info->sps[i].page_shift = 24;
	info->sps[i].slb_enc = SLB_VSID_L;
	info->sps[i].enc[0].page_shift = 24;
	info->sps[i].enc[0].pte_enc = 0;

	return 0;
}
#else
static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
					 struct kvm_ppc_smmu_info *info)
{
	/* We should not get called */
	BUG();
}
#endif /* CONFIG_PPC64 */

static unsigned int kvm_global_user_count = 0;
static DEFINE_SPINLOCK(kvm_global_user_count_lock);

static int kvmppc_core_init_vm_pr(struct kvm *kvm)
{
	mutex_init(&kvm->arch.hpt_mutex);

#ifdef CONFIG_PPC_BOOK3S_64
	/* Start out with the default set of hcalls enabled */
	kvmppc_pr_init_default_hcalls(kvm);
#endif

	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
		spin_lock(&kvm_global_user_count_lock);
		if (++kvm_global_user_count == 1)
			pSeries_disable_reloc_on_exc();
		spin_unlock(&kvm_global_user_count_lock);
	}
	return 0;
}

static void kvmppc_core_destroy_vm_pr(struct kvm *kvm)
{
#ifdef CONFIG_PPC64
	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif

	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
		spin_lock(&kvm_global_user_count_lock);
		BUG_ON(kvm_global_user_count == 0);
		if (--kvm_global_user_count == 0)
			pSeries_enable_reloc_on_exc();
		spin_unlock(&kvm_global_user_count_lock);
	}
}

static int kvmppc_core_check_processor_compat_pr(void)
{
	/* we are always compatible */
	return 0;
}

static long kvm_arch_vm_ioctl_pr(struct file *filp,
				 unsigned int ioctl, unsigned long arg)
{
	return -ENOTTY;
}

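/* Dispatch table wiring the PR implementations into the generic KVM-PPC core. */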
static struct kvmppc_ops kvm_ops_pr = {
	.get_sregs = kvm_arch_vcpu_ioctl_get_sregs_pr,
	.set_sregs = kvm_arch_vcpu_ioctl_set_sregs_pr,
	.get_one_reg = kvmppc_get_one_reg_pr,
	.set_one_reg = kvmppc_set_one_reg_pr,
	.vcpu_load = kvmppc_core_vcpu_load_pr,
	.vcpu_put = kvmppc_core_vcpu_put_pr,
	.set_msr = kvmppc_set_msr_pr,
	.vcpu_run = kvmppc_vcpu_run_pr,
	.vcpu_create = kvmppc_core_vcpu_create_pr,
	.vcpu_free = kvmppc_core_vcpu_free_pr,
	.check_requests = kvmppc_core_check_requests_pr,
	.get_dirty_log = kvm_vm_ioctl_get_dirty_log_pr,
	.flush_memslot = kvmppc_core_flush_memslot_pr,
	.prepare_memory_region = kvmppc_core_prepare_memory_region_pr,
	.commit_memory_region = kvmppc_core_commit_memory_region_pr,
	.unmap_hva = kvm_unmap_hva_pr,
	.unmap_hva_range = kvm_unmap_hva_range_pr,
	.age_hva = kvm_age_hva_pr,
	.test_age_hva = kvm_test_age_hva_pr,
	.set_spte_hva = kvm_set_spte_hva_pr,
	.mmu_destroy = kvmppc_mmu_destroy_pr,
	.free_memslot = kvmppc_core_free_memslot_pr,
	.create_memslot = kvmppc_core_create_memslot_pr,
	.init_vm = kvmppc_core_init_vm_pr,
	.destroy_vm = kvmppc_core_destroy_vm_pr,
	.get_smmu_info = kvm_vm_ioctl_get_smmu_info_pr,
	.emulate_op = kvmppc_core_emulate_op_pr,
	.emulate_mtspr = kvmppc_core_emulate_mtspr_pr,
	.emulate_mfspr = kvmppc_core_emulate_mfspr_pr,
	.fast_vcpu_kick = kvm_vcpu_kick,
	.arch_vm_ioctl = kvm_arch_vm_ioctl_pr,
#ifdef CONFIG_PPC_BOOK3S_64
	.hcall_implemented = kvmppc_hcall_impl_pr,
#endif
};


int kvmppc_book3s_init_pr(void)
{
	int r;

	r = kvmppc_core_check_processor_compat_pr();
	if (r < 0)
		return r;

	kvm_ops_pr.owner = THIS_MODULE;
	kvmppc_pr_ops = &kvm_ops_pr;

	r = kvmppc_mmu_hpte_sysinit();
	return r;
}

void kvmppc_book3s_exit_pr(void)
{
	kvmppc_pr_ops = NULL;
	kvmppc_mmu_hpte_sysexit();
}

/*
 * We only support separate modules for book3s 64
 */
#ifdef CONFIG_PPC_BOOK3S_64

module_init(kvmppc_book3s_init_pr);
module_exit(kvmppc_book3s_exit_pr);

MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");
#endif