// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Paul Mackerras <paulus@au1.ibm.com>
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *
 * Description: KVM functions specific to running on Book 3S
 * processors in hypervisor mode (specifically POWER7 and later).
 *
 * This file is derived from arch/powerpc/kvm/book3s.c,
 * by Alexander Graf <agraf@suse.de>.
 */

#include <linux/kvm_host.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/preempt.h>
#include <linux/sched/signal.h>
#include <linux/sched/stat.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <linux/srcu.h>
#include <linux/miscdevice.h>
#include <linux/debugfs.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kvm_irqfd.h>
#include <linux/irqbypass.h>
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/of.h>

#include <asm/ftrace.h>
#include <asm/reg.h>
#include <asm/ppc-opcode.h>
#include <asm/asm-prototypes.h>
#include <asm/archrandom.h>
#include <asm/debug.h>
#include <asm/disassemble.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/lppaca.h>
#include <asm/pmc.h>
#include <asm/processor.h>
#include <asm/cputhreads.h>
#include <asm/page.h>
#include <asm/hvcall.h>
#include <asm/switch_to.h>
#include <asm/smp.h>
#include <asm/dbell.h>
#include <asm/hmi.h>
#include <asm/pnv-pci.h>
#include <asm/mmu.h>
#include <asm/opal.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/hw_breakpoint.h>
#include <asm/kvm_book3s_uvmem.h>
#include <asm/ultravisor.h>
#include <asm/dtl.h>

#include "book3s.h"

#define CREATE_TRACE_POINTS
#include "trace_hv.h"

/* #define EXIT_DEBUG */
/* #define EXIT_DEBUG_SIMPLE */
/* #define EXIT_DEBUG_INT */

/* Used to indicate that a guest page fault needs to be handled */
#define RESUME_PAGE_FAULT	(RESUME_GUEST | RESUME_FLAG_ARCH1)
/* Used to indicate that a guest passthrough interrupt needs to be handled */
#define RESUME_PASSTHROUGH	(RESUME_GUEST | RESUME_FLAG_ARCH2)

/* Used as a "null" value for timebase values */
#define TB_NIL	(~(u64)0)

static DECLARE_BITMAP(default_enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);
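/*
 * PAPR hcall numbers are multiples of 4, so req / 4 (as used in
 * kvmppc_pseries_do_hcall() below) indexes this bitmap densely.
 */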

static int dynamic_mt_modes = 6;
module_param(dynamic_mt_modes, int, 0644);
MODULE_PARM_DESC(dynamic_mt_modes, "Set of allowed dynamic micro-threading modes: 0 (= none), 2, 4, or 6 (= 2 or 4)");
static int target_smt_mode;
module_param(target_smt_mode, int, 0644);
MODULE_PARM_DESC(target_smt_mode, "Target threads per core (0 = max)");

static bool indep_threads_mode = true;
module_param(indep_threads_mode, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(indep_threads_mode, "Independent-threads mode (only on POWER9)");

static bool one_vm_per_core;
module_param(one_vm_per_core, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(one_vm_per_core, "Only run vCPUs from the same VM on a core (requires indep_threads_mode=N)");

#ifdef CONFIG_KVM_XICS
static const struct kernel_param_ops module_param_ops = {
	.set = param_set_int,
	.get = param_get_int,
};

module_param_cb(kvm_irq_bypass, &module_param_ops, &kvm_irq_bypass, 0644);
MODULE_PARM_DESC(kvm_irq_bypass, "Bypass passthrough interrupt optimization");

module_param_cb(h_ipi_redirect, &module_param_ops, &h_ipi_redirect, 0644);
MODULE_PARM_DESC(h_ipi_redirect, "Redirect H_IPI wakeup to a free host core");
#endif

/* If set, guests are allowed to create and control nested guests */
static bool nested = true;
module_param(nested, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(nested, "Enable nested virtualization (only on POWER9)");

static inline bool nesting_enabled(struct kvm *kvm)
{
	return kvm->arch.nested_enable && kvm_is_radix(kvm);
}

/* If set, the threads on each CPU core have to be in the same MMU mode */
static bool no_mixing_hpt_and_radix;

static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);

/*
 * RWMR values for POWER8.  These control the rate at which PURR
 * and SPURR count and should be set according to the number of
 * online threads in the vcore being run.
 */
#define RWMR_RPA_P8_1THREAD	0x164520C62609AECAUL
#define RWMR_RPA_P8_2THREAD	0x7FFF2908450D8DA9UL
#define RWMR_RPA_P8_3THREAD	0x164520C62609AECAUL
#define RWMR_RPA_P8_4THREAD	0x199A421245058DA9UL
#define RWMR_RPA_P8_5THREAD	0x164520C62609AECAUL
#define RWMR_RPA_P8_6THREAD	0x164520C62609AECAUL
#define RWMR_RPA_P8_7THREAD	0x164520C62609AECAUL
#define RWMR_RPA_P8_8THREAD	0x164520C62609AECAUL

static unsigned long p8_rwmr_values[MAX_SMT_THREADS + 1] = {
	RWMR_RPA_P8_1THREAD,
	RWMR_RPA_P8_1THREAD,
	RWMR_RPA_P8_2THREAD,
	RWMR_RPA_P8_3THREAD,
	RWMR_RPA_P8_4THREAD,
	RWMR_RPA_P8_5THREAD,
	RWMR_RPA_P8_6THREAD,
	RWMR_RPA_P8_7THREAD,
	RWMR_RPA_P8_8THREAD,
};
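/*
 * p8_rwmr_values[] is indexed directly by the number of online threads
 * (0 to MAX_SMT_THREADS); index 0 duplicates the 1-thread value so that
 * a thread count of zero needs no special-casing.
 */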

static inline struct kvm_vcpu *next_runnable_thread(struct kvmppc_vcore *vc,
		int *ip)
{
	int i = *ip;
	struct kvm_vcpu *vcpu;

	while (++i < MAX_SMT_THREADS) {
		vcpu = READ_ONCE(vc->runnable_threads[i]);
		if (vcpu) {
			*ip = i;
			return vcpu;
		}
	}
	return NULL;
}

/* Used to traverse the list of runnable threads for a given vcore */
#define for_each_runnable_thread(i, vcpu, vc) \
	for (i = -1; (vcpu = next_runnable_thread(vc, &i)); )
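/*
 * Usage sketch: the iterator starts at -1 so that next_runnable_thread()
 * begins scanning from index 0, e.g.
 *
 *	for_each_runnable_thread(i, vcpu, vc)
 *		kvmppc_fast_vcpu_kick_hv(vcpu);
 */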

static bool kvmppc_ipi_thread(int cpu)
{
	unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);

	/* If we're a nested hypervisor, fall back to ordinary IPIs for now */
	if (kvmhv_on_pseries())
		return false;

	/* On POWER9 we can use msgsnd to IPI any cpu */
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		msg |= get_hard_smp_processor_id(cpu);
		smp_mb();
		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
		return true;
	}

	/* On POWER8 for IPIs to threads in the same core, use msgsnd */
	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
		preempt_disable();
		if (cpu_first_thread_sibling(cpu) ==
		    cpu_first_thread_sibling(smp_processor_id())) {
			msg |= cpu_thread_in_core(cpu);
			smp_mb();
			__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
			preempt_enable();
			return true;
		}
		preempt_enable();
	}

#if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
	if (cpu >= 0 && cpu < nr_cpu_ids) {
		if (paca_ptrs[cpu]->kvm_hstate.xics_phys) {
			xics_wake_cpu(cpu);
			return true;
		}
		opal_int_set_mfrr(get_hard_smp_processor_id(cpu), IPI_PRIORITY);
		return true;
	}
#endif

	return false;
}

static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
{
	int cpu;
	struct rcuwait *waitp;

	waitp = kvm_arch_vcpu_get_wait(vcpu);
	if (rcuwait_wake_up(waitp))
		++vcpu->stat.halt_wakeup;

	cpu = READ_ONCE(vcpu->arch.thread_cpu);
	if (cpu >= 0 && kvmppc_ipi_thread(cpu))
		return;

	/* CPU points to the first thread of the core */
	cpu = vcpu->cpu;
	if (cpu >= 0 && cpu < nr_cpu_ids && cpu_online(cpu))
		smp_send_reschedule(cpu);
}

/*
 * We use the vcpu_load/put functions to measure stolen time.
 * Stolen time is counted as time when either the vcpu is able to
 * run as part of a virtual core, but the task running the vcore
 * is preempted or sleeping, or when the vcpu needs something done
 * in the kernel by the task running the vcpu, but that task is
 * preempted or sleeping.  Those two things have to be counted
 * separately, since one of the vcpu tasks will take on the job
 * of running the core, and the other vcpu tasks in the vcore will
 * sleep waiting for it to do that, but that sleep shouldn't count
 * as stolen time.
 *
 * Hence we accumulate stolen time when the vcpu can run as part of
 * a vcore using vc->stolen_tb, and the stolen time when the vcpu
 * needs its task to do other things in the kernel (for example,
 * service a page fault) in busy_stolen.  We don't accumulate
 * stolen time for a vcore when it is inactive, or for a vcpu
 * when it is in state RUNNING or NOTREADY.  NOTREADY is a bit of
 * a misnomer; it means that the vcpu task is not executing in
 * the KVM_VCPU_RUN ioctl, i.e. it is in userspace or elsewhere in
 * the kernel.  We don't have any way of dividing up that time
 * between time that the vcpu is genuinely stopped, time that
 * the task is actively working on behalf of the vcpu, and time
 * that the task is preempted, so we don't count any of it as
 * stolen.
 *
 * Updates to busy_stolen are protected by arch.tbacct_lock;
 * updates to vc->stolen_tb are protected by the vcore->stoltb_lock
 * lock.  The stolen times are measured in units of timebase ticks.
 * (Note that the != TB_NIL checks below are purely defensive;
 * they should never fail.)
 */

static void kvmppc_core_start_stolen(struct kvmppc_vcore *vc)
{
	unsigned long flags;

	spin_lock_irqsave(&vc->stoltb_lock, flags);
	vc->preempt_tb = mftb();
	spin_unlock_irqrestore(&vc->stoltb_lock, flags);
}

static void kvmppc_core_end_stolen(struct kvmppc_vcore *vc)
{
	unsigned long flags;

	spin_lock_irqsave(&vc->stoltb_lock, flags);
	if (vc->preempt_tb != TB_NIL) {
		vc->stolen_tb += mftb() - vc->preempt_tb;
		vc->preempt_tb = TB_NIL;
	}
	spin_unlock_irqrestore(&vc->stoltb_lock, flags);
}

static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	unsigned long flags;

	/*
	 * We can test vc->runner without taking the vcore lock,
	 * because only this task ever sets vc->runner to this
	 * vcpu, and once it is set to this vcpu, only this task
	 * ever sets it to NULL.
	 */
	if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING)
		kvmppc_core_end_stolen(vc);

	spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
	if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST &&
	    vcpu->arch.busy_preempt != TB_NIL) {
		vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt;
		vcpu->arch.busy_preempt = TB_NIL;
	}
	spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
}

static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	unsigned long flags;

	if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING)
		kvmppc_core_start_stolen(vc);

	spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
	if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST)
		vcpu->arch.busy_preempt = mftb();
	spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
}

static void kvmppc_set_pvr_hv(struct kvm_vcpu *vcpu, u32 pvr)
{
	vcpu->arch.pvr = pvr;
}

/* Dummy value used in computing PCR value below */
#define PCR_ARCH_31	(PCR_ARCH_300 << 1)
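/*
 * There is no architected PCR bit for ISA v3.1 to use here, hence the
 * dummy bit one position above PCR_ARCH_300: it lets the subtraction in
 * kvmppc_set_arch_compat() below cover the PCR_ARCH_300 bit when a v3.1
 * host runs a v3.0-compat guest.  Since (host_pcr_bit - guest_pcr_bit)
 * is always less than host_pcr_bit, the dummy bit itself never reaches
 * the real PCR.
 */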

static int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
{
	unsigned long host_pcr_bit = 0, guest_pcr_bit = 0;
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	/* We can (emulate) our own architecture version and anything older */
	if (cpu_has_feature(CPU_FTR_ARCH_31))
		host_pcr_bit = PCR_ARCH_31;
	else if (cpu_has_feature(CPU_FTR_ARCH_300))
		host_pcr_bit = PCR_ARCH_300;
	else if (cpu_has_feature(CPU_FTR_ARCH_207S))
		host_pcr_bit = PCR_ARCH_207;
	else if (cpu_has_feature(CPU_FTR_ARCH_206))
		host_pcr_bit = PCR_ARCH_206;
	else
		host_pcr_bit = PCR_ARCH_205;

	/* Determine lowest PCR bit needed to run guest in given PVR level */
	guest_pcr_bit = host_pcr_bit;
	if (arch_compat) {
		switch (arch_compat) {
		case PVR_ARCH_205:
			guest_pcr_bit = PCR_ARCH_205;
			break;
		case PVR_ARCH_206:
		case PVR_ARCH_206p:
			guest_pcr_bit = PCR_ARCH_206;
			break;
		case PVR_ARCH_207:
			guest_pcr_bit = PCR_ARCH_207;
			break;
		case PVR_ARCH_300:
			guest_pcr_bit = PCR_ARCH_300;
			break;
		case PVR_ARCH_31:
			guest_pcr_bit = PCR_ARCH_31;
			break;
		default:
			return -EINVAL;
		}
	}

	/* Check requested PCR bits don't exceed our capabilities */
	if (guest_pcr_bit > host_pcr_bit)
		return -EINVAL;

	spin_lock(&vc->lock);
	vc->arch_compat = arch_compat;
	/*
	 * Set all PCR bits for which guest_pcr_bit <= bit < host_pcr_bit
	 * Also set all reserved PCR bits
	 */
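	/*
	 * Worked example (the PCR_ARCH_* values are single, increasing
	 * bits, which is what the subtraction relies on): a POWER9 host
	 * running a POWER8-compat guest has host_pcr_bit = PCR_ARCH_300
	 * and guest_pcr_bit = PCR_ARCH_207, so the difference sets
	 * exactly the bits from PCR_ARCH_207 up to, but not including,
	 * PCR_ARCH_300, disabling the v3.0 features for the guest.
	 */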
	vc->pcr = (host_pcr_bit - guest_pcr_bit) | PCR_MASK;
	spin_unlock(&vc->lock);

	return 0;
}

static void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
{
	int r;

	pr_err("vcpu %p (%d):\n", vcpu, vcpu->vcpu_id);
	pr_err("pc = %.16lx msr = %.16llx trap = %x\n",
	       vcpu->arch.regs.nip, vcpu->arch.shregs.msr, vcpu->arch.trap);
	for (r = 0; r < 16; ++r)
		pr_err("r%2d = %.16lx r%d = %.16lx\n",
		       r, kvmppc_get_gpr(vcpu, r),
		       r+16, kvmppc_get_gpr(vcpu, r+16));
	pr_err("ctr = %.16lx lr = %.16lx\n",
	       vcpu->arch.regs.ctr, vcpu->arch.regs.link);
	pr_err("srr0 = %.16llx srr1 = %.16llx\n",
	       vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1);
	pr_err("sprg0 = %.16llx sprg1 = %.16llx\n",
	       vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1);
	pr_err("sprg2 = %.16llx sprg3 = %.16llx\n",
	       vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3);
	pr_err("cr = %.8lx xer = %.16lx dsisr = %.8x\n",
	       vcpu->arch.regs.ccr, vcpu->arch.regs.xer, vcpu->arch.shregs.dsisr);
	pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar);
	pr_err("fault dar = %.16lx dsisr = %.8x\n",
	       vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
	pr_err("SLB (%d entries):\n", vcpu->arch.slb_max);
	for (r = 0; r < vcpu->arch.slb_max; ++r)
		pr_err(" ESID = %.16llx VSID = %.16llx\n",
		       vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv);
	pr_err("lpcr = %.16lx sdr1 = %.16lx last_inst = %.8x\n",
	       vcpu->arch.vcore->lpcr, vcpu->kvm->arch.sdr1,
	       vcpu->arch.last_inst);
}

static struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
{
	return kvm_get_vcpu_by_id(kvm, id);
}

static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
{
	vpa->__old_status |= LPPACA_OLD_SHARED_PROC;
	vpa->yield_count = cpu_to_be32(1);
}

static int set_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *v,
		   unsigned long addr, unsigned long len)
{
	/* check address is cacheline aligned */
	if (addr & (L1_CACHE_BYTES - 1))
		return -EINVAL;
	spin_lock(&vcpu->arch.vpa_update_lock);
	if (v->next_gpa != addr || v->len != len) {
		v->next_gpa = addr;
		v->len = addr ? len : 0;
		v->update_pending = 1;
	}
	spin_unlock(&vcpu->arch.vpa_update_lock);
	return 0;
}

/* Length for a per-processor buffer is passed in at offset 4 in the buffer */
struct reg_vpa {
	u32 dummy;
	union {
		__be16 hword;
		__be32 word;
	} length;
};
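/*
 * The 4-byte dummy field places the length union at byte offset 4, as
 * the comment above requires; H_VPA_REG_VPA reads it as a 16-bit value,
 * while the DTL and SLB-shadow subfunctions read a 32-bit value (see
 * do_h_register_vpa() below).
 */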

static int vpa_is_registered(struct kvmppc_vpa *vpap)
{
	if (vpap->update_pending)
		return vpap->next_gpa != 0;
	return vpap->pinned_addr != NULL;
}

static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
				       unsigned long flags,
				       unsigned long vcpuid, unsigned long vpa)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long len, nb;
	void *va;
	struct kvm_vcpu *tvcpu;
	int err;
	int subfunc;
	struct kvmppc_vpa *vpap;

	tvcpu = kvmppc_find_vcpu(kvm, vcpuid);
	if (!tvcpu)
		return H_PARAMETER;

	subfunc = (flags >> H_VPA_FUNC_SHIFT) & H_VPA_FUNC_MASK;
	if (subfunc == H_VPA_REG_VPA || subfunc == H_VPA_REG_DTL ||
	    subfunc == H_VPA_REG_SLB) {
		/* Registering new area - address must be cache-line aligned */
		if ((vpa & (L1_CACHE_BYTES - 1)) || !vpa)
			return H_PARAMETER;

		/* convert logical addr to kernel addr and read length */
		va = kvmppc_pin_guest_page(kvm, vpa, &nb);
		if (va == NULL)
			return H_PARAMETER;
		if (subfunc == H_VPA_REG_VPA)
			len = be16_to_cpu(((struct reg_vpa *)va)->length.hword);
		else
			len = be32_to_cpu(((struct reg_vpa *)va)->length.word);
		kvmppc_unpin_guest_page(kvm, va, vpa, false);

		/* Check length */
		if (len > nb || len < sizeof(struct reg_vpa))
			return H_PARAMETER;
	} else {
		vpa = 0;
		len = 0;
	}

	err = H_PARAMETER;
	vpap = NULL;
	spin_lock(&tvcpu->arch.vpa_update_lock);

	switch (subfunc) {
	case H_VPA_REG_VPA:		/* register VPA */
		/*
		 * The size of our lppaca is 1kB because of the way we align
		 * it for the guest to avoid crossing a 4kB boundary. We only
		 * use 640 bytes of the structure though, so we should accept
		 * clients that set a size of 640.
		 */
		BUILD_BUG_ON(sizeof(struct lppaca) != 640);
		if (len < sizeof(struct lppaca))
			break;
		vpap = &tvcpu->arch.vpa;
		err = 0;
		break;

	case H_VPA_REG_DTL:		/* register DTL */
		if (len < sizeof(struct dtl_entry))
			break;
		len -= len % sizeof(struct dtl_entry);

		/* Check that they have previously registered a VPA */
		err = H_RESOURCE;
		if (!vpa_is_registered(&tvcpu->arch.vpa))
			break;

		vpap = &tvcpu->arch.dtl;
		err = 0;
		break;

	case H_VPA_REG_SLB:		/* register SLB shadow buffer */
		/* Check that they have previously registered a VPA */
		err = H_RESOURCE;
		if (!vpa_is_registered(&tvcpu->arch.vpa))
			break;

		vpap = &tvcpu->arch.slb_shadow;
		err = 0;
		break;

	case H_VPA_DEREG_VPA:		/* deregister VPA */
		/* Check they don't still have a DTL or SLB buf registered */
		err = H_RESOURCE;
		if (vpa_is_registered(&tvcpu->arch.dtl) ||
		    vpa_is_registered(&tvcpu->arch.slb_shadow))
			break;

		vpap = &tvcpu->arch.vpa;
		err = 0;
		break;

	case H_VPA_DEREG_DTL:		/* deregister DTL */
		vpap = &tvcpu->arch.dtl;
		err = 0;
		break;

	case H_VPA_DEREG_SLB:		/* deregister SLB shadow buffer */
		vpap = &tvcpu->arch.slb_shadow;
		err = 0;
		break;
	}

	if (vpap) {
		vpap->next_gpa = vpa;
		vpap->len = len;
		vpap->update_pending = 1;
	}

	spin_unlock(&tvcpu->arch.vpa_update_lock);

	return err;
}

static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap)
{
	struct kvm *kvm = vcpu->kvm;
	void *va;
	unsigned long nb;
	unsigned long gpa;

	/*
	 * We need to pin the page pointed to by vpap->next_gpa,
	 * but we can't call kvmppc_pin_guest_page under the lock
	 * as it does get_user_pages() and down_read().  So we
	 * have to drop the lock, pin the page, then get the lock
	 * again and check that a new area didn't get registered
	 * in the meantime.
	 */
	for (;;) {
		gpa = vpap->next_gpa;
		spin_unlock(&vcpu->arch.vpa_update_lock);
		va = NULL;
		nb = 0;
		if (gpa)
			va = kvmppc_pin_guest_page(kvm, gpa, &nb);
		spin_lock(&vcpu->arch.vpa_update_lock);
		if (gpa == vpap->next_gpa)
			break;
		/* sigh... unpin that one and try again */
		if (va)
			kvmppc_unpin_guest_page(kvm, va, gpa, false);
	}

	vpap->update_pending = 0;
	if (va && nb < vpap->len) {
		/*
		 * If it's now too short, it must be that userspace
		 * has changed the mappings underlying guest memory,
		 * so unregister the region.
		 */
		kvmppc_unpin_guest_page(kvm, va, gpa, false);
		va = NULL;
	}
	if (vpap->pinned_addr)
		kvmppc_unpin_guest_page(kvm, vpap->pinned_addr, vpap->gpa,
					vpap->dirty);
	vpap->gpa = gpa;
	vpap->pinned_addr = va;
	vpap->dirty = false;
	if (va)
		vpap->pinned_end = va + vpap->len;
}

static void kvmppc_update_vpas(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.vpa.update_pending ||
	      vcpu->arch.slb_shadow.update_pending ||
	      vcpu->arch.dtl.update_pending))
		return;

	spin_lock(&vcpu->arch.vpa_update_lock);
	if (vcpu->arch.vpa.update_pending) {
		kvmppc_update_vpa(vcpu, &vcpu->arch.vpa);
		if (vcpu->arch.vpa.pinned_addr)
			init_vpa(vcpu, vcpu->arch.vpa.pinned_addr);
	}
	if (vcpu->arch.dtl.update_pending) {
		kvmppc_update_vpa(vcpu, &vcpu->arch.dtl);
		vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr;
		vcpu->arch.dtl_index = 0;
	}
	if (vcpu->arch.slb_shadow.update_pending)
		kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow);
	spin_unlock(&vcpu->arch.vpa_update_lock);
}

/*
 * Return the accumulated stolen time for the vcore up until `now'.
 * The caller should hold the vcore lock.
 */
static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now)
{
	u64 p;
	unsigned long flags;

	spin_lock_irqsave(&vc->stoltb_lock, flags);
	p = vc->stolen_tb;
	if (vc->vcore_state != VCORE_INACTIVE &&
	    vc->preempt_tb != TB_NIL)
		p += now - vc->preempt_tb;
	spin_unlock_irqrestore(&vc->stoltb_lock, flags);
	return p;
}

static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
				    struct kvmppc_vcore *vc)
{
	struct dtl_entry *dt;
	struct lppaca *vpa;
	unsigned long stolen;
	unsigned long core_stolen;
	u64 now;
	unsigned long flags;

	dt = vcpu->arch.dtl_ptr;
	vpa = vcpu->arch.vpa.pinned_addr;
	now = mftb();
	core_stolen = vcore_stolen_time(vc, now);
	stolen = core_stolen - vcpu->arch.stolen_logged;
	vcpu->arch.stolen_logged = core_stolen;
	spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
	stolen += vcpu->arch.busy_stolen;
	vcpu->arch.busy_stolen = 0;
	spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
	if (!dt || !vpa)
		return;
	memset(dt, 0, sizeof(struct dtl_entry));
	dt->dispatch_reason = 7;
	dt->processor_id = cpu_to_be16(vc->pcpu + vcpu->arch.ptid);
	dt->timebase = cpu_to_be64(now + vc->tb_offset);
	dt->enqueue_to_dispatch_time = cpu_to_be32(stolen);
	dt->srr0 = cpu_to_be64(kvmppc_get_pc(vcpu));
	dt->srr1 = cpu_to_be64(vcpu->arch.shregs.msr);
	++dt;
	if (dt == vcpu->arch.dtl.pinned_end)
		dt = vcpu->arch.dtl.pinned_addr;
	vcpu->arch.dtl_ptr = dt;
	/* order writing *dt vs. writing vpa->dtl_idx */
	smp_wmb();
	vpa->dtl_idx = cpu_to_be64(++vcpu->arch.dtl_index);
	vcpu->arch.dtl.dirty = true;
}
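/*
 * Note the ring-buffer structure of the DTL: entries run from
 * dtl.pinned_addr up to dtl.pinned_end and the write cursor wraps back
 * to the start, while the monotonically increasing dtl_idx published in
 * the VPA tells the guest how many entries have ever been written.
 */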

/* See if there is a doorbell interrupt pending for a vcpu */
static bool kvmppc_doorbell_pending(struct kvm_vcpu *vcpu)
{
	int thr;
	struct kvmppc_vcore *vc;

	if (vcpu->arch.doorbell_request)
		return true;
	/*
	 * Ensure that the read of vcore->dpdes comes after the read
	 * of vcpu->doorbell_request.  This barrier matches the
	 * smp_wmb() in kvmppc_guest_entry_inject().
	 */
	smp_rmb();
	vc = vcpu->arch.vcore;
	thr = vcpu->vcpu_id - vc->first_vcpuid;
	return !!(vc->dpdes & (1 << thr));
}

static bool kvmppc_power8_compatible(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.vcore->arch_compat >= PVR_ARCH_207)
		return true;
	if ((!vcpu->arch.vcore->arch_compat) &&
	    cpu_has_feature(CPU_FTR_ARCH_207S))
		return true;
	return false;
}

static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags,
			     unsigned long resource, unsigned long value1,
			     unsigned long value2)
{
	switch (resource) {
	case H_SET_MODE_RESOURCE_SET_CIABR:
		if (!kvmppc_power8_compatible(vcpu))
			return H_P2;
		if (value2)
			return H_P4;
		if (mflags)
			return H_UNSUPPORTED_FLAG_START;
		/* Guests can't breakpoint the hypervisor */
		if ((value1 & CIABR_PRIV) == CIABR_PRIV_HYPER)
			return H_P3;
		vcpu->arch.ciabr = value1;
		return H_SUCCESS;
	case H_SET_MODE_RESOURCE_SET_DAWR0:
		if (!kvmppc_power8_compatible(vcpu))
			return H_P2;
		if (!ppc_breakpoint_available())
			return H_P2;
		if (mflags)
			return H_UNSUPPORTED_FLAG_START;
		if (value2 & DABRX_HYP)
			return H_P4;
		vcpu->arch.dawr = value1;
		vcpu->arch.dawrx = value2;
		return H_SUCCESS;
	case H_SET_MODE_RESOURCE_ADDR_TRANS_MODE:
		/* KVM does not support mflags=2 (AIL=2) */
		if (mflags != 0 && mflags != 3)
			return H_UNSUPPORTED_FLAG_START;
		return H_TOO_HARD;
	default:
		return H_TOO_HARD;
	}
}

/* Copy guest memory in place - must reside within a single memslot */
static int kvmppc_copy_guest(struct kvm *kvm, gpa_t to, gpa_t from,
			     unsigned long len)
{
	struct kvm_memory_slot *to_memslot = NULL;
	struct kvm_memory_slot *from_memslot = NULL;
	unsigned long to_addr, from_addr;
	int r;

	/* Get HVA for from address */
	from_memslot = gfn_to_memslot(kvm, from >> PAGE_SHIFT);
	if (!from_memslot)
		return -EFAULT;
	if ((from + len) >= ((from_memslot->base_gfn + from_memslot->npages)
			     << PAGE_SHIFT))
		return -EINVAL;
	from_addr = gfn_to_hva_memslot(from_memslot, from >> PAGE_SHIFT);
	if (kvm_is_error_hva(from_addr))
		return -EFAULT;
	from_addr |= (from & (PAGE_SIZE - 1));

	/* Get HVA for to address */
	to_memslot = gfn_to_memslot(kvm, to >> PAGE_SHIFT);
	if (!to_memslot)
		return -EFAULT;
	if ((to + len) >= ((to_memslot->base_gfn + to_memslot->npages)
			   << PAGE_SHIFT))
		return -EINVAL;
	to_addr = gfn_to_hva_memslot(to_memslot, to >> PAGE_SHIFT);
	if (kvm_is_error_hva(to_addr))
		return -EFAULT;
	to_addr |= (to & (PAGE_SIZE - 1));

	/* Perform copy */
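	/*
	 * Both addresses computed above are host virtual addresses in the
	 * VMM's address space (guest RAM is backed by userspace memory),
	 * so a user-to-user copy is sufficient here.
	 */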
	r = raw_copy_in_user((void __user *)to_addr, (void __user *)from_addr,
			     len);
	if (r)
		return -EFAULT;
	mark_page_dirty(kvm, to >> PAGE_SHIFT);
	return 0;
}

static long kvmppc_h_page_init(struct kvm_vcpu *vcpu, unsigned long flags,
			       unsigned long dest, unsigned long src)
{
	u64 pg_sz = SZ_4K;		/* 4K page size */
	u64 pg_mask = SZ_4K - 1;
	int ret;

	/* Check for invalid flags (H_PAGE_SET_LOANED covers all CMO flags) */
	if (flags & ~(H_ICACHE_INVALIDATE | H_ICACHE_SYNCHRONIZE |
		      H_ZERO_PAGE | H_COPY_PAGE | H_PAGE_SET_LOANED))
		return H_PARAMETER;

	/* dest (and src if copy_page flag set) must be page aligned */
	if ((dest & pg_mask) || ((flags & H_COPY_PAGE) && (src & pg_mask)))
		return H_PARAMETER;

	/* zero and/or copy the page as determined by the flags */
	if (flags & H_COPY_PAGE) {
		ret = kvmppc_copy_guest(vcpu->kvm, dest, src, pg_sz);
		if (ret < 0)
			return H_PARAMETER;
	} else if (flags & H_ZERO_PAGE) {
		ret = kvm_clear_guest(vcpu->kvm, dest, pg_sz);
		if (ret < 0)
			return H_PARAMETER;
	}

	/* We can ignore the remaining flags */

	return H_SUCCESS;
}

static int kvm_arch_vcpu_yield_to(struct kvm_vcpu *target)
{
	struct kvmppc_vcore *vcore = target->arch.vcore;

	/*
	 * We expect to have been called by the real mode handler
	 * (kvmppc_rm_h_confer()) which would have directly returned
	 * H_SUCCESS if the source vcore wasn't idle (e.g. if it may
	 * have useful work to do and should not confer) so we don't
	 * recheck that here.
	 */

	spin_lock(&vcore->lock);
	if (target->arch.state == KVMPPC_VCPU_RUNNABLE &&
	    vcore->vcore_state != VCORE_INACTIVE &&
	    vcore->runner)
		target = vcore->runner;
	spin_unlock(&vcore->lock);

	return kvm_vcpu_yield_to(target);
}

static int kvmppc_get_yield_count(struct kvm_vcpu *vcpu)
{
	int yield_count = 0;
	struct lppaca *lppaca;

	spin_lock(&vcpu->arch.vpa_update_lock);
	lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr;
	if (lppaca)
		yield_count = be32_to_cpu(lppaca->yield_count);
	spin_unlock(&vcpu->arch.vpa_update_lock);
	return yield_count;
}

int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
{
	unsigned long req = kvmppc_get_gpr(vcpu, 3);
	unsigned long target, ret = H_SUCCESS;
	int yield_count;
	struct kvm_vcpu *tvcpu;
	int idx, rc;

	if (req <= MAX_HCALL_OPCODE &&
	    !test_bit(req/4, vcpu->kvm->arch.enabled_hcalls))
		return RESUME_HOST;

	switch (req) {
	case H_CEDE:
		break;
	case H_PROD:
		target = kvmppc_get_gpr(vcpu, 4);
		tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
		if (!tvcpu) {
			ret = H_PARAMETER;
			break;
		}
		tvcpu->arch.prodded = 1;
		smp_mb();
		if (tvcpu->arch.ceded)
			kvmppc_fast_vcpu_kick_hv(tvcpu);
		break;
	case H_CONFER:
		target = kvmppc_get_gpr(vcpu, 4);
		if (target == -1)
			break;
		tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
		if (!tvcpu) {
			ret = H_PARAMETER;
			break;
		}
		yield_count = kvmppc_get_gpr(vcpu, 5);
		if (kvmppc_get_yield_count(tvcpu) != yield_count)
			break;
		kvm_arch_vcpu_yield_to(tvcpu);
		break;
	case H_REGISTER_VPA:
		ret = do_h_register_vpa(vcpu, kvmppc_get_gpr(vcpu, 4),
					kvmppc_get_gpr(vcpu, 5),
					kvmppc_get_gpr(vcpu, 6));
		break;
	case H_RTAS:
		if (list_empty(&vcpu->kvm->arch.rtas_tokens))
			return RESUME_HOST;

		idx = srcu_read_lock(&vcpu->kvm->srcu);
		rc = kvmppc_rtas_hcall(vcpu);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);

		if (rc == -ENOENT)
			return RESUME_HOST;
		else if (rc == 0)
			break;

		/* Send the error out to userspace via KVM_RUN */
		return rc;
	case H_LOGICAL_CI_LOAD:
		ret = kvmppc_h_logical_ci_load(vcpu);
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
	case H_LOGICAL_CI_STORE:
		ret = kvmppc_h_logical_ci_store(vcpu);
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
	case H_SET_MODE:
		ret = kvmppc_h_set_mode(vcpu, kvmppc_get_gpr(vcpu, 4),
					kvmppc_get_gpr(vcpu, 5),
					kvmppc_get_gpr(vcpu, 6),
					kvmppc_get_gpr(vcpu, 7));
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
	case H_XIRR:
	case H_CPPR:
	case H_EOI:
	case H_IPI:
	case H_IPOLL:
	case H_XIRR_X:
		if (kvmppc_xics_enabled(vcpu)) {
			if (xics_on_xive()) {
				ret = H_NOT_AVAILABLE;
				return RESUME_GUEST;
			}
			ret = kvmppc_xics_hcall(vcpu, req);
			break;
		}
		return RESUME_HOST;
	case H_SET_DABR:
		ret = kvmppc_h_set_dabr(vcpu, kvmppc_get_gpr(vcpu, 4));
		break;
	case H_SET_XDABR:
		ret = kvmppc_h_set_xdabr(vcpu, kvmppc_get_gpr(vcpu, 4),
					 kvmppc_get_gpr(vcpu, 5));
		break;
#ifdef CONFIG_SPAPR_TCE_IOMMU
	case H_GET_TCE:
		ret = kvmppc_h_get_tce(vcpu, kvmppc_get_gpr(vcpu, 4),
				       kvmppc_get_gpr(vcpu, 5));
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
	case H_PUT_TCE:
		ret = kvmppc_h_put_tce(vcpu, kvmppc_get_gpr(vcpu, 4),
				       kvmppc_get_gpr(vcpu, 5),
				       kvmppc_get_gpr(vcpu, 6));
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
	case H_PUT_TCE_INDIRECT:
		ret = kvmppc_h_put_tce_indirect(vcpu, kvmppc_get_gpr(vcpu, 4),
						kvmppc_get_gpr(vcpu, 5),
						kvmppc_get_gpr(vcpu, 6),
						kvmppc_get_gpr(vcpu, 7));
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
	case H_STUFF_TCE:
		ret = kvmppc_h_stuff_tce(vcpu, kvmppc_get_gpr(vcpu, 4),
					 kvmppc_get_gpr(vcpu, 5),
					 kvmppc_get_gpr(vcpu, 6),
					 kvmppc_get_gpr(vcpu, 7));
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
#endif
	case H_RANDOM:
		if (!powernv_get_random_long(&vcpu->arch.regs.gpr[4]))
			ret = H_HARDWARE;
		break;

	case H_SET_PARTITION_TABLE:
		ret = H_FUNCTION;
		if (nesting_enabled(vcpu->kvm))
			ret = kvmhv_set_partition_table(vcpu);
		break;
	case H_ENTER_NESTED:
		ret = H_FUNCTION;
		if (!nesting_enabled(vcpu->kvm))
			break;
		ret = kvmhv_enter_nested_guest(vcpu);
		if (ret == H_INTERRUPT) {
			kvmppc_set_gpr(vcpu, 3, 0);
			vcpu->arch.hcall_needed = 0;
			return -EINTR;
		} else if (ret == H_TOO_HARD) {
			kvmppc_set_gpr(vcpu, 3, 0);
			vcpu->arch.hcall_needed = 0;
			return RESUME_HOST;
		}
		break;
	case H_TLB_INVALIDATE:
		ret = H_FUNCTION;
		if (nesting_enabled(vcpu->kvm))
			ret = kvmhv_do_nested_tlbie(vcpu);
		break;
	case H_COPY_TOFROM_GUEST:
		ret = H_FUNCTION;
		if (nesting_enabled(vcpu->kvm))
			ret = kvmhv_copy_tofrom_guest_nested(vcpu);
		break;
	case H_PAGE_INIT:
		ret = kvmppc_h_page_init(vcpu, kvmppc_get_gpr(vcpu, 4),
					 kvmppc_get_gpr(vcpu, 5),
					 kvmppc_get_gpr(vcpu, 6));
		break;
	case H_SVM_PAGE_IN:
		ret = H_UNSUPPORTED;
		if (kvmppc_get_srr1(vcpu) & MSR_S)
			ret = kvmppc_h_svm_page_in(vcpu->kvm,
						   kvmppc_get_gpr(vcpu, 4),
						   kvmppc_get_gpr(vcpu, 5),
						   kvmppc_get_gpr(vcpu, 6));
		break;
	case H_SVM_PAGE_OUT:
		ret = H_UNSUPPORTED;
		if (kvmppc_get_srr1(vcpu) & MSR_S)
			ret = kvmppc_h_svm_page_out(vcpu->kvm,
						    kvmppc_get_gpr(vcpu, 4),
						    kvmppc_get_gpr(vcpu, 5),
						    kvmppc_get_gpr(vcpu, 6));
		break;
	case H_SVM_INIT_START:
		ret = H_UNSUPPORTED;
		if (kvmppc_get_srr1(vcpu) & MSR_S)
			ret = kvmppc_h_svm_init_start(vcpu->kvm);
		break;
	case H_SVM_INIT_DONE:
		ret = H_UNSUPPORTED;
		if (kvmppc_get_srr1(vcpu) & MSR_S)
			ret = kvmppc_h_svm_init_done(vcpu->kvm);
		break;
	case H_SVM_INIT_ABORT:
		/*
		 * Even if that call is made by the Ultravisor, the SRR1 value
		 * is the guest context one, with the secure bit clear as it has
		 * not yet been secured. So we can't check it here.
		 * Instead the kvm->arch.secure_guest flag is checked inside
		 * kvmppc_h_svm_init_abort().
		 */
		ret = kvmppc_h_svm_init_abort(vcpu->kvm);
		break;

	default:
		return RESUME_HOST;
	}
	kvmppc_set_gpr(vcpu, 3, ret);
	vcpu->arch.hcall_needed = 0;
	return RESUME_GUEST;
}

/*
 * Handle H_CEDE in the nested virtualization case where we haven't
 * called the real-mode hcall handlers in book3s_hv_rmhandlers.S.
 * This has to be done early, not in kvmppc_pseries_do_hcall(), so
 * that the cede logic in kvmppc_run_single_vcpu() works properly.
 */
static void kvmppc_nested_cede(struct kvm_vcpu *vcpu)
{
	vcpu->arch.shregs.msr |= MSR_EE;
	vcpu->arch.ceded = 1;
	smp_mb();
	if (vcpu->arch.prodded) {
		vcpu->arch.prodded = 0;
		smp_mb();
		vcpu->arch.ceded = 0;
	}
}
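/*
 * The smp_mb() between setting ceded and testing prodded pairs with the
 * barrier in the H_PROD handler above (which sets prodded and then tests
 * ceded): whichever side runs second is guaranteed to see the other's
 * store, so a prod cannot be lost while the vcpu is ceding.
 */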

static int kvmppc_hcall_impl_hv(unsigned long cmd)
{
	switch (cmd) {
	case H_CEDE:
	case H_PROD:
	case H_CONFER:
	case H_REGISTER_VPA:
	case H_SET_MODE:
	case H_LOGICAL_CI_LOAD:
	case H_LOGICAL_CI_STORE:
#ifdef CONFIG_KVM_XICS
	case H_XIRR:
	case H_CPPR:
	case H_EOI:
	case H_IPI:
	case H_IPOLL:
	case H_XIRR_X:
#endif
	case H_PAGE_INIT:
		return 1;
	}

	/* See if it's in the real-mode table */
	return kvmppc_hcall_impl_hv_realmode(cmd);
}

static int kvmppc_emulate_debug_inst(struct kvm_vcpu *vcpu)
{
	u32 last_inst;

	if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst) !=
	    EMULATE_DONE) {
		/*
		 * Fetch failed, so return to guest and
		 * try executing it again.
		 */
		return RESUME_GUEST;
	}

	if (last_inst == KVMPPC_INST_SW_BREAKPOINT) {
		vcpu->run->exit_reason = KVM_EXIT_DEBUG;
		vcpu->run->debug.arch.address = kvmppc_get_pc(vcpu);
		return RESUME_HOST;
	} else {
		kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
		return RESUME_GUEST;
	}
}

static void do_nothing(void *x)
{
}

static unsigned long kvmppc_read_dpdes(struct kvm_vcpu *vcpu)
{
	int thr, cpu, pcpu, nthreads;
	struct kvm_vcpu *v;
	unsigned long dpdes;

	nthreads = vcpu->kvm->arch.emul_smt_mode;
	dpdes = 0;
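	/*
	 * emul_smt_mode is a power of two, so this mask rounds vcpu_id
	 * down to the first thread of the emulated (virtual) core; the
	 * loop below then visits each sibling vcpu of that core.
	 */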
	cpu = vcpu->vcpu_id & ~(nthreads - 1);
	for (thr = 0; thr < nthreads; ++thr, ++cpu) {
		v = kvmppc_find_vcpu(vcpu->kvm, cpu);
		if (!v)
			continue;
		/*
		 * If the vcpu is currently running on a physical cpu thread,
		 * interrupt it in order to pull it out of the guest briefly,
		 * which will update its vcore->dpdes value.
		 */
		pcpu = READ_ONCE(v->cpu);
		if (pcpu >= 0)
			smp_call_function_single(pcpu, do_nothing, NULL, 1);
		if (kvmppc_doorbell_pending(v))
			dpdes |= 1 << thr;
	}
	return dpdes;
}

/*
 * On POWER9, emulate doorbell-related instructions in order to
 * give the guest the illusion of running on a multi-threaded core.
 * The instructions emulated are msgsndp, msgclrp, mfspr TIR,
 * and mfspr DPDES.
 */
static int kvmppc_emulate_doorbell_instr(struct kvm_vcpu *vcpu)
{
	u32 inst, rb, thr;
	unsigned long arg;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_vcpu *tvcpu;

	if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst) != EMULATE_DONE)
		return RESUME_GUEST;
	if (get_op(inst) != 31)
		return EMULATE_FAIL;
	rb = get_rb(inst);
	thr = vcpu->vcpu_id & (kvm->arch.emul_smt_mode - 1);
	switch (get_xop(inst)) {
	case OP_31_XOP_MSGSNDP:
		arg = kvmppc_get_gpr(vcpu, rb);
		if (((arg >> 27) & 0xf) != PPC_DBELL_SERVER)
			break;
		arg &= 0x3f;
		if (arg >= kvm->arch.emul_smt_mode)
			break;
		tvcpu = kvmppc_find_vcpu(kvm, vcpu->vcpu_id - thr + arg);
		if (!tvcpu)
			break;
		if (!tvcpu->arch.doorbell_request) {
			tvcpu->arch.doorbell_request = 1;
			kvmppc_fast_vcpu_kick_hv(tvcpu);
		}
		break;
	case OP_31_XOP_MSGCLRP:
		arg = kvmppc_get_gpr(vcpu, rb);
		if (((arg >> 27) & 0xf) != PPC_DBELL_SERVER)
			break;
		vcpu->arch.vcore->dpdes = 0;
		vcpu->arch.doorbell_request = 0;
		break;
	case OP_31_XOP_MFSPR:
		switch (get_sprn(inst)) {
		case SPRN_TIR:
			arg = thr;
			break;
		case SPRN_DPDES:
			arg = kvmppc_read_dpdes(vcpu);
			break;
		default:
			return EMULATE_FAIL;
		}
		kvmppc_set_gpr(vcpu, get_rt(inst), arg);
		break;
	default:
		return EMULATE_FAIL;
	}
	kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
	return RESUME_GUEST;
}

static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
				 struct task_struct *tsk)
{
	struct kvm_run *run = vcpu->run;
	int r = RESUME_HOST;

	vcpu->stat.sum_exits++;

	/*
	 * This can happen if an interrupt occurs in the last stages
	 * of guest entry or the first stages of guest exit (i.e. after
	 * setting paca->kvm_hstate.in_guest to KVM_GUEST_MODE_GUEST_HV
	 * and before setting it to KVM_GUEST_MODE_HOST_HV).
	 * That can happen due to a bug, or due to a machine check
	 * occurring at just the wrong time.
	 */
	if (vcpu->arch.shregs.msr & MSR_HV) {
		printk(KERN_EMERG "KVM trap in HV mode!\n");
		printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
			vcpu->arch.trap, kvmppc_get_pc(vcpu),
			vcpu->arch.shregs.msr);
		kvmppc_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		run->hw.hardware_exit_reason = vcpu->arch.trap;
		return RESUME_HOST;
	}
	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;
	switch (vcpu->arch.trap) {
	/* We're good on these - the host merely wanted to get our attention */
	case BOOK3S_INTERRUPT_HV_DECREMENTER:
		vcpu->stat.dec_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_EXTERNAL:
	case BOOK3S_INTERRUPT_H_DOORBELL:
	case BOOK3S_INTERRUPT_H_VIRT:
		vcpu->stat.ext_intr_exits++;
		r = RESUME_GUEST;
		break;
	/* SR/HMI/PMI are HV interrupts that host has handled. Resume guest. */
	case BOOK3S_INTERRUPT_HMI:
	case BOOK3S_INTERRUPT_PERFMON:
	case BOOK3S_INTERRUPT_SYSTEM_RESET:
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_MACHINE_CHECK:
		/* Print the MCE event to host console. */
		machine_check_print_event_info(&vcpu->arch.mce_evt, false, true);

		/*
		 * If the guest can do FWNMI, exit to userspace so it can
		 * deliver a FWNMI to the guest.
		 * Otherwise we synthesize a machine check for the guest
		 * so that it knows that the machine check occurred.
		 */
		if (!vcpu->kvm->arch.fwnmi_enabled) {
			ulong flags = vcpu->arch.shregs.msr & 0x083c0000;
			kvmppc_core_queue_machine_check(vcpu, flags);
			r = RESUME_GUEST;
			break;
		}

		/* Exit to userspace with KVM_EXIT_NMI as exit reason */
		run->exit_reason = KVM_EXIT_NMI;
		run->hw.hardware_exit_reason = vcpu->arch.trap;
		/* Clear out the old NMI status from run->flags */
		run->flags &= ~KVM_RUN_PPC_NMI_DISP_MASK;
		/* Now set the NMI status */
		if (vcpu->arch.mce_evt.disposition == MCE_DISPOSITION_RECOVERED)
			run->flags |= KVM_RUN_PPC_NMI_DISP_FULLY_RECOV;
		else
			run->flags |= KVM_RUN_PPC_NMI_DISP_NOT_RECOV;

		r = RESUME_HOST;
		break;
	case BOOK3S_INTERRUPT_PROGRAM:
	{
		ulong flags;
		/*
		 * Normally program interrupts are delivered directly
		 * to the guest by the hardware, but we can get here
		 * as a result of a hypervisor emulation interrupt
		 * (e40) getting turned into a 700 by BML RTAS.
		 */
		flags = vcpu->arch.shregs.msr & 0x1f0000ull;
		kvmppc_core_queue_program(vcpu, flags);
		r = RESUME_GUEST;
		break;
	}
	case BOOK3S_INTERRUPT_SYSCALL:
	{
		/* hcall - punt to userspace */
		int i;

		/* hypercall with MSR_PR has already been handled in rmode,
		 * and never reaches here.
		 */

		run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3);
		for (i = 0; i < 9; ++i)
			run->papr_hcall.args[i] = kvmppc_get_gpr(vcpu, 4 + i);
		run->exit_reason = KVM_EXIT_PAPR_HCALL;
		vcpu->arch.hcall_needed = 1;
		r = RESUME_HOST;
		break;
	}
	/*
	 * We get these next two if the guest accesses a page which it thinks
	 * it has mapped but which is not actually present, either because
	 * it is for an emulated I/O device or because the corresponding
	 * host page has been paged out.  Any other HDSI/HISI interrupts
	 * have been handled already.
	 */
	case BOOK3S_INTERRUPT_H_DATA_STORAGE:
		r = RESUME_PAGE_FAULT;
		break;
	case BOOK3S_INTERRUPT_H_INST_STORAGE:
		vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
		vcpu->arch.fault_dsisr = vcpu->arch.shregs.msr &
			DSISR_SRR1_MATCH_64S;
		if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE)
			vcpu->arch.fault_dsisr |= DSISR_ISSTORE;
		r = RESUME_PAGE_FAULT;
		break;
	/*
	 * This occurs if the guest executes an illegal instruction.
	 * If the guest debug is disabled, generate a program interrupt
	 * to the guest. If guest debug is enabled, we need to check
	 * whether the instruction is a software breakpoint instruction.
	 * Accordingly return to Guest or Host.
	 */
	case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
		if (vcpu->arch.emul_inst != KVM_INST_FETCH_FAILED)
			vcpu->arch.last_inst = kvmppc_need_byteswap(vcpu) ?
				swab32(vcpu->arch.emul_inst) :
				vcpu->arch.emul_inst;
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) {
			r = kvmppc_emulate_debug_inst(vcpu);
		} else {
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			r = RESUME_GUEST;
		}
		break;
	/*
	 * This occurs if the guest (kernel or userspace), does something that
	 * is prohibited by HFSCR.
	 * On POWER9, this could be a doorbell instruction that we need
	 * to emulate.
	 * Otherwise, we just generate a program interrupt to the guest.
	 */
	case BOOK3S_INTERRUPT_H_FAC_UNAVAIL:
		r = EMULATE_FAIL;
		if (((vcpu->arch.hfscr >> 56) == FSCR_MSGP_LG) &&
		    cpu_has_feature(CPU_FTR_ARCH_300))
			r = kvmppc_emulate_doorbell_instr(vcpu);
		if (r == EMULATE_FAIL) {
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			r = RESUME_GUEST;
		}
		break;

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case BOOK3S_INTERRUPT_HV_SOFTPATCH:
		/*
		 * This occurs for various TM-related instructions that
		 * we need to emulate on POWER9 DD2.2.  We have already
		 * handled the cases where the guest was in real-suspend
		 * mode and was transitioning to transactional state.
		 */
		r = kvmhv_p9_tm_emulation(vcpu);
		break;
#endif

	case BOOK3S_INTERRUPT_HV_RM_HARD:
		r = RESUME_PASSTHROUGH;
		break;
	default:
		kvmppc_dump_regs(vcpu);
		printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
			vcpu->arch.trap, kvmppc_get_pc(vcpu),
			vcpu->arch.shregs.msr);
		run->hw.hardware_exit_reason = vcpu->arch.trap;
		r = RESUME_HOST;
		break;
	}

	return r;
}

static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu)
{
	int r;
	int srcu_idx;

	vcpu->stat.sum_exits++;

	/*
	 * This can happen if an interrupt occurs in the last stages
	 * of guest entry or the first stages of guest exit (i.e. after
	 * setting paca->kvm_hstate.in_guest to KVM_GUEST_MODE_GUEST_HV
	 * and before setting it to KVM_GUEST_MODE_HOST_HV).
	 * That can happen due to a bug, or due to a machine check
	 * occurring at just the wrong time.
	 */
	if (vcpu->arch.shregs.msr & MSR_HV) {
		pr_emerg("KVM trap in HV mode while nested!\n");
		pr_emerg("trap=0x%x | pc=0x%lx | msr=0x%llx\n",
			 vcpu->arch.trap, kvmppc_get_pc(vcpu),
			 vcpu->arch.shregs.msr);
		kvmppc_dump_regs(vcpu);
		return RESUME_HOST;
	}
	switch (vcpu->arch.trap) {
	/* We're good on these - the host merely wanted to get our attention */
	case BOOK3S_INTERRUPT_HV_DECREMENTER:
		vcpu->stat.dec_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_EXTERNAL:
		vcpu->stat.ext_intr_exits++;
		r = RESUME_HOST;
		break;
	case BOOK3S_INTERRUPT_H_DOORBELL:
	case BOOK3S_INTERRUPT_H_VIRT:
		vcpu->stat.ext_intr_exits++;
		r = RESUME_GUEST;
		break;
	/* SR/HMI/PMI are HV interrupts that host has handled. Resume guest. */
	case BOOK3S_INTERRUPT_HMI:
	case BOOK3S_INTERRUPT_PERFMON:
	case BOOK3S_INTERRUPT_SYSTEM_RESET:
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_MACHINE_CHECK:
		/* Pass the machine check to the L1 guest */
		r = RESUME_HOST;
		/* Print the MCE event to host console. */
		machine_check_print_event_info(&vcpu->arch.mce_evt, false, true);
		break;
	/*
	 * We get these next two if the guest accesses a page which it thinks
	 * it has mapped but which is not actually present, either because
	 * it is for an emulated I/O device or because the corresponding
	 * host page has been paged out.
	 */
	case BOOK3S_INTERRUPT_H_DATA_STORAGE:
		srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvmhv_nested_page_fault(vcpu);
		srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
		break;
	case BOOK3S_INTERRUPT_H_INST_STORAGE:
		vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
		vcpu->arch.fault_dsisr = kvmppc_get_msr(vcpu) &
			DSISR_SRR1_MATCH_64S;
		if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE)
			vcpu->arch.fault_dsisr |= DSISR_ISSTORE;
		srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvmhv_nested_page_fault(vcpu);
		srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
		break;

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case BOOK3S_INTERRUPT_HV_SOFTPATCH:
		/*
		 * This occurs for various TM-related instructions that
		 * we need to emulate on POWER9 DD2.2.  We have already
		 * handled the cases where the guest was in real-suspend
		 * mode and was transitioning to transactional state.
		 */
		r = kvmhv_p9_tm_emulation(vcpu);
		break;
#endif

	case BOOK3S_INTERRUPT_HV_RM_HARD:
		vcpu->arch.trap = 0;
		r = RESUME_GUEST;
		if (!xics_on_xive())
			kvmppc_xics_rm_complete(vcpu, 0);
		break;
	default:
		r = RESUME_HOST;
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_get_sregs_hv(struct kvm_vcpu *vcpu,
					    struct kvm_sregs *sregs)
{
	int i;

	memset(sregs, 0, sizeof(struct kvm_sregs));
	sregs->pvr = vcpu->arch.pvr;
	for (i = 0; i < vcpu->arch.slb_max; i++) {
		sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige;
		sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
	}

	return 0;
}

static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu,
					    struct kvm_sregs *sregs)
{
	int i, j;

	/* Only accept the same PVR as the host's, since we can't spoof it */
	if (sregs->pvr != vcpu->arch.pvr)
		return -EINVAL;

	j = 0;
	for (i = 0; i < vcpu->arch.slb_nr; i++) {
		if (sregs->u.s.ppc64.slb[i].slbe & SLB_ESID_V) {
			vcpu->arch.slb[j].orige = sregs->u.s.ppc64.slb[i].slbe;
			vcpu->arch.slb[j].origv = sregs->u.s.ppc64.slb[i].slbv;
			++j;
		}
	}
	vcpu->arch.slb_max = j;

	return 0;
}

static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
		bool preserve_top32)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	u64 mask;

	spin_lock(&vc->lock);
	/*
	 * If ILE (interrupt little-endian) has changed, update the
	 * MSR_LE bit in the intr_msr for each vcpu in this vcore.
	 */
	if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) {
		struct kvm_vcpu *vcpu;
		int i;

		kvm_for_each_vcpu(i, vcpu, kvm) {
			if (vcpu->arch.vcore != vc)
				continue;
			if (new_lpcr & LPCR_ILE)
				vcpu->arch.intr_msr |= MSR_LE;
			else
				vcpu->arch.intr_msr &= ~MSR_LE;
		}
	}

	/*
	 * Userspace can only modify DPFD (default prefetch depth),
	 * ILE (interrupt little-endian) and TC (translation control).
	 * On POWER8 and POWER9 userspace can also modify AIL (alt. interrupt loc.).
	 */
	mask = LPCR_DPFD | LPCR_ILE | LPCR_TC;
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		mask |= LPCR_AIL;
	/*
	 * On POWER9, allow userspace to enable large decrementer for the
	 * guest, whether or not the host has it enabled.
	 */
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		mask |= LPCR_LD;

	/* Broken 32-bit version of LPCR must not clear top bits */
	if (preserve_top32)
		mask &= 0xFFFFFFFF;
	vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);
	spin_unlock(&vc->lock);
}
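/*
 * The preserve_top32 case serves the legacy 32-bit KVM_REG_PPC_LPCR
 * interface: restricting the mask to the low 32 bits ensures a truncated
 * LPCR value from userspace can never clear bits in the upper half.
 */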
1657
kvmppc_get_one_reg_hv(struct kvm_vcpu * vcpu,u64 id,union kvmppc_one_reg * val)1658 static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
1659 union kvmppc_one_reg *val)
1660 {
1661 int r = 0;
1662 long int i;
1663
1664 switch (id) {
1665 case KVM_REG_PPC_DEBUG_INST:
1666 *val = get_reg_val(id, KVMPPC_INST_SW_BREAKPOINT);
1667 break;
1668 case KVM_REG_PPC_HIOR:
1669 *val = get_reg_val(id, 0);
1670 break;
1671 case KVM_REG_PPC_DABR:
1672 *val = get_reg_val(id, vcpu->arch.dabr);
1673 break;
1674 case KVM_REG_PPC_DABRX:
1675 *val = get_reg_val(id, vcpu->arch.dabrx);
1676 break;
1677 case KVM_REG_PPC_DSCR:
1678 *val = get_reg_val(id, vcpu->arch.dscr);
1679 break;
1680 case KVM_REG_PPC_PURR:
1681 *val = get_reg_val(id, vcpu->arch.purr);
1682 break;
1683 case KVM_REG_PPC_SPURR:
1684 *val = get_reg_val(id, vcpu->arch.spurr);
1685 break;
1686 case KVM_REG_PPC_AMR:
1687 *val = get_reg_val(id, vcpu->arch.amr);
1688 break;
1689 case KVM_REG_PPC_UAMOR:
1690 *val = get_reg_val(id, vcpu->arch.uamor);
1691 break;
1692 case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCR1:
1693 i = id - KVM_REG_PPC_MMCR0;
1694 *val = get_reg_val(id, vcpu->arch.mmcr[i]);
1695 break;
1696 case KVM_REG_PPC_MMCR2:
1697 *val = get_reg_val(id, vcpu->arch.mmcr[2]);
1698 break;
1699 case KVM_REG_PPC_MMCRA:
1700 *val = get_reg_val(id, vcpu->arch.mmcra);
1701 break;
1702 case KVM_REG_PPC_MMCRS:
1703 *val = get_reg_val(id, vcpu->arch.mmcrs);
1704 break;
1705 case KVM_REG_PPC_MMCR3:
1706 *val = get_reg_val(id, vcpu->arch.mmcr[3]);
1707 break;
1708 case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8:
1709 i = id - KVM_REG_PPC_PMC1;
1710 *val = get_reg_val(id, vcpu->arch.pmc[i]);
1711 break;
1712 case KVM_REG_PPC_SPMC1 ... KVM_REG_PPC_SPMC2:
1713 i = id - KVM_REG_PPC_SPMC1;
1714 *val = get_reg_val(id, vcpu->arch.spmc[i]);
1715 break;
1716 case KVM_REG_PPC_SIAR:
1717 *val = get_reg_val(id, vcpu->arch.siar);
1718 break;
1719 case KVM_REG_PPC_SDAR:
1720 *val = get_reg_val(id, vcpu->arch.sdar);
1721 break;
1722 case KVM_REG_PPC_SIER:
1723 *val = get_reg_val(id, vcpu->arch.sier[0]);
1724 break;
1725 case KVM_REG_PPC_SIER2:
1726 *val = get_reg_val(id, vcpu->arch.sier[1]);
1727 break;
1728 case KVM_REG_PPC_SIER3:
1729 *val = get_reg_val(id, vcpu->arch.sier[2]);
1730 break;
1731 case KVM_REG_PPC_IAMR:
1732 *val = get_reg_val(id, vcpu->arch.iamr);
1733 break;
1734 case KVM_REG_PPC_PSPB:
1735 *val = get_reg_val(id, vcpu->arch.pspb);
1736 break;
1737 case KVM_REG_PPC_DPDES:
1738 /*
1739 * On POWER9, where we are emulating msgsndp etc.,
1740 * we return 1 bit for each vcpu, which can come from
1741 * either vcore->dpdes or doorbell_request.
1742 * On POWER8, doorbell_request is 0.
1743 */
1744 *val = get_reg_val(id, vcpu->arch.vcore->dpdes |
1745 vcpu->arch.doorbell_request);
1746 break;
1747 case KVM_REG_PPC_VTB:
1748 *val = get_reg_val(id, vcpu->arch.vcore->vtb);
1749 break;
1750 case KVM_REG_PPC_DAWR:
1751 *val = get_reg_val(id, vcpu->arch.dawr);
1752 break;
1753 case KVM_REG_PPC_DAWRX:
1754 *val = get_reg_val(id, vcpu->arch.dawrx);
1755 break;
1756 case KVM_REG_PPC_CIABR:
1757 *val = get_reg_val(id, vcpu->arch.ciabr);
1758 break;
1759 case KVM_REG_PPC_CSIGR:
1760 *val = get_reg_val(id, vcpu->arch.csigr);
1761 break;
1762 case KVM_REG_PPC_TACR:
1763 *val = get_reg_val(id, vcpu->arch.tacr);
1764 break;
1765 case KVM_REG_PPC_TCSCR:
1766 *val = get_reg_val(id, vcpu->arch.tcscr);
1767 break;
1768 case KVM_REG_PPC_PID:
1769 *val = get_reg_val(id, vcpu->arch.pid);
1770 break;
1771 case KVM_REG_PPC_ACOP:
1772 *val = get_reg_val(id, vcpu->arch.acop);
1773 break;
1774 case KVM_REG_PPC_WORT:
1775 *val = get_reg_val(id, vcpu->arch.wort);
1776 break;
1777 case KVM_REG_PPC_TIDR:
1778 *val = get_reg_val(id, vcpu->arch.tid);
1779 break;
1780 case KVM_REG_PPC_PSSCR:
1781 *val = get_reg_val(id, vcpu->arch.psscr);
1782 break;
1783 case KVM_REG_PPC_VPA_ADDR:
1784 spin_lock(&vcpu->arch.vpa_update_lock);
1785 *val = get_reg_val(id, vcpu->arch.vpa.next_gpa);
1786 spin_unlock(&vcpu->arch.vpa_update_lock);
1787 break;
1788 case KVM_REG_PPC_VPA_SLB:
1789 spin_lock(&vcpu->arch.vpa_update_lock);
1790 val->vpaval.addr = vcpu->arch.slb_shadow.next_gpa;
1791 val->vpaval.length = vcpu->arch.slb_shadow.len;
1792 spin_unlock(&vcpu->arch.vpa_update_lock);
1793 break;
1794 case KVM_REG_PPC_VPA_DTL:
1795 spin_lock(&vcpu->arch.vpa_update_lock);
1796 val->vpaval.addr = vcpu->arch.dtl.next_gpa;
1797 val->vpaval.length = vcpu->arch.dtl.len;
1798 spin_unlock(&vcpu->arch.vpa_update_lock);
1799 break;
1800 case KVM_REG_PPC_TB_OFFSET:
1801 *val = get_reg_val(id, vcpu->arch.vcore->tb_offset);
1802 break;
1803 case KVM_REG_PPC_LPCR:
1804 case KVM_REG_PPC_LPCR_64:
1805 *val = get_reg_val(id, vcpu->arch.vcore->lpcr);
1806 break;
1807 case KVM_REG_PPC_PPR:
1808 *val = get_reg_val(id, vcpu->arch.ppr);
1809 break;
1810 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1811 case KVM_REG_PPC_TFHAR:
1812 *val = get_reg_val(id, vcpu->arch.tfhar);
1813 break;
1814 case KVM_REG_PPC_TFIAR:
1815 *val = get_reg_val(id, vcpu->arch.tfiar);
1816 break;
1817 case KVM_REG_PPC_TEXASR:
1818 *val = get_reg_val(id, vcpu->arch.texasr);
1819 break;
1820 case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31:
1821 i = id - KVM_REG_PPC_TM_GPR0;
1822 *val = get_reg_val(id, vcpu->arch.gpr_tm[i]);
1823 break;
1824 case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63:
1825 {
1826 int j;
1827 i = id - KVM_REG_PPC_TM_VSR0;
1828 if (i < 32)
1829 for (j = 0; j < TS_FPRWIDTH; j++)
1830 val->vsxval[j] = vcpu->arch.fp_tm.fpr[i][j];
1831 else {
1832 if (cpu_has_feature(CPU_FTR_ALTIVEC))
1833 val->vval = vcpu->arch.vr_tm.vr[i-32];
1834 else
1835 r = -ENXIO;
1836 }
1837 break;
1838 }
1839 case KVM_REG_PPC_TM_CR:
1840 *val = get_reg_val(id, vcpu->arch.cr_tm);
1841 break;
1842 case KVM_REG_PPC_TM_XER:
1843 *val = get_reg_val(id, vcpu->arch.xer_tm);
1844 break;
1845 case KVM_REG_PPC_TM_LR:
1846 *val = get_reg_val(id, vcpu->arch.lr_tm);
1847 break;
1848 case KVM_REG_PPC_TM_CTR:
1849 *val = get_reg_val(id, vcpu->arch.ctr_tm);
1850 break;
1851 case KVM_REG_PPC_TM_FPSCR:
1852 *val = get_reg_val(id, vcpu->arch.fp_tm.fpscr);
1853 break;
1854 case KVM_REG_PPC_TM_AMR:
1855 *val = get_reg_val(id, vcpu->arch.amr_tm);
1856 break;
1857 case KVM_REG_PPC_TM_PPR:
1858 *val = get_reg_val(id, vcpu->arch.ppr_tm);
1859 break;
1860 case KVM_REG_PPC_TM_VRSAVE:
1861 *val = get_reg_val(id, vcpu->arch.vrsave_tm);
1862 break;
1863 case KVM_REG_PPC_TM_VSCR:
1864 if (cpu_has_feature(CPU_FTR_ALTIVEC))
1865 *val = get_reg_val(id, vcpu->arch.vr_tm.vscr.u[3]);
1866 else
1867 r = -ENXIO;
1868 break;
1869 case KVM_REG_PPC_TM_DSCR:
1870 *val = get_reg_val(id, vcpu->arch.dscr_tm);
1871 break;
1872 case KVM_REG_PPC_TM_TAR:
1873 *val = get_reg_val(id, vcpu->arch.tar_tm);
1874 break;
1875 #endif
1876 case KVM_REG_PPC_ARCH_COMPAT:
1877 *val = get_reg_val(id, vcpu->arch.vcore->arch_compat);
1878 break;
1879 case KVM_REG_PPC_DEC_EXPIRY:
1880 *val = get_reg_val(id, vcpu->arch.dec_expires +
1881 vcpu->arch.vcore->tb_offset);
1882 break;
1883 case KVM_REG_PPC_ONLINE:
1884 *val = get_reg_val(id, vcpu->arch.online);
1885 break;
1886 case KVM_REG_PPC_PTCR:
1887 *val = get_reg_val(id, vcpu->kvm->arch.l1_ptcr);
1888 break;
1889 default:
1890 r = -EINVAL;
1891 break;
1892 }
1893
1894 return r;
1895 }
1896
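/*
 * Illustrative only, not part of this file: these accessors back the
 * KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls, so userspace would typically
 * reach kvmppc_set_one_reg_hv() below with something like
 *
 *	__u64 v = 0;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_PPC_DSCR,
 *		.addr = (__u64)&v,
 *	};
 *	ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
 *
 * where vcpu_fd is a vcpu file descriptor from KVM_CREATE_VCPU.
 */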
1897 static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
1898 union kvmppc_one_reg *val)
1899 {
1900 int r = 0;
1901 long int i;
1902 unsigned long addr, len;
1903
1904 switch (id) {
1905 case KVM_REG_PPC_HIOR:
1906 /* Only allow this to be set to zero */
1907 if (set_reg_val(id, *val))
1908 r = -EINVAL;
1909 break;
1910 case KVM_REG_PPC_DABR:
1911 vcpu->arch.dabr = set_reg_val(id, *val);
1912 break;
1913 case KVM_REG_PPC_DABRX:
1914 vcpu->arch.dabrx = set_reg_val(id, *val) & ~DABRX_HYP;
1915 break;
1916 case KVM_REG_PPC_DSCR:
1917 vcpu->arch.dscr = set_reg_val(id, *val);
1918 break;
1919 case KVM_REG_PPC_PURR:
1920 vcpu->arch.purr = set_reg_val(id, *val);
1921 break;
1922 case KVM_REG_PPC_SPURR:
1923 vcpu->arch.spurr = set_reg_val(id, *val);
1924 break;
1925 case KVM_REG_PPC_AMR:
1926 vcpu->arch.amr = set_reg_val(id, *val);
1927 break;
1928 case KVM_REG_PPC_UAMOR:
1929 vcpu->arch.uamor = set_reg_val(id, *val);
1930 break;
1931 case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCR1:
1932 i = id - KVM_REG_PPC_MMCR0;
1933 vcpu->arch.mmcr[i] = set_reg_val(id, *val);
1934 break;
1935 case KVM_REG_PPC_MMCR2:
1936 vcpu->arch.mmcr[2] = set_reg_val(id, *val);
1937 break;
1938 case KVM_REG_PPC_MMCRA:
1939 vcpu->arch.mmcra = set_reg_val(id, *val);
1940 break;
1941 case KVM_REG_PPC_MMCRS:
1942 vcpu->arch.mmcrs = set_reg_val(id, *val);
1943 break;
1944 case KVM_REG_PPC_MMCR3:
1945 vcpu->arch.mmcr[3] = set_reg_val(id, *val);
1946 break;
1947 case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8:
1948 i = id - KVM_REG_PPC_PMC1;
1949 vcpu->arch.pmc[i] = set_reg_val(id, *val);
1950 break;
1951 case KVM_REG_PPC_SPMC1 ... KVM_REG_PPC_SPMC2:
1952 i = id - KVM_REG_PPC_SPMC1;
1953 vcpu->arch.spmc[i] = set_reg_val(id, *val);
1954 break;
1955 case KVM_REG_PPC_SIAR:
1956 vcpu->arch.siar = set_reg_val(id, *val);
1957 break;
1958 case KVM_REG_PPC_SDAR:
1959 vcpu->arch.sdar = set_reg_val(id, *val);
1960 break;
1961 case KVM_REG_PPC_SIER:
1962 vcpu->arch.sier[0] = set_reg_val(id, *val);
1963 break;
1964 case KVM_REG_PPC_SIER2:
1965 vcpu->arch.sier[1] = set_reg_val(id, *val);
1966 break;
1967 case KVM_REG_PPC_SIER3:
1968 vcpu->arch.sier[2] = set_reg_val(id, *val);
1969 break;
1970 case KVM_REG_PPC_IAMR:
1971 vcpu->arch.iamr = set_reg_val(id, *val);
1972 break;
1973 case KVM_REG_PPC_PSPB:
1974 vcpu->arch.pspb = set_reg_val(id, *val);
1975 break;
1976 case KVM_REG_PPC_DPDES:
1977 vcpu->arch.vcore->dpdes = set_reg_val(id, *val);
1978 break;
1979 case KVM_REG_PPC_VTB:
1980 vcpu->arch.vcore->vtb = set_reg_val(id, *val);
1981 break;
1982 case KVM_REG_PPC_DAWR:
1983 vcpu->arch.dawr = set_reg_val(id, *val);
1984 break;
1985 case KVM_REG_PPC_DAWRX:
1986 vcpu->arch.dawrx = set_reg_val(id, *val) & ~DAWRX_HYP;
1987 break;
1988 case KVM_REG_PPC_CIABR:
1989 vcpu->arch.ciabr = set_reg_val(id, *val);
1990 /* Don't allow setting breakpoints in hypervisor code */
1991 if ((vcpu->arch.ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER)
1992 vcpu->arch.ciabr &= ~CIABR_PRIV; /* disable */
1993 break;
1994 case KVM_REG_PPC_CSIGR:
1995 vcpu->arch.csigr = set_reg_val(id, *val);
1996 break;
1997 case KVM_REG_PPC_TACR:
1998 vcpu->arch.tacr = set_reg_val(id, *val);
1999 break;
2000 case KVM_REG_PPC_TCSCR:
2001 vcpu->arch.tcscr = set_reg_val(id, *val);
2002 break;
2003 case KVM_REG_PPC_PID:
2004 vcpu->arch.pid = set_reg_val(id, *val);
2005 break;
2006 case KVM_REG_PPC_ACOP:
2007 vcpu->arch.acop = set_reg_val(id, *val);
2008 break;
2009 case KVM_REG_PPC_WORT:
2010 vcpu->arch.wort = set_reg_val(id, *val);
2011 break;
2012 case KVM_REG_PPC_TIDR:
2013 vcpu->arch.tid = set_reg_val(id, *val);
2014 break;
2015 case KVM_REG_PPC_PSSCR:
2016 vcpu->arch.psscr = set_reg_val(id, *val) & PSSCR_GUEST_VIS;
2017 break;
2018 case KVM_REG_PPC_VPA_ADDR:
2019 addr = set_reg_val(id, *val);
2020 r = -EINVAL;
2021 if (!addr && (vcpu->arch.slb_shadow.next_gpa ||
2022 vcpu->arch.dtl.next_gpa))
2023 break;
2024 r = set_vpa(vcpu, &vcpu->arch.vpa, addr, sizeof(struct lppaca));
2025 break;
2026 case KVM_REG_PPC_VPA_SLB:
2027 addr = val->vpaval.addr;
2028 len = val->vpaval.length;
2029 r = -EINVAL;
2030 if (addr && !vcpu->arch.vpa.next_gpa)
2031 break;
2032 r = set_vpa(vcpu, &vcpu->arch.slb_shadow, addr, len);
2033 break;
2034 case KVM_REG_PPC_VPA_DTL:
2035 addr = val->vpaval.addr;
2036 len = val->vpaval.length;
2037 r = -EINVAL;
2038 if (addr && (len < sizeof(struct dtl_entry) ||
2039 !vcpu->arch.vpa.next_gpa))
2040 break;
2041 len -= len % sizeof(struct dtl_entry);
2042 r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len);
2043 break;
2044 case KVM_REG_PPC_TB_OFFSET:
2045 /* round up to a multiple of 2^24, as TBU40 only sets the top 40 bits */
2046 vcpu->arch.vcore->tb_offset =
2047 ALIGN(set_reg_val(id, *val), 1UL << 24);
2048 break;
2049 case KVM_REG_PPC_LPCR:
2050 kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), true);
2051 break;
2052 case KVM_REG_PPC_LPCR_64:
2053 kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), false);
2054 break;
2055 case KVM_REG_PPC_PPR:
2056 vcpu->arch.ppr = set_reg_val(id, *val);
2057 break;
2058 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2059 case KVM_REG_PPC_TFHAR:
2060 vcpu->arch.tfhar = set_reg_val(id, *val);
2061 break;
2062 case KVM_REG_PPC_TFIAR:
2063 vcpu->arch.tfiar = set_reg_val(id, *val);
2064 break;
2065 case KVM_REG_PPC_TEXASR:
2066 vcpu->arch.texasr = set_reg_val(id, *val);
2067 break;
2068 case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31:
2069 i = id - KVM_REG_PPC_TM_GPR0;
2070 vcpu->arch.gpr_tm[i] = set_reg_val(id, *val);
2071 break;
2072 case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63:
2073 {
2074 int j;
2075 i = id - KVM_REG_PPC_TM_VSR0;
2076 if (i < 32)
2077 for (j = 0; j < TS_FPRWIDTH; j++)
2078 vcpu->arch.fp_tm.fpr[i][j] = val->vsxval[j];
2079 else
2080 if (cpu_has_feature(CPU_FTR_ALTIVEC))
2081 vcpu->arch.vr_tm.vr[i-32] = val->vval;
2082 else
2083 r = -ENXIO;
2084 break;
2085 }
2086 case KVM_REG_PPC_TM_CR:
2087 vcpu->arch.cr_tm = set_reg_val(id, *val);
2088 break;
2089 case KVM_REG_PPC_TM_XER:
2090 vcpu->arch.xer_tm = set_reg_val(id, *val);
2091 break;
2092 case KVM_REG_PPC_TM_LR:
2093 vcpu->arch.lr_tm = set_reg_val(id, *val);
2094 break;
2095 case KVM_REG_PPC_TM_CTR:
2096 vcpu->arch.ctr_tm = set_reg_val(id, *val);
2097 break;
2098 case KVM_REG_PPC_TM_FPSCR:
2099 vcpu->arch.fp_tm.fpscr = set_reg_val(id, *val);
2100 break;
2101 case KVM_REG_PPC_TM_AMR:
2102 vcpu->arch.amr_tm = set_reg_val(id, *val);
2103 break;
2104 case KVM_REG_PPC_TM_PPR:
2105 vcpu->arch.ppr_tm = set_reg_val(id, *val);
2106 break;
2107 case KVM_REG_PPC_TM_VRSAVE:
2108 vcpu->arch.vrsave_tm = set_reg_val(id, *val);
2109 break;
2110 case KVM_REG_PPC_TM_VSCR:
2111 if (cpu_has_feature(CPU_FTR_ALTIVEC))
2112 vcpu->arch.vr_tm.vscr.u[3] = set_reg_val(id, *val);
2113 else
2114 r = -ENXIO;
2115 break;
2116 case KVM_REG_PPC_TM_DSCR:
2117 vcpu->arch.dscr_tm = set_reg_val(id, *val);
2118 break;
2119 case KVM_REG_PPC_TM_TAR:
2120 vcpu->arch.tar_tm = set_reg_val(id, *val);
2121 break;
2122 #endif
2123 case KVM_REG_PPC_ARCH_COMPAT:
2124 r = kvmppc_set_arch_compat(vcpu, set_reg_val(id, *val));
2125 break;
2126 case KVM_REG_PPC_DEC_EXPIRY:
2127 vcpu->arch.dec_expires = set_reg_val(id, *val) -
2128 vcpu->arch.vcore->tb_offset;
2129 break;
2130 case KVM_REG_PPC_ONLINE:
2131 i = set_reg_val(id, *val);
2132 if (i && !vcpu->arch.online)
2133 atomic_inc(&vcpu->arch.vcore->online_count);
2134 else if (!i && vcpu->arch.online)
2135 atomic_dec(&vcpu->arch.vcore->online_count);
2136 vcpu->arch.online = i;
2137 break;
2138 case KVM_REG_PPC_PTCR:
2139 vcpu->kvm->arch.l1_ptcr = set_reg_val(id, *val);
2140 break;
2141 default:
2142 r = -EINVAL;
2143 break;
2144 }
2145
2146 return r;
2147 }
2148
2149 /*
2150 * On POWER9, threads are independent and can be in different partitions.
2151 * Therefore we consider each thread to be a subcore.
2152 * There is a restriction that all threads have to be in the same
2153 * MMU mode (radix or HPT), unfortunately, but since we only support
2154 * HPT guests on an HPT host so far, that isn't an impediment yet.
2155 */
2156 static int threads_per_vcore(struct kvm *kvm)
2157 {
2158 if (kvm->arch.threads_indep)
2159 return 1;
2160 return threads_per_subcore;
2161 }
2162
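/*
 * Allocate and initialize a virtual core: the unit of vcpus that get
 * scheduled onto one physical core together. 'id' is the vcpu ID of
 * the first thread in the vcore.
 */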
2163 static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int id)
2164 {
2165 struct kvmppc_vcore *vcore;
2166
2167 vcore = kzalloc(sizeof(struct kvmppc_vcore), GFP_KERNEL);
2168
2169 if (vcore == NULL)
2170 return NULL;
2171
2172 spin_lock_init(&vcore->lock);
2173 spin_lock_init(&vcore->stoltb_lock);
2174 rcuwait_init(&vcore->wait);
2175 vcore->preempt_tb = TB_NIL;
2176 vcore->lpcr = kvm->arch.lpcr;
2177 vcore->first_vcpuid = id;
2178 vcore->kvm = kvm;
2179 INIT_LIST_HEAD(&vcore->preempt_list);
2180
2181 return vcore;
2182 }
2183
2184 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
2185 static struct debugfs_timings_element {
2186 const char *name;
2187 size_t offset;
2188 } timings[] = {
2189 {"rm_entry", offsetof(struct kvm_vcpu, arch.rm_entry)},
2190 {"rm_intr", offsetof(struct kvm_vcpu, arch.rm_intr)},
2191 {"rm_exit", offsetof(struct kvm_vcpu, arch.rm_exit)},
2192 {"guest", offsetof(struct kvm_vcpu, arch.guest_time)},
2193 {"cede", offsetof(struct kvm_vcpu, arch.cede_time)},
2194 };
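/*
 * Each accumulator above becomes one line of the per-vcpu debugfs
 * "timings" file, formatted by debugfs_timings_read() below as
 * "<name>: <count> <total_ns> <min_ns> <max_ns>".
 */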
2195
2196 #define N_TIMINGS (ARRAY_SIZE(timings))
2197
2198 struct debugfs_timings_state {
2199 struct kvm_vcpu *vcpu;
2200 unsigned int buflen;
2201 char buf[N_TIMINGS * 100];
2202 };
2203
2204 static int debugfs_timings_open(struct inode *inode, struct file *file)
2205 {
2206 struct kvm_vcpu *vcpu = inode->i_private;
2207 struct debugfs_timings_state *p;
2208
2209 p = kzalloc(sizeof(*p), GFP_KERNEL);
2210 if (!p)
2211 return -ENOMEM;
2212
2213 kvm_get_kvm(vcpu->kvm);
2214 p->vcpu = vcpu;
2215 file->private_data = p;
2216
2217 return nonseekable_open(inode, file);
2218 }
2219
2220 static int debugfs_timings_release(struct inode *inode, struct file *file)
2221 {
2222 struct debugfs_timings_state *p = file->private_data;
2223
2224 kvm_put_kvm(p->vcpu->kvm);
2225 kfree(p);
2226 return 0;
2227 }
2228
2229 static ssize_t debugfs_timings_read(struct file *file, char __user *buf,
2230 size_t len, loff_t *ppos)
2231 {
2232 struct debugfs_timings_state *p = file->private_data;
2233 struct kvm_vcpu *vcpu = p->vcpu;
2234 char *s, *buf_end;
2235 struct kvmhv_tb_accumulator tb;
2236 u64 count;
2237 loff_t pos;
2238 ssize_t n;
2239 int i, loops;
2240 bool ok;
2241
2242 if (!p->buflen) {
2243 s = p->buf;
2244 buf_end = s + sizeof(p->buf);
2245 for (i = 0; i < N_TIMINGS; ++i) {
2246 struct kvmhv_tb_accumulator *acc;
2247
2248 acc = (struct kvmhv_tb_accumulator *)
2249 ((unsigned long)vcpu + timings[i].offset);
2250 ok = false;
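/*
 * Lock-free snapshot of the accumulator: the updater keeps an
 * even/odd seqcount, so retry until we read a stable, even
 * count on both sides of the copy.
 */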
2251 for (loops = 0; loops < 1000; ++loops) {
2252 count = acc->seqcount;
2253 if (!(count & 1)) {
2254 smp_rmb();
2255 tb = *acc;
2256 smp_rmb();
2257 if (count == acc->seqcount) {
2258 ok = true;
2259 break;
2260 }
2261 }
2262 udelay(1);
2263 }
2264 if (!ok)
2265 snprintf(s, buf_end - s, "%s: stuck\n",
2266 timings[i].name);
2267 else
2268 snprintf(s, buf_end - s,
2269 "%s: %llu %llu %llu %llu\n",
2270 timings[i].name, count / 2,
2271 tb_to_ns(tb.tb_total),
2272 tb_to_ns(tb.tb_min),
2273 tb_to_ns(tb.tb_max));
2274 s += strlen(s);
2275 }
2276 p->buflen = s - p->buf;
2277 }
2278
2279 pos = *ppos;
2280 if (pos >= p->buflen)
2281 return 0;
2282 if (len > p->buflen - pos)
2283 len = p->buflen - pos;
2284 n = copy_to_user(buf, p->buf + pos, len);
2285 if (n) {
2286 if (n == len)
2287 return -EFAULT;
2288 len -= n;
2289 }
2290 *ppos = pos + len;
2291 return len;
2292 }
2293
2294 static ssize_t debugfs_timings_write(struct file *file, const char __user *buf,
2295 size_t len, loff_t *ppos)
2296 {
2297 return -EACCES;
2298 }
2299
2300 static const struct file_operations debugfs_timings_ops = {
2301 .owner = THIS_MODULE,
2302 .open = debugfs_timings_open,
2303 .release = debugfs_timings_release,
2304 .read = debugfs_timings_read,
2305 .write = debugfs_timings_write,
2306 .llseek = generic_file_llseek,
2307 };
2308
2309 /* Create a debugfs directory for the vcpu */
2310 static void debugfs_vcpu_init(struct kvm_vcpu *vcpu, unsigned int id)
2311 {
2312 char buf[16];
2313 struct kvm *kvm = vcpu->kvm;
2314
2315 snprintf(buf, sizeof(buf), "vcpu%u", id);
2316 vcpu->arch.debugfs_dir = debugfs_create_dir(buf, kvm->arch.debugfs_dir);
2317 debugfs_create_file("timings", 0444, vcpu->arch.debugfs_dir, vcpu,
2318 &debugfs_timings_ops);
2319 }
2320
2321 #else /* CONFIG_KVM_BOOK3S_HV_EXIT_TIMING */
2322 static void debugfs_vcpu_init(struct kvm_vcpu *vcpu, unsigned int id)
2323 {
2324 }
2325 #endif /* CONFIG_KVM_BOOK3S_HV_EXIT_TIMING */
2326
2327 static int kvmppc_core_vcpu_create_hv(struct kvm_vcpu *vcpu)
2328 {
2329 int err;
2330 int core;
2331 struct kvmppc_vcore *vcore;
2332 struct kvm *kvm;
2333 unsigned int id;
2334
2335 kvm = vcpu->kvm;
2336 id = vcpu->vcpu_id;
2337
2338 vcpu->arch.shared = &vcpu->arch.shregs;
2339 #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
2340 /*
2341 * The shared struct is never shared on HV,
2342 * so we can always use host endianness
2343 */
2344 #ifdef __BIG_ENDIAN__
2345 vcpu->arch.shared_big_endian = true;
2346 #else
2347 vcpu->arch.shared_big_endian = false;
2348 #endif
2349 #endif
2350 vcpu->arch.mmcr[0] = MMCR0_FC;
2351 vcpu->arch.ctrl = CTRL_RUNLATCH;
2352 /* default to host PVR, since we can't spoof it */
2353 kvmppc_set_pvr_hv(vcpu, mfspr(SPRN_PVR));
2354 spin_lock_init(&vcpu->arch.vpa_update_lock);
2355 spin_lock_init(&vcpu->arch.tbacct_lock);
2356 vcpu->arch.busy_preempt = TB_NIL;
2357 vcpu->arch.intr_msr = MSR_SF | MSR_ME;
2358
2359 /*
2360 * Set the default HFSCR for the guest from the host value.
2361 * This value is only used on POWER9.
2362 * On POWER9, we want to virtualize the doorbell facility, so we
2363 * don't set the HFSCR_MSGP bit; those instructions then trap
2364 * and we emulate them.
2365 */
2366 vcpu->arch.hfscr = HFSCR_TAR | HFSCR_EBB | HFSCR_PM | HFSCR_BHRB |
2367 HFSCR_DSCR | HFSCR_VECVSX | HFSCR_FP | HFSCR_PREFIX;
2368 if (cpu_has_feature(CPU_FTR_HVMODE)) {
2369 vcpu->arch.hfscr &= mfspr(SPRN_HFSCR);
2370 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2371 if (cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
2372 vcpu->arch.hfscr |= HFSCR_TM;
2373 #endif
2374 }
2375 if (cpu_has_feature(CPU_FTR_TM_COMP))
2376 vcpu->arch.hfscr |= HFSCR_TM;
2377
2378 kvmppc_mmu_book3s_hv_init(vcpu);
2379
2380 vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
2381
2382 init_waitqueue_head(&vcpu->arch.cpu_run);
2383
2384 mutex_lock(&kvm->lock);
2385 vcore = NULL;
2386 err = -EINVAL;
2387 if (cpu_has_feature(CPU_FTR_ARCH_300)) {
2388 if (id >= (KVM_MAX_VCPUS * kvm->arch.emul_smt_mode)) {
2389 pr_devel("KVM: VCPU ID too high\n");
2390 core = KVM_MAX_VCORES;
2391 } else {
2392 BUG_ON(kvm->arch.smt_mode != 1);
2393 core = kvmppc_pack_vcpu_id(kvm, id);
2394 }
2395 } else {
2396 core = id / kvm->arch.smt_mode;
2397 }
2398 if (core < KVM_MAX_VCORES) {
2399 vcore = kvm->arch.vcores[core];
2400 if (vcore && cpu_has_feature(CPU_FTR_ARCH_300)) {
2401 pr_devel("KVM: collision on id %u\n", id);
2402 vcore = NULL;
2403 } else if (!vcore) {
2404 /*
2405 * Take mmu_setup_lock for mutual exclusion
2406 * with kvmppc_update_lpcr().
2407 */
2408 err = -ENOMEM;
2409 vcore = kvmppc_vcore_create(kvm,
2410 id & ~(kvm->arch.smt_mode - 1));
2411 mutex_lock(&kvm->arch.mmu_setup_lock);
2412 kvm->arch.vcores[core] = vcore;
2413 kvm->arch.online_vcores++;
2414 mutex_unlock(&kvm->arch.mmu_setup_lock);
2415 }
2416 }
2417 mutex_unlock(&kvm->lock);
2418
2419 if (!vcore)
2420 return err;
2421
2422 spin_lock(&vcore->lock);
2423 ++vcore->num_threads;
2424 spin_unlock(&vcore->lock);
2425 vcpu->arch.vcore = vcore;
2426 vcpu->arch.ptid = vcpu->vcpu_id - vcore->first_vcpuid;
2427 vcpu->arch.thread_cpu = -1;
2428 vcpu->arch.prev_cpu = -1;
2429
2430 vcpu->arch.cpu_type = KVM_CPU_3S_64;
2431 kvmppc_sanity_check(vcpu);
2432
2433 debugfs_vcpu_init(vcpu, id);
2434
2435 return 0;
2436 }
2437
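/*
 * Illustrative only, not part of this file: the VMM normally picks the
 * guest SMT mode via the KVM_CAP_PPC_SMT capability before creating
 * any vcpus, e.g.
 *
 *	struct kvm_enable_cap cap = {
 *		.cap  = KVM_CAP_PPC_SMT,
 *		.args = { 4, 0 },	(mode 4, no flags)
 *	};
 *	ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
 *
 * which ends up in kvmhv_set_smt_mode() below.
 */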
2438 static int kvmhv_set_smt_mode(struct kvm *kvm, unsigned long smt_mode,
2439 unsigned long flags)
2440 {
2441 int err;
2442 int esmt = 0;
2443
2444 if (flags)
2445 return -EINVAL;
2446 if (smt_mode > MAX_SMT_THREADS || !is_power_of_2(smt_mode))
2447 return -EINVAL;
2448 if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
2449 /*
2450 * On POWER8 (or POWER7), the threading mode is "strict",
2451 * so we pack smt_mode vcpus per vcore.
2452 */
2453 if (smt_mode > threads_per_subcore)
2454 return -EINVAL;
2455 } else {
2456 /*
2457 * On POWER9, the threading mode is "loose",
2458 * so each vcpu gets its own vcore.
2459 */
2460 esmt = smt_mode;
2461 smt_mode = 1;
2462 }
2463 mutex_lock(&kvm->lock);
2464 err = -EBUSY;
2465 if (!kvm->arch.online_vcores) {
2466 kvm->arch.smt_mode = smt_mode;
2467 kvm->arch.emul_smt_mode = esmt;
2468 err = 0;
2469 }
2470 mutex_unlock(&kvm->lock);
2471
2472 return err;
2473 }
2474
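/*
 * Unpin the guest page backing a VPA, if one is pinned; the page is
 * marked dirty if the guest may have modified it.
 */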
2475 static void unpin_vpa(struct kvm *kvm, struct kvmppc_vpa *vpa)
2476 {
2477 if (vpa->pinned_addr)
2478 kvmppc_unpin_guest_page(kvm, vpa->pinned_addr, vpa->gpa,
2479 vpa->dirty);
2480 }
2481
2482 static void kvmppc_core_vcpu_free_hv(struct kvm_vcpu *vcpu)
2483 {
2484 spin_lock(&vcpu->arch.vpa_update_lock);
2485 unpin_vpa(vcpu->kvm, &vcpu->arch.dtl);
2486 unpin_vpa(vcpu->kvm, &vcpu->arch.slb_shadow);
2487 unpin_vpa(vcpu->kvm, &vcpu->arch.vpa);
2488 spin_unlock(&vcpu->arch.vpa_update_lock);
2489 }
2490
2491 static int kvmppc_core_check_requests_hv(struct kvm_vcpu *vcpu)
2492 {
2493 /* Indicate we want to get back into the guest */
2494 return 1;
2495 }
2496
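/*
 * (Re)arm the vcpu's decrementer timer: if the guest decrementer has
 * already expired, queue the exception right away; otherwise start an
 * hrtimer for the time remaining.
 */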
2497 static void kvmppc_set_timer(struct kvm_vcpu *vcpu)
2498 {
2499 unsigned long dec_nsec, now;
2500
2501 now = get_tb();
2502 if (now > vcpu->arch.dec_expires) {
2503 /* decrementer has already gone negative */
2504 kvmppc_core_queue_dec(vcpu);
2505 kvmppc_core_prepare_to_enter(vcpu);
2506 return;
2507 }
2508 dec_nsec = tb_to_ns(vcpu->arch.dec_expires - now);
2509 hrtimer_start(&vcpu->arch.dec_timer, dec_nsec, HRTIMER_MODE_REL);
2510 vcpu->arch.timer_running = 1;
2511 }
2512
2513 extern int __kvmppc_vcore_entry(void);
2514
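/*
 * Take a vcpu out of its vcore's runnable set, accounting the stolen
 * time accrued so far and marking the vcpu busy in the host.
 */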
2515 static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
2516 struct kvm_vcpu *vcpu)
2517 {
2518 u64 now;
2519
2520 if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
2521 return;
2522 spin_lock_irq(&vcpu->arch.tbacct_lock);
2523 now = mftb();
2524 vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) -
2525 vcpu->arch.stolen_logged;
2526 vcpu->arch.busy_preempt = now;
2527 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
2528 spin_unlock_irq(&vcpu->arch.tbacct_lock);
2529 --vc->n_runnable;
2530 WRITE_ONCE(vc->runnable_threads[vcpu->arch.ptid], NULL);
2531 }
2532
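/*
 * Claim an offline secondary hardware thread for guest use, making
 * sure it stays in (or returns to) nap mode instead of running host
 * code.
 */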
2533 static int kvmppc_grab_hwthread(int cpu)
2534 {
2535 struct paca_struct *tpaca;
2536 long timeout = 10000;
2537
2538 tpaca = paca_ptrs[cpu];
2539
2540 /* Ensure the thread won't go into the kernel if it wakes */
2541 tpaca->kvm_hstate.kvm_vcpu = NULL;
2542 tpaca->kvm_hstate.kvm_vcore = NULL;
2543 tpaca->kvm_hstate.napping = 0;
2544 smp_wmb();
2545 tpaca->kvm_hstate.hwthread_req = 1;
2546
2547 /*
2548 * If the thread is already executing in the kernel (e.g. handling
2549 * a stray interrupt), wait for it to get back to nap mode.
2550 * The smp_mb() is to ensure that our setting of hwthread_req
2551 * is visible before we look at hwthread_state, so if this
2552 * races with the code at system_reset_pSeries and the thread
2553 * misses our setting of hwthread_req, we are sure to see its
2554 * setting of hwthread_state, and vice versa.
2555 */
2556 smp_mb();
2557 while (tpaca->kvm_hstate.hwthread_state == KVM_HWTHREAD_IN_KERNEL) {
2558 if (--timeout <= 0) {
2559 pr_err("KVM: couldn't grab cpu %d\n", cpu);
2560 return -EBUSY;
2561 }
2562 udelay(1);
2563 }
2564 return 0;
2565 }
2566
2567 static void kvmppc_release_hwthread(int cpu)
2568 {
2569 struct paca_struct *tpaca;
2570
2571 tpaca = paca_ptrs[cpu];
2572 tpaca->kvm_hstate.hwthread_req = 0;
2573 tpaca->kvm_hstate.kvm_vcpu = NULL;
2574 tpaca->kvm_hstate.kvm_vcore = NULL;
2575 tpaca->kvm_hstate.kvm_split_mode = NULL;
2576 }
2577
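/*
 * Arrange for the TLB on the given physical core to be flushed before
 * this (possibly nested) guest next runs there, and interrupt any of
 * that core's threads currently in the guest so the request is seen.
 */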
2578 static void radix_flush_cpu(struct kvm *kvm, int cpu, struct kvm_vcpu *vcpu)
2579 {
2580 struct kvm_nested_guest *nested = vcpu->arch.nested;
2581 cpumask_t *cpu_in_guest;
2582 int i;
2583
2584 cpu = cpu_first_tlb_thread_sibling(cpu);
2585 if (nested) {
2586 cpumask_set_cpu(cpu, &nested->need_tlb_flush);
2587 cpu_in_guest = &nested->cpu_in_guest;
2588 } else {
2589 cpumask_set_cpu(cpu, &kvm->arch.need_tlb_flush);
2590 cpu_in_guest = &kvm->arch.cpu_in_guest;
2591 }
2592 /*
2593 * Make sure setting of bit in need_tlb_flush precedes
2594 * testing of cpu_in_guest bits. The matching barrier on
2595 * the other side is the first smp_mb() in kvmppc_run_core().
2596 */
2597 smp_mb();
2598 for (i = cpu; i <= cpu_last_tlb_thread_sibling(cpu);
2599 i += cpu_tlb_thread_sibling_step())
2600 if (cpumask_test_cpu(i, cpu_in_guest))
2601 smp_call_function_single(i, do_nothing, NULL, 1);
2602 }
2603
2604 static void kvmppc_prepare_radix_vcpu(struct kvm_vcpu *vcpu, int pcpu)
2605 {
2606 struct kvm_nested_guest *nested = vcpu->arch.nested;
2607 struct kvm *kvm = vcpu->kvm;
2608 int prev_cpu;
2609
2610 if (!cpu_has_feature(CPU_FTR_HVMODE))
2611 return;
2612
2613 if (nested)
2614 prev_cpu = nested->prev_cpu[vcpu->arch.nested_vcpu_id];
2615 else
2616 prev_cpu = vcpu->arch.prev_cpu;
2617
2618 /*
2619 * With radix, the guest can do TLB invalidations itself,
2620 * and it could choose to use the local form (tlbiel) if
2621 * it is invalidating a translation that has only ever been
2622 * used on one vcpu. However, that doesn't mean it has
2623 * only ever been used on one physical cpu, since vcpus
2624 * can move around between pcpus. To cope with this, when
2625 * a vcpu moves from one pcpu to another, we need to tell
2626 * any vcpus running on the same core as this vcpu previously
2627 * ran to flush the TLB. The TLB is shared between threads,
2628 * so we use a single bit in .need_tlb_flush for all 4 threads.
2629 */
2630 if (prev_cpu != pcpu) {
2631 if (prev_cpu >= 0 &&
2632 cpu_first_tlb_thread_sibling(prev_cpu) !=
2633 cpu_first_tlb_thread_sibling(pcpu))
2634 radix_flush_cpu(kvm, prev_cpu, vcpu);
2635 if (nested)
2636 nested->prev_cpu[vcpu->arch.nested_vcpu_id] = pcpu;
2637 else
2638 vcpu->arch.prev_cpu = pcpu;
2639 }
2640 }
2641
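/*
 * Point a hardware thread's PACA at the vcpu/vcore it is to run, and
 * IPI it out of nap unless it is the calling thread itself.
 */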
2642 static void kvmppc_start_thread(struct kvm_vcpu *vcpu, struct kvmppc_vcore *vc)
2643 {
2644 int cpu;
2645 struct paca_struct *tpaca;
2646 struct kvm *kvm = vc->kvm;
2647
2648 cpu = vc->pcpu;
2649 if (vcpu) {
2650 if (vcpu->arch.timer_running) {
2651 hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
2652 vcpu->arch.timer_running = 0;
2653 }
2654 cpu += vcpu->arch.ptid;
2655 vcpu->cpu = vc->pcpu;
2656 vcpu->arch.thread_cpu = cpu;
2657 cpumask_set_cpu(cpu, &kvm->arch.cpu_in_guest);
2658 }
2659 tpaca = paca_ptrs[cpu];
2660 tpaca->kvm_hstate.kvm_vcpu = vcpu;
2661 tpaca->kvm_hstate.ptid = cpu - vc->pcpu;
2662 tpaca->kvm_hstate.fake_suspend = 0;
2663 /* Order stores to hstate.kvm_vcpu etc. before store to kvm_vcore */
2664 smp_wmb();
2665 tpaca->kvm_hstate.kvm_vcore = vc;
2666 if (cpu != smp_processor_id())
2667 kvmppc_ipi_thread(cpu);
2668 }
2669
2670 static void kvmppc_wait_for_nap(int n_threads)
2671 {
2672 int cpu = smp_processor_id();
2673 int i, loops;
2674
2675 if (n_threads <= 1)
2676 return;
2677 for (loops = 0; loops < 1000000; ++loops) {
2678 /*
2679 * Check if all threads are finished.
2680 * We set the vcore pointer when starting a thread
2681 * and the thread clears it when finished, so we look
2682 * for any threads that still have a non-NULL vcore ptr.
2683 */
2684 for (i = 1; i < n_threads; ++i)
2685 if (paca_ptrs[cpu + i]->kvm_hstate.kvm_vcore)
2686 break;
2687 if (i == n_threads) {
2688 HMT_medium();
2689 return;
2690 }
2691 HMT_low();
2692 }
2693 HMT_medium();
2694 for (i = 1; i < n_threads; ++i)
2695 if (paca_ptrs[cpu + i]->kvm_hstate.kvm_vcore)
2696 pr_err("KVM: CPU %d seems to be stuck\n", cpu + i);
2697 }
2698
2699 /*
2700 * Check that we are on thread 0 and that any other threads in
2701 * this core are off-line. Then grab the threads so they can't
2702 * enter the kernel.
2703 */
2704 static int on_primary_thread(void)
2705 {
2706 int cpu = smp_processor_id();
2707 int thr;
2708
2709 /* Are we on a primary subcore? */
2710 if (cpu_thread_in_subcore(cpu))
2711 return 0;
2712
2713 thr = 0;
2714 while (++thr < threads_per_subcore)
2715 if (cpu_online(cpu + thr))
2716 return 0;
2717
2718 /* Grab all hw threads so they can't go into the kernel */
2719 for (thr = 1; thr < threads_per_subcore; ++thr) {
2720 if (kvmppc_grab_hwthread(cpu + thr)) {
2721 /* Couldn't grab one; let the others go */
2722 do {
2723 kvmppc_release_hwthread(cpu + thr);
2724 } while (--thr > 0);
2725 return 0;
2726 }
2727 }
2728 return 1;
2729 }
2730
2731 /*
2732 * A list of virtual cores for each physical CPU.
2733 * These are vcores that could run but their runner VCPU tasks are
2734 * (or may be) preempted.
2735 */
2736 struct preempted_vcore_list {
2737 struct list_head list;
2738 spinlock_t lock;
2739 };
2740
2741 static DEFINE_PER_CPU(struct preempted_vcore_list, preempted_vcores);
2742
2743 static void init_vcore_lists(void)
2744 {
2745 int cpu;
2746
2747 for_each_possible_cpu(cpu) {
2748 struct preempted_vcore_list *lp = &per_cpu(preempted_vcores, cpu);
2749 spin_lock_init(&lp->lock);
2750 INIT_LIST_HEAD(&lp->list);
2751 }
2752 }
2753
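/*
 * Called when a vcore's runner task is (or may be) preempted: if the
 * vcore has spare thread slots, put it on this CPU's preempted-vcores
 * list so collect_piggybacks() can run it alongside another vcore.
 */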
2754 static void kvmppc_vcore_preempt(struct kvmppc_vcore *vc)
2755 {
2756 struct preempted_vcore_list *lp = this_cpu_ptr(&preempted_vcores);
2757
2758 vc->vcore_state = VCORE_PREEMPT;
2759 vc->pcpu = smp_processor_id();
2760 if (vc->num_threads < threads_per_vcore(vc->kvm)) {
2761 spin_lock(&lp->lock);
2762 list_add_tail(&vc->preempt_list, &lp->list);
2763 spin_unlock(&lp->lock);
2764 }
2765
2766 /* Start accumulating stolen time */
2767 kvmppc_core_start_stolen(vc);
2768 }
2769
2770 static void kvmppc_vcore_end_preempt(struct kvmppc_vcore *vc)
2771 {
2772 struct preempted_vcore_list *lp;
2773
2774 kvmppc_core_end_stolen(vc);
2775 if (!list_empty(&vc->preempt_list)) {
2776 lp = &per_cpu(preempted_vcores, vc->pcpu);
2777 spin_lock(&lp->lock);
2778 list_del_init(&vc->preempt_list);
2779 spin_unlock(&lp->lock);
2780 }
2781 vc->vcore_state = VCORE_INACTIVE;
2782 }
2783
2784 /*
2785 * This stores information about the virtual cores currently
2786 * assigned to a physical core.
2787 */
2788 struct core_info {
2789 int n_subcores;
2790 int max_subcore_threads;
2791 int total_threads;
2792 int subcore_threads[MAX_SUBCORES];
2793 struct kvmppc_vcore *vc[MAX_SUBCORES];
2794 };
2795
2796 /*
2797 * This mapping means subcores 0 and 1 can use threads 0-3 and 4-7
2798 * respectively in 2-way micro-threading (split-core) mode on POWER8.
2799 */
2800 static int subcore_thread_map[MAX_SUBCORES] = { 0, 4, 2, 6 };
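/*
 * In 4-way mode the same map starts subcores 0-3 at threads 0, 4, 2
 * and 6 respectively, giving each subcore a pair of adjacent threads.
 */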
2801
2802 static void init_core_info(struct core_info *cip, struct kvmppc_vcore *vc)
2803 {
2804 memset(cip, 0, sizeof(*cip));
2805 cip->n_subcores = 1;
2806 cip->max_subcore_threads = vc->num_threads;
2807 cip->total_threads = vc->num_threads;
2808 cip->subcore_threads[0] = vc->num_threads;
2809 cip->vc[0] = vc;
2810 }
2811
2812 static bool subcore_config_ok(int n_subcores, int n_threads)
2813 {
2814 /*
2815 * POWER9 "SMT4" cores are permanently in what is effectively a 4-way
2816 * split-core mode, with one thread per subcore.
2817 */
2818 if (cpu_has_feature(CPU_FTR_ARCH_300))
2819 return n_subcores <= 4 && n_threads == 1;
2820
2821 /* On POWER8, can only dynamically split if unsplit to begin with */
2822 if (n_subcores > 1 && threads_per_subcore < MAX_SMT_THREADS)
2823 return false;
2824 if (n_subcores > MAX_SUBCORES)
2825 return false;
2826 if (n_subcores > 1) {
2827 if (!(dynamic_mt_modes & 2))
2828 n_subcores = 4; /* 2-way split not allowed, so check as 4-way */
2829 if (n_subcores > 2 && !(dynamic_mt_modes & 4))
2830 return false;
2831 }
2832
2833 return n_subcores * roundup_pow_of_two(n_threads) <= MAX_SMT_THREADS;
2834 }
2835
2836 static void init_vcore_to_run(struct kvmppc_vcore *vc)
2837 {
2838 vc->entry_exit_map = 0;
2839 vc->in_guest = 0;
2840 vc->napping_threads = 0;
2841 vc->conferring_threads = 0;
2842 vc->tb_offset_applied = 0;
2843 }
2844
2845 static bool can_dynamic_split(struct kvmppc_vcore *vc, struct core_info *cip)
2846 {
2847 int n_threads = vc->num_threads;
2848 int sub;
2849
2850 if (!cpu_has_feature(CPU_FTR_ARCH_207S))
2851 return false;
2852
2853 /* In one_vm_per_core mode, require all vcores to be from the same vm */
2854 if (one_vm_per_core && vc->kvm != cip->vc[0]->kvm)
2855 return false;
2856
2857 /* Some POWER9 chips require all threads to be in the same MMU mode */
2858 if (no_mixing_hpt_and_radix &&
2859 kvm_is_radix(vc->kvm) != kvm_is_radix(cip->vc[0]->kvm))
2860 return false;
2861
2862 if (n_threads < cip->max_subcore_threads)
2863 n_threads = cip->max_subcore_threads;
2864 if (!subcore_config_ok(cip->n_subcores + 1, n_threads))
2865 return false;
2866 cip->max_subcore_threads = n_threads;
2867
2868 sub = cip->n_subcores;
2869 ++cip->n_subcores;
2870 cip->total_threads += vc->num_threads;
2871 cip->subcore_threads[sub] = vc->num_threads;
2872 cip->vc[sub] = vc;
2873 init_vcore_to_run(vc);
2874 list_del_init(&vc->preempt_list);
2875
2876 return true;
2877 }
2878
2879 /*
2880 * Work out whether it is possible to piggyback the execution of
2881 * vcore *pvc onto the execution of the other vcores described in *cip.
2882 */
2883 static bool can_piggyback(struct kvmppc_vcore *pvc, struct core_info *cip,
2884 int target_threads)
2885 {
2886 if (cip->total_threads + pvc->num_threads > target_threads)
2887 return false;
2888
2889 return can_dynamic_split(pvc, cip);
2890 }
2891
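/*
 * Drop from the runnable set any vcpus that cannot enter the guest
 * right now (signal pending, or a VPA update to do) and wake their
 * run tasks to deal with it.
 */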
2892 static void prepare_threads(struct kvmppc_vcore *vc)
2893 {
2894 int i;
2895 struct kvm_vcpu *vcpu;
2896
2897 for_each_runnable_thread(i, vcpu, vc) {
2898 if (signal_pending(vcpu->arch.run_task))
2899 vcpu->arch.ret = -EINTR;
2900 else if (vcpu->arch.vpa.update_pending ||
2901 vcpu->arch.slb_shadow.update_pending ||
2902 vcpu->arch.dtl.update_pending)
2903 vcpu->arch.ret = RESUME_GUEST;
2904 else
2905 continue;
2906 kvmppc_remove_runnable(vc, vcpu);
2907 wake_up(&vcpu->arch.cpu_run);
2908 }
2909 }
2910
2911 static void collect_piggybacks(struct core_info *cip, int target_threads)
2912 {
2913 struct preempted_vcore_list *lp = this_cpu_ptr(&preempted_vcores);
2914 struct kvmppc_vcore *pvc, *vcnext;
2915
2916 spin_lock(&lp->lock);
2917 list_for_each_entry_safe(pvc, vcnext, &lp->list, preempt_list) {
2918 if (!spin_trylock(&pvc->lock))
2919 continue;
2920 prepare_threads(pvc);
2921 if (!pvc->n_runnable || !pvc->kvm->arch.mmu_ready) {
2922 list_del_init(&pvc->preempt_list);
2923 if (pvc->runner == NULL) {
2924 pvc->vcore_state = VCORE_INACTIVE;
2925 kvmppc_core_end_stolen(pvc);
2926 }
2927 spin_unlock(&pvc->lock);
2928 continue;
2929 }
2930 if (!can_piggyback(pvc, cip, target_threads)) {
2931 spin_unlock(&pvc->lock);
2932 continue;
2933 }
2934 kvmppc_core_end_stolen(pvc);
2935 pvc->vcore_state = VCORE_PIGGYBACK;
2936 if (cip->total_threads >= target_threads)
2937 break;
2938 }
2939 spin_unlock(&lp->lock);
2940 }
2941
2942 static bool recheck_signals_and_mmu(struct core_info *cip)
2943 {
2944 int sub, i;
2945 struct kvm_vcpu *vcpu;
2946 struct kvmppc_vcore *vc;
2947
2948 for (sub = 0; sub < cip->n_subcores; ++sub) {
2949 vc = cip->vc[sub];
2950 if (!vc->kvm->arch.mmu_ready)
2951 return true;
2952 for_each_runnable_thread(i, vcpu, vc)
2953 if (signal_pending(vcpu->arch.run_task))
2954 return true;
2955 }
2956 return false;
2957 }
2958
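/*
 * Process the vcpus of a vcore after the physical core exits the
 * guest: handle each vcpu's exit, keep it runnable or remove it as
 * appropriate, and choose the vcore's next state.
 */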
2959 static void post_guest_process(struct kvmppc_vcore *vc, bool is_master)
2960 {
2961 int still_running = 0, i;
2962 u64 now;
2963 long ret;
2964 struct kvm_vcpu *vcpu;
2965
2966 spin_lock(&vc->lock);
2967 now = get_tb();
2968 for_each_runnable_thread(i, vcpu, vc) {
2969 /*
2970 * It's safe to unlock the vcore in the loop here, because
2971 * for_each_runnable_thread() is safe against removal of
2972 * the vcpu, and the vcore state is VCORE_EXITING here,
2973 * so any vcpus becoming runnable will have their arch.trap
2974 * set to zero and can't actually run in the guest.
2975 */
2976 spin_unlock(&vc->lock);
2977 /* cancel pending dec exception if dec is positive */
2978 if (now < vcpu->arch.dec_expires &&
2979 kvmppc_core_pending_dec(vcpu))
2980 kvmppc_core_dequeue_dec(vcpu);
2981
2982 trace_kvm_guest_exit(vcpu);
2983
2984 ret = RESUME_GUEST;
2985 if (vcpu->arch.trap)
2986 ret = kvmppc_handle_exit_hv(vcpu,
2987 vcpu->arch.run_task);
2988
2989 vcpu->arch.ret = ret;
2990 vcpu->arch.trap = 0;
2991
2992 spin_lock(&vc->lock);
2993 if (is_kvmppc_resume_guest(vcpu->arch.ret)) {
2994 if (vcpu->arch.pending_exceptions)
2995 kvmppc_core_prepare_to_enter(vcpu);
2996 if (vcpu->arch.ceded)
2997 kvmppc_set_timer(vcpu);
2998 else
2999 ++still_running;
3000 } else {
3001 kvmppc_remove_runnable(vc, vcpu);
3002 wake_up(&vcpu->arch.cpu_run);
3003 }
3004 }
3005 if (!is_master) {
3006 if (still_running > 0) {
3007 kvmppc_vcore_preempt(vc);
3008 } else if (vc->runner) {
3009 vc->vcore_state = VCORE_PREEMPT;
3010 kvmppc_core_start_stolen(vc);
3011 } else {
3012 vc->vcore_state = VCORE_INACTIVE;
3013 }
3014 if (vc->n_runnable > 0 && vc->runner == NULL) {
3015 /* make sure there's a candidate runner awake */
3016 i = -1;
3017 vcpu = next_runnable_thread(vc, &i);
3018 wake_up(&vcpu->arch.cpu_run);
3019 }
3020 }
3021 spin_unlock(&vc->lock);
3022 }
3023
3024 /*
3025 * Clear core from the list of active host cores as we are about to
3026 * enter the guest. Only do this if it is the primary thread of the
3027 * core (not if a subcore) that is entering the guest.
3028 */
3029 static inline int kvmppc_clear_host_core(unsigned int cpu)
3030 {
3031 int core;
3032
3033 if (!kvmppc_host_rm_ops_hv || cpu_thread_in_core(cpu))
3034 return 0;
3035 /*
3036 * Memory barrier can be omitted here as we will do a smp_wmb()
3037 * later in kvmppc_start_thread and we need to ensure that state is
3038 * visible to other CPUs only after we enter guest.
3039 */
3040 core = cpu >> threads_shift;
3041 kvmppc_host_rm_ops_hv->rm_core[core].rm_state.in_host = 0;
3042 return 0;
3043 }
3044
3045 /*
3046 * Advertise this core as an active host core since we exited the guest.
3047 * Only need to do this if it is the primary thread of the core that is
3048 * exiting.
3049 */
3050 static inline int kvmppc_set_host_core(unsigned int cpu)
3051 {
3052 int core;
3053
3054 if (!kvmppc_host_rm_ops_hv || cpu_thread_in_core(cpu))
3055 return 0;
3056
3057 /*
3058 * Memory barrier can be omitted here because we do a spin_unlock
3059 * immediately after this which provides the memory barrier.
3060 */
3061 core = cpu >> threads_shift;
3062 kvmppc_host_rm_ops_hv->rm_core[core].rm_state.in_host = 1;
3063 return 0;
3064 }
3065
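/*
 * Record, in the lazy interrupt bookkeeping, an interrupt that caused
 * a guest exit so the host replays it once interrupts are re-enabled;
 * a system reset is replayed immediately.
 */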
3066 static void set_irq_happened(int trap)
3067 {
3068 switch (trap) {
3069 case BOOK3S_INTERRUPT_EXTERNAL:
3070 local_paca->irq_happened |= PACA_IRQ_EE;
3071 break;
3072 case BOOK3S_INTERRUPT_H_DOORBELL:
3073 local_paca->irq_happened |= PACA_IRQ_DBELL;
3074 break;
3075 case BOOK3S_INTERRUPT_HMI:
3076 local_paca->irq_happened |= PACA_IRQ_HMI;
3077 break;
3078 case BOOK3S_INTERRUPT_SYSTEM_RESET:
3079 replay_system_reset();
3080 break;
3081 }
3082 }
3083
3084 /*
3085 * Run a set of guest threads on a physical core.
3086 * Called with vc->lock held.
3087 */
3088 static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
3089 {
3090 struct kvm_vcpu *vcpu;
3091 int i;
3092 int srcu_idx;
3093 struct core_info core_info;
3094 struct kvmppc_vcore *pvc;
3095 struct kvm_split_mode split_info, *sip;
3096 int split, subcore_size, active;
3097 int sub;
3098 bool thr0_done;
3099 unsigned long cmd_bit, stat_bit;
3100 int pcpu, thr;
3101 int target_threads;
3102 int controlled_threads;
3103 int trap;
3104 bool is_power8;
3105 bool hpt_on_radix;
3106
3107 /*
3108 * Remove from the list any threads that have a signal pending
3109 * or need a VPA update done
3110 */
3111 prepare_threads(vc);
3112
3113 /* if the runner is no longer runnable, let the caller pick a new one */
3114 if (vc->runner->arch.state != KVMPPC_VCPU_RUNNABLE)
3115 return;
3116
3117 /*
3118 * Initialize *vc.
3119 */
3120 init_vcore_to_run(vc);
3121 vc->preempt_tb = TB_NIL;
3122
3123 /*
3124 * Number of threads that we will be controlling: the same as
3125 * the number of threads per subcore, except on POWER9,
3126 * where it's 1 because the threads are (mostly) independent.
3127 */
3128 controlled_threads = threads_per_vcore(vc->kvm);
3129
3130 /*
3131 * Make sure we are running on primary threads, and that secondary
3132 * threads are offline. Also check that the number of threads in this
3133 * guest does not exceed the current system threads per guest.
3134 * On POWER9, we must not be in independent-threads mode if
3135 * this is an HPT guest on a radix host machine, where the
3136 * CPU threads may not be in different MMU modes.
3137 */
3138 hpt_on_radix = no_mixing_hpt_and_radix && radix_enabled() &&
3139 !kvm_is_radix(vc->kvm);
3140 if (((controlled_threads > 1) &&
3141 ((vc->num_threads > threads_per_subcore) || !on_primary_thread())) ||
3142 (hpt_on_radix && vc->kvm->arch.threads_indep)) {
3143 for_each_runnable_thread(i, vcpu, vc) {
3144 vcpu->arch.ret = -EBUSY;
3145 kvmppc_remove_runnable(vc, vcpu);
3146 wake_up(&vcpu->arch.cpu_run);
3147 }
3148 goto out;
3149 }
3150
3151 /*
3152 * See if we could run any other vcores on the physical core
3153 * along with this one.
3154 */
3155 init_core_info(&core_info, vc);
3156 pcpu = smp_processor_id();
3157 target_threads = controlled_threads;
3158 if (target_smt_mode && target_smt_mode < target_threads)
3159 target_threads = target_smt_mode;
3160 if (vc->num_threads < target_threads)
3161 collect_piggybacks(&core_info, target_threads);
3162
3163 /*
3164 * On radix, arrange for TLB flushing if necessary.
3165 * This has to be done before disabling interrupts since
3166 * it uses smp_call_function().
3167 */
3168 pcpu = smp_processor_id();
3169 if (kvm_is_radix(vc->kvm)) {
3170 for (sub = 0; sub < core_info.n_subcores; ++sub)
3171 for_each_runnable_thread(i, vcpu, core_info.vc[sub])
3172 kvmppc_prepare_radix_vcpu(vcpu, pcpu);
3173 }
3174
3175 /*
3176 * Hard-disable interrupts, and check resched flag and signals.
3177 * If we need to reschedule or deliver a signal, clean up
3178 * and return without going into the guest(s).
3179 * If the mmu_ready flag has been cleared, don't go into the
3180 * guest because that means a HPT resize operation is in progress.
3181 */
3182 local_irq_disable();
3183 hard_irq_disable();
3184 if (lazy_irq_pending() || need_resched() ||
3185 recheck_signals_and_mmu(&core_info)) {
3186 local_irq_enable();
3187 vc->vcore_state = VCORE_INACTIVE;
3188 /* Unlock all except the primary vcore */
3189 for (sub = 1; sub < core_info.n_subcores; ++sub) {
3190 pvc = core_info.vc[sub];
3191 /* Put back on to the preempted vcores list */
3192 kvmppc_vcore_preempt(pvc);
3193 spin_unlock(&pvc->lock);
3194 }
3195 for (i = 0; i < controlled_threads; ++i)
3196 kvmppc_release_hwthread(pcpu + i);
3197 return;
3198 }
3199
3200 kvmppc_clear_host_core(pcpu);
3201
3202 /* Decide on micro-threading (split-core) mode */
3203 subcore_size = threads_per_subcore;
3204 cmd_bit = stat_bit = 0;
3205 split = core_info.n_subcores;
3206 sip = NULL;
3207 is_power8 = cpu_has_feature(CPU_FTR_ARCH_207S)
3208 && !cpu_has_feature(CPU_FTR_ARCH_300);
3209
3210 if (split > 1 || hpt_on_radix) {
3211 sip = &split_info;
3212 memset(&split_info, 0, sizeof(split_info));
3213 for (sub = 0; sub < core_info.n_subcores; ++sub)
3214 split_info.vc[sub] = core_info.vc[sub];
3215
3216 if (is_power8) {
3217 if (split == 2 && (dynamic_mt_modes & 2)) {
3218 cmd_bit = HID0_POWER8_1TO2LPAR;
3219 stat_bit = HID0_POWER8_2LPARMODE;
3220 } else {
3221 split = 4;
3222 cmd_bit = HID0_POWER8_1TO4LPAR;
3223 stat_bit = HID0_POWER8_4LPARMODE;
3224 }
3225 subcore_size = MAX_SMT_THREADS / split;
3226 split_info.rpr = mfspr(SPRN_RPR);
3227 split_info.pmmar = mfspr(SPRN_PMMAR);
3228 split_info.ldbar = mfspr(SPRN_LDBAR);
3229 split_info.subcore_size = subcore_size;
3230 } else {
3231 split_info.subcore_size = 1;
3232 if (hpt_on_radix) {
3233 /* Use the split_info for LPCR/LPIDR changes */
3234 split_info.lpcr_req = vc->lpcr;
3235 split_info.lpidr_req = vc->kvm->arch.lpid;
3236 split_info.host_lpcr = vc->kvm->arch.host_lpcr;
3237 split_info.do_set = 1;
3238 }
3239 }
3240
3241 /* order writes to split_info before kvm_split_mode pointer */
3242 smp_wmb();
3243 }
3244
3245 for (thr = 0; thr < controlled_threads; ++thr) {
3246 struct paca_struct *paca = paca_ptrs[pcpu + thr];
3247
3248 paca->kvm_hstate.tid = thr;
3249 paca->kvm_hstate.napping = 0;
3250 paca->kvm_hstate.kvm_split_mode = sip;
3251 }
3252
3253 /* Initiate micro-threading (split-core) on POWER8 if required */
3254 if (cmd_bit) {
3255 unsigned long hid0 = mfspr(SPRN_HID0);
3256
3257 hid0 |= cmd_bit | HID0_POWER8_DYNLPARDIS;
3258 mb();
3259 mtspr(SPRN_HID0, hid0);
3260 isync();
3261 for (;;) {
3262 hid0 = mfspr(SPRN_HID0);
3263 if (hid0 & stat_bit)
3264 break;
3265 cpu_relax();
3266 }
3267 }
3268
3269 /*
3270 * On POWER8, set RWMR register.
3271 * Since it only affects PURR and SPURR, it doesn't affect
3272 * the host, so we don't save/restore the host value.
3273 */
3274 if (is_power8) {
3275 unsigned long rwmr_val = RWMR_RPA_P8_8THREAD;
3276 int n_online = atomic_read(&vc->online_count);
3277
3278 /*
3279 * Use the 8-thread value if we're doing split-core
3280 * or if the vcore's online count looks bogus.
3281 */
3282 if (split == 1 && threads_per_subcore == MAX_SMT_THREADS &&
3283 n_online >= 1 && n_online <= MAX_SMT_THREADS)
3284 rwmr_val = p8_rwmr_values[n_online];
3285 mtspr(SPRN_RWMR, rwmr_val);
3286 }
3287
3288 /* Start all the threads */
3289 active = 0;
3290 for (sub = 0; sub < core_info.n_subcores; ++sub) {
3291 thr = is_power8 ? subcore_thread_map[sub] : sub;
3292 thr0_done = false;
3293 active |= 1 << thr;
3294 pvc = core_info.vc[sub];
3295 pvc->pcpu = pcpu + thr;
3296 for_each_runnable_thread(i, vcpu, pvc) {
3297 kvmppc_start_thread(vcpu, pvc);
3298 kvmppc_create_dtl_entry(vcpu, pvc);
3299 trace_kvm_guest_enter(vcpu);
3300 if (!vcpu->arch.ptid)
3301 thr0_done = true;
3302 active |= 1 << (thr + vcpu->arch.ptid);
3303 }
3304 /*
3305 * We need to start the first thread of each subcore
3306 * even if it doesn't have a vcpu.
3307 */
3308 if (!thr0_done)
3309 kvmppc_start_thread(NULL, pvc);
3310 }
3311
3312 /*
3313 * Ensure that split_info.do_nap is set after setting
3314 * the vcore pointer in the PACA of the secondaries.
3315 */
3316 smp_mb();
3317
3318 /*
3319 * When doing micro-threading, poke the inactive threads as well.
3320 * This gets them to the nap instruction after kvm_do_nap,
3321 * which reduces the time taken to unsplit later.
3322 * For POWER9 HPT guest on radix host, we need all the secondary
3323 * threads woken up so they can do the LPCR/LPIDR change.
3324 */
3325 if (cmd_bit || hpt_on_radix) {
3326 split_info.do_nap = 1; /* ask secondaries to nap when done */
3327 for (thr = 1; thr < threads_per_subcore; ++thr)
3328 if (!(active & (1 << thr)))
3329 kvmppc_ipi_thread(pcpu + thr);
3330 }
3331
3332 vc->vcore_state = VCORE_RUNNING;
3333 preempt_disable();
3334
3335 trace_kvmppc_run_core(vc, 0);
3336
3337 for (sub = 0; sub < core_info.n_subcores; ++sub)
3338 spin_unlock(&core_info.vc[sub]->lock);
3339
3340 guest_enter_irqoff();
3341
3342 srcu_idx = srcu_read_lock(&vc->kvm->srcu);
3343
3344 this_cpu_disable_ftrace();
3345
3346 /*
3347 * Interrupts will be enabled once we get into the guest,
3348 * so tell lockdep that we're about to enable interrupts.
3349 */
3350 trace_hardirqs_on();
3351
3352 trap = __kvmppc_vcore_entry();
3353
3354 trace_hardirqs_off();
3355
3356 this_cpu_enable_ftrace();
3357
3358 srcu_read_unlock(&vc->kvm->srcu, srcu_idx);
3359
3360 set_irq_happened(trap);
3361
3362 spin_lock(&vc->lock);
3363 /* prevent other vcpu threads from doing kvmppc_start_thread() now */
3364 vc->vcore_state = VCORE_EXITING;
3365
3366 /* wait for secondary threads to finish writing their state to memory */
3367 kvmppc_wait_for_nap(controlled_threads);
3368
3369 /* Return to whole-core mode if we split the core earlier */
3370 if (cmd_bit) {
3371 unsigned long hid0 = mfspr(SPRN_HID0);
3372 unsigned long loops = 0;
3373
3374 hid0 &= ~HID0_POWER8_DYNLPARDIS;
3375 stat_bit = HID0_POWER8_2LPARMODE | HID0_POWER8_4LPARMODE;
3376 mb();
3377 mtspr(SPRN_HID0, hid0);
3378 isync();
3379 for (;;) {
3380 hid0 = mfspr(SPRN_HID0);
3381 if (!(hid0 & stat_bit))
3382 break;
3383 cpu_relax();
3384 ++loops;
3385 }
3386 } else if (hpt_on_radix) {
3387 /* Wait for all threads to have seen final sync */
3388 for (thr = 1; thr < controlled_threads; ++thr) {
3389 struct paca_struct *paca = paca_ptrs[pcpu + thr];
3390
3391 while (paca->kvm_hstate.kvm_split_mode) {
3392 HMT_low();
3393 barrier();
3394 }
3395 HMT_medium();
3396 }
3397 }
3398 split_info.do_nap = 0;
3399
3400 kvmppc_set_host_core(pcpu);
3401
3402 context_tracking_guest_exit();
3403 if (!vtime_accounting_enabled_this_cpu()) {
3404 local_irq_enable();
3405 /*
3406 * Service IRQs here before vtime_account_guest_exit() so any
3407 * ticks that occurred while running the guest are accounted to
3408 * the guest. If vtime accounting is enabled, accounting uses
3409 * TB rather than ticks, so it can be done without enabling
3410 * interrupts here, which has the problem that it accounts
3411 * interrupt processing overhead to the host.
3412 */
3413 local_irq_disable();
3414 }
3415 vtime_account_guest_exit();
3416
3417 local_irq_enable();
3418
3419 /* Let secondaries go back to the offline loop */
3420 for (i = 0; i < controlled_threads; ++i) {
3421 kvmppc_release_hwthread(pcpu + i);
3422 if (sip && sip->napped[i])
3423 kvmppc_ipi_thread(pcpu + i);
3424 cpumask_clear_cpu(pcpu + i, &vc->kvm->arch.cpu_in_guest);
3425 }
3426
3427 spin_unlock(&vc->lock);
3428
3429 /* make sure updates to secondary vcpu structs are visible now */
3430 smp_mb();
3431
3432 preempt_enable();
3433
3434 for (sub = 0; sub < core_info.n_subcores; ++sub) {
3435 pvc = core_info.vc[sub];
3436 post_guest_process(pvc, pvc == vc);
3437 }
3438
3439 spin_lock(&vc->lock);
3440
3441 out:
3442 vc->vcore_state = VCORE_INACTIVE;
3443 trace_kvmppc_run_core(vc, 1);
3444 }
3445
3446 /*
3447 * Load up hypervisor-mode registers on P9.
3448 */
3449 static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
3450 unsigned long lpcr)
3451 {
3452 struct kvmppc_vcore *vc = vcpu->arch.vcore;
3453 s64 hdec;
3454 u64 tb, purr, spurr;
3455 int trap;
3456 unsigned long host_hfscr = mfspr(SPRN_HFSCR);
3457 unsigned long host_ciabr = mfspr(SPRN_CIABR);
3458 unsigned long host_dawr = mfspr(SPRN_DAWR0);
3459 unsigned long host_dawrx = mfspr(SPRN_DAWRX0);
3460 unsigned long host_psscr = mfspr(SPRN_PSSCR);
3461 unsigned long host_pidr = mfspr(SPRN_PID);
3462
3463 /*
3464 * P8 and P9 suppress the HDEC exception when LPCR[HDICE] = 0,
3465 * so set HDICE before writing HDEC.
3466 */
3467 mtspr(SPRN_LPCR, vcpu->kvm->arch.host_lpcr | LPCR_HDICE);
3468 isync();
3469
3470 hdec = time_limit - mftb();
3471 if (hdec < 0) {
3472 mtspr(SPRN_LPCR, vcpu->kvm->arch.host_lpcr);
3473 isync();
3474 return BOOK3S_INTERRUPT_HV_DECREMENTER;
3475 }
3476 mtspr(SPRN_HDEC, hdec);
3477
3478 if (vc->tb_offset) {
3479 u64 new_tb = mftb() + vc->tb_offset;
3480 mtspr(SPRN_TBU40, new_tb);
3481 tb = mftb();
3482 if ((tb & 0xffffff) < (new_tb & 0xffffff))
3483 mtspr(SPRN_TBU40, new_tb + 0x1000000);
3484 vc->tb_offset_applied = vc->tb_offset;
3485 }
3486
3487 if (vc->pcr)
3488 mtspr(SPRN_PCR, vc->pcr | PCR_MASK);
3489 mtspr(SPRN_DPDES, vc->dpdes);
3490 mtspr(SPRN_VTB, vc->vtb);
3491
3492 local_paca->kvm_hstate.host_purr = mfspr(SPRN_PURR);
3493 local_paca->kvm_hstate.host_spurr = mfspr(SPRN_SPURR);
3494 mtspr(SPRN_PURR, vcpu->arch.purr);
3495 mtspr(SPRN_SPURR, vcpu->arch.spurr);
3496
3497 if (dawr_enabled()) {
3498 mtspr(SPRN_DAWR0, vcpu->arch.dawr);
3499 mtspr(SPRN_DAWRX0, vcpu->arch.dawrx);
3500 }
3501 mtspr(SPRN_CIABR, vcpu->arch.ciabr);
3502 mtspr(SPRN_IC, vcpu->arch.ic);
3503 mtspr(SPRN_PID, vcpu->arch.pid);
3504
3505 mtspr(SPRN_PSSCR, vcpu->arch.psscr | PSSCR_EC |
3506 (local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG));
3507
3508 mtspr(SPRN_HFSCR, vcpu->arch.hfscr);
3509
3510 mtspr(SPRN_SPRG0, vcpu->arch.shregs.sprg0);
3511 mtspr(SPRN_SPRG1, vcpu->arch.shregs.sprg1);
3512 mtspr(SPRN_SPRG2, vcpu->arch.shregs.sprg2);
3513 mtspr(SPRN_SPRG3, vcpu->arch.shregs.sprg3);
3514
3515 mtspr(SPRN_AMOR, ~0UL);
3516
3517 mtspr(SPRN_LPCR, lpcr);
3518 isync();
3519
3520 kvmppc_xive_push_vcpu(vcpu);
3521
3522 mtspr(SPRN_SRR0, vcpu->arch.shregs.srr0);
3523 mtspr(SPRN_SRR1, vcpu->arch.shregs.srr1);
3524
3525 trap = __kvmhv_vcpu_entry_p9(vcpu);
3526
3527 /* Advance host PURR/SPURR by the amount used by guest */
3528 purr = mfspr(SPRN_PURR);
3529 spurr = mfspr(SPRN_SPURR);
3530 mtspr(SPRN_PURR, local_paca->kvm_hstate.host_purr +
3531 purr - vcpu->arch.purr);
3532 mtspr(SPRN_SPURR, local_paca->kvm_hstate.host_spurr +
3533 spurr - vcpu->arch.spurr);
3534 vcpu->arch.purr = purr;
3535 vcpu->arch.spurr = spurr;
3536
3537 vcpu->arch.ic = mfspr(SPRN_IC);
3538 vcpu->arch.pid = mfspr(SPRN_PID);
3539 vcpu->arch.psscr = mfspr(SPRN_PSSCR) & PSSCR_GUEST_VIS;
3540
3541 vcpu->arch.shregs.sprg0 = mfspr(SPRN_SPRG0);
3542 vcpu->arch.shregs.sprg1 = mfspr(SPRN_SPRG1);
3543 vcpu->arch.shregs.sprg2 = mfspr(SPRN_SPRG2);
3544 vcpu->arch.shregs.sprg3 = mfspr(SPRN_SPRG3);
3545
3546 /* Preserve PSSCR[FAKE_SUSPEND] until we've called kvmppc_save_tm_hv */
3547 mtspr(SPRN_PSSCR, host_psscr |
3548 (local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG));
3549 mtspr(SPRN_HFSCR, host_hfscr);
3550 mtspr(SPRN_CIABR, host_ciabr);
3551 mtspr(SPRN_DAWR0, host_dawr);
3552 mtspr(SPRN_DAWRX0, host_dawrx);
3553 mtspr(SPRN_PID, host_pidr);
3554
3555 /*
3556 * Since this is radix, do an eieio; tlbsync; ptesync sequence in
3557 * case we interrupted the guest between a tlbie and a ptesync.
3558 */
3559 asm volatile("eieio; tlbsync; ptesync");
3560
3561 /*
3562 * cp_abort is required if the processor supports local copy-paste
3563 * to clear the copy buffer that was under control of the guest.
3564 */
3565 if (cpu_has_feature(CPU_FTR_ARCH_31))
3566 asm volatile(PPC_CP_ABORT);
3567
3568 mtspr(SPRN_LPID, vcpu->kvm->arch.host_lpid); /* restore host LPID */
3569 isync();
3570
3571 vc->dpdes = mfspr(SPRN_DPDES);
3572 vc->vtb = mfspr(SPRN_VTB);
3573 mtspr(SPRN_DPDES, 0);
3574 if (vc->pcr)
3575 mtspr(SPRN_PCR, PCR_MASK);
3576
3577 if (vc->tb_offset_applied) {
3578 u64 new_tb = mftb() - vc->tb_offset_applied;
3579 mtspr(SPRN_TBU40, new_tb);
3580 tb = mftb();
3581 if ((tb & 0xffffff) < (new_tb & 0xffffff))
3582 mtspr(SPRN_TBU40, new_tb + 0x1000000);
3583 vc->tb_offset_applied = 0;
3584 }
3585
3586 mtspr(SPRN_HDEC, 0x7fffffff);
3587 mtspr(SPRN_LPCR, vcpu->kvm->arch.host_lpcr);
3588
3589 return trap;
3590 }
3591
3592 /*
3593 * Virtual-mode guest entry for POWER9 and later when the host and
3594 * guest are both using the radix MMU. The LPIDR has already been set.
3595 */
3596 static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
3597 unsigned long lpcr)
3598 {
3599 struct kvmppc_vcore *vc = vcpu->arch.vcore;
3600 unsigned long host_dscr = mfspr(SPRN_DSCR);
3601 unsigned long host_tidr = mfspr(SPRN_TIDR);
3602 unsigned long host_iamr = mfspr(SPRN_IAMR);
3603 unsigned long host_amr = mfspr(SPRN_AMR);
3604 unsigned long host_fscr = mfspr(SPRN_FSCR);
3605 s64 dec;
3606 u64 tb;
3607 int trap, save_pmu;
3608
3609 dec = mfspr(SPRN_DEC);
3610 tb = mftb();
3611 if (dec < 0)
3612 return BOOK3S_INTERRUPT_HV_DECREMENTER;
3613 local_paca->kvm_hstate.dec_expires = dec + tb;
3614 if (local_paca->kvm_hstate.dec_expires < time_limit)
3615 time_limit = local_paca->kvm_hstate.dec_expires;
3616
3617 vcpu->arch.ceded = 0;
3618
3619 kvmhv_save_host_pmu(); /* saves it to PACA kvm_hstate */
3620
3621 kvmppc_subcore_enter_guest();
3622
3623 vc->entry_exit_map = 1;
3624 vc->in_guest = 1;
3625
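	/*
	 * Bump the VPA yield count on entry (and again on exit below),
	 * so the count is odd while this vCPU is dispatched in the guest
	 * (the usual PAPR convention relied on by e.g. H_CONFER).
	 */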
	if (vcpu->arch.vpa.pinned_addr) {
		struct lppaca *lp = vcpu->arch.vpa.pinned_addr;
		u32 yield_count = be32_to_cpu(lp->yield_count) + 1;
		lp->yield_count = cpu_to_be32(yield_count);
		vcpu->arch.vpa.dirty = 1;
	}

	if (cpu_has_feature(CPU_FTR_TM) ||
	    cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
		kvmppc_restore_tm_hv(vcpu, vcpu->arch.shregs.msr, true);

#ifdef CONFIG_PPC_PSERIES
	if (kvmhv_on_pseries()) {
		barrier();
		if (vcpu->arch.vpa.pinned_addr) {
			struct lppaca *lp = vcpu->arch.vpa.pinned_addr;
			get_lppaca()->pmcregs_in_use = lp->pmcregs_in_use;
		} else {
			get_lppaca()->pmcregs_in_use = 1;
		}
		barrier();
	}
#endif
	kvmhv_load_guest_pmu(vcpu);

	msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX);
	load_fp_state(&vcpu->arch.fp);
#ifdef CONFIG_ALTIVEC
	load_vr_state(&vcpu->arch.vr);
#endif
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);

	mtspr(SPRN_DSCR, vcpu->arch.dscr);
	mtspr(SPRN_IAMR, vcpu->arch.iamr);
	mtspr(SPRN_PSPB, vcpu->arch.pspb);
	mtspr(SPRN_FSCR, vcpu->arch.fscr);
	mtspr(SPRN_TAR, vcpu->arch.tar);
	mtspr(SPRN_EBBHR, vcpu->arch.ebbhr);
	mtspr(SPRN_EBBRR, vcpu->arch.ebbrr);
	mtspr(SPRN_BESCR, vcpu->arch.bescr);
	mtspr(SPRN_WORT, vcpu->arch.wort);
	mtspr(SPRN_TIDR, vcpu->arch.tid);
	mtspr(SPRN_DAR, vcpu->arch.shregs.dar);
	mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr);
	mtspr(SPRN_AMR, vcpu->arch.amr);
	mtspr(SPRN_UAMOR, vcpu->arch.uamor);

	if (!(vcpu->arch.ctrl & 1))
		mtspr(SPRN_CTRLT, mfspr(SPRN_CTRLF) & ~1);

	mtspr(SPRN_DEC, vcpu->arch.dec_expires - mftb());

	if (kvmhv_on_pseries()) {
		/*
		 * We need to save and restore the guest-visible part of the
		 * psscr (i.e. using SPRN_PSSCR_PR) since the hypervisor
		 * doesn't do this for us.  This is only required on pseries;
		 * otherwise kvmhv_load_hv_regs_and_go() below handles it.
		 */
		unsigned long host_psscr;
		/* call our hypervisor to load up HV regs and go */
		struct hv_guest_state hvregs;

		host_psscr = mfspr(SPRN_PSSCR_PR);
		mtspr(SPRN_PSSCR_PR, vcpu->arch.psscr);
		kvmhv_save_hv_regs(vcpu, &hvregs);
		hvregs.lpcr = lpcr;
		vcpu->arch.regs.msr = vcpu->arch.shregs.msr;
		hvregs.version = HV_GUEST_STATE_VERSION;
		if (vcpu->arch.nested) {
			hvregs.lpid = vcpu->arch.nested->shadow_lpid;
			hvregs.vcpu_token = vcpu->arch.nested_vcpu_id;
		} else {
			hvregs.lpid = vcpu->kvm->arch.lpid;
			hvregs.vcpu_token = vcpu->vcpu_id;
		}
		hvregs.hdec_expiry = time_limit;
		trap = plpar_hcall_norets(H_ENTER_NESTED, __pa(&hvregs),
					  __pa(&vcpu->arch.regs));
		kvmhv_restore_hv_return_state(vcpu, &hvregs);
		vcpu->arch.shregs.msr = vcpu->arch.regs.msr;
		vcpu->arch.shregs.dar = mfspr(SPRN_DAR);
		vcpu->arch.shregs.dsisr = mfspr(SPRN_DSISR);
		vcpu->arch.psscr = mfspr(SPRN_PSSCR_PR);
		mtspr(SPRN_PSSCR_PR, host_psscr);

		/* H_CEDE has to be handled now, not later */
		if (trap == BOOK3S_INTERRUPT_SYSCALL && !vcpu->arch.nested &&
		    kvmppc_get_gpr(vcpu, 3) == H_CEDE) {
			kvmppc_nested_cede(vcpu);
			kvmppc_set_gpr(vcpu, 3, 0);
			trap = 0;
		}
	} else {
		trap = kvmhv_load_hv_regs_and_go(vcpu, time_limit, lpcr);
	}

	vcpu->arch.slb_max = 0;
	dec = mfspr(SPRN_DEC);
	if (!(lpcr & LPCR_LD)) /* Sign extend if not using large decrementer */
		dec = (s32) dec;
	tb = mftb();
	vcpu->arch.dec_expires = dec + tb;
	vcpu->cpu = -1;
	vcpu->arch.thread_cpu = -1;
	/* Save guest CTRL register, set runlatch to 1 */
	vcpu->arch.ctrl = mfspr(SPRN_CTRLF);
	if (!(vcpu->arch.ctrl & 1))
		mtspr(SPRN_CTRLT, vcpu->arch.ctrl | 1);

	vcpu->arch.iamr = mfspr(SPRN_IAMR);
	vcpu->arch.pspb = mfspr(SPRN_PSPB);
	vcpu->arch.fscr = mfspr(SPRN_FSCR);
	vcpu->arch.tar = mfspr(SPRN_TAR);
	vcpu->arch.ebbhr = mfspr(SPRN_EBBHR);
	vcpu->arch.ebbrr = mfspr(SPRN_EBBRR);
	vcpu->arch.bescr = mfspr(SPRN_BESCR);
	vcpu->arch.wort = mfspr(SPRN_WORT);
	vcpu->arch.tid = mfspr(SPRN_TIDR);
	vcpu->arch.amr = mfspr(SPRN_AMR);
	vcpu->arch.uamor = mfspr(SPRN_UAMOR);
	vcpu->arch.dscr = mfspr(SPRN_DSCR);

	mtspr(SPRN_PSPB, 0);
	mtspr(SPRN_WORT, 0);
	mtspr(SPRN_UAMOR, 0);
	mtspr(SPRN_DSCR, host_dscr);
	mtspr(SPRN_TIDR, host_tidr);
	mtspr(SPRN_IAMR, host_iamr);

	if (host_amr != vcpu->arch.amr)
		mtspr(SPRN_AMR, host_amr);

	if (host_fscr != vcpu->arch.fscr)
		mtspr(SPRN_FSCR, host_fscr);

	msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX);
	store_fp_state(&vcpu->arch.fp);
#ifdef CONFIG_ALTIVEC
	store_vr_state(&vcpu->arch.vr);
#endif
	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);

	if (cpu_has_feature(CPU_FTR_TM) ||
	    cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
		kvmppc_save_tm_hv(vcpu, vcpu->arch.shregs.msr, true);

	save_pmu = 1;
	if (vcpu->arch.vpa.pinned_addr) {
		struct lppaca *lp = vcpu->arch.vpa.pinned_addr;
		u32 yield_count = be32_to_cpu(lp->yield_count) + 1;
		lp->yield_count = cpu_to_be32(yield_count);
		vcpu->arch.vpa.dirty = 1;
		save_pmu = lp->pmcregs_in_use;
	}
	/* Must save pmu if this guest is capable of running nested guests */
	save_pmu |= nesting_enabled(vcpu->kvm);

	kvmhv_save_guest_pmu(vcpu, save_pmu);
#ifdef CONFIG_PPC_PSERIES
	if (kvmhv_on_pseries()) {
		barrier();
		get_lppaca()->pmcregs_in_use = ppc_get_pmu_inuse();
		barrier();
	}
#endif

	vc->entry_exit_map = 0x101;
	vc->in_guest = 0;

	mtspr(SPRN_DEC, local_paca->kvm_hstate.dec_expires - mftb());
	mtspr(SPRN_SPRG_VDSO_WRITE, local_paca->sprg_vdso);

	kvmhv_load_host_pmu();

	kvmppc_subcore_exit_guest();

	return trap;
}

/*
 * Wait for some other vcpu thread to execute us, and
 * wake us up when we need to handle something in the host.
 */
static void kvmppc_wait_for_exec(struct kvmppc_vcore *vc,
				 struct kvm_vcpu *vcpu, int wait_state)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&vcpu->arch.cpu_run, &wait, wait_state);
	if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
		spin_unlock(&vc->lock);
		schedule();
		spin_lock(&vc->lock);
	}
	finish_wait(&vcpu->arch.cpu_run, &wait);
}

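/*
 * Adaptive halt-polling: grow the per-vcore poll window when a short
 * poll would have caught a wakeup, shrink it when we end up sleeping
 * anyway.  Illustrative numbers (actual values come from the
 * halt_poll_ns* module parameters): with halt_poll_ns_grow = 2 and
 * halt_poll_ns_grow_start = 10000, the window steps 0 -> 10000 ->
 * 20000 -> ... ns, capped at halt_poll_ns; a shrink divisor of 0
 * drops it straight back to 0 rather than decaying it.
 */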
static void grow_halt_poll_ns(struct kvmppc_vcore *vc)
{
	if (!halt_poll_ns_grow)
		return;

	vc->halt_poll_ns *= halt_poll_ns_grow;
	if (vc->halt_poll_ns < halt_poll_ns_grow_start)
		vc->halt_poll_ns = halt_poll_ns_grow_start;
}

static void shrink_halt_poll_ns(struct kvmppc_vcore *vc)
{
	if (halt_poll_ns_shrink == 0)
		vc->halt_poll_ns = 0;
	else
		vc->halt_poll_ns /= halt_poll_ns_shrink;
}

#ifdef CONFIG_KVM_XICS
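/*
 * XIVE presents an interrupt when the pending priority (PIPR) is more
 * favoured, i.e. numerically lower, than the current processor
 * priority (CPPR); hence the pipr < cppr test below.
 */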
static inline bool xive_interrupt_pending(struct kvm_vcpu *vcpu)
{
	if (!xics_on_xive())
		return false;
	return vcpu->arch.irq_pending || vcpu->arch.xive_saved_state.pipr <
		vcpu->arch.xive_saved_state.cppr;
}
#else
static inline bool xive_interrupt_pending(struct kvm_vcpu *vcpu)
{
	return false;
}
#endif /* CONFIG_KVM_XICS */

static bool kvmppc_vcpu_woken(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.pending_exceptions || vcpu->arch.prodded ||
	    kvmppc_doorbell_pending(vcpu) || xive_interrupt_pending(vcpu))
		return true;

	return false;
}

/*
 * Check to see if any of the runnable vcpus on the vcore have pending
 * exceptions or are no longer ceded
 */
static int kvmppc_vcore_check_block(struct kvmppc_vcore *vc)
{
	struct kvm_vcpu *vcpu;
	int i;

	for_each_runnable_thread(i, vcpu, vc) {
		if (!vcpu->arch.ceded || kvmppc_vcpu_woken(vcpu))
			return 1;
	}

	return 0;
}

/*
 * All the vcpus in this vcore are idle, so wait for a decrementer
 * or external interrupt to one of the vcpus.  vc->lock is held.
 */
static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
{
	ktime_t cur, start_poll, start_wait;
	int do_sleep = 1;
	u64 block_ns;

	/* Poll for pending exceptions and ceded state */
	cur = start_poll = ktime_get();
	if (vc->halt_poll_ns) {
		ktime_t stop = ktime_add_ns(start_poll, vc->halt_poll_ns);
		++vc->runner->stat.halt_attempted_poll;

		vc->vcore_state = VCORE_POLLING;
		spin_unlock(&vc->lock);

		do {
			if (kvmppc_vcore_check_block(vc)) {
				do_sleep = 0;
				break;
			}
			cur = ktime_get();
		} while (single_task_running() && ktime_before(cur, stop));

		spin_lock(&vc->lock);
		vc->vcore_state = VCORE_INACTIVE;

		if (!do_sleep) {
			++vc->runner->stat.halt_successful_poll;
			goto out;
		}
	}

	prepare_to_rcuwait(&vc->wait);
	set_current_state(TASK_INTERRUPTIBLE);
	if (kvmppc_vcore_check_block(vc)) {
		finish_rcuwait(&vc->wait);
		do_sleep = 0;
		/* If we polled, count this as a successful poll */
		if (vc->halt_poll_ns)
			++vc->runner->stat.halt_successful_poll;
		goto out;
	}

	start_wait = ktime_get();

	vc->vcore_state = VCORE_SLEEPING;
	trace_kvmppc_vcore_blocked(vc, 0);
	spin_unlock(&vc->lock);
	schedule();
	finish_rcuwait(&vc->wait);
	spin_lock(&vc->lock);
	vc->vcore_state = VCORE_INACTIVE;
	trace_kvmppc_vcore_blocked(vc, 1);
	++vc->runner->stat.halt_successful_wait;

	cur = ktime_get();

out:
	block_ns = ktime_to_ns(cur) - ktime_to_ns(start_poll);

	/* Attribute wait time */
	if (do_sleep) {
		vc->runner->stat.halt_wait_ns +=
			ktime_to_ns(cur) - ktime_to_ns(start_wait);
		/* Attribute failed poll time */
		if (vc->halt_poll_ns)
			vc->runner->stat.halt_poll_fail_ns +=
				ktime_to_ns(start_wait) -
				ktime_to_ns(start_poll);
	} else {
		/* Attribute successful poll time */
		if (vc->halt_poll_ns)
			vc->runner->stat.halt_poll_success_ns +=
				ktime_to_ns(cur) -
				ktime_to_ns(start_poll);
	}

	/* Adjust poll time */
	if (halt_poll_ns) {
		if (block_ns <= vc->halt_poll_ns)
			;
		/* We slept and blocked for longer than the max halt time */
		else if (vc->halt_poll_ns && block_ns > halt_poll_ns)
			shrink_halt_poll_ns(vc);
		/* We slept and our poll time is too small */
		else if (vc->halt_poll_ns < halt_poll_ns &&
			 block_ns < halt_poll_ns)
			grow_halt_poll_ns(vc);
		if (vc->halt_poll_ns > halt_poll_ns)
			vc->halt_poll_ns = halt_poll_ns;
	} else
		vc->halt_poll_ns = 0;

	trace_kvmppc_vcore_wakeup(do_sleep, block_ns);
}

/*
 * This never fails for a radix guest, as none of the operations it does
 * for a radix guest can fail or have a way to report failure.
 * kvmhv_run_single_vcpu() relies on this fact.
 */
static int kvmhv_setup_mmu(struct kvm_vcpu *vcpu)
{
	int r = 0;
	struct kvm *kvm = vcpu->kvm;

	mutex_lock(&kvm->arch.mmu_setup_lock);
	if (!kvm->arch.mmu_ready) {
		if (!kvm_is_radix(kvm))
			r = kvmppc_hv_setup_htab_rma(vcpu);
		if (!r) {
			if (cpu_has_feature(CPU_FTR_ARCH_300))
				kvmppc_setup_partition_table(kvm);
			kvm->arch.mmu_ready = 1;
		}
	}
	mutex_unlock(&kvm->arch.mmu_setup_lock);
	return r;
}

static int kvmppc_run_vcpu(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	int n_ceded, i, r;
	struct kvmppc_vcore *vc;
	struct kvm_vcpu *v;

	trace_kvmppc_run_vcpu_enter(vcpu);

	run->exit_reason = 0;
	vcpu->arch.ret = RESUME_GUEST;
	vcpu->arch.trap = 0;
	kvmppc_update_vpas(vcpu);

	/*
	 * Synchronize with other threads in this virtual core
	 */
	vc = vcpu->arch.vcore;
	spin_lock(&vc->lock);
	vcpu->arch.ceded = 0;
	vcpu->arch.run_task = current;
	vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb());
	vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
	vcpu->arch.busy_preempt = TB_NIL;
	WRITE_ONCE(vc->runnable_threads[vcpu->arch.ptid], vcpu);
	++vc->n_runnable;

	/*
	 * This happens the first time this is called for a vcpu.
	 * If the vcore is already running, we may be able to start
	 * this thread straight away and have it join in.
	 */
	if (!signal_pending(current)) {
		if ((vc->vcore_state == VCORE_PIGGYBACK ||
		     vc->vcore_state == VCORE_RUNNING) &&
		    !VCORE_IS_EXITING(vc)) {
			kvmppc_create_dtl_entry(vcpu, vc);
			kvmppc_start_thread(vcpu, vc);
			trace_kvm_guest_enter(vcpu);
		} else if (vc->vcore_state == VCORE_SLEEPING) {
			rcuwait_wake_up(&vc->wait);
		}
	}

	while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
	       !signal_pending(current)) {
		/* See if the MMU is ready to go */
		if (!vcpu->kvm->arch.mmu_ready) {
			spin_unlock(&vc->lock);
			r = kvmhv_setup_mmu(vcpu);
			spin_lock(&vc->lock);
			if (r) {
				run->exit_reason = KVM_EXIT_FAIL_ENTRY;
				run->fail_entry.hardware_entry_failure_reason = 0;
				vcpu->arch.ret = r;
				break;
			}
		}

		if (vc->vcore_state == VCORE_PREEMPT && vc->runner == NULL)
			kvmppc_vcore_end_preempt(vc);

		if (vc->vcore_state != VCORE_INACTIVE) {
			kvmppc_wait_for_exec(vc, vcpu, TASK_INTERRUPTIBLE);
			continue;
		}
		for_each_runnable_thread(i, v, vc) {
			kvmppc_core_prepare_to_enter(v);
			if (signal_pending(v->arch.run_task)) {
				kvmppc_remove_runnable(vc, v);
				v->stat.signal_exits++;
				v->run->exit_reason = KVM_EXIT_INTR;
				v->arch.ret = -EINTR;
				wake_up(&v->arch.cpu_run);
			}
		}
		if (!vc->n_runnable || vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
			break;
		n_ceded = 0;
		for_each_runnable_thread(i, v, vc) {
			if (!kvmppc_vcpu_woken(v))
				n_ceded += v->arch.ceded;
			else
				v->arch.ceded = 0;
		}
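		/*
		 * If every runnable vCPU on the vcore has ceded and none
		 * has been woken, block the whole vcore; otherwise enter
		 * the guest (or yield first if the host needs this CPU).
		 */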
		vc->runner = vcpu;
		if (n_ceded == vc->n_runnable) {
			kvmppc_vcore_blocked(vc);
		} else if (need_resched()) {
			kvmppc_vcore_preempt(vc);
			/* Let something else run */
			cond_resched_lock(&vc->lock);
			if (vc->vcore_state == VCORE_PREEMPT)
				kvmppc_vcore_end_preempt(vc);
		} else {
			kvmppc_run_core(vc);
		}
		vc->runner = NULL;
	}

	while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
	       (vc->vcore_state == VCORE_RUNNING ||
		vc->vcore_state == VCORE_EXITING ||
		vc->vcore_state == VCORE_PIGGYBACK))
		kvmppc_wait_for_exec(vc, vcpu, TASK_UNINTERRUPTIBLE);

	if (vc->vcore_state == VCORE_PREEMPT && vc->runner == NULL)
		kvmppc_vcore_end_preempt(vc);

	if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
		kvmppc_remove_runnable(vc, vcpu);
		vcpu->stat.signal_exits++;
		run->exit_reason = KVM_EXIT_INTR;
		vcpu->arch.ret = -EINTR;
	}

	if (vc->n_runnable && vc->vcore_state == VCORE_INACTIVE) {
		/* Wake up some vcpu to run the core */
		i = -1;
		v = next_runnable_thread(vc, &i);
		wake_up(&v->arch.cpu_run);
	}

	trace_kvmppc_run_vcpu_exit(vcpu);
	spin_unlock(&vc->lock);
	return vcpu->arch.ret;
}

int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
			  unsigned long lpcr)
{
	struct kvm_run *run = vcpu->run;
	int trap, r, pcpu;
	int srcu_idx, lpid;
	struct kvmppc_vcore *vc;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_nested_guest *nested = vcpu->arch.nested;

	trace_kvmppc_run_vcpu_enter(vcpu);

	run->exit_reason = 0;
	vcpu->arch.ret = RESUME_GUEST;
	vcpu->arch.trap = 0;

	vc = vcpu->arch.vcore;
	vcpu->arch.ceded = 0;
	vcpu->arch.run_task = current;
	vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb());
	vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
	vcpu->arch.busy_preempt = TB_NIL;
	vcpu->arch.last_inst = KVM_INST_FETCH_FAILED;
	vc->runnable_threads[0] = vcpu;
	vc->n_runnable = 1;
	vc->runner = vcpu;

	/* See if the MMU is ready to go */
	if (!kvm->arch.mmu_ready)
		kvmhv_setup_mmu(vcpu);

	if (need_resched())
		cond_resched();

	kvmppc_update_vpas(vcpu);

	init_vcore_to_run(vc);
	vc->preempt_tb = TB_NIL;

	preempt_disable();
	pcpu = smp_processor_id();
	vc->pcpu = pcpu;
	kvmppc_prepare_radix_vcpu(vcpu, pcpu);

	local_irq_disable();
	hard_irq_disable();
	if (signal_pending(current))
		goto sigpend;
	if (lazy_irq_pending() || need_resched() || !kvm->arch.mmu_ready)
		goto out;

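	/*
	 * For a non-nested vCPU, fold a pending doorbell into the vcore's
	 * DPDES image and use LPCR_MER so a pending external interrupt is
	 * presented once the guest sets MSR[EE].  A nested vCPU's pending
	 * events can't be injected directly here, so hand control back to
	 * let the L1 hypervisor deal with them.
	 */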
	if (!nested) {
		kvmppc_core_prepare_to_enter(vcpu);
		if (vcpu->arch.doorbell_request) {
			vc->dpdes = 1;
			smp_wmb();
			vcpu->arch.doorbell_request = 0;
		}
		if (test_bit(BOOK3S_IRQPRIO_EXTERNAL,
			     &vcpu->arch.pending_exceptions))
			lpcr |= LPCR_MER;
	} else if (vcpu->arch.pending_exceptions ||
		   vcpu->arch.doorbell_request ||
		   xive_interrupt_pending(vcpu)) {
		vcpu->arch.ret = RESUME_HOST;
		goto out;
	}

	kvmppc_clear_host_core(pcpu);

	local_paca->kvm_hstate.tid = 0;
	local_paca->kvm_hstate.napping = 0;
	local_paca->kvm_hstate.kvm_split_mode = NULL;
	kvmppc_start_thread(vcpu, vc);
	kvmppc_create_dtl_entry(vcpu, vc);
	trace_kvm_guest_enter(vcpu);

	vc->vcore_state = VCORE_RUNNING;
	trace_kvmppc_run_core(vc, 0);

	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		lpid = nested ? nested->shadow_lpid : kvm->arch.lpid;
		mtspr(SPRN_LPID, lpid);
		isync();
		kvmppc_check_need_tlb_flush(kvm, pcpu, nested);
	}

	guest_enter_irqoff();

	srcu_idx = srcu_read_lock(&kvm->srcu);

	this_cpu_disable_ftrace();

	/* Tell lockdep that we're about to enable interrupts */
	trace_hardirqs_on();

	trap = kvmhv_p9_guest_entry(vcpu, time_limit, lpcr);
	vcpu->arch.trap = trap;

	trace_hardirqs_off();

	this_cpu_enable_ftrace();

	srcu_read_unlock(&kvm->srcu, srcu_idx);

	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		mtspr(SPRN_LPID, kvm->arch.host_lpid);
		isync();
	}

	set_irq_happened(trap);

	kvmppc_set_host_core(pcpu);

	context_tracking_guest_exit();
	if (!vtime_accounting_enabled_this_cpu()) {
		local_irq_enable();
		/*
		 * Service IRQs here before vtime_account_guest_exit() so any
		 * ticks that occurred while running the guest are accounted to
		 * the guest.  If vtime accounting is enabled, accounting uses
		 * TB rather than ticks, so it can be done without enabling
		 * interrupts here, which has the problem that it accounts
		 * interrupt processing overhead to the host.
		 */
		local_irq_disable();
	}
	vtime_account_guest_exit();

	local_irq_enable();

	cpumask_clear_cpu(pcpu, &kvm->arch.cpu_in_guest);

	preempt_enable();

	/*
	 * cancel pending decrementer exception if DEC is now positive, or if
	 * entering a nested guest in which case the decrementer is now owned
	 * by L2 and the L1 decrementer is provided in hdec_expires
	 */
	if (kvmppc_core_pending_dec(vcpu) &&
			((get_tb() < vcpu->arch.dec_expires) ||
			 (trap == BOOK3S_INTERRUPT_SYSCALL &&
			  kvmppc_get_gpr(vcpu, 3) == H_ENTER_NESTED)))
		kvmppc_core_dequeue_dec(vcpu);

	trace_kvm_guest_exit(vcpu);
	r = RESUME_GUEST;
	if (trap) {
		if (!nested)
			r = kvmppc_handle_exit_hv(vcpu, current);
		else
			r = kvmppc_handle_nested_exit(vcpu);
	}
	vcpu->arch.ret = r;

	if (is_kvmppc_resume_guest(r) && vcpu->arch.ceded &&
	    !kvmppc_vcpu_woken(vcpu)) {
		kvmppc_set_timer(vcpu);
		while (vcpu->arch.ceded && !kvmppc_vcpu_woken(vcpu)) {
			if (signal_pending(current)) {
				vcpu->stat.signal_exits++;
				run->exit_reason = KVM_EXIT_INTR;
				vcpu->arch.ret = -EINTR;
				break;
			}
			spin_lock(&vc->lock);
			kvmppc_vcore_blocked(vc);
			spin_unlock(&vc->lock);
		}
	}
	vcpu->arch.ceded = 0;

	vc->vcore_state = VCORE_INACTIVE;
	trace_kvmppc_run_core(vc, 1);

done:
	kvmppc_remove_runnable(vc, vcpu);
	trace_kvmppc_run_vcpu_exit(vcpu);

	return vcpu->arch.ret;

sigpend:
	vcpu->stat.signal_exits++;
	run->exit_reason = KVM_EXIT_INTR;
	vcpu->arch.ret = -EINTR;
out:
	local_irq_enable();
	preempt_enable();
	goto done;
}

static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	int r;
	int srcu_idx;
	unsigned long ebb_regs[3] = {};	/* shut up GCC */
	unsigned long user_tar = 0;
	unsigned int user_vrsave;
	struct kvm *kvm;

	if (!vcpu->arch.sane) {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return -EINVAL;
	}

	/*
	 * Don't allow entry with a suspended transaction, because
	 * the guest entry/exit code will lose it.
	 * If the guest has TM enabled, save away their TM-related SPRs
	 * (they will get restored by the TM unavailable interrupt).
	 */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (cpu_has_feature(CPU_FTR_TM) && current->thread.regs &&
	    (current->thread.regs->msr & MSR_TM)) {
		if (MSR_TM_ACTIVE(current->thread.regs->msr)) {
			run->exit_reason = KVM_EXIT_FAIL_ENTRY;
			run->fail_entry.hardware_entry_failure_reason = 0;
			return -EINVAL;
		}
		/* Enable TM so we can read the TM SPRs */
		mtmsr(mfmsr() | MSR_TM);
		current->thread.tm_tfhar = mfspr(SPRN_TFHAR);
		current->thread.tm_tfiar = mfspr(SPRN_TFIAR);
		current->thread.tm_texasr = mfspr(SPRN_TEXASR);
		current->thread.regs->msr &= ~MSR_TM;
	}
#endif

	/*
	 * Force online to 1 for the sake of old userspace which doesn't
	 * set it.
	 */
	if (!vcpu->arch.online) {
		atomic_inc(&vcpu->arch.vcore->online_count);
		vcpu->arch.online = 1;
	}

	kvmppc_core_prepare_to_enter(vcpu);

	/* No need to go into the guest when all we'll do is come back out */
	if (signal_pending(current)) {
		run->exit_reason = KVM_EXIT_INTR;
		return -EINTR;
	}

	kvm = vcpu->kvm;
	atomic_inc(&kvm->arch.vcpus_running);
	/* Order vcpus_running vs. mmu_ready, see kvmppc_alloc_reset_hpt */
	smp_mb();

	flush_all_to_thread(current);

	/* Save userspace EBB and other register values */
	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
		ebb_regs[0] = mfspr(SPRN_EBBHR);
		ebb_regs[1] = mfspr(SPRN_EBBRR);
		ebb_regs[2] = mfspr(SPRN_BESCR);
		user_tar = mfspr(SPRN_TAR);
	}
	user_vrsave = mfspr(SPRN_VRSAVE);

	vcpu->arch.waitp = &vcpu->arch.vcore->wait;
	vcpu->arch.pgdir = kvm->mm->pgd;
	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;

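	/*
	 * Main run loop: keep re-entering the guest while the resume code
	 * says RESUME_GUEST, handling hypercalls, guest page faults and
	 * passthrough interrupt completion in between entries.
	 */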
	do {
		/*
		 * The early POWER9 chips that can't mix radix and HPT threads
		 * on the same core also need the workaround for the problem
		 * where the TLB would prefetch entries in the guest exit path
		 * for radix guests using the guest PIDR value and LPID 0.
		 * The workaround is in the old path (kvmppc_run_vcpu())
		 * but not the new path (kvmhv_run_single_vcpu()).
		 */
		if (kvm->arch.threads_indep && kvm_is_radix(kvm) &&
		    !no_mixing_hpt_and_radix)
			r = kvmhv_run_single_vcpu(vcpu, ~(u64)0,
						  vcpu->arch.vcore->lpcr);
		else
			r = kvmppc_run_vcpu(vcpu);

		if (run->exit_reason == KVM_EXIT_PAPR_HCALL &&
		    !(vcpu->arch.shregs.msr & MSR_PR)) {
			trace_kvm_hcall_enter(vcpu);
			r = kvmppc_pseries_do_hcall(vcpu);
			trace_kvm_hcall_exit(vcpu, r);
			kvmppc_core_prepare_to_enter(vcpu);
		} else if (r == RESUME_PAGE_FAULT) {
			srcu_idx = srcu_read_lock(&kvm->srcu);
			r = kvmppc_book3s_hv_page_fault(vcpu,
				vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
			srcu_read_unlock(&kvm->srcu, srcu_idx);
		} else if (r == RESUME_PASSTHROUGH) {
			if (WARN_ON(xics_on_xive()))
				r = H_SUCCESS;
			else
				r = kvmppc_xics_rm_complete(vcpu, 0);
		}
	} while (is_kvmppc_resume_guest(r));

	/* Restore userspace EBB and other register values */
	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
		mtspr(SPRN_EBBHR, ebb_regs[0]);
		mtspr(SPRN_EBBRR, ebb_regs[1]);
		mtspr(SPRN_BESCR, ebb_regs[2]);
		mtspr(SPRN_TAR, user_tar);
		mtspr(SPRN_FSCR, current->thread.fscr);
	}
	mtspr(SPRN_VRSAVE, user_vrsave);

	vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
	atomic_dec(&kvm->arch.vcpus_running);
	return r;
}

static void kvmppc_add_seg_page_size(struct kvm_ppc_one_seg_page_size **sps,
				     int shift, int sllp)
{
	(*sps)->page_shift = shift;
	(*sps)->slb_enc = sllp;
	(*sps)->enc[0].page_shift = shift;
	(*sps)->enc[0].pte_enc = kvmppc_pgsize_lp_encoding(shift, shift);
	/*
	 * Add 16MB MPSS support (may get filtered out by userspace)
	 */
	if (shift != 24) {
		int penc = kvmppc_pgsize_lp_encoding(shift, 24);
		if (penc != -1) {
			(*sps)->enc[1].page_shift = 24;
			(*sps)->enc[1].pte_enc = penc;
		}
	}
	(*sps)++;
}

static int kvm_vm_ioctl_get_smmu_info_hv(struct kvm *kvm,
					 struct kvm_ppc_smmu_info *info)
{
	struct kvm_ppc_one_seg_page_size *sps;

	/*
	 * POWER7, POWER8 and POWER9 all support 32 storage keys for data.
	 * POWER7 doesn't support keys for instruction accesses,
	 * POWER8 and POWER9 do.
	 */
	info->data_keys = 32;
	info->instr_keys = cpu_has_feature(CPU_FTR_ARCH_207S) ? 32 : 0;

	/* POWER7, 8 and 9 all have 1T segments and 32-entry SLB */
	info->flags = KVM_PPC_PAGE_SIZES_REAL | KVM_PPC_1T_SEGMENTS;
	info->slb_size = 32;

	/* We only support these sizes for now, and no multi-size segments */
	sps = &info->sps[0];
	kvmppc_add_seg_page_size(&sps, 12, 0);
	kvmppc_add_seg_page_size(&sps, 16, SLB_VSID_L | SLB_VSID_LP_01);
	kvmppc_add_seg_page_size(&sps, 24, SLB_VSID_L);

	/* If running as a nested hypervisor, we don't support HPT guests */
	if (kvmhv_on_pseries())
		info->flags |= KVM_PPC_NO_HASH;

	return 0;
}

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
static int kvm_vm_ioctl_get_dirty_log_hv(struct kvm *kvm,
					 struct kvm_dirty_log *log)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int i, r;
	unsigned long n;
	unsigned long *buf, *p;
	struct kvm_vcpu *vcpu;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	slots = kvm_memslots(kvm);
	memslot = id_to_memslot(slots, log->slot);
	r = -ENOENT;
	if (!memslot || !memslot->dirty_bitmap)
		goto out;

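	/*
	 * The dirty_bitmap allocation is twice kvm_dirty_bitmap_bytes()
	 * precisely so that the second half can serve as scratch space
	 * for assembling the log returned to userspace.
	 */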
	/*
	 * Use second half of bitmap area because both HPT and radix
	 * accumulate bits in the first half.
	 */
	n = kvm_dirty_bitmap_bytes(memslot);
	buf = memslot->dirty_bitmap + n / sizeof(long);
	memset(buf, 0, n);

	if (kvm_is_radix(kvm))
		r = kvmppc_hv_get_dirty_log_radix(kvm, memslot, buf);
	else
		r = kvmppc_hv_get_dirty_log_hpt(kvm, memslot, buf);
	if (r)
		goto out;

	/*
	 * We accumulate dirty bits in the first half of the
	 * memslot's dirty_bitmap area, for when pages are paged
	 * out or modified by the host directly.  Pick up these
	 * bits and add them to the map.
	 */
	p = memslot->dirty_bitmap;
	for (i = 0; i < n / sizeof(long); ++i)
		buf[i] |= xchg(&p[i], 0);

	/* Harvest dirty bits from VPA and DTL updates */
	/* Note: we never modify the SLB shadow buffer areas */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		spin_lock(&vcpu->arch.vpa_update_lock);
		kvmppc_harvest_vpa_dirty(&vcpu->arch.vpa, memslot, buf);
		kvmppc_harvest_vpa_dirty(&vcpu->arch.dtl, memslot, buf);
		spin_unlock(&vcpu->arch.vpa_update_lock);
	}

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, buf, n))
		goto out;

	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

static void kvmppc_core_free_memslot_hv(struct kvm_memory_slot *slot)
{
	vfree(slot->arch.rmap);
	slot->arch.rmap = NULL;
}

static int kvmppc_core_prepare_memory_region_hv(struct kvm *kvm,
				struct kvm_memory_slot *slot,
				const struct kvm_userspace_memory_region *mem,
				enum kvm_mr_change change)
{
	unsigned long npages = mem->memory_size >> PAGE_SHIFT;

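	/*
	 * The rmap array holds one unsigned long per guest page in the
	 * slot (reverse-mapping chain head plus flag bits), so refuse
	 * slots whose rmap alone could not fit in RAM.
	 */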
	if (change == KVM_MR_CREATE) {
		unsigned long size = array_size(npages, sizeof(*slot->arch.rmap));

		if ((size >> PAGE_SHIFT) > totalram_pages())
			return -ENOMEM;

		slot->arch.rmap = vzalloc(size);
		if (!slot->arch.rmap)
			return -ENOMEM;
	}

	return 0;
}

static void kvmppc_core_commit_memory_region_hv(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change)
{
	unsigned long npages = mem->memory_size >> PAGE_SHIFT;

	/*
	 * If we are making a new memslot, it might make
	 * some address that was previously cached as emulated
	 * MMIO be no longer emulated MMIO, so invalidate
	 * all the caches of emulated MMIO translations.
	 */
	if (npages)
		atomic64_inc(&kvm->arch.mmio_update);

	/*
	 * For change == KVM_MR_MOVE or KVM_MR_DELETE, higher levels
	 * have already called kvm_arch_flush_shadow_memslot() to
	 * flush shadow mappings.  For KVM_MR_CREATE we have no
	 * previous mappings.  So the only case to handle is
	 * KVM_MR_FLAGS_ONLY when the KVM_MEM_LOG_DIRTY_PAGES bit
	 * has been changed.
	 * For radix guests, we flush on setting KVM_MEM_LOG_DIRTY_PAGES
	 * to get rid of any THP PTEs in the partition-scoped page tables
	 * so we can track dirtiness at the page level; we flush when
	 * clearing KVM_MEM_LOG_DIRTY_PAGES so that we can go back to
	 * using THP PTEs.
	 */
	if (change == KVM_MR_FLAGS_ONLY && kvm_is_radix(kvm) &&
	    ((new->flags ^ old->flags) & KVM_MEM_LOG_DIRTY_PAGES))
		kvmppc_radix_flush_memslot(kvm, old);
	/*
	 * If UV hasn't yet called H_SVM_INIT_START, don't register memslots.
	 */
	if (!kvm->arch.secure_guest)
		return;

	switch (change) {
	case KVM_MR_CREATE:
		/*
		 * @TODO kvmppc_uvmem_memslot_create() can fail and
		 * return error. Fix this.
		 */
		kvmppc_uvmem_memslot_create(kvm, new);
		break;
	case KVM_MR_DELETE:
		kvmppc_uvmem_memslot_delete(kvm, old);
		break;
	default:
		/* TODO: Handle KVM_MR_MOVE */
		break;
	}
}

/*
 * Update LPCR values in kvm->arch and in vcores.
 * Caller must hold kvm->arch.mmu_setup_lock (for mutual exclusion
 * of kvm->arch.lpcr update).
 */
void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr, unsigned long mask)
{
	long int i;
	u32 cores_done = 0;

	if ((kvm->arch.lpcr & mask) == lpcr)
		return;

	kvm->arch.lpcr = (kvm->arch.lpcr & ~mask) | lpcr;

	for (i = 0; i < KVM_MAX_VCORES; ++i) {
		struct kvmppc_vcore *vc = kvm->arch.vcores[i];
		if (!vc)
			continue;
		spin_lock(&vc->lock);
		vc->lpcr = (vc->lpcr & ~mask) | lpcr;
		spin_unlock(&vc->lock);
		if (++cores_done >= kvm->arch.online_vcores)
			break;
	}
}

void kvmppc_setup_partition_table(struct kvm *kvm)
{
	unsigned long dw0, dw1;

	if (!kvm_is_radix(kvm)) {
		/* PS field - page size for VRMA */
		dw0 = ((kvm->arch.vrma_slb_v & SLB_VSID_L) >> 1) |
			((kvm->arch.vrma_slb_v & SLB_VSID_LP) << 1);
		/* HTABSIZE and HTABORG fields */
		dw0 |= kvm->arch.sdr1;

		/* Second dword as set by userspace */
		dw1 = kvm->arch.process_table;
	} else {
		dw0 = PATB_HR | radix__get_tree_size() |
			__pa(kvm->arch.pgtable) | RADIX_PGD_INDEX_SIZE;
		dw1 = PATB_GR | kvm->arch.process_table;
	}
	kvmhv_set_ptbl_entry(kvm->arch.lpid, dw0, dw1);
}

/*
 * Set up HPT (hashed page table) and RMA (real-mode area).
 * Must be called with kvm->arch.mmu_setup_lock held.
 */
static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
{
	int err = 0;
	struct kvm *kvm = vcpu->kvm;
	unsigned long hva;
	struct kvm_memory_slot *memslot;
	struct vm_area_struct *vma;
	unsigned long lpcr = 0, senc;
	unsigned long psize, porder;
	int srcu_idx;

	/* Allocate hashed page table (if not done already) and reset it */
	if (!kvm->arch.hpt.virt) {
		int order = KVM_DEFAULT_HPT_ORDER;
		struct kvm_hpt_info info;

		err = kvmppc_allocate_hpt(&info, order);
		/*
		 * If we get here, it means userspace didn't specify a
		 * size explicitly.  So, try successively smaller
		 * sizes if the default failed.
		 */
		while ((err == -ENOMEM) && --order >= PPC_MIN_HPT_ORDER)
			err = kvmppc_allocate_hpt(&info, order);

		if (err < 0) {
			pr_err("KVM: Couldn't alloc HPT\n");
			goto out;
		}

		kvmppc_set_hpt(kvm, &info);
	}

	/* Look up the memslot for guest physical address 0 */
	srcu_idx = srcu_read_lock(&kvm->srcu);
	memslot = gfn_to_memslot(kvm, 0);

	/* We must have some memory at 0 by now */
	err = -EINVAL;
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
		goto out_srcu;

	/* Look up the VMA for the start of this memory slot */
	hva = memslot->userspace_addr;
	mmap_read_lock(kvm->mm);
	vma = find_vma(kvm->mm, hva);
	if (!vma || vma->vm_start > hva || (vma->vm_flags & VM_IO))
		goto up_out;

	psize = vma_kernel_pagesize(vma);

	mmap_read_unlock(kvm->mm);

	/* We can handle 4k, 64k or 16M pages in the VRMA */
	if (psize >= 0x1000000)
		psize = 0x1000000;
	else if (psize >= 0x10000)
		psize = 0x10000;
	else
		psize = 0x1000;
	porder = __ilog2(psize);

	senc = slb_pgsize_encoding(psize);
	kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
		(VRMA_VSID << SLB_VSID_SHIFT_1T);
	/* Create HPTEs in the hash page table for the VRMA */
	kvmppc_map_vrma(vcpu, memslot, porder);

	/* Update VRMASD field in the LPCR */
	if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
		/* the -4 is to account for senc values starting at 0x10 */
		lpcr = senc << (LPCR_VRMASD_SH - 4);
		kvmppc_update_lpcr(kvm, lpcr, LPCR_VRMASD);
	}

	/* Order updates to kvm->arch.lpcr etc. vs. mmu_ready */
	smp_wmb();
	err = 0;
 out_srcu:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
 out:
	return err;

 up_out:
	mmap_read_unlock(kvm->mm);
	goto out_srcu;
}

/*
 * Must be called with kvm->arch.mmu_setup_lock held and
 * mmu_ready = 0 and no vcpus running.
 */
int kvmppc_switch_mmu_to_hpt(struct kvm *kvm)
{
	if (nesting_enabled(kvm))
		kvmhv_release_all_nested(kvm);
	kvmppc_rmap_reset(kvm);
	kvm->arch.process_table = 0;
	/* Mutual exclusion with kvm_unmap_hva_range etc. */
	spin_lock(&kvm->mmu_lock);
	kvm->arch.radix = 0;
	spin_unlock(&kvm->mmu_lock);
	kvmppc_free_radix(kvm);
	kvmppc_update_lpcr(kvm, LPCR_VPM1,
			   LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR);
	return 0;
}

/*
 * Must be called with kvm->arch.mmu_setup_lock held and
 * mmu_ready = 0 and no vcpus running.
 */
int kvmppc_switch_mmu_to_radix(struct kvm *kvm)
{
	int err;

	err = kvmppc_init_vm_radix(kvm);
	if (err)
		return err;
	kvmppc_rmap_reset(kvm);
	/* Mutual exclusion with kvm_unmap_hva_range etc. */
	spin_lock(&kvm->mmu_lock);
	kvm->arch.radix = 1;
	spin_unlock(&kvm->mmu_lock);
	kvmppc_free_hpt(&kvm->arch.hpt);
	kvmppc_update_lpcr(kvm, LPCR_UPRT | LPCR_GTSE | LPCR_HR,
			   LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR);
	return 0;
}

#ifdef CONFIG_KVM_XICS
/*
 * Allocate a per-core structure for managing state about which cores are
 * running in the host versus the guest and for exchanging data between
 * real mode KVM and CPU running in the host.
 * This is only done for the first VM.
 * The allocated structure stays even if all VMs have stopped.
 * It is only freed when the kvm-hv module is unloaded.
 * It's OK for this routine to fail, we just don't support host
 * core operations like redirecting H_IPI wakeups.
 */
void kvmppc_alloc_host_rm_ops(void)
{
	struct kvmppc_host_rm_ops *ops;
	unsigned long l_ops;
	int cpu, core;
	int size;

	/* Not the first time here? */
	if (kvmppc_host_rm_ops_hv != NULL)
		return;

	ops = kzalloc(sizeof(struct kvmppc_host_rm_ops), GFP_KERNEL);
	if (!ops)
		return;

	size = cpu_nr_cores() * sizeof(struct kvmppc_host_rm_core);
	ops->rm_core = kzalloc(size, GFP_KERNEL);

	if (!ops->rm_core) {
		kfree(ops);
		return;
	}

	cpus_read_lock();

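	/*
	 * One kvmppc_host_rm_core entry per physical core: step through
	 * the CPU ids one core at a time and mark each core that has at
	 * least one online thread as currently being in the host.
	 */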
	for (cpu = 0; cpu < nr_cpu_ids; cpu += threads_per_core) {
		if (!cpu_online(cpu))
			continue;

		core = cpu >> threads_shift;
		ops->rm_core[core].rm_state.in_host = 1;
	}

	ops->vcpu_kick = kvmppc_fast_vcpu_kick_hv;

	/*
	 * Make the contents of the kvmppc_host_rm_ops structure visible
	 * to other CPUs before we assign it to the global variable.
	 * Do an atomic assignment (no locks used here), but if someone
	 * beats us to it, just free our copy and return.
	 */
	smp_wmb();
	l_ops = (unsigned long) ops;

	if (cmpxchg64((unsigned long *)&kvmppc_host_rm_ops_hv, 0, l_ops)) {
		cpus_read_unlock();
		kfree(ops->rm_core);
		kfree(ops);
		return;
	}

	cpuhp_setup_state_nocalls_cpuslocked(CPUHP_KVM_PPC_BOOK3S_PREPARE,
					     "ppc/kvm_book3s:prepare",
					     kvmppc_set_host_core,
					     kvmppc_clear_host_core);
	cpus_read_unlock();
}

void kvmppc_free_host_rm_ops(void)
{
	if (kvmppc_host_rm_ops_hv) {
		cpuhp_remove_state_nocalls(CPUHP_KVM_PPC_BOOK3S_PREPARE);
		kfree(kvmppc_host_rm_ops_hv->rm_core);
		kfree(kvmppc_host_rm_ops_hv);
		kvmppc_host_rm_ops_hv = NULL;
	}
}
#endif

static int kvmppc_core_init_vm_hv(struct kvm *kvm)
{
	unsigned long lpcr, lpid;
	char buf[32];
	int ret;

	mutex_init(&kvm->arch.uvmem_lock);
	INIT_LIST_HEAD(&kvm->arch.uvmem_pfns);
	mutex_init(&kvm->arch.mmu_setup_lock);

	/* Allocate the guest's logical partition ID */

	lpid = kvmppc_alloc_lpid();
	if ((long)lpid < 0)
		return -ENOMEM;
	kvm->arch.lpid = lpid;

	kvmppc_alloc_host_rm_ops();

	kvmhv_vm_nested_init(kvm);

	/*
	 * Since we don't flush the TLB when tearing down a VM,
	 * and this lpid might have previously been used,
	 * make sure we flush on each core before running the new VM.
	 * On POWER9, the tlbie in mmu_partition_table_set_entry()
	 * does this flush for us.
	 */
	if (!cpu_has_feature(CPU_FTR_ARCH_300))
		cpumask_setall(&kvm->arch.need_tlb_flush);

	/* Start out with the default set of hcalls enabled */
	memcpy(kvm->arch.enabled_hcalls, default_enabled_hcalls,
	       sizeof(kvm->arch.enabled_hcalls));

	if (!cpu_has_feature(CPU_FTR_ARCH_300))
		kvm->arch.host_sdr1 = mfspr(SPRN_SDR1);

	/* Init LPCR for virtual RMA mode */
	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		kvm->arch.host_lpid = mfspr(SPRN_LPID);
		kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR);
		lpcr &= LPCR_PECE | LPCR_LPES;
	} else {
		lpcr = 0;
	}
	lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE |
		LPCR_VPM0 | LPCR_VPM1;
	kvm->arch.vrma_slb_v = SLB_VSID_B_1T |
		(VRMA_VSID << SLB_VSID_SHIFT_1T);
	/* On POWER8 turn on online bit to enable PURR/SPURR */
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		lpcr |= LPCR_ONL;
	/*
	 * On POWER9, VPM0 bit is reserved (VPM0=1 behaviour is assumed)
	 * Set HVICE bit to enable hypervisor virtualization interrupts.
	 * Set HEIC to prevent OS interrupts to go to hypervisor (should
	 * be unnecessary but better safe than sorry in case we re-enable
	 * EE in HV mode with this LPCR still set)
	 */
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		lpcr &= ~LPCR_VPM0;
		lpcr |= LPCR_HVICE | LPCR_HEIC;

		/*
		 * If xive is enabled, we route 0x500 interrupts directly
		 * to the guest.
		 */
		if (xics_on_xive())
			lpcr |= LPCR_LPES;
	}

	/*
	 * If the host uses radix, the guest starts out as radix.
	 */
	if (radix_enabled()) {
		kvm->arch.radix = 1;
		kvm->arch.mmu_ready = 1;
		lpcr &= ~LPCR_VPM1;
		lpcr |= LPCR_UPRT | LPCR_GTSE | LPCR_HR;
		ret = kvmppc_init_vm_radix(kvm);
		if (ret) {
			kvmppc_free_lpid(kvm->arch.lpid);
			return ret;
		}
		kvmppc_setup_partition_table(kvm);
	}

	kvm->arch.lpcr = lpcr;

	/* Initialization for future HPT resizes */
	kvm->arch.resize_hpt = NULL;

	/*
	 * Work out how many sets the TLB has, for the use of
	 * the TLB invalidation loop in book3s_hv_rmhandlers.S.
	 */
	if (radix_enabled())
		kvm->arch.tlb_sets = POWER9_TLB_SETS_RADIX;	/* 128 */
	else if (cpu_has_feature(CPU_FTR_ARCH_300))
		kvm->arch.tlb_sets = POWER9_TLB_SETS_HASH;	/* 256 */
	else if (cpu_has_feature(CPU_FTR_ARCH_207S))
		kvm->arch.tlb_sets = POWER8_TLB_SETS;		/* 512 */
	else
		kvm->arch.tlb_sets = POWER7_TLB_SETS;		/* 128 */

	/*
	 * Track that we now have a HV mode VM active. This blocks secondary
	 * CPU threads from coming online.
	 * On POWER9, we only need to do this if the "indep_threads_mode"
	 * module parameter has been set to N.
	 */
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		if (!indep_threads_mode && !cpu_has_feature(CPU_FTR_HVMODE)) {
			pr_warn("KVM: Ignoring indep_threads_mode=N in nested hypervisor\n");
			kvm->arch.threads_indep = true;
		} else {
			kvm->arch.threads_indep = indep_threads_mode;
		}
	}
	if (!kvm->arch.threads_indep)
		kvm_hv_vm_activated();

	/*
	 * Initialize smt_mode depending on processor.
	 * POWER8 and earlier have to use "strict" threading, where
	 * all vCPUs in a vcore have to run on the same (sub)core,
	 * whereas on POWER9 the threads can each run a different
	 * guest.
	 */
	if (!cpu_has_feature(CPU_FTR_ARCH_300))
		kvm->arch.smt_mode = threads_per_subcore;
	else
		kvm->arch.smt_mode = 1;
	kvm->arch.emul_smt_mode = 1;

	/*
	 * Create a debugfs directory for the VM
	 */
	snprintf(buf, sizeof(buf), "vm%d", current->pid);
	kvm->arch.debugfs_dir = debugfs_create_dir(buf, kvm_debugfs_dir);
	kvmppc_mmu_debugfs_init(kvm);
	if (radix_enabled())
		kvmhv_radix_debugfs_init(kvm);

	return 0;
}

static void kvmppc_free_vcores(struct kvm *kvm)
{
	long int i;

	for (i = 0; i < KVM_MAX_VCORES; ++i)
		kfree(kvm->arch.vcores[i]);
	kvm->arch.online_vcores = 0;
}

static void kvmppc_core_destroy_vm_hv(struct kvm *kvm)
{
	debugfs_remove_recursive(kvm->arch.debugfs_dir);

	if (!kvm->arch.threads_indep)
		kvm_hv_vm_deactivated();

	kvmppc_free_vcores(kvm);

	if (kvm_is_radix(kvm))
		kvmppc_free_radix(kvm);
	else
		kvmppc_free_hpt(&kvm->arch.hpt);

	/* Perform global invalidation and return lpid to the pool */
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		if (nesting_enabled(kvm))
			kvmhv_release_all_nested(kvm);
		kvm->arch.process_table = 0;
		if (kvm->arch.secure_guest)
			uv_svm_terminate(kvm->arch.lpid);
		kvmhv_set_ptbl_entry(kvm->arch.lpid, 0, 0);
	}

	kvmppc_free_lpid(kvm->arch.lpid);

	kvmppc_free_pimap(kvm);
}

/* We don't need to emulate any privileged instructions or dcbz */
static int kvmppc_core_emulate_op_hv(struct kvm_vcpu *vcpu,
				     unsigned int inst, int *advance)
{
	return EMULATE_FAIL;
}

static int kvmppc_core_emulate_mtspr_hv(struct kvm_vcpu *vcpu, int sprn,
					ulong spr_val)
{
	return EMULATE_FAIL;
}

static int kvmppc_core_emulate_mfspr_hv(struct kvm_vcpu *vcpu, int sprn,
					ulong *spr_val)
{
	return EMULATE_FAIL;
}

static int kvmppc_core_check_processor_compat_hv(void)
{
	if (cpu_has_feature(CPU_FTR_HVMODE) &&
	    cpu_has_feature(CPU_FTR_ARCH_206))
		return 0;

	/* POWER9 in radix mode is capable of being a nested hypervisor. */
	if (cpu_has_feature(CPU_FTR_ARCH_300) && radix_enabled())
		return 0;

	return -EIO;
}

#ifdef CONFIG_KVM_XICS

void kvmppc_free_pimap(struct kvm *kvm)
{
	kfree(kvm->arch.pimap);
}

static struct kvmppc_passthru_irqmap *kvmppc_alloc_pimap(void)
{
	return kzalloc(sizeof(struct kvmppc_passthru_irqmap), GFP_KERNEL);
}

static int kvmppc_set_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi)
{
	struct irq_desc *desc;
	struct kvmppc_irq_map *irq_map;
	struct kvmppc_passthru_irqmap *pimap;
	struct irq_chip *chip;
	int i, rc = 0;

	if (!kvm_irq_bypass)
		return 1;

	desc = irq_to_desc(host_irq);
	if (!desc)
		return -EIO;

	mutex_lock(&kvm->lock);

	pimap = kvm->arch.pimap;
	if (pimap == NULL) {
		/* First call, allocate structure to hold IRQ map */
		pimap = kvmppc_alloc_pimap();
		if (pimap == NULL) {
			mutex_unlock(&kvm->lock);
			return -ENOMEM;
		}
		kvm->arch.pimap = pimap;
	}

	/*
	 * For now, we only support interrupts for which the EOI operation
	 * is an OPAL call followed by a write to XIRR, since that's
	 * what our real-mode EOI code does, or a XIVE interrupt
	 */
	chip = irq_data_get_irq_chip(&desc->irq_data);
	if (!chip || !(is_pnv_opal_msi(chip) || is_xive_irq(chip))) {
		pr_warn("kvmppc_set_passthru_irq_hv: Could not assign IRQ map for (%d,%d)\n",
			host_irq, guest_gsi);
		mutex_unlock(&kvm->lock);
		return -ENOENT;
	}

	/*
	 * See if we already have an entry for this guest IRQ number.
	 * If it's mapped to a hardware IRQ number, that's an error,
	 * otherwise re-use this entry.
	 */
	for (i = 0; i < pimap->n_mapped; i++) {
		if (guest_gsi == pimap->mapped[i].v_hwirq) {
			if (pimap->mapped[i].r_hwirq) {
				mutex_unlock(&kvm->lock);
				return -EINVAL;
			}
			break;
		}
	}

	if (i == KVMPPC_PIRQ_MAPPED) {
		mutex_unlock(&kvm->lock);
		return -EAGAIN;		/* table is full */
	}

	irq_map = &pimap->mapped[i];

	irq_map->v_hwirq = guest_gsi;
	irq_map->desc = desc;

	/*
	 * Order the above two stores before the next to serialize with
	 * the KVM real mode handler.
	 */
	smp_wmb();
	irq_map->r_hwirq = desc->irq_data.hwirq;

	if (i == pimap->n_mapped)
		pimap->n_mapped++;

	if (xics_on_xive())
		rc = kvmppc_xive_set_mapped(kvm, guest_gsi, desc);
	else
		kvmppc_xics_set_mapped(kvm, guest_gsi, desc->irq_data.hwirq);
	if (rc)
		irq_map->r_hwirq = 0;

	mutex_unlock(&kvm->lock);

	return 0;
}

static int kvmppc_clr_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi)
{
	struct irq_desc *desc;
	struct kvmppc_passthru_irqmap *pimap;
	int i, rc = 0;

	if (!kvm_irq_bypass)
		return 0;

	desc = irq_to_desc(host_irq);
	if (!desc)
		return -EIO;

	mutex_lock(&kvm->lock);
	if (!kvm->arch.pimap)
		goto unlock;

	pimap = kvm->arch.pimap;

	for (i = 0; i < pimap->n_mapped; i++) {
		if (guest_gsi == pimap->mapped[i].v_hwirq)
			break;
	}

	if (i == pimap->n_mapped) {
		mutex_unlock(&kvm->lock);
		return -ENODEV;
	}

	if (xics_on_xive())
		rc = kvmppc_xive_clr_mapped(kvm, guest_gsi, pimap->mapped[i].desc);
	else
		kvmppc_xics_clr_mapped(kvm, guest_gsi, pimap->mapped[i].r_hwirq);

	/* invalidate the entry (what to do on error from the above?) */
	pimap->mapped[i].r_hwirq = 0;

	/*
	 * We don't free this structure even when the count goes to
	 * zero. The structure is freed when we destroy the VM.
	 */
unlock:
	mutex_unlock(&kvm->lock);
	return rc;
}

static int kvmppc_irq_bypass_add_producer_hv(struct irq_bypass_consumer *cons,
					     struct irq_bypass_producer *prod)
{
	int ret = 0;
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);

	irqfd->producer = prod;

	ret = kvmppc_set_passthru_irq(irqfd->kvm, prod->irq, irqfd->gsi);
	if (ret)
		pr_info("kvmppc_set_passthru_irq (irq %d, gsi %d) fails: %d\n",
			prod->irq, irqfd->gsi, ret);

	return ret;
}

static void kvmppc_irq_bypass_del_producer_hv(struct irq_bypass_consumer *cons,
					      struct irq_bypass_producer *prod)
{
	int ret;
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);

	irqfd->producer = NULL;

	/*
	 * When the producer of a consumer is unregistered, we change back
	 * to the default external interrupt handling mode: KVM real mode
	 * will switch back to the host.
	 */
	ret = kvmppc_clr_passthru_irq(irqfd->kvm, prod->irq, irqfd->gsi);
	if (ret)
		pr_warn("kvmppc_clr_passthru_irq (irq %d, gsi %d) fails: %d\n",
			prod->irq, irqfd->gsi, ret);
}
#endif

static long kvm_arch_vm_ioctl_hv(struct file *filp,
				 unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm __maybe_unused = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {

	case KVM_PPC_ALLOCATE_HTAB: {
		u32 htab_order;

		/* If we're a nested hypervisor, we currently only support radix */
		if (kvmhv_on_pseries()) {
			r = -EOPNOTSUPP;
			break;
		}

		r = -EFAULT;
		if (get_user(htab_order, (u32 __user *)argp))
			break;
		r = kvmppc_alloc_reset_hpt(kvm, htab_order);
		if (r)
			break;
		r = 0;
		break;
	}

	case KVM_PPC_GET_HTAB_FD: {
		struct kvm_get_htab_fd ghf;

		r = -EFAULT;
		if (copy_from_user(&ghf, argp, sizeof(ghf)))
			break;
		r = kvm_vm_ioctl_get_htab_fd(kvm, &ghf);
		break;
	}

	case KVM_PPC_RESIZE_HPT_PREPARE: {
		struct kvm_ppc_resize_hpt rhpt;

		r = -EFAULT;
		if (copy_from_user(&rhpt, argp, sizeof(rhpt)))
			break;

		r = kvm_vm_ioctl_resize_hpt_prepare(kvm, &rhpt);
		break;
	}

	case KVM_PPC_RESIZE_HPT_COMMIT: {
		struct kvm_ppc_resize_hpt rhpt;

		r = -EFAULT;
		if (copy_from_user(&rhpt, argp, sizeof(rhpt)))
			break;

		r = kvm_vm_ioctl_resize_hpt_commit(kvm, &rhpt);
		break;
	}

	default:
		r = -ENOTTY;
	}

	return r;
}

/*
 * List of hcall numbers to enable by default.
 * For compatibility with old userspace, we enable by default
 * all hcalls that were implemented before the hcall-enabling
 * facility was added.  Note this list should not include H_RTAS.
 */
static unsigned int default_hcall_list[] = {
	H_REMOVE,
	H_ENTER,
	H_READ,
	H_PROTECT,
	H_BULK_REMOVE,
	H_GET_TCE,
	H_PUT_TCE,
	H_SET_DABR,
	H_SET_XDABR,
	H_CEDE,
	H_PROD,
	H_CONFER,
	H_REGISTER_VPA,
#ifdef CONFIG_KVM_XICS
	H_EOI,
	H_CPPR,
	H_IPI,
	H_IPOLL,
	H_XIRR,
	H_XIRR_X,
#endif
	0
};

static void init_default_hcalls(void)
{
	int i;
	unsigned int hcall;

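	/*
	 * PAPR hcall numbers are multiples of 4, so divide by 4 to index
	 * the bitmap densely; this matches the bitmap being declared with
	 * MAX_HCALL_OPCODE/4 + 1 bits.
	 */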
5423 for (i = 0; default_hcall_list[i]; ++i) {
5424 hcall = default_hcall_list[i];
5425 WARN_ON(!kvmppc_hcall_impl_hv(hcall));
5426 __set_bit(hcall / 4, default_enabled_hcalls);
5427 }
5428 }
5429
kvmhv_configure_mmu(struct kvm * kvm,struct kvm_ppc_mmuv3_cfg * cfg)5430 static int kvmhv_configure_mmu(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg)
5431 {
5432 unsigned long lpcr;
5433 int radix;
5434 int err;
5435
5436 /* If not on a POWER9, reject it */
5437 if (!cpu_has_feature(CPU_FTR_ARCH_300))
5438 return -ENODEV;
5439
5440 /* If any unknown flags set, reject it */
5441 if (cfg->flags & ~(KVM_PPC_MMUV3_RADIX | KVM_PPC_MMUV3_GTSE))
5442 return -EINVAL;
5443
5444 /* GR (guest radix) bit in process_table field must match */
5445 radix = !!(cfg->flags & KVM_PPC_MMUV3_RADIX);
5446 if (!!(cfg->process_table & PATB_GR) != radix)
5447 return -EINVAL;
5448
5449 /* Process table size field must be reasonable, i.e. <= 24 */
5450 if ((cfg->process_table & PRTS_MASK) > 24)
5451 return -EINVAL;
5452
5453 /* We can change a guest to/from radix now, if the host is radix */
5454 if (radix && !radix_enabled())
5455 return -EINVAL;
5456
5457 /* If we're a nested hypervisor, we currently only support radix */
5458 if (kvmhv_on_pseries() && !radix)
5459 return -EINVAL;
5460
5461 mutex_lock(&kvm->arch.mmu_setup_lock);
5462 if (radix != kvm_is_radix(kvm)) {
5463 if (kvm->arch.mmu_ready) {
5464 kvm->arch.mmu_ready = 0;
5465 /* order mmu_ready vs. vcpus_running */
5466 smp_mb();
			if (atomic_read(&kvm->arch.vcpus_running)) {
				kvm->arch.mmu_ready = 1;
				err = -EBUSY;
				goto out_unlock;
			}
		}
		if (radix)
			err = kvmppc_switch_mmu_to_radix(kvm);
		else
			err = kvmppc_switch_mmu_to_hpt(kvm);
		if (err)
			goto out_unlock;
	}

	kvm->arch.process_table = cfg->process_table;
	kvmppc_setup_partition_table(kvm);

	lpcr = (cfg->flags & KVM_PPC_MMUV3_GTSE) ? LPCR_GTSE : 0;
	kvmppc_update_lpcr(kvm, lpcr, LPCR_GTSE);
	err = 0;

 out_unlock:
	mutex_unlock(&kvm->arch.mmu_setup_lock);
	return err;
}

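/*
 * Enable nested virtualization for a VM. Reached via the
 * KVM_CAP_PPC_NESTED_HV capability; a VMM would typically do
 * something like (sketch, not from this file):
 *
 *	struct kvm_enable_cap cap = { .cap = KVM_CAP_PPC_NESTED_HV };
 *	ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
 */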
static int kvmhv_enable_nested(struct kvm *kvm)
{
	if (!nested)
		return -EPERM;
	if (!cpu_has_feature(CPU_FTR_ARCH_300) || no_mixing_hpt_and_radix)
		return -ENODEV;

	/* kvm == NULL means the caller is testing if the capability exists */
	if (kvm)
		kvm->arch.nested_enable = true;
	return 0;
}

static int kvmhv_load_from_eaddr(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
				 int size)
{
	int rc = -EINVAL;

	if (kvmhv_vcpu_is_radix(vcpu)) {
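		/*
		 * A positive return value apparently indicates a partial
		 * copy (bytes not copied, as with copy_from_user());
		 * treat any shortfall as a failure.
		 */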
		rc = kvmhv_copy_from_guest_radix(vcpu, *eaddr, ptr, size);

		if (rc > 0)
			rc = -EINVAL;
	}

	/* For now quadrants are the only way to access nested guest memory */
	if (rc && vcpu->arch.nested)
		rc = -EAGAIN;

	return rc;
}

static int kvmhv_store_to_eaddr(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
				int size)
{
	int rc = -EINVAL;

	if (kvmhv_vcpu_is_radix(vcpu)) {
		rc = kvmhv_copy_to_guest_radix(vcpu, *eaddr, ptr, size);

		if (rc > 0)
			rc = -EINVAL;
	}

	/* For now quadrants are the only way to access nested guest memory */
	if (rc && vcpu->arch.nested)
		rc = -EAGAIN;

	return rc;
}

static void unpin_vpa_reset(struct kvm *kvm, struct kvmppc_vpa *vpa)
{
	unpin_vpa(kvm, vpa);
	vpa->gpa = 0;
	vpa->pinned_addr = NULL;
	vpa->dirty = false;
	vpa->update_pending = 0;
}

/*
 * Enable a guest to become a secure VM, or test whether
 * that could be enabled.
 * Called when the KVM_CAP_PPC_SECURE_GUEST capability is
 * tested (kvm == NULL) or enabled (kvm != NULL).
 */
static int kvmhv_enable_svm(struct kvm *kvm)
{
	if (!kvmppc_uvmem_available())
		return -EINVAL;
	if (kvm)
		kvm->arch.svm_enabled = 1;
	return 0;
}

/*
 * IOCTL handler to turn off secure mode of a guest
 *
 * - Release all device pages
 * - Issue ucall to terminate the guest on the UV side
 * - Unpin the VPA pages
 * - Reinit the partition scoped page tables
 */
static int kvmhv_svm_off(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int mmu_was_ready;
	int srcu_idx;
	int ret = 0;
	int i;

	if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
		return ret;

	mutex_lock(&kvm->arch.mmu_setup_lock);
	mmu_was_ready = kvm->arch.mmu_ready;
	if (kvm->arch.mmu_ready) {
		kvm->arch.mmu_ready = 0;
		/* order mmu_ready vs. vcpus_running */
		smp_mb();
		if (atomic_read(&kvm->arch.vcpus_running)) {
			kvm->arch.mmu_ready = 1;
			ret = -EBUSY;
			goto out;
		}
	}

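	/* kvm->srcu protects the memslot arrays while we walk them */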
	srcu_idx = srcu_read_lock(&kvm->srcu);
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		struct kvm_memory_slot *memslot;
		struct kvm_memslots *slots = __kvm_memslots(kvm, i);

		if (!slots)
			continue;

		kvm_for_each_memslot(memslot, slots) {
			kvmppc_uvmem_drop_pages(memslot, kvm, true);
			uv_unregister_mem_slot(kvm->arch.lpid, memslot->id);
		}
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);

	ret = uv_svm_terminate(kvm->arch.lpid);
	if (ret != U_SUCCESS) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * When a secure guest is reset, all the guest pages are sent
	 * to the UV via UV_PAGE_IN before the non-boot vcpus get a
	 * chance to run and unpin their VPA pages. Unpinning of all
	 * VPA pages is done here explicitly so that VPA pages
	 * can be migrated to the secure side.
	 *
	 * This is required for the secure SMP guest to reboot
	 * correctly.
	 */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		spin_lock(&vcpu->arch.vpa_update_lock);
		unpin_vpa_reset(kvm, &vcpu->arch.dtl);
		unpin_vpa_reset(kvm, &vcpu->arch.slb_shadow);
		unpin_vpa_reset(kvm, &vcpu->arch.vpa);
		spin_unlock(&vcpu->arch.vpa_update_lock);
	}

	kvmppc_setup_partition_table(kvm);
	kvm->arch.secure_guest = 0;
	kvm->arch.mmu_ready = mmu_was_ready;
 out:
	mutex_unlock(&kvm->arch.mmu_setup_lock);
	return ret;
}

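/*
 * HV-mode ops table; installed as kvmppc_hv_ops in
 * kvmppc_book3s_init_hv() below, making it the dispatch table
 * for VMs running with this module.
 */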
static struct kvmppc_ops kvm_ops_hv = {
	.get_sregs = kvm_arch_vcpu_ioctl_get_sregs_hv,
	.set_sregs = kvm_arch_vcpu_ioctl_set_sregs_hv,
	.get_one_reg = kvmppc_get_one_reg_hv,
	.set_one_reg = kvmppc_set_one_reg_hv,
	.vcpu_load = kvmppc_core_vcpu_load_hv,
	.vcpu_put = kvmppc_core_vcpu_put_hv,
	.inject_interrupt = kvmppc_inject_interrupt_hv,
	.set_msr = kvmppc_set_msr_hv,
	.vcpu_run = kvmppc_vcpu_run_hv,
	.vcpu_create = kvmppc_core_vcpu_create_hv,
	.vcpu_free = kvmppc_core_vcpu_free_hv,
	.check_requests = kvmppc_core_check_requests_hv,
	.get_dirty_log = kvm_vm_ioctl_get_dirty_log_hv,
	.flush_memslot = kvmppc_core_flush_memslot_hv,
	.prepare_memory_region = kvmppc_core_prepare_memory_region_hv,
	.commit_memory_region = kvmppc_core_commit_memory_region_hv,
	.unmap_hva_range = kvm_unmap_hva_range_hv,
	.age_hva = kvm_age_hva_hv,
	.test_age_hva = kvm_test_age_hva_hv,
	.set_spte_hva = kvm_set_spte_hva_hv,
	.free_memslot = kvmppc_core_free_memslot_hv,
	.init_vm = kvmppc_core_init_vm_hv,
	.destroy_vm = kvmppc_core_destroy_vm_hv,
	.get_smmu_info = kvm_vm_ioctl_get_smmu_info_hv,
	.emulate_op = kvmppc_core_emulate_op_hv,
	.emulate_mtspr = kvmppc_core_emulate_mtspr_hv,
	.emulate_mfspr = kvmppc_core_emulate_mfspr_hv,
	.fast_vcpu_kick = kvmppc_fast_vcpu_kick_hv,
	.arch_vm_ioctl = kvm_arch_vm_ioctl_hv,
	.hcall_implemented = kvmppc_hcall_impl_hv,
#ifdef CONFIG_KVM_XICS
	.irq_bypass_add_producer = kvmppc_irq_bypass_add_producer_hv,
	.irq_bypass_del_producer = kvmppc_irq_bypass_del_producer_hv,
#endif
	.configure_mmu = kvmhv_configure_mmu,
	.get_rmmu_info = kvmhv_get_rmmu_info,
	.set_smt_mode = kvmhv_set_smt_mode,
	.enable_nested = kvmhv_enable_nested,
	.load_from_eaddr = kvmhv_load_from_eaddr,
	.store_to_eaddr = kvmhv_store_to_eaddr,
	.enable_svm = kvmhv_enable_svm,
	.svm_off = kvmhv_svm_off,
};

static int kvm_init_subcore_bitmap(void)
{
	int i, j;
	int nr_cores = cpu_nr_cores();
	struct sibling_subcore_state *sibling_subcore_state;

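	/*
	 * Allocate one sibling_subcore_state per core, on that core's
	 * NUMA node, and share it between all threads of the core.
	 * (It is used to coordinate the core's threads, e.g. for
	 * machine check / HMI handling while some threads are in a
	 * guest.)
	 */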
	for (i = 0; i < nr_cores; i++) {
		int first_cpu = i * threads_per_core;
		int node = cpu_to_node(first_cpu);

		/* Ignore if it is already allocated. */
		if (paca_ptrs[first_cpu]->sibling_subcore_state)
			continue;

		sibling_subcore_state =
			kzalloc_node(sizeof(struct sibling_subcore_state),
				     GFP_KERNEL, node);
		if (!sibling_subcore_state)
			return -ENOMEM;

		for (j = 0; j < threads_per_core; j++) {
			int cpu = first_cpu + j;

			paca_ptrs[cpu]->sibling_subcore_state =
						sibling_subcore_state;
		}
	}
	return 0;
}

static int kvmppc_radix_possible(void)
{
	return cpu_has_feature(CPU_FTR_ARCH_300) && radix_enabled();
}

static int kvmppc_book3s_init_hv(void)
{
	int r;

	if (!tlbie_capable) {
		pr_err("KVM-HV: Host does not support TLBIE\n");
		return -ENODEV;
	}

	/*
	 * FIXME!! Do we need to check on all CPUs?
	 */
	r = kvmppc_core_check_processor_compat_hv();
	if (r < 0)
		return -ENODEV;

	r = kvmhv_nested_init();
	if (r)
		return r;

	r = kvm_init_subcore_bitmap();
	if (r)
		return r;

	/*
	 * We need a way of accessing the XICS interrupt controller,
	 * either directly, via paca_ptrs[cpu]->kvm_hstate.xics_phys, or
	 * indirectly, via OPAL.
	 */
#ifdef CONFIG_SMP
	if (!xics_on_xive() && !kvmhv_on_pseries() &&
	    !local_paca->kvm_hstate.xics_phys) {
		struct device_node *np;

		np = of_find_compatible_node(NULL, NULL, "ibm,opal-intc");
		if (!np) {
			pr_err("KVM-HV: Cannot determine method for accessing XICS\n");
			return -ENODEV;
		}
		/* presence of intc confirmed - node can be dropped again */
		of_node_put(np);
	}
#endif

	kvm_ops_hv.owner = THIS_MODULE;
	kvmppc_hv_ops = &kvm_ops_hv;

	init_default_hcalls();

	init_vcore_lists();

	r = kvmppc_mmu_hv_init();
	if (r)
		return r;

	if (kvmppc_radix_possible()) {
		r = kvmppc_radix_init();
		if (r)
			return r;
	}

	/*
	 * POWER9 chips before version 2.02 can't have some threads in
	 * HPT mode and some in radix mode on the same core.
	 */
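	/*
	 * In the POWER9 PVR, bits 0xe000 of the low halfword appear to
	 * identify the chip variant (0 = "Nimbus", 0x2000 = "Cumulus")
	 * and bits 0xfff the DD level, so this catches Nimbus below
	 * DD2.02 and Cumulus below DD1.01.
	 */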
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		unsigned int pvr = mfspr(SPRN_PVR);
		if ((pvr >> 16) == PVR_POWER9 &&
		    (((pvr & 0xe000) == 0 && (pvr & 0xfff) < 0x202) ||
		     ((pvr & 0xe000) == 0x2000 && (pvr & 0xfff) < 0x101)))
			no_mixing_hpt_and_radix = true;
	}

	r = kvmppc_uvmem_init();
	if (r < 0)
		pr_err("KVM-HV: kvmppc_uvmem_init failed %d\n", r);

	return r;
}

static void kvmppc_book3s_exit_hv(void)
{
	kvmppc_uvmem_free();
	kvmppc_free_host_rm_ops();
	if (kvmppc_radix_possible())
		kvmppc_radix_exit();
	kvmppc_hv_ops = NULL;
	kvmhv_nested_exit();
}

module_init(kvmppc_book3s_init_hv);
module_exit(kvmppc_book3s_exit_hv);
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");