Lines Matching +full:fw +full:- +full:cfg +full:- +full:mmio

1 // SPDX-License-Identifier: GPL-2.0-only
51 return !!(v->arch.pending_exceptions) || kvm_request_pending(v); in kvm_arch_vcpu_runnable()
95 vcpu->run->exit_reason = KVM_EXIT_INTR; in kvmppc_prepare_to_enter()
96 r = -EINTR; in kvmppc_prepare_to_enter()
100 vcpu->mode = IN_GUEST_MODE; in kvmppc_prepare_to_enter()
103 * Reading vcpu->requests must happen after setting vcpu->mode, in kvmppc_prepare_to_enter()
143 struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared; in kvmppc_swab_shared()
146 shared->sprg0 = swab64(shared->sprg0); in kvmppc_swab_shared()
147 shared->sprg1 = swab64(shared->sprg1); in kvmppc_swab_shared()
148 shared->sprg2 = swab64(shared->sprg2); in kvmppc_swab_shared()
149 shared->sprg3 = swab64(shared->sprg3); in kvmppc_swab_shared()
150 shared->srr0 = swab64(shared->srr0); in kvmppc_swab_shared()
151 shared->srr1 = swab64(shared->srr1); in kvmppc_swab_shared()
152 shared->dar = swab64(shared->dar); in kvmppc_swab_shared()
153 shared->msr = swab64(shared->msr); in kvmppc_swab_shared()
154 shared->dsisr = swab32(shared->dsisr); in kvmppc_swab_shared()
155 shared->int_pending = swab32(shared->int_pending); in kvmppc_swab_shared()
156 for (i = 0; i < ARRAY_SIZE(shared->sr); i++) in kvmppc_swab_shared()
157 shared->sr[i] = swab32(shared->sr[i]); in kvmppc_swab_shared()
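
The kvmppc_swab_shared() lines above byte-swap every field of the vcpu's shared page when the guest flips its declared endianness. A minimal userspace sketch of the same idea, with a trimmed stand-in struct and the glibc byteswap helpers (illustrative, not the kernel code itself):

#include <stdint.h>
#include <byteswap.h>   /* bswap_32() / bswap_64() */

struct shared_regs {    /* trimmed stand-in for kvm_vcpu_arch_shared */
	uint64_t sprg0, sprg1, srr0, srr1, dar, msr;
	uint32_t dsisr, int_pending;
	uint32_t sr[16];
};

/* Swap every field in place; called when the guest's endianness flips. */
static void swab_shared(struct shared_regs *s)
{
	s->sprg0 = bswap_64(s->sprg0);
	s->sprg1 = bswap_64(s->sprg1);
	s->srr0  = bswap_64(s->srr0);
	s->srr1  = bswap_64(s->srr1);
	s->dar   = bswap_64(s->dar);
	s->msr   = bswap_64(s->msr);
	s->dsisr = bswap_32(s->dsisr);
	s->int_pending = bswap_32(s->int_pending);
	for (unsigned i = 0; i < sizeof(s->sr) / sizeof(s->sr[0]); i++)
		s->sr[i] = bswap_32(s->sr[i]);
}
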
185 if (vcpu->arch.intr_msr & MSR_LE) in kvmppc_kvm_pv()
187 if (shared_big_endian != vcpu->arch.shared_big_endian) in kvmppc_kvm_pv()
189 vcpu->arch.shared_big_endian = shared_big_endian; in kvmppc_kvm_pv()
198 vcpu->arch.disable_kernel_nx = true; in kvmppc_kvm_pv()
202 vcpu->arch.magic_page_pa = param1 & ~0xfffULL; in kvmppc_kvm_pv()
203 vcpu->arch.magic_page_ea = param2 & ~0xfffULL; in kvmppc_kvm_pv()
210 if ((vcpu->arch.magic_page_pa & 0xf000) != in kvmppc_kvm_pv()
211 ((ulong)vcpu->arch.shared & 0xf000)) { in kvmppc_kvm_pv()
212 void *old_shared = vcpu->arch.shared; in kvmppc_kvm_pv()
213 ulong shared = (ulong)vcpu->arch.shared; in kvmppc_kvm_pv()
217 shared |= vcpu->arch.magic_page_pa & 0xf000; in kvmppc_kvm_pv()
220 vcpu->arch.shared = new_shared; in kvmppc_kvm_pv()
258 if (!vcpu->arch.pvr) in kvmppc_sanity_check()
262 if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled) in kvmppc_sanity_check()
266 if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm)) in kvmppc_sanity_check()
277 vcpu->arch.sane = r; in kvmppc_sanity_check()
278 return r ? 0 : -EINVAL; in kvmppc_sanity_check()
290 /* Future optimization: only reload non-volatiles if they were in kvmppc_emulate_mmio()
298 vcpu->run->exit_reason = KVM_EXIT_MMIO; in kvmppc_emulate_mmio()
301 /* Future optimization: only reload non-volatiles if they were in kvmppc_emulate_mmio()
327 ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK; in kvmppc_st()
329 int r = -EINVAL; in kvmppc_st()
331 vcpu->stat.st++; in kvmppc_st()
333 if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->store_to_eaddr) in kvmppc_st()
334 r = vcpu->kvm->arch.kvm_ops->store_to_eaddr(vcpu, eaddr, ptr, in kvmppc_st()
337 if ((!r) || (r == -EAGAIN)) in kvmppc_st()
348 return -EPERM; in kvmppc_st()
354 void *magic = vcpu->arch.shared; in kvmppc_st()
360 if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size)) in kvmppc_st()
370 ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK; in kvmppc_ld()
372 int rc = -EINVAL; in kvmppc_ld()
374 vcpu->stat.ld++; in kvmppc_ld()
376 if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->load_from_eaddr) in kvmppc_ld()
377 rc = vcpu->kvm->arch.kvm_ops->load_from_eaddr(vcpu, eaddr, ptr, in kvmppc_ld()
380 if ((!rc) || (rc == -EAGAIN)) in kvmppc_ld()
391 return -EPERM; in kvmppc_ld()
394 return -ENOEXEC; in kvmppc_ld()
400 void *magic = vcpu->arch.shared; in kvmppc_ld()
406 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in kvmppc_ld()
407 rc = kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size); in kvmppc_ld()
408 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); in kvmppc_ld()
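
In the kvmppc_ld()/kvmppc_st() fragments, an access that resolves to the guest's magic page is served from the locally mapped shared page rather than going through kvm_read_guest()/kvm_write_guest(). A hedged sketch of that fast path; magic_page_pa, the page size, and the backing buffer are made-up stand-ins:

#include <stdint.h>
#include <string.h>

#define PAGE_MASK (~0xfffUL)

/* Stand-ins for vcpu state; the values are illustrative only. */
static uint64_t magic_page_pa = 0x3000;  /* guest-physical magic page */
static uint8_t  shared_page[4096];       /* host mapping of shared page */

/* Returns 1 if the load was satisfied from the magic page, 0 otherwise. */
static int magic_page_load(uint64_t raddr, void *ptr, size_t size)
{
	uint64_t mp_pa = magic_page_pa & PAGE_MASK;

	if ((raddr & PAGE_MASK) != mp_pa)
		return 0;   /* not the magic page: take the slow path */

	/* Serve the access from the local mapping, no guest-memory walk. */
	memcpy(ptr, &shared_page[raddr & ~PAGE_MASK], size);
	return 1;
}
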
455 if (kvm_ops->owner && !try_module_get(kvm_ops->owner)) in kvm_arch_init_vm()
456 return -ENOENT; in kvm_arch_init_vm()
458 kvm->arch.kvm_ops = kvm_ops; in kvm_arch_init_vm()
461 return -EINVAL; in kvm_arch_init_vm()
482 mutex_lock(&kvm->lock); in kvm_arch_destroy_vm()
483 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) in kvm_arch_destroy_vm()
484 kvm->vcpus[i] = NULL; in kvm_arch_destroy_vm()
486 atomic_set(&kvm->online_vcpus, 0); in kvm_arch_destroy_vm()
490 mutex_unlock(&kvm->lock); in kvm_arch_destroy_vm()
493 module_put(kvm->arch.kvm_ops->owner); in kvm_arch_destroy_vm()
504 * Hooray - we know which VM type we're running on. Depend on in kvm_vm_ioctl_check_extension()
583 if (kvm->arch.emul_smt_mode > 1) in kvm_vm_ioctl_check_extension()
584 r = kvm->arch.emul_smt_mode; in kvm_vm_ioctl_check_extension()
586 r = kvm->arch.smt_mode; in kvm_vm_ioctl_check_extension()
598 r = ((threads_per_subcore << 1) - 1); in kvm_vm_ioctl_check_extension()
618 r = !!(hv_enabled && kvmppc_hv_ops->enable_nested && in kvm_vm_ioctl_check_extension()
619 !kvmppc_hv_ops->enable_nested(NULL)); in kvm_vm_ioctl_check_extension()
639 * return the number of present CPUs for -HV (since a host in kvm_vm_ioctl_check_extension()
672 r = !!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM) || in kvm_vm_ioctl_check_extension()
678 r = hv_enabled && kvmppc_hv_ops->enable_svm && in kvm_vm_ioctl_check_extension()
679 !kvmppc_hv_ops->enable_svm(NULL); in kvm_vm_ioctl_check_extension()
693 return -EINVAL; in kvm_arch_dev_ioctl()
743 hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS); in kvm_arch_vcpu_create()
744 vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup; in kvm_arch_vcpu_create()
745 vcpu->arch.dec_expires = get_tb(); in kvm_arch_vcpu_create()
748 mutex_init(&vcpu->arch.exit_timing_lock); in kvm_arch_vcpu_create()
758 vcpu->arch.waitp = &vcpu->wait; in kvm_arch_vcpu_create()
759 kvmppc_create_vcpu_debugfs(vcpu, vcpu->vcpu_id); in kvm_arch_vcpu_create()
774 hrtimer_cancel(&vcpu->arch.dec_timer); in kvm_arch_vcpu_destroy()
778 switch (vcpu->arch.irq_type) { in kvm_arch_vcpu_destroy()
780 kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu); in kvm_arch_vcpu_destroy()
810 * On non-booke this is associated with Altivec and in kvm_arch_vcpu_load()
813 mtspr(SPRN_VRSAVE, vcpu->arch.vrsave); in kvm_arch_vcpu_load()
822 vcpu->arch.vrsave = mfspr(SPRN_VRSAVE); in kvm_arch_vcpu_put()
834 return ((kvmppc_hv_ops && kvmppc_hv_ops->irq_bypass_add_producer) || in kvm_arch_has_irq_bypass()
835 (kvmppc_pr_ops && kvmppc_pr_ops->irq_bypass_add_producer)); in kvm_arch_has_irq_bypass()
843 struct kvm *kvm = irqfd->kvm; in kvm_arch_irq_bypass_add_producer()
845 if (kvm->arch.kvm_ops->irq_bypass_add_producer) in kvm_arch_irq_bypass_add_producer()
846 return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod); in kvm_arch_irq_bypass_add_producer()
856 struct kvm *kvm = irqfd->kvm; in kvm_arch_irq_bypass_del_producer()
858 if (kvm->arch.kvm_ops->irq_bypass_del_producer) in kvm_arch_irq_bypass_del_producer()
859 kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod); in kvm_arch_irq_bypass_del_producer()
868 return -1; in kvmppc_get_vsr_dword_offset()
873 offset = 1 - index; in kvmppc_get_vsr_dword_offset()
884 return -1; in kvmppc_get_vsr_word_offset()
889 offset = 3 - index; in kvmppc_get_vsr_word_offset()
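
The two offset helpers above map an architectural VSX element index onto a storage offset, mirroring the element order when host endianness runs the other way (1 - index for the two doublewords, 3 - index for the four words). A compilable sketch of that mapping; which branch corresponds to which endianness is my reading of the excerpt, so verify against the full source:

#include <stdio.h>

/* Map an element index to a storage offset inside a 128-bit VSR.
 * One endianness uses the index directly; the other mirrors it. */
static int vsr_dword_offset(int index, int big_endian)
{
	if (index != 0 && index != 1)
		return -1;   /* only two doublewords in a VSR */
	return big_endian ? index : 1 - index;
}

static int vsr_word_offset(int index, int big_endian)
{
	if (index < 0 || index > 3)
		return -1;   /* four words in a VSR */
	return big_endian ? index : 3 - index;
}

int main(void)
{
	printf("dword 0, mirrored order -> offset %d\n", vsr_dword_offset(0, 0));
	printf("word  1, mirrored order -> offset %d\n", vsr_word_offset(1, 0));
	return 0;
}
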
898 int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset); in kvmppc_set_vsr_dword()
899 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vsr_dword()
901 if (offset == -1) in kvmppc_set_vsr_dword()
905 val.vval = VCPU_VSX_VR(vcpu, index - 32); in kvmppc_set_vsr_dword()
907 VCPU_VSX_VR(vcpu, index - 32) = val.vval; in kvmppc_set_vsr_dword()
917 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vsr_dword_dump()
920 val.vval = VCPU_VSX_VR(vcpu, index - 32); in kvmppc_set_vsr_dword_dump()
923 VCPU_VSX_VR(vcpu, index - 32) = val.vval; in kvmppc_set_vsr_dword_dump()
934 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vsr_word_dump()
941 VCPU_VSX_VR(vcpu, index - 32) = val.vval; in kvmppc_set_vsr_word_dump()
954 int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset); in kvmppc_set_vsr_word()
955 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vsr_word()
958 if (offset == -1) in kvmppc_set_vsr_word()
962 val.vval = VCPU_VSX_VR(vcpu, index - 32); in kvmppc_set_vsr_word()
964 VCPU_VSX_VR(vcpu, index - 32) = val.vval; in kvmppc_set_vsr_word()
983 return -1; in kvmppc_get_vmx_offset_generic()
986 offset = elts - index - 1; in kvmppc_get_vmx_offset_generic()
1023 vcpu->arch.mmio_vmx_offset); in kvmppc_set_vmx_dword()
1024 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vmx_dword()
1026 if (offset == -1) in kvmppc_set_vmx_dword()
1039 vcpu->arch.mmio_vmx_offset); in kvmppc_set_vmx_word()
1040 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vmx_word()
1042 if (offset == -1) in kvmppc_set_vmx_word()
1055 vcpu->arch.mmio_vmx_offset); in kvmppc_set_vmx_hword()
1056 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vmx_hword()
1058 if (offset == -1) in kvmppc_set_vmx_hword()
1071 vcpu->arch.mmio_vmx_offset); in kvmppc_set_vmx_byte()
1072 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vmx_byte()
1074 if (offset == -1) in kvmppc_set_vmx_byte()
1115 struct kvm_run *run = vcpu->run; in kvmppc_complete_mmio_load()
1118 if (run->mmio.len > sizeof(gpr)) { in kvmppc_complete_mmio_load()
1119 printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len); in kvmppc_complete_mmio_load()
1123 if (!vcpu->arch.mmio_host_swabbed) { in kvmppc_complete_mmio_load()
1124 switch (run->mmio.len) { in kvmppc_complete_mmio_load()
1125 case 8: gpr = *(u64 *)run->mmio.data; break; in kvmppc_complete_mmio_load()
1126 case 4: gpr = *(u32 *)run->mmio.data; break; in kvmppc_complete_mmio_load()
1127 case 2: gpr = *(u16 *)run->mmio.data; break; in kvmppc_complete_mmio_load()
1128 case 1: gpr = *(u8 *)run->mmio.data; break; in kvmppc_complete_mmio_load()
1131 switch (run->mmio.len) { in kvmppc_complete_mmio_load()
1132 case 8: gpr = swab64(*(u64 *)run->mmio.data); break; in kvmppc_complete_mmio_load()
1133 case 4: gpr = swab32(*(u32 *)run->mmio.data); break; in kvmppc_complete_mmio_load()
1134 case 2: gpr = swab16(*(u16 *)run->mmio.data); break; in kvmppc_complete_mmio_load()
1135 case 1: gpr = *(u8 *)run->mmio.data; break; in kvmppc_complete_mmio_load()
1140 if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4)) in kvmppc_complete_mmio_load()
1143 if (vcpu->arch.mmio_sign_extend) { in kvmppc_complete_mmio_load()
1144 switch (run->mmio.len) { in kvmppc_complete_mmio_load()
1159 switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) { in kvmppc_complete_mmio_load()
1161 kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr); in kvmppc_complete_mmio_load()
1164 if (vcpu->kvm->arch.kvm_ops->giveup_ext) in kvmppc_complete_mmio_load()
1165 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP); in kvmppc_complete_mmio_load()
1167 VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr; in kvmppc_complete_mmio_load()
1171 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; in kvmppc_complete_mmio_load()
1174 VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr; in kvmppc_complete_mmio_load()
1175 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; in kvmppc_complete_mmio_load()
1180 if (vcpu->kvm->arch.kvm_ops->giveup_ext) in kvmppc_complete_mmio_load()
1181 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VSX); in kvmppc_complete_mmio_load()
1183 if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_DWORD) in kvmppc_complete_mmio_load()
1185 else if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_WORD) in kvmppc_complete_mmio_load()
1187 else if (vcpu->arch.mmio_copy_type == in kvmppc_complete_mmio_load()
1190 else if (vcpu->arch.mmio_copy_type == in kvmppc_complete_mmio_load()
1197 if (vcpu->kvm->arch.kvm_ops->giveup_ext) in kvmppc_complete_mmio_load()
1198 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VEC); in kvmppc_complete_mmio_load()
1200 if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_DWORD) in kvmppc_complete_mmio_load()
1202 else if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_WORD) in kvmppc_complete_mmio_load()
1204 else if (vcpu->arch.mmio_copy_type == in kvmppc_complete_mmio_load()
1207 else if (vcpu->arch.mmio_copy_type == in kvmppc_complete_mmio_load()
1216 kvm_vcpu_write_guest(vcpu, vcpu->arch.nested_io_gpr, &gpr, in kvmppc_complete_mmio_load()
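
kvmppc_complete_mmio_load() above reassembles the register value from run->mmio.data, byte-swapping when the access was performed opposite to host endianness. The same assembly step as a compilable sketch, using memcpy instead of the kernel's direct pointer casts to stay strict-aliasing clean:

#include <stdint.h>
#include <string.h>
#include <byteswap.h>

/* Rebuild the loaded value from raw MMIO bytes, swapping when the
 * device access used the opposite of host endianness. */
static uint64_t mmio_to_gpr(const void *data, unsigned len, int host_swabbed)
{
	uint64_t gpr = 0;

	switch (len) {
	case 8: { uint64_t v; memcpy(&v, data, 8); gpr = host_swabbed ? bswap_64(v) : v; } break;
	case 4: { uint32_t v; memcpy(&v, data, 4); gpr = host_swabbed ? bswap_32(v) : v; } break;
	case 2: { uint16_t v; memcpy(&v, data, 2); gpr = host_swabbed ? bswap_16(v) : v; } break;
	case 1: { uint8_t v;  memcpy(&v, data, 1); gpr = v; } break;
	}
	return gpr;   /* sign/FP-single extension would follow, as above */
}
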
1229 struct kvm_run *run = vcpu->run; in __kvmppc_handle_load()
1240 if (bytes > sizeof(run->mmio.data)) { in __kvmppc_handle_load()
1241 printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__, in __kvmppc_handle_load()
1242 run->mmio.len); in __kvmppc_handle_load()
1245 run->mmio.phys_addr = vcpu->arch.paddr_accessed; in __kvmppc_handle_load()
1246 run->mmio.len = bytes; in __kvmppc_handle_load()
1247 run->mmio.is_write = 0; in __kvmppc_handle_load()
1249 vcpu->arch.io_gpr = rt; in __kvmppc_handle_load()
1250 vcpu->arch.mmio_host_swabbed = host_swabbed; in __kvmppc_handle_load()
1251 vcpu->mmio_needed = 1; in __kvmppc_handle_load()
1252 vcpu->mmio_is_write = 0; in __kvmppc_handle_load()
1253 vcpu->arch.mmio_sign_extend = sign_extend; in __kvmppc_handle_load()
1255 idx = srcu_read_lock(&vcpu->kvm->srcu); in __kvmppc_handle_load()
1257 ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr, in __kvmppc_handle_load()
1258 bytes, &run->mmio.data); in __kvmppc_handle_load()
1260 srcu_read_unlock(&vcpu->kvm->srcu, idx); in __kvmppc_handle_load()
1264 vcpu->mmio_needed = 0; in __kvmppc_handle_load()
1295 if (vcpu->arch.mmio_vsx_copy_nums > 4) in kvmppc_handle_vsx_load()
1298 while (vcpu->arch.mmio_vsx_copy_nums) { in kvmppc_handle_vsx_load()
1305 vcpu->arch.paddr_accessed += vcpu->run->mmio.len; in kvmppc_handle_vsx_load()
1307 vcpu->arch.mmio_vsx_copy_nums--; in kvmppc_handle_vsx_load()
1308 vcpu->arch.mmio_vsx_offset++; in kvmppc_handle_vsx_load()
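
kvmppc_handle_vsx_load() drives one MMIO access per element, bumping the guest-physical address and the element offset on each pass. A skeleton of that repeat loop; emulate_one() is a hypothetical stub standing in for __kvmppc_handle_load(), and the state struct is illustrative:

/* Per-element repeat state; the real fields live in vcpu->arch. */
struct mmio_state {
	unsigned long paddr;   /* next guest-physical address */
	int copy_nums;         /* elements still to transfer */
	int offset;            /* element offset within the target VSR */
	unsigned len;          /* bytes per element */
};

/* Stub standing in for __kvmppc_handle_load(); a real version fills
 * run->mmio and may need to exit to userspace to complete. */
static int emulate_one(struct mmio_state *s)
{
	(void)s;
	return 0;
}

static int vsx_repeat_load(struct mmio_state *s)
{
	if (s->copy_nums > 4)
		return -1;              /* mirrors the sanity cap above */

	while (s->copy_nums) {
		int r = emulate_one(s);

		if (r)                  /* must finish in userspace first */
			return r;
		s->paddr += s->len;     /* advance to the next element */
		s->copy_nums--;
		s->offset++;
	}
	return 0;
}
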
1317 struct kvm_run *run = vcpu->run; in kvmppc_handle_store()
1318 void *data = run->mmio.data; in kvmppc_handle_store()
1329 if (bytes > sizeof(run->mmio.data)) { in kvmppc_handle_store()
1330 printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__, in kvmppc_handle_store()
1331 run->mmio.len); in kvmppc_handle_store()
1334 run->mmio.phys_addr = vcpu->arch.paddr_accessed; in kvmppc_handle_store()
1335 run->mmio.len = bytes; in kvmppc_handle_store()
1336 run->mmio.is_write = 1; in kvmppc_handle_store()
1337 vcpu->mmio_needed = 1; in kvmppc_handle_store()
1338 vcpu->mmio_is_write = 1; in kvmppc_handle_store()
1340 if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4)) in kvmppc_handle_store()
1360 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvmppc_handle_store()
1362 ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr, in kvmppc_handle_store()
1363 bytes, &run->mmio.data); in kvmppc_handle_store()
1365 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvmppc_handle_store()
1368 vcpu->mmio_needed = 0; in kvmppc_handle_store()
1382 int copy_type = vcpu->arch.mmio_copy_type; in kvmppc_get_vsr_data()
1388 kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset); in kvmppc_get_vsr_data()
1390 if (vsx_offset == -1) { in kvmppc_get_vsr_data()
1391 result = -1; in kvmppc_get_vsr_data()
1398 reg.vval = VCPU_VSX_VR(vcpu, rs - 32); in kvmppc_get_vsr_data()
1405 kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset); in kvmppc_get_vsr_data()
1407 if (vsx_offset == -1) { in kvmppc_get_vsr_data()
1408 result = -1; in kvmppc_get_vsr_data()
1418 reg.vval = VCPU_VSX_VR(vcpu, rs - 32); in kvmppc_get_vsr_data()
1424 result = -1; in kvmppc_get_vsr_data()
1437 vcpu->arch.io_gpr = rs; in kvmppc_handle_vsx_store()
1440 if (vcpu->arch.mmio_vsx_copy_nums > 4) in kvmppc_handle_vsx_store()
1443 while (vcpu->arch.mmio_vsx_copy_nums) { in kvmppc_handle_vsx_store()
1444 if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1) in kvmppc_handle_vsx_store()
1453 vcpu->arch.paddr_accessed += vcpu->run->mmio.len; in kvmppc_handle_vsx_store()
1455 vcpu->arch.mmio_vsx_copy_nums--; in kvmppc_handle_vsx_store()
1456 vcpu->arch.mmio_vsx_offset++; in kvmppc_handle_vsx_store()
1464 struct kvm_run *run = vcpu->run; in kvmppc_emulate_mmio_vsx_loadstore()
1468 vcpu->arch.paddr_accessed += run->mmio.len; in kvmppc_emulate_mmio_vsx_loadstore()
1470 if (!vcpu->mmio_is_write) { in kvmppc_emulate_mmio_vsx_loadstore()
1471 emulated = kvmppc_handle_vsx_load(vcpu, vcpu->arch.io_gpr, in kvmppc_emulate_mmio_vsx_loadstore()
1472 run->mmio.len, 1, vcpu->arch.mmio_sign_extend); in kvmppc_emulate_mmio_vsx_loadstore()
1475 vcpu->arch.io_gpr, run->mmio.len, 1); in kvmppc_emulate_mmio_vsx_loadstore()
1480 run->exit_reason = KVM_EXIT_MMIO; in kvmppc_emulate_mmio_vsx_loadstore()
1484 pr_info("KVM: MMIO emulation failed (VSX repeat)\n"); in kvmppc_emulate_mmio_vsx_loadstore()
1485 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvmppc_emulate_mmio_vsx_loadstore()
1486 run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; in kvmppc_emulate_mmio_vsx_loadstore()
1503 if (vcpu->arch.mmio_vmx_copy_nums > 2) in kvmppc_handle_vmx_load()
1506 while (vcpu->arch.mmio_vmx_copy_nums) { in kvmppc_handle_vmx_load()
1513 vcpu->arch.paddr_accessed += vcpu->run->mmio.len; in kvmppc_handle_vmx_load()
1514 vcpu->arch.mmio_vmx_copy_nums--; in kvmppc_handle_vmx_load()
1515 vcpu->arch.mmio_vmx_offset++; in kvmppc_handle_vmx_load()
1528 kvmppc_get_vmx_dword_offset(vcpu, vcpu->arch.mmio_vmx_offset); in kvmppc_get_vmx_dword()
1530 if (vmx_offset == -1) in kvmppc_get_vmx_dword()
1531 return -1; in kvmppc_get_vmx_dword()
1546 kvmppc_get_vmx_word_offset(vcpu, vcpu->arch.mmio_vmx_offset); in kvmppc_get_vmx_word()
1548 if (vmx_offset == -1) in kvmppc_get_vmx_word()
1549 return -1; in kvmppc_get_vmx_word()
1564 kvmppc_get_vmx_hword_offset(vcpu, vcpu->arch.mmio_vmx_offset); in kvmppc_get_vmx_hword()
1566 if (vmx_offset == -1) in kvmppc_get_vmx_hword()
1567 return -1; in kvmppc_get_vmx_hword()
1582 kvmppc_get_vmx_byte_offset(vcpu, vcpu->arch.mmio_vmx_offset); in kvmppc_get_vmx_byte()
1584 if (vmx_offset == -1) in kvmppc_get_vmx_byte()
1585 return -1; in kvmppc_get_vmx_byte()
1600 if (vcpu->arch.mmio_vmx_copy_nums > 2) in kvmppc_handle_vmx_store()
1603 vcpu->arch.io_gpr = rs; in kvmppc_handle_vmx_store()
1605 while (vcpu->arch.mmio_vmx_copy_nums) { in kvmppc_handle_vmx_store()
1606 switch (vcpu->arch.mmio_copy_type) { in kvmppc_handle_vmx_store()
1608 if (kvmppc_get_vmx_dword(vcpu, index, &val) == -1) in kvmppc_handle_vmx_store()
1613 if (kvmppc_get_vmx_word(vcpu, index, &val) == -1) in kvmppc_handle_vmx_store()
1617 if (kvmppc_get_vmx_hword(vcpu, index, &val) == -1) in kvmppc_handle_vmx_store()
1621 if (kvmppc_get_vmx_byte(vcpu, index, &val) == -1) in kvmppc_handle_vmx_store()
1633 vcpu->arch.paddr_accessed += vcpu->run->mmio.len; in kvmppc_handle_vmx_store()
1634 vcpu->arch.mmio_vmx_copy_nums--; in kvmppc_handle_vmx_store()
1635 vcpu->arch.mmio_vmx_offset++; in kvmppc_handle_vmx_store()
1643 struct kvm_run *run = vcpu->run; in kvmppc_emulate_mmio_vmx_loadstore()
1647 vcpu->arch.paddr_accessed += run->mmio.len; in kvmppc_emulate_mmio_vmx_loadstore()
1649 if (!vcpu->mmio_is_write) { in kvmppc_emulate_mmio_vmx_loadstore()
1651 vcpu->arch.io_gpr, run->mmio.len, 1); in kvmppc_emulate_mmio_vmx_loadstore()
1654 vcpu->arch.io_gpr, run->mmio.len, 1); in kvmppc_emulate_mmio_vmx_loadstore()
1659 run->exit_reason = KVM_EXIT_MMIO; in kvmppc_emulate_mmio_vmx_loadstore()
1663 pr_info("KVM: MMIO emulation failed (VMX repeat)\n"); in kvmppc_emulate_mmio_vmx_loadstore()
1664 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvmppc_emulate_mmio_vmx_loadstore()
1665 run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; in kvmppc_emulate_mmio_vmx_loadstore()
1682 size = one_reg_size(reg->id); in kvm_vcpu_ioctl_get_one_reg()
1684 return -EINVAL; in kvm_vcpu_ioctl_get_one_reg()
1686 r = kvmppc_get_one_reg(vcpu, reg->id, &val); in kvm_vcpu_ioctl_get_one_reg()
1687 if (r == -EINVAL) { in kvm_vcpu_ioctl_get_one_reg()
1689 switch (reg->id) { in kvm_vcpu_ioctl_get_one_reg()
1693 r = -ENXIO; in kvm_vcpu_ioctl_get_one_reg()
1696 val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0]; in kvm_vcpu_ioctl_get_one_reg()
1700 r = -ENXIO; in kvm_vcpu_ioctl_get_one_reg()
1703 val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]); in kvm_vcpu_ioctl_get_one_reg()
1706 val = get_reg_val(reg->id, vcpu->arch.vrsave); in kvm_vcpu_ioctl_get_one_reg()
1710 r = -EINVAL; in kvm_vcpu_ioctl_get_one_reg()
1718 if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size)) in kvm_vcpu_ioctl_get_one_reg()
1719 r = -EFAULT; in kvm_vcpu_ioctl_get_one_reg()
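
kvm_vcpu_ioctl_get_one_reg() sizes the user copy from the register id itself via one_reg_size(). In the KVM uapi the size is encoded in the top bits of the id; a sketch of the decode, with the constants reproduced from <linux/kvm.h> as I recall them (treat them as assumptions and check your headers):

#include <stdio.h>
#include <stdint.h>

/* ONE_REG size field, values quoted from memory -- verify locally. */
#define KVM_REG_SIZE_MASK  0x00f0000000000000ULL
#define KVM_REG_SIZE_SHIFT 52
#define KVM_REG_SIZE_U64   0x0030000000000000ULL

/* Number of bytes a given register id transfers through the ioctl. */
static size_t one_reg_size(uint64_t id)
{
	return 1UL << ((id & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT);
}

int main(void)
{
	printf("a u64 register transfers %zu bytes\n",
	       one_reg_size(KVM_REG_SIZE_U64));
	return 0;
}
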
1730 size = one_reg_size(reg->id); in kvm_vcpu_ioctl_set_one_reg()
1732 return -EINVAL; in kvm_vcpu_ioctl_set_one_reg()
1734 if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size)) in kvm_vcpu_ioctl_set_one_reg()
1735 return -EFAULT; in kvm_vcpu_ioctl_set_one_reg()
1737 r = kvmppc_set_one_reg(vcpu, reg->id, &val); in kvm_vcpu_ioctl_set_one_reg()
1738 if (r == -EINVAL) { in kvm_vcpu_ioctl_set_one_reg()
1740 switch (reg->id) { in kvm_vcpu_ioctl_set_one_reg()
1744 r = -ENXIO; in kvm_vcpu_ioctl_set_one_reg()
1747 vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval; in kvm_vcpu_ioctl_set_one_reg()
1751 r = -ENXIO; in kvm_vcpu_ioctl_set_one_reg()
1754 vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val); in kvm_vcpu_ioctl_set_one_reg()
1758 r = -ENXIO; in kvm_vcpu_ioctl_set_one_reg()
1761 vcpu->arch.vrsave = set_reg_val(reg->id, val); in kvm_vcpu_ioctl_set_one_reg()
1765 r = -EINVAL; in kvm_vcpu_ioctl_set_one_reg()
1775 struct kvm_run *run = vcpu->run; in kvm_arch_vcpu_ioctl_run()
1780 if (vcpu->mmio_needed) { in kvm_arch_vcpu_ioctl_run()
1781 vcpu->mmio_needed = 0; in kvm_arch_vcpu_ioctl_run()
1782 if (!vcpu->mmio_is_write) in kvm_arch_vcpu_ioctl_run()
1785 if (vcpu->arch.mmio_vsx_copy_nums > 0) { in kvm_arch_vcpu_ioctl_run()
1786 vcpu->arch.mmio_vsx_copy_nums--; in kvm_arch_vcpu_ioctl_run()
1787 vcpu->arch.mmio_vsx_offset++; in kvm_arch_vcpu_ioctl_run()
1790 if (vcpu->arch.mmio_vsx_copy_nums > 0) { in kvm_arch_vcpu_ioctl_run()
1793 vcpu->mmio_needed = 1; in kvm_arch_vcpu_ioctl_run()
1799 if (vcpu->arch.mmio_vmx_copy_nums > 0) { in kvm_arch_vcpu_ioctl_run()
1800 vcpu->arch.mmio_vmx_copy_nums--; in kvm_arch_vcpu_ioctl_run()
1801 vcpu->arch.mmio_vmx_offset++; in kvm_arch_vcpu_ioctl_run()
1804 if (vcpu->arch.mmio_vmx_copy_nums > 0) { in kvm_arch_vcpu_ioctl_run()
1807 vcpu->mmio_needed = 1; in kvm_arch_vcpu_ioctl_run()
1812 } else if (vcpu->arch.osi_needed) { in kvm_arch_vcpu_ioctl_run()
1813 u64 *gprs = run->osi.gprs; in kvm_arch_vcpu_ioctl_run()
1818 vcpu->arch.osi_needed = 0; in kvm_arch_vcpu_ioctl_run()
1819 } else if (vcpu->arch.hcall_needed) { in kvm_arch_vcpu_ioctl_run()
1822 kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret); in kvm_arch_vcpu_ioctl_run()
1824 kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]); in kvm_arch_vcpu_ioctl_run()
1825 vcpu->arch.hcall_needed = 0; in kvm_arch_vcpu_ioctl_run()
1827 } else if (vcpu->arch.epr_needed) { in kvm_arch_vcpu_ioctl_run()
1828 kvmppc_set_epr(vcpu, run->epr.epr); in kvm_arch_vcpu_ioctl_run()
1829 vcpu->arch.epr_needed = 0; in kvm_arch_vcpu_ioctl_run()
1835 if (run->immediate_exit) in kvm_arch_vcpu_ioctl_run()
1836 r = -EINTR; in kvm_arch_vcpu_ioctl_run()
1851 if (irq->irq == KVM_INTERRUPT_UNSET) { in kvm_vcpu_ioctl_interrupt()
1868 if (cap->flags) in kvm_vcpu_ioctl_enable_cap()
1869 return -EINVAL; in kvm_vcpu_ioctl_enable_cap()
1871 switch (cap->cap) { in kvm_vcpu_ioctl_enable_cap()
1874 vcpu->arch.osi_enabled = true; in kvm_vcpu_ioctl_enable_cap()
1878 vcpu->arch.papr_enabled = true; in kvm_vcpu_ioctl_enable_cap()
1882 if (cap->args[0]) in kvm_vcpu_ioctl_enable_cap()
1883 vcpu->arch.epr_flags |= KVMPPC_EPR_USER; in kvm_vcpu_ioctl_enable_cap()
1885 vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER; in kvm_vcpu_ioctl_enable_cap()
1890 vcpu->arch.watchdog_enabled = true; in kvm_vcpu_ioctl_enable_cap()
1895 struct kvm_config_tlb cfg; in kvm_vcpu_ioctl_enable_cap() local
1896 void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0]; in kvm_vcpu_ioctl_enable_cap()
1898 r = -EFAULT; in kvm_vcpu_ioctl_enable_cap()
1899 if (copy_from_user(&cfg, user_ptr, sizeof(cfg))) in kvm_vcpu_ioctl_enable_cap()
1902 r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg); in kvm_vcpu_ioctl_enable_cap()
1911 r = -EBADF; in kvm_vcpu_ioctl_enable_cap()
1912 f = fdget(cap->args[0]); in kvm_vcpu_ioctl_enable_cap()
1916 r = -EPERM; in kvm_vcpu_ioctl_enable_cap()
1919 r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]); in kvm_vcpu_ioctl_enable_cap()
1930 r = -EBADF; in kvm_vcpu_ioctl_enable_cap()
1931 f = fdget(cap->args[0]); in kvm_vcpu_ioctl_enable_cap()
1935 r = -EPERM; in kvm_vcpu_ioctl_enable_cap()
1939 r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]); in kvm_vcpu_ioctl_enable_cap()
1941 r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]); in kvm_vcpu_ioctl_enable_cap()
1953 r = -EBADF; in kvm_vcpu_ioctl_enable_cap()
1954 f = fdget(cap->args[0]); in kvm_vcpu_ioctl_enable_cap()
1958 r = -ENXIO; in kvm_vcpu_ioctl_enable_cap()
1962 r = -EPERM; in kvm_vcpu_ioctl_enable_cap()
1966 cap->args[1]); in kvm_vcpu_ioctl_enable_cap()
1974 r = -EINVAL; in kvm_vcpu_ioctl_enable_cap()
1975 if (!is_kvmppc_hv_enabled(vcpu->kvm)) in kvm_vcpu_ioctl_enable_cap()
1978 vcpu->kvm->arch.fwnmi_enabled = true; in kvm_vcpu_ioctl_enable_cap()
1982 r = -EINVAL; in kvm_vcpu_ioctl_enable_cap()
1995 if (kvm->arch.mpic) in kvm_arch_intc_initialized()
1999 if (kvm->arch.xics || kvm->arch.xive) in kvm_arch_intc_initialized()
2008 return -EINVAL; in kvm_arch_vcpu_ioctl_get_mpstate()
2014 return -EINVAL; in kvm_arch_vcpu_ioctl_set_mpstate()
2020 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_async_ioctl()
2026 return -EFAULT; in kvm_arch_vcpu_async_ioctl()
2029 return -ENOIOCTLCMD; in kvm_arch_vcpu_async_ioctl()
2035 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_ioctl()
2043 r = -EFAULT; in kvm_arch_vcpu_ioctl()
2056 r = -EFAULT; in kvm_arch_vcpu_ioctl()
2069 r = -EFAULT; in kvm_arch_vcpu_ioctl()
2079 r = -EINVAL; in kvm_arch_vcpu_ioctl()
2096 pvinfo->hcall[0] = cpu_to_be32(inst_sc1); in kvm_vm_ioctl_get_pvinfo()
2097 pvinfo->hcall[1] = cpu_to_be32(inst_nop); in kvm_vm_ioctl_get_pvinfo()
2098 pvinfo->hcall[2] = cpu_to_be32(inst_nop); in kvm_vm_ioctl_get_pvinfo()
2099 pvinfo->hcall[3] = cpu_to_be32(inst_nop); in kvm_vm_ioctl_get_pvinfo()
2115 pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask)); in kvm_vm_ioctl_get_pvinfo()
2116 pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask)); in kvm_vm_ioctl_get_pvinfo()
2117 pvinfo->hcall[2] = cpu_to_be32(inst_sc); in kvm_vm_ioctl_get_pvinfo()
2118 pvinfo->hcall[3] = cpu_to_be32(inst_nop); in kvm_vm_ioctl_get_pvinfo()
2121 pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE; in kvm_vm_ioctl_get_pvinfo()
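
kvm_vm_ioctl_get_pvinfo() above patches a four-instruction hypercall template: lis/ori load the magic value into r0, then sc traps into KVM. A runnable sketch of the encoding; the opcode and magic constants are quoted from memory, and note the kernel additionally stores each word big-endian via cpu_to_be32():

#include <stdio.h>
#include <stdint.h>

/* PPC opcodes used by the template (values as I recall them). */
#define INST_LIS      0x3c000000u   /* lis r0, imm (addis r0,0,imm) */
#define INST_ORI      0x60000000u   /* ori r0, r0, imm              */
#define INST_SC       0x44000002u   /* sc                           */
#define INST_NOP      0x60000000u   /* ori 0,0,0                    */
#define INST_IMM_MASK 0xffffu
#define SC_MAGIC_R0   0x4b564d21u   /* "KVM!" magic, from memory    */

int main(void)
{
	uint32_t hcall[4] = {
		INST_LIS | ((SC_MAGIC_R0 >> 16) & INST_IMM_MASK), /* high half */
		INST_ORI | (SC_MAGIC_R0 & INST_IMM_MASK),         /* low half  */
		INST_SC,                                          /* trap      */
		INST_NOP,
	};

	for (int i = 0; i < 4; i++)
		printf("hcall[%d] = 0x%08x\n", i, hcall[i]);
	return 0;
}
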
2130 return -ENXIO; in kvm_vm_ioctl_irq_line()
2132 irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, in kvm_vm_ioctl_irq_line()
2133 irq_event->irq, irq_event->level, in kvm_vm_ioctl_irq_line()
2144 if (cap->flags) in kvm_vm_ioctl_enable_cap()
2145 return -EINVAL; in kvm_vm_ioctl_enable_cap()
2147 switch (cap->cap) { in kvm_vm_ioctl_enable_cap()
2150 unsigned long hcall = cap->args[0]; in kvm_vm_ioctl_enable_cap()
2152 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
2154 cap->args[1] > 1) in kvm_vm_ioctl_enable_cap()
2158 if (cap->args[1]) in kvm_vm_ioctl_enable_cap()
2159 set_bit(hcall / 4, kvm->arch.enabled_hcalls); in kvm_vm_ioctl_enable_cap()
2161 clear_bit(hcall / 4, kvm->arch.enabled_hcalls); in kvm_vm_ioctl_enable_cap()
2166 unsigned long mode = cap->args[0]; in kvm_vm_ioctl_enable_cap()
2167 unsigned long flags = cap->args[1]; in kvm_vm_ioctl_enable_cap()
2169 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
2170 if (kvm->arch.kvm_ops->set_smt_mode) in kvm_vm_ioctl_enable_cap()
2171 r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags); in kvm_vm_ioctl_enable_cap()
2176 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
2178 !kvm->arch.kvm_ops->enable_nested) in kvm_vm_ioctl_enable_cap()
2180 r = kvm->arch.kvm_ops->enable_nested(kvm); in kvm_vm_ioctl_enable_cap()
2185 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
2186 if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_svm) in kvm_vm_ioctl_enable_cap()
2188 r = kvm->arch.kvm_ops->enable_svm(kvm); in kvm_vm_ioctl_enable_cap()
2192 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
2215 return -ENOTTY; in pseries_get_cpu_char()
2219 cp->character = c.character; in pseries_get_cpu_char()
2220 cp->behaviour = c.behaviour; in pseries_get_cpu_char()
2221 cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 | in pseries_get_cpu_char()
2230 cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY | in pseries_get_cpu_char()
2240 return -ENOTTY; in pseries_get_cpu_char()
2265 if (r != -ENOTTY) in kvmppc_get_cpu_char()
2270 fw_features = of_get_child_by_name(np, "fw-features"); in kvmppc_get_cpu_char()
2275 "inst-spec-barrier-ori31,31,0")) in kvmppc_get_cpu_char()
2276 cp->character |= KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31; in kvmppc_get_cpu_char()
2278 "fw-bcctrl-serialized")) in kvmppc_get_cpu_char()
2279 cp->character |= KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED; in kvmppc_get_cpu_char()
2281 "inst-l1d-flush-ori30,30,0")) in kvmppc_get_cpu_char()
2282 cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30; in kvmppc_get_cpu_char()
2284 "inst-l1d-flush-trig2")) in kvmppc_get_cpu_char()
2285 cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2; in kvmppc_get_cpu_char()
2287 "fw-l1d-thread-split")) in kvmppc_get_cpu_char()
2288 cp->character |= KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV; in kvmppc_get_cpu_char()
2290 "fw-count-cache-disabled")) in kvmppc_get_cpu_char()
2291 cp->character |= KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS; in kvmppc_get_cpu_char()
2293 "fw-count-cache-flush-bcctr2,0,0")) in kvmppc_get_cpu_char()
2294 cp->character |= KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST; in kvmppc_get_cpu_char()
2295 cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 | in kvmppc_get_cpu_char()
2304 "speculation-policy-favor-security")) in kvmppc_get_cpu_char()
2305 cp->behaviour |= KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY; in kvmppc_get_cpu_char()
2307 "needs-l1d-flush-msr-pr-0-to-1")) in kvmppc_get_cpu_char()
2308 cp->behaviour |= KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR; in kvmppc_get_cpu_char()
2310 "needs-spec-barrier-for-bound-checks")) in kvmppc_get_cpu_char()
2311 cp->behaviour |= KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR; in kvmppc_get_cpu_char()
2313 "needs-count-cache-flush-on-context-switch")) in kvmppc_get_cpu_char()
2314 cp->behaviour |= KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE; in kvmppc_get_cpu_char()
2315 cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY | in kvmppc_get_cpu_char()
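
kvmppc_get_cpu_char() probes the fw-features device-tree node one property at a time and ORs in the matching KVM_PPC_CPU_CHAR_* bit. The same mapping can be written table-driven; in this sketch have_fw_feat() is hypothetical and the flag bit values are placeholders, only the property strings come from the listing above:

#include <stddef.h>
#include <stdint.h>

/* Placeholder bits; the real KVM_PPC_CPU_CHAR_* constants live in
 * <linux/kvm.h>. */
#define CHAR_SPEC_BAR_ORI31    (1ull << 0)
#define CHAR_BCCTRL_SERIALISED (1ull << 1)
#define CHAR_L1D_FLUSH_ORI30   (1ull << 2)

struct feat_map {
	const char *prop;   /* property name under fw-features */
	uint64_t flag;
};

static const struct feat_map char_map[] = {
	{ "inst-spec-barrier-ori31,31,0", CHAR_SPEC_BAR_ORI31 },
	{ "fw-bcctrl-serialized",         CHAR_BCCTRL_SERIALISED },
	{ "inst-l1d-flush-ori30,30,0",    CHAR_L1D_FLUSH_ORI30 },
};

/* Hypothetical helper: nonzero if the named fw-features property is
 * present and enabled.  The kernel walks the device tree here. */
static int have_fw_feat(const char *prop)
{
	(void)prop;
	return 0;
}

static uint64_t scan_cpu_char(void)
{
	uint64_t character = 0;

	for (size_t i = 0; i < sizeof(char_map) / sizeof(char_map[0]); i++)
		if (have_fw_feat(char_map[i].prop))
			character |= char_map[i].flag;
	return character;
}
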
2330 struct kvm *kvm __maybe_unused = filp->private_data; in kvm_arch_vm_ioctl()
2340 r = -EFAULT; in kvm_arch_vm_ioctl()
2350 r = -EFAULT; in kvm_arch_vm_ioctl()
2354 r = -EINVAL; in kvm_arch_vm_ioctl()
2364 r = -EFAULT; in kvm_arch_vm_ioctl()
2381 struct kvm *kvm = filp->private_data; in kvm_arch_vm_ioctl()
2384 r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info); in kvm_arch_vm_ioctl()
2386 r = -EFAULT; in kvm_arch_vm_ioctl()
2390 struct kvm *kvm = filp->private_data; in kvm_arch_vm_ioctl()
2396 struct kvm *kvm = filp->private_data; in kvm_arch_vm_ioctl()
2397 struct kvm_ppc_mmuv3_cfg cfg; in kvm_arch_vm_ioctl() local
2399 r = -EINVAL; in kvm_arch_vm_ioctl()
2400 if (!kvm->arch.kvm_ops->configure_mmu) in kvm_arch_vm_ioctl()
2402 r = -EFAULT; in kvm_arch_vm_ioctl()
2403 if (copy_from_user(&cfg, argp, sizeof(cfg))) in kvm_arch_vm_ioctl()
2405 r = kvm->arch.kvm_ops->configure_mmu(kvm, &cfg); in kvm_arch_vm_ioctl()
2409 struct kvm *kvm = filp->private_data; in kvm_arch_vm_ioctl()
2412 r = -EINVAL; in kvm_arch_vm_ioctl()
2413 if (!kvm->arch.kvm_ops->get_rmmu_info) in kvm_arch_vm_ioctl()
2415 r = kvm->arch.kvm_ops->get_rmmu_info(kvm, &info); in kvm_arch_vm_ioctl()
2417 r = -EFAULT; in kvm_arch_vm_ioctl()
2425 r = -EFAULT; in kvm_arch_vm_ioctl()
2429 struct kvm *kvm = filp->private_data; in kvm_arch_vm_ioctl()
2432 if (!kvm->arch.kvm_ops->svm_off) in kvm_arch_vm_ioctl()
2435 r = kvm->arch.kvm_ops->svm_off(kvm); in kvm_arch_vm_ioctl()
2439 struct kvm *kvm = filp->private_data; in kvm_arch_vm_ioctl()
2440 r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg); in kvm_arch_vm_ioctl()
2444 r = -ENOTTY; in kvm_arch_vm_ioctl()
2462 return -ENOMEM; in kvmppc_alloc_lpid()