Lines matching full:arch
51 return !!(v->arch.pending_exceptions) || kvm_request_pending(v); in kvm_arch_vcpu_runnable()
143 struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared; in kvmppc_swab_shared()
185 if (vcpu->arch.intr_msr & MSR_LE) in kvmppc_kvm_pv()
187 if (shared_big_endian != vcpu->arch.shared_big_endian) in kvmppc_kvm_pv()
189 vcpu->arch.shared_big_endian = shared_big_endian; in kvmppc_kvm_pv()
198 vcpu->arch.disable_kernel_nx = true; in kvmppc_kvm_pv()
202 vcpu->arch.magic_page_pa = param1 & ~0xfffULL; in kvmppc_kvm_pv()
203 vcpu->arch.magic_page_ea = param2 & ~0xfffULL; in kvmppc_kvm_pv()
210 if ((vcpu->arch.magic_page_pa & 0xf000) != in kvmppc_kvm_pv()
211 ((ulong)vcpu->arch.shared & 0xf000)) { in kvmppc_kvm_pv()
212 void *old_shared = vcpu->arch.shared; in kvmppc_kvm_pv()
213 ulong shared = (ulong)vcpu->arch.shared; in kvmppc_kvm_pv()
217 shared |= vcpu->arch.magic_page_pa & 0xf000; in kvmppc_kvm_pv()
220 vcpu->arch.shared = new_shared; in kvmppc_kvm_pv()
258 if (!vcpu->arch.pvr) in kvmppc_sanity_check()
262 if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled) in kvmppc_sanity_check()
266 if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm)) in kvmppc_sanity_check()
277 vcpu->arch.sane = r; in kvmppc_sanity_check()
327 ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK; in kvmppc_st()
333 if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->store_to_eaddr) in kvmppc_st()
334 r = vcpu->kvm->arch.kvm_ops->store_to_eaddr(vcpu, eaddr, ptr, in kvmppc_st()
354 void *magic = vcpu->arch.shared; in kvmppc_st()
370 ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK; in kvmppc_ld()
376 if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->load_from_eaddr) in kvmppc_ld()
377 rc = vcpu->kvm->arch.kvm_ops->load_from_eaddr(vcpu, eaddr, ptr, in kvmppc_ld()
400 void *magic = vcpu->arch.shared; in kvmppc_ld()
458 kvm->arch.kvm_ops = kvm_ops; in kvm_arch_init_vm()
493 module_put(kvm->arch.kvm_ops->owner); in kvm_arch_destroy_vm()
583 if (kvm->arch.emul_smt_mode > 1) in kvm_vm_ioctl_check_extension()
584 r = kvm->arch.emul_smt_mode; in kvm_vm_ioctl_check_extension()
586 r = kvm->arch.smt_mode; in kvm_vm_ioctl_check_extension()
733 vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer); in kvmppc_decrementer_wakeup()
743 hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS); in kvm_arch_vcpu_create()
744 vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup; in kvm_arch_vcpu_create()
745 vcpu->arch.dec_expires = get_tb(); in kvm_arch_vcpu_create()
748 mutex_init(&vcpu->arch.exit_timing_lock); in kvm_arch_vcpu_create()
758 vcpu->arch.waitp = &vcpu->wait; in kvm_arch_vcpu_create()
774 hrtimer_cancel(&vcpu->arch.dec_timer); in kvm_arch_vcpu_destroy()
778 switch (vcpu->arch.irq_type) { in kvm_arch_vcpu_destroy()
780 kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu); in kvm_arch_vcpu_destroy()
813 mtspr(SPRN_VRSAVE, vcpu->arch.vrsave); in kvm_arch_vcpu_load()
822 vcpu->arch.vrsave = mfspr(SPRN_VRSAVE); in kvm_arch_vcpu_put()
845 if (kvm->arch.kvm_ops->irq_bypass_add_producer) in kvm_arch_irq_bypass_add_producer()
846 return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod); in kvm_arch_irq_bypass_add_producer()
858 if (kvm->arch.kvm_ops->irq_bypass_del_producer) in kvm_arch_irq_bypass_del_producer()
859 kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod); in kvm_arch_irq_bypass_del_producer()
898 int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset); in kvmppc_set_vsr_dword()
899 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vsr_dword()
917 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vsr_dword_dump()
934 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vsr_word_dump()
954 int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset); in kvmppc_set_vsr_word()
955 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vsr_word()
1023 vcpu->arch.mmio_vmx_offset); in kvmppc_set_vmx_dword()
1024 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vmx_dword()
1039 vcpu->arch.mmio_vmx_offset); in kvmppc_set_vmx_word()
1040 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vmx_word()
1055 vcpu->arch.mmio_vmx_offset); in kvmppc_set_vmx_hword()
1056 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vmx_hword()
1071 vcpu->arch.mmio_vmx_offset); in kvmppc_set_vmx_byte()
1072 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vmx_byte()
1123 if (!vcpu->arch.mmio_host_swabbed) { in kvmppc_complete_mmio_load()
1140 if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4)) in kvmppc_complete_mmio_load()
1143 if (vcpu->arch.mmio_sign_extend) { in kvmppc_complete_mmio_load()
1159 switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) { in kvmppc_complete_mmio_load()
1161 kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr); in kvmppc_complete_mmio_load()
1164 if (vcpu->kvm->arch.kvm_ops->giveup_ext) in kvmppc_complete_mmio_load()
1165 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP); in kvmppc_complete_mmio_load()
1167 VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr; in kvmppc_complete_mmio_load()
1171 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; in kvmppc_complete_mmio_load()
1174 VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr; in kvmppc_complete_mmio_load()
1175 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; in kvmppc_complete_mmio_load()
1180 if (vcpu->kvm->arch.kvm_ops->giveup_ext) in kvmppc_complete_mmio_load()
1181 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VSX); in kvmppc_complete_mmio_load()
1183 if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_DWORD) in kvmppc_complete_mmio_load()
1185 else if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_WORD) in kvmppc_complete_mmio_load()
1187 else if (vcpu->arch.mmio_copy_type == in kvmppc_complete_mmio_load()
1190 else if (vcpu->arch.mmio_copy_type == in kvmppc_complete_mmio_load()
1197 if (vcpu->kvm->arch.kvm_ops->giveup_ext) in kvmppc_complete_mmio_load()
1198 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VEC); in kvmppc_complete_mmio_load()
1200 if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_DWORD) in kvmppc_complete_mmio_load()
1202 else if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_WORD) in kvmppc_complete_mmio_load()
1204 else if (vcpu->arch.mmio_copy_type == in kvmppc_complete_mmio_load()
1207 else if (vcpu->arch.mmio_copy_type == in kvmppc_complete_mmio_load()
1216 kvm_vcpu_write_guest(vcpu, vcpu->arch.nested_io_gpr, &gpr, in kvmppc_complete_mmio_load()
1245 run->mmio.phys_addr = vcpu->arch.paddr_accessed; in __kvmppc_handle_load()
1249 vcpu->arch.io_gpr = rt; in __kvmppc_handle_load()
1250 vcpu->arch.mmio_host_swabbed = host_swabbed; in __kvmppc_handle_load()
1253 vcpu->arch.mmio_sign_extend = sign_extend; in __kvmppc_handle_load()
1295 if (vcpu->arch.mmio_vsx_copy_nums > 4) in kvmppc_handle_vsx_load()
1298 while (vcpu->arch.mmio_vsx_copy_nums) { in kvmppc_handle_vsx_load()
1305 vcpu->arch.paddr_accessed += vcpu->run->mmio.len; in kvmppc_handle_vsx_load()
1307 vcpu->arch.mmio_vsx_copy_nums--; in kvmppc_handle_vsx_load()
1308 vcpu->arch.mmio_vsx_offset++; in kvmppc_handle_vsx_load()
1334 run->mmio.phys_addr = vcpu->arch.paddr_accessed; in kvmppc_handle_store()
1340 if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4)) in kvmppc_handle_store()
1382 int copy_type = vcpu->arch.mmio_copy_type; in kvmppc_get_vsr_data()
1388 kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset); in kvmppc_get_vsr_data()
1405 kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset); in kvmppc_get_vsr_data()
1437 vcpu->arch.io_gpr = rs; in kvmppc_handle_vsx_store()
1440 if (vcpu->arch.mmio_vsx_copy_nums > 4) in kvmppc_handle_vsx_store()
1443 while (vcpu->arch.mmio_vsx_copy_nums) { in kvmppc_handle_vsx_store()
1453 vcpu->arch.paddr_accessed += vcpu->run->mmio.len; in kvmppc_handle_vsx_store()
1455 vcpu->arch.mmio_vsx_copy_nums--; in kvmppc_handle_vsx_store()
1456 vcpu->arch.mmio_vsx_offset++; in kvmppc_handle_vsx_store()
1468 vcpu->arch.paddr_accessed += run->mmio.len; in kvmppc_emulate_mmio_vsx_loadstore()
1471 emulated = kvmppc_handle_vsx_load(vcpu, vcpu->arch.io_gpr, in kvmppc_emulate_mmio_vsx_loadstore()
1472 run->mmio.len, 1, vcpu->arch.mmio_sign_extend); in kvmppc_emulate_mmio_vsx_loadstore()
1475 vcpu->arch.io_gpr, run->mmio.len, 1); in kvmppc_emulate_mmio_vsx_loadstore()
1503 if (vcpu->arch.mmio_vsx_copy_nums > 2) in kvmppc_handle_vmx_load()
1506 while (vcpu->arch.mmio_vmx_copy_nums) { in kvmppc_handle_vmx_load()
1513 vcpu->arch.paddr_accessed += vcpu->run->mmio.len; in kvmppc_handle_vmx_load()
1514 vcpu->arch.mmio_vmx_copy_nums--; in kvmppc_handle_vmx_load()
1515 vcpu->arch.mmio_vmx_offset++; in kvmppc_handle_vmx_load()
1528 kvmppc_get_vmx_dword_offset(vcpu, vcpu->arch.mmio_vmx_offset); in kvmppc_get_vmx_dword()
1546 kvmppc_get_vmx_word_offset(vcpu, vcpu->arch.mmio_vmx_offset); in kvmppc_get_vmx_word()
1564 kvmppc_get_vmx_hword_offset(vcpu, vcpu->arch.mmio_vmx_offset); in kvmppc_get_vmx_hword()
1582 kvmppc_get_vmx_byte_offset(vcpu, vcpu->arch.mmio_vmx_offset); in kvmppc_get_vmx_byte()
1600 if (vcpu->arch.mmio_vsx_copy_nums > 2) in kvmppc_handle_vmx_store()
1603 vcpu->arch.io_gpr = rs; in kvmppc_handle_vmx_store()
1605 while (vcpu->arch.mmio_vmx_copy_nums) { in kvmppc_handle_vmx_store()
1606 switch (vcpu->arch.mmio_copy_type) { in kvmppc_handle_vmx_store()
1633 vcpu->arch.paddr_accessed += vcpu->run->mmio.len; in kvmppc_handle_vmx_store()
1634 vcpu->arch.mmio_vmx_copy_nums--; in kvmppc_handle_vmx_store()
1635 vcpu->arch.mmio_vmx_offset++; in kvmppc_handle_vmx_store()
1647 vcpu->arch.paddr_accessed += run->mmio.len; in kvmppc_emulate_mmio_vmx_loadstore()
1651 vcpu->arch.io_gpr, run->mmio.len, 1); in kvmppc_emulate_mmio_vmx_loadstore()
1654 vcpu->arch.io_gpr, run->mmio.len, 1); in kvmppc_emulate_mmio_vmx_loadstore()
1696 val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0]; in kvm_vcpu_ioctl_get_one_reg()
1703 val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]); in kvm_vcpu_ioctl_get_one_reg()
1706 val = get_reg_val(reg->id, vcpu->arch.vrsave); in kvm_vcpu_ioctl_get_one_reg()
1747 vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval; in kvm_vcpu_ioctl_set_one_reg()
1754 vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val); in kvm_vcpu_ioctl_set_one_reg()
1761 vcpu->arch.vrsave = set_reg_val(reg->id, val); in kvm_vcpu_ioctl_set_one_reg()
1785 if (vcpu->arch.mmio_vsx_copy_nums > 0) { in kvm_arch_vcpu_ioctl_run()
1786 vcpu->arch.mmio_vsx_copy_nums--; in kvm_arch_vcpu_ioctl_run()
1787 vcpu->arch.mmio_vsx_offset++; in kvm_arch_vcpu_ioctl_run()
1790 if (vcpu->arch.mmio_vsx_copy_nums > 0) { in kvm_arch_vcpu_ioctl_run()
1799 if (vcpu->arch.mmio_vmx_copy_nums > 0) { in kvm_arch_vcpu_ioctl_run()
1800 vcpu->arch.mmio_vmx_copy_nums--; in kvm_arch_vcpu_ioctl_run()
1801 vcpu->arch.mmio_vmx_offset++; in kvm_arch_vcpu_ioctl_run()
1804 if (vcpu->arch.mmio_vmx_copy_nums > 0) { in kvm_arch_vcpu_ioctl_run()
1812 } else if (vcpu->arch.osi_needed) { in kvm_arch_vcpu_ioctl_run()
1818 vcpu->arch.osi_needed = 0; in kvm_arch_vcpu_ioctl_run()
1819 } else if (vcpu->arch.hcall_needed) { in kvm_arch_vcpu_ioctl_run()
1825 vcpu->arch.hcall_needed = 0; in kvm_arch_vcpu_ioctl_run()
1827 } else if (vcpu->arch.epr_needed) { in kvm_arch_vcpu_ioctl_run()
1829 vcpu->arch.epr_needed = 0; in kvm_arch_vcpu_ioctl_run()
1874 vcpu->arch.osi_enabled = true; in kvm_vcpu_ioctl_enable_cap()
1878 vcpu->arch.papr_enabled = true; in kvm_vcpu_ioctl_enable_cap()
1883 vcpu->arch.epr_flags |= KVMPPC_EPR_USER; in kvm_vcpu_ioctl_enable_cap()
1885 vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER; in kvm_vcpu_ioctl_enable_cap()
1890 vcpu->arch.watchdog_enabled = true; in kvm_vcpu_ioctl_enable_cap()
1978 vcpu->kvm->arch.fwnmi_enabled = true; in kvm_vcpu_ioctl_enable_cap()
1995 if (kvm->arch.mpic) in kvm_arch_intc_initialized()
1999 if (kvm->arch.xics || kvm->arch.xive) in kvm_arch_intc_initialized()
2159 set_bit(hcall / 4, kvm->arch.enabled_hcalls); in kvm_vm_ioctl_enable_cap()
2161 clear_bit(hcall / 4, kvm->arch.enabled_hcalls); in kvm_vm_ioctl_enable_cap()
2170 if (kvm->arch.kvm_ops->set_smt_mode) in kvm_vm_ioctl_enable_cap()
2171 r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags); in kvm_vm_ioctl_enable_cap()
2178 !kvm->arch.kvm_ops->enable_nested) in kvm_vm_ioctl_enable_cap()
2180 r = kvm->arch.kvm_ops->enable_nested(kvm); in kvm_vm_ioctl_enable_cap()
2186 if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_svm) in kvm_vm_ioctl_enable_cap()
2188 r = kvm->arch.kvm_ops->enable_svm(kvm); in kvm_vm_ioctl_enable_cap()
2384 r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info); in kvm_arch_vm_ioctl()
2400 if (!kvm->arch.kvm_ops->configure_mmu) in kvm_arch_vm_ioctl()
2405 r = kvm->arch.kvm_ops->configure_mmu(kvm, &cfg); in kvm_arch_vm_ioctl()
2413 if (!kvm->arch.kvm_ops->get_rmmu_info) in kvm_arch_vm_ioctl()
2415 r = kvm->arch.kvm_ops->get_rmmu_info(kvm, &info); in kvm_arch_vm_ioctl()
2432 if (!kvm->arch.kvm_ops->svm_off) in kvm_arch_vm_ioctl()
2435 r = kvm->arch.kvm_ops->svm_off(kvm); in kvm_arch_vm_ioctl()
2440 r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg); in kvm_arch_vm_ioctl()