Lines matching refs:arch: references to the arch member (struct kvm_arch / struct kvm_vcpu_arch) in the MIPS KVM host code. Each match is listed as source line number, matched line, and enclosing function.
68 vcpu->arch.guest_kernel_asid[i] = 0; in kvm_mips_reset_vcpu()
69 vcpu->arch.guest_user_asid[i] = 0; in kvm_mips_reset_vcpu()
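The two matches above are the body of a per-CPU loop that clears the cached ASIDs so the vCPU rebuilds its TLB mappings from scratch on its next run. A minimal sketch of the surrounding function, assuming the usual for_each_possible_cpu() idiom (the loop construct and return value are not part of the matches):

static int kvm_mips_reset_vcpu(struct kvm_vcpu *vcpu)
{
	int i;

	/* Invalidate cached ASIDs on every CPU; stale TLB state must not be reused */
	for_each_possible_cpu(i) {
		vcpu->arch.guest_kernel_asid[i] = 0;
		vcpu->arch.guest_user_asid[i] = 0;
	}

	return 0;
}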
81 return !!(vcpu->arch.pending_exceptions); in kvm_arch_vcpu_runnable()
115 kvm->arch.commpage_tlb = wired; in kvm_mips_init_tlbs()
118 kvm->arch.commpage_tlb); in kvm_mips_init_tlbs()
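Lines 115 and 118 bracket the reservation of a wired TLB entry that will later map the commpage into the guest kernel's address space. A sketch of the whole helper, assuming the standard read_c0_wired()/write_c0_wired() sequence around the matched lines:

static void kvm_mips_init_tlbs(struct kvm *kvm)
{
	unsigned long wired;

	/* Reserve one more wired TLB entry; it will map the commpage */
	wired = read_c0_wired();
	write_c0_wired(wired + 1);
	mtc0_tlbw_hazard();
	kvm->arch.commpage_tlb = wired;

	kvm_debug("[%d] commpage TLB: %d\n", smp_processor_id(),
		  kvm->arch.commpage_tlb);
}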
147 for (i = 0; i < kvm->arch.guest_pmap_npages; i++) { in kvm_mips_free_vcpus()
148 if (kvm->arch.guest_pmap[i] != KVM_INVALID_PAGE) in kvm_mips_free_vcpus()
149 kvm_mips_release_pfn_clean(kvm->arch.guest_pmap[i]); in kvm_mips_free_vcpus()
151 kfree(kvm->arch.guest_pmap); in kvm_mips_free_vcpus()
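The matches in kvm_mips_free_vcpus() release every host page that was pinned for the guest physical map before freeing the table itself. A sketch of that teardown, with the per-vCPU freeing that follows it elided:

static void kvm_mips_free_vcpus(struct kvm *kvm)
{
	unsigned int i;

	/* Put the pages we reserved for the guest pmap */
	for (i = 0; i < kvm->arch.guest_pmap_npages; i++) {
		if (kvm->arch.guest_pmap[i] != KVM_INVALID_PAGE)
			kvm_mips_release_pfn_clean(kvm->arch.guest_pmap[i]);
	}
	kfree(kvm->arch.guest_pmap);

	/* ... free each vCPU ... */
}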
222 if (!kvm->arch.guest_pmap) { in kvm_arch_commit_memory_region()
227 kvm->arch.guest_pmap_npages = npages; in kvm_arch_commit_memory_region()
228 kvm->arch.guest_pmap = in kvm_arch_commit_memory_region()
231 if (!kvm->arch.guest_pmap) { in kvm_arch_commit_memory_region()
237 npages, kvm->arch.guest_pmap); in kvm_arch_commit_memory_region()
241 kvm->arch.guest_pmap[i] = KVM_INVALID_PAGE; in kvm_arch_commit_memory_region()
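kvm_arch_commit_memory_region() lazily allocates the guest physical map on the first memslot commit and poisons every entry with KVM_INVALID_PAGE so later faults can tell mapped from unmapped pages. A condensed sketch of that allocation path; how npages is derived, and the declarations of npages and i, are assumptions not shown by the matches:

	/* Set up the guest pmap on first use */
	if (!kvm->arch.guest_pmap) {
		/* assumption: npages derived from the new slot's memory_size */
		kvm->arch.guest_pmap_npages = npages;
		kvm->arch.guest_pmap =
		    kzalloc(npages * sizeof(unsigned long), GFP_KERNEL);

		if (!kvm->arch.guest_pmap) {
			kvm_err("Failed to allocate guest PMAP\n");
			return;
		}

		kvm_debug("Allocated space for Guest PMAP Table (%ld pages) @ %p\n",
			  npages, kvm->arch.guest_pmap);

		/* Mark every entry invalid until a fault maps a real page */
		for (i = 0; i < npages; i++)
			kvm->arch.guest_pmap[i] = KVM_INVALID_PAGE;
	}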
276 vcpu->arch.host_ebase = (void *)read_c0_ebase(); in kvm_arch_vcpu_create()
288 vcpu->arch.guest_ebase = gebase; in kvm_arch_vcpu_create()
321 vcpu->arch.vcpu_run = gebase + offset; in kvm_arch_vcpu_create()
323 vcpu->arch.vcpu_run = __kvm_mips_vcpu_run; in kvm_arch_vcpu_create()
334 vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL); in kvm_arch_vcpu_create()
336 if (!vcpu->arch.kseg0_commpage) { in kvm_arch_vcpu_create()
341 kvm_debug("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage); in kvm_arch_vcpu_create()
345 vcpu->arch.last_sched_cpu = -1; in kvm_arch_vcpu_create()
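The kvm_arch_vcpu_create() matches trace vCPU bring-up: the host exception base is saved, a private exception base (gebase) holding the guest exception handlers is installed, a run entry point is chosen, and a two-page commpage is allocated. A condensed sketch; whether the run handler is copied into gebase or the statically linked __kvm_mips_vcpu_run is used depends on kernel version, and handlers_relocated, offset, and the cleanup label are illustrative assumptions:

	/* Remember the host EBASE; the guest gets its own exception base */
	vcpu->arch.host_ebase = (void *)read_c0_ebase();
	/* ... allocate gebase and copy the guest exception handlers into it ... */
	vcpu->arch.guest_ebase = gebase;

	/* Entry point used by kvm_arch_vcpu_ioctl_run() */
	if (handlers_relocated)		/* assumption: version-dependent choice */
		vcpu->arch.vcpu_run = gebase + offset;
	else
		vcpu->arch.vcpu_run = __kvm_mips_vcpu_run;

	/* Two pages backing the commpage wired into the guest TLB */
	vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL);
	if (!vcpu->arch.kseg0_commpage)
		goto out_free_gebase;	/* assumption: cleanup label */

	kvm_debug("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage);

	/* Not scheduled on any physical CPU yet */
	vcpu->arch.last_sched_cpu = -1;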
367 hrtimer_cancel(&vcpu->arch.comparecount_timer); in kvm_arch_vcpu_free()
373 kfree(vcpu->arch.guest_ebase); in kvm_arch_vcpu_free()
374 kfree(vcpu->arch.kseg0_commpage); in kvm_arch_vcpu_free()
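kvm_arch_vcpu_free() undoes those allocations in reverse: cancel the compare-count timer, then free the guest exception base and the commpage. A sketch, with the uninit and statistics steps between them elided:

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	hrtimer_cancel(&vcpu->arch.comparecount_timer);

	/* ... vCPU uninit and statistics teardown ... */

	kfree(vcpu->arch.guest_ebase);
	kfree(vcpu->arch.kseg0_commpage);
	kfree(vcpu);
}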
408 kvm_read_c0_guest_cause(vcpu->arch.cop0)); in kvm_arch_vcpu_ioctl_run()
415 r = vcpu->arch.vcpu_run(run, vcpu); in kvm_arch_vcpu_ioctl_run()
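In kvm_arch_vcpu_ioctl_run(), pending interrupts are delivered against the guest CP0 Cause register before control jumps through the vcpu_run pointer set up in kvm_arch_vcpu_create(). A trimmed sketch of the code around the two matched lines; signal masking, MMIO completion, and the guest-time accounting calls are elided since their APIs vary by version:

	local_irq_disable();
	/* Deliver any exceptions/interrupts pending against guest Cause */
	kvm_mips_deliver_interrupts(vcpu,
				    kvm_read_c0_guest_cause(vcpu->arch.cop0));

	/* ... guest-enter accounting ... */
	r = vcpu->arch.vcpu_run(run, vcpu);
	/* ... guest-exit accounting ... */
	local_irq_enable();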
455 dvcpu->arch.wait = 0; in kvm_vcpu_ioctl_interrupt()
544 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_mips_get_reg()
545 struct mips_fpu_struct *fpu = &vcpu->arch.fpu; in kvm_mips_get_reg()
554 v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0]; in kvm_mips_get_reg()
557 v = (long)vcpu->arch.hi; in kvm_mips_get_reg()
560 v = (long)vcpu->arch.lo; in kvm_mips_get_reg()
563 v = (long)vcpu->arch.pc; in kvm_mips_get_reg()
568 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) in kvm_mips_get_reg()
578 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) in kvm_mips_get_reg()
587 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) in kvm_mips_get_reg()
592 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) in kvm_mips_get_reg()
599 if (!kvm_mips_guest_has_msa(&vcpu->arch)) in kvm_mips_get_reg()
616 if (!kvm_mips_guest_has_msa(&vcpu->arch)) in kvm_mips_get_reg()
621 if (!kvm_mips_guest_has_msa(&vcpu->arch)) in kvm_mips_get_reg()
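kvm_mips_get_reg() is one big switch over reg->id: the core registers read straight out of vcpu->arch, while the FPU and MSA groups are guarded so they fail cleanly when the guest lacks the unit. A skeleton of that dispatch, using the KVM_REG_MIPS_* UAPI constants; the bodies of the guarded cases are elided:

	s64 v;

	switch (reg->id) {
	/* Core registers come straight from vcpu->arch */
	case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31:
		v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0];
		break;
	case KVM_REG_MIPS_HI:
		v = (long)vcpu->arch.hi;
		break;
	case KVM_REG_MIPS_LO:
		v = (long)vcpu->arch.lo;
		break;
	case KVM_REG_MIPS_PC:
		v = (long)vcpu->arch.pc;
		break;

	/* FPU/MSA registers are only visible if the guest has the unit */
	case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		/* ... read from vcpu->arch.fpu ... */
		break;
	/* ... remaining FPU/MSA/CP0 cases follow the same pattern ... */
	}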
723 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_mips_set_reg()
724 struct mips_fpu_struct *fpu = &vcpu->arch.fpu; in kvm_mips_set_reg()
755 vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v; in kvm_mips_set_reg()
758 vcpu->arch.hi = v; in kvm_mips_set_reg()
761 vcpu->arch.lo = v; in kvm_mips_set_reg()
764 vcpu->arch.pc = v; in kvm_mips_set_reg()
769 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) in kvm_mips_set_reg()
779 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) in kvm_mips_set_reg()
788 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) in kvm_mips_set_reg()
793 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) in kvm_mips_set_reg()
800 if (!kvm_mips_guest_has_msa(&vcpu->arch)) in kvm_mips_set_reg()
814 if (!kvm_mips_guest_has_msa(&vcpu->arch)) in kvm_mips_set_reg()
819 if (!kvm_mips_guest_has_msa(&vcpu->arch)) in kvm_mips_set_reg()
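kvm_mips_set_reg() mirrors the getter: a value v decoded from userspace is written into the same vcpu->arch fields behind the same kvm_mips_guest_has_fpu()/kvm_mips_guest_has_msa() guards. The core cases, sketched for symmetry; note that $zero has its own case, consistent with the "zero is special" comment at line 1157 below:

	switch (reg->id) {
	case KVM_REG_MIPS_R0:
		/* Silently ignore attempts to set $zero */
		break;
	case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31:
		vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v;
		break;
	case KVM_REG_MIPS_HI:
		vcpu->arch.hi = v;
		break;
	case KVM_REG_MIPS_LO:
		vcpu->arch.lo = v;
		break;
	case KVM_REG_MIPS_PC:
		vcpu->arch.pc = v;
		break;
	/* ... guarded FPU/MSA cases as in kvm_mips_get_reg() ... */
	}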
895 vcpu->arch.fpu_enabled = true; in kvm_vcpu_ioctl_enable_cap()
898 vcpu->arch.msa_enabled = true; in kvm_vcpu_ioctl_enable_cap()
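The two assignments in kvm_vcpu_ioctl_enable_cap() are how userspace opts a vCPU into FPU and MSA emulation; these flags are what kvm_mips_guest_has_fpu()/kvm_mips_guest_has_msa() consult throughout this file. A sketch of the ioctl body, assuming the usual capability and flags validation precedes the switch:

	switch (cap->cap) {
	case KVM_CAP_MIPS_FPU:
		vcpu->arch.fpu_enabled = true;
		break;
	case KVM_CAP_MIPS_MSA:
		vcpu->arch.msa_enabled = true;
		break;
	default:
		r = -EINVAL;
		break;
	}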
1129 kvm_debug("\tpc = 0x%08lx\n", vcpu->arch.pc); in kvm_arch_vcpu_dump_regs()
1130 kvm_debug("\texceptions: %08lx\n", vcpu->arch.pending_exceptions); in kvm_arch_vcpu_dump_regs()
1134 vcpu->arch.gprs[i], in kvm_arch_vcpu_dump_regs()
1135 vcpu->arch.gprs[i + 1], in kvm_arch_vcpu_dump_regs()
1136 vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]); in kvm_arch_vcpu_dump_regs()
1138 kvm_debug("\thi: 0x%08lx\n", vcpu->arch.hi); in kvm_arch_vcpu_dump_regs()
1139 kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo); in kvm_arch_vcpu_dump_regs()
1141 cop0 = vcpu->arch.cop0; in kvm_arch_vcpu_dump_regs()
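The three gprs lines above (1134-1136) are the body of a loop that prints the 32 GPRs four per row; the loop header itself is not among the matches. Reconstructed:

	for (i = 0; i < 32; i += 4)
		kvm_debug("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i,
			  vcpu->arch.gprs[i],
			  vcpu->arch.gprs[i + 1],
			  vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);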
1155 for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++) in kvm_arch_vcpu_ioctl_set_regs()
1156 vcpu->arch.gprs[i] = regs->gpr[i]; in kvm_arch_vcpu_ioctl_set_regs()
1157 vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */ in kvm_arch_vcpu_ioctl_set_regs()
1158 vcpu->arch.hi = regs->hi; in kvm_arch_vcpu_ioctl_set_regs()
1159 vcpu->arch.lo = regs->lo; in kvm_arch_vcpu_ioctl_set_regs()
1160 vcpu->arch.pc = regs->pc; in kvm_arch_vcpu_ioctl_set_regs()
1169 for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++) in kvm_arch_vcpu_ioctl_get_regs()
1170 regs->gpr[i] = vcpu->arch.gprs[i]; in kvm_arch_vcpu_ioctl_get_regs()
1172 regs->hi = vcpu->arch.hi; in kvm_arch_vcpu_ioctl_get_regs()
1173 regs->lo = vcpu->arch.lo; in kvm_arch_vcpu_ioctl_get_regs()
1174 regs->pc = vcpu->arch.pc; in kvm_arch_vcpu_ioctl_get_regs()
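The set/get pair copies the whole GPR file plus hi/lo/pc between vcpu->arch and the kvm_regs ioctl structure. Note the asymmetry the matches hint at: the setter starts at index 1 and then forces gprs[0] to zero, since $zero is architecturally constant. Both functions reconstructed around the matched lines:

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		vcpu->arch.gprs[i] = regs->gpr[i];
	vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
	vcpu->arch.hi = regs->hi;
	vcpu->arch.lo = regs->lo;
	vcpu->arch.pc = regs->pc;

	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		regs->gpr[i] = vcpu->arch.gprs[i];

	regs->hi = vcpu->arch.hi;
	regs->lo = vcpu->arch.lo;
	regs->pc = vcpu->arch.pc;

	return 0;
}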
1185 vcpu->arch.wait = 0; in kvm_mips_comparecount_func()
1195 vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer); in kvm_mips_comparecount_wakeup()
1203 hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC, in kvm_arch_vcpu_init()
1205 vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup; in kvm_arch_vcpu_init()
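These four matches tie the compare-count emulation together: kvm_arch_vcpu_init() arms an hrtimer whose expiry handler recovers the vCPU via container_of() and clears arch.wait so a vCPU blocked in WAIT gets rescheduled. A sketch of the chain; the actual wakeup call is elided since its API changed across versions, and the implementation-callback init in kvm_arch_vcpu_init() is omitted:

static void kvm_mips_comparecount_func(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

	kvm_mips_callbacks->queue_timer_int(vcpu);

	vcpu->arch.wait = 0;
	/* ... wake the vCPU if it is sleeping in WAIT ... */
}

static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);
	kvm_mips_comparecount_func((unsigned long)vcpu);
	return kvm_mips_count_timeout(vcpu);
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;
	return 0;
}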
1237 uint32_t cause = vcpu->arch.host_cp0_cause; in kvm_mips_handle_exit()
1239 uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; in kvm_mips_handle_exit()
1240 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; in kvm_mips_handle_exit()
1307 cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc, in kvm_mips_handle_exit()
1381 kvm_read_c0_guest_status(vcpu->arch.cop0)); in kvm_mips_handle_exit()
1415 if (kvm_mips_guest_has_fpu(&vcpu->arch) && in kvm_mips_handle_exit()
1417 __kvm_restore_fcsr(&vcpu->arch); in kvm_mips_handle_exit()
1419 if (kvm_mips_guest_has_msa(&vcpu->arch) && in kvm_mips_handle_exit()
1421 __kvm_restore_msacsr(&vcpu->arch); in kvm_mips_handle_exit()
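At the tail of kvm_mips_handle_exit(), before resuming the guest, the FP and MSA control/status registers are restored, but only when the matching unit is both owned by the guest and currently enabled in hardware; the restore itself may raise a pending FP exception that should be taken here rather than in guest context. A sketch of that tail:

	if (ret == RESUME_GUEST) {
		/* Only restore if the guest context is live in hardware */
		if (kvm_mips_guest_has_fpu(&vcpu->arch) &&
		    read_c0_status() & ST0_CU1)
			__kvm_restore_fcsr(&vcpu->arch);

		if (kvm_mips_guest_has_msa(&vcpu->arch) &&
		    read_c0_config5() & MIPS_CONF5_MSAEN)
			__kvm_restore_msacsr(&vcpu->arch);
	}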
1433 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_own_fpu()
1451 vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) in kvm_own_fpu()
1466 if (!(vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)) { in kvm_own_fpu()
1467 __kvm_restore_fpu(&vcpu->arch); in kvm_own_fpu()
1468 vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_FPU; in kvm_own_fpu()
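kvm_own_fpu() hands the hardware FPU to the guest: it enables CU1 and restores the guest FPU context only if fpu_inuse does not already mark it live. The match at line 1451 is the FR=0 corner case, where live MSA state would alias undefined FPU state, so the full context is spilled first via kvm_lose_fpu(). A condensed sketch (FRE handling elided):

void kvm_own_fpu(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned int sr;

	preempt_disable();

	sr = kvm_read_c0_guest_status(cop0);

	/* FR=0 with live MSA state is undefined: save everything first */
	if (cpu_has_msa && sr & ST0_CU1 && !(sr & ST0_FR) &&
	    vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA)
		kvm_lose_fpu(vcpu);

	/* Enable the FPU for the guest, matching the guest's FR setting */
	change_c0_status(ST0_CU1 | ST0_FR, sr);
	enable_fpu_hazard();

	/* Restore guest FPU context only if it is not already live */
	if (!(vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)) {
		__kvm_restore_fpu(&vcpu->arch);
		vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_FPU;
	}

	preempt_enable();
}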
1478 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_own_msa()
1487 if (kvm_mips_guest_has_fpu(&vcpu->arch)) { in kvm_own_msa()
1495 (vcpu->arch.fpu_inuse & (KVM_MIPS_FPU_FPU | in kvm_own_msa()
1510 switch (vcpu->arch.fpu_inuse & (KVM_MIPS_FPU_FPU | KVM_MIPS_FPU_MSA)) { in kvm_own_msa()
1515 __kvm_restore_msa_upper(&vcpu->arch); in kvm_own_msa()
1516 vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_MSA; in kvm_own_msa()
1520 __kvm_restore_msa(&vcpu->arch); in kvm_own_msa()
1521 vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_MSA; in kvm_own_msa()
1522 if (kvm_mips_guest_has_fpu(&vcpu->arch)) in kvm_own_msa()
1523 vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_FPU; in kvm_own_msa()
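kvm_own_msa() does the same for the vector unit, and the switch on fpu_inuse shown in the matches is the interesting part: if the scalar FPU context is already live, only the MSA upper halves need restoring; if nothing is live, the full vector restore also brings in the FPU registers, so both bits get set. A sketch of that switch:

	switch (vcpu->arch.fpu_inuse & (KVM_MIPS_FPU_FPU | KVM_MIPS_FPU_MSA)) {
	case KVM_MIPS_FPU_FPU:
		/* Scalar FPU live: only the vector upper halves are stale */
		__kvm_restore_msa_upper(&vcpu->arch);
		vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_MSA;
		break;
	case 0:
		/* Nothing live: full vector restore covers the FPU too */
		__kvm_restore_msa(&vcpu->arch);
		vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_MSA;
		if (kvm_mips_guest_has_fpu(&vcpu->arch))
			vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_FPU;
		break;
	default:
		/* MSA already live: nothing to restore */
		break;
	}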
1537 if (cpu_has_msa && vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) { in kvm_drop_fpu()
1539 vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_MSA; in kvm_drop_fpu()
1541 if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) { in kvm_drop_fpu()
1543 vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_FPU; in kvm_drop_fpu()
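kvm_drop_fpu() discards the guest's FPU/MSA hardware state without saving it, used when that state is no longer worth preserving: disable the unit in hardware and clear the corresponding fpu_inuse bit. A sketch:

void kvm_drop_fpu(struct kvm_vcpu *vcpu)
{
	preempt_disable();

	if (cpu_has_msa && vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) {
		disable_msa();
		vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_MSA;
	}
	if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) {
		clear_c0_status(ST0_CU1 | ST0_FR);
		vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_FPU;
	}

	preempt_enable();
}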
1559 if (cpu_has_msa && vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) { in kvm_lose_fpu()
1563 __kvm_save_msa(&vcpu->arch); in kvm_lose_fpu()
1567 if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) in kvm_lose_fpu()
1569 vcpu->arch.fpu_inuse &= ~(KVM_MIPS_FPU_FPU | KVM_MIPS_FPU_MSA); in kvm_lose_fpu()
1570 } else if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) { in kvm_lose_fpu()
1574 __kvm_save_fpu(&vcpu->arch); in kvm_lose_fpu()
1575 vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_FPU; in kvm_lose_fpu()
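kvm_lose_fpu(), by contrast, saves before disabling. The subtlety the matches show: saving MSA state also covers the FPU registers (they alias the lower halves of the vector registers), so the MSA branch clears both fpu_inuse bits at once, and the unit must be re-enabled in hardware before its state can be read out. A condensed sketch:

void kvm_lose_fpu(struct kvm_vcpu *vcpu)
{
	preempt_disable();

	if (cpu_has_msa && vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) {
		/* Re-enable MSA in hardware so the state can be saved */
		set_c0_config5(MIPS_CONF5_MSAEN);
		enable_fpu_hazard();

		__kvm_save_msa(&vcpu->arch);

		/* MSA save covers the FPU registers: drop both */
		disable_msa();
		if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)
			clear_c0_status(ST0_CU1 | ST0_FR);
		vcpu->arch.fpu_inuse &= ~(KVM_MIPS_FPU_FPU | KVM_MIPS_FPU_MSA);
	} else if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) {
		set_c0_status(ST0_CU1);
		enable_fpu_hazard();

		__kvm_save_fpu(&vcpu->arch);
		vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_FPU;

		clear_c0_status(ST0_CU1 | ST0_FR);
	}

	preempt_enable();
}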