Lines Matching refs:vcpu
100 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_runnable() argument
102 return !!(vcpu->arch.pending_exceptions); in kvm_arch_vcpu_runnable()
105 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_in_kernel() argument
110 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_should_kick() argument
168 struct kvm_vcpu *vcpu; in kvm_mips_free_vcpus() local
170 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_mips_free_vcpus()
171 kvm_vcpu_destroy(vcpu); in kvm_mips_free_vcpus()
292 struct kvm_vcpu *vcpu; in kvm_mips_comparecount_wakeup() local
294 vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer); in kvm_mips_comparecount_wakeup()
296 kvm_mips_callbacks->queue_timer_int(vcpu); in kvm_mips_comparecount_wakeup()
298 vcpu->arch.wait = 0; in kvm_mips_comparecount_wakeup()
299 rcuwait_wake_up(&vcpu->wait); in kvm_mips_comparecount_wakeup()
301 return kvm_mips_count_timeout(vcpu); in kvm_mips_comparecount_wakeup()
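
The wakeup path above is the standard hrtimer pattern: the callback recovers its enclosing structure with container_of(), queues the timer interrupt, and wakes the vCPU if it is parked on its rcuwait. A minimal sketch of the same pattern, with illustrative names and a kernel-module context assumed:

    #include <linux/hrtimer.h>
    #include <linux/ktime.h>
    #include <linux/rcuwait.h>

    struct demo_timer {
            struct hrtimer timer;
            struct rcuwait wait;
    };

    /* Timer callback: wake whoever waits, then rearm, the same shape as
     * kvm_mips_comparecount_wakeup() above. */
    static enum hrtimer_restart demo_wakeup(struct hrtimer *t)
    {
            struct demo_timer *d = container_of(t, struct demo_timer, timer);

            rcuwait_wake_up(&d->wait);
            hrtimer_forward_now(t, ms_to_ktime(10));
            return HRTIMER_RESTART;
    }

    static void demo_start(struct demo_timer *d)
    {
            rcuwait_init(&d->wait);
            hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
            d->timer.function = demo_wakeup;
            hrtimer_start(&d->timer, ms_to_ktime(10), HRTIMER_MODE_REL);
    }
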
309 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_create() argument
316 vcpu->kvm, vcpu->vcpu_id, vcpu); in kvm_arch_vcpu_create()
318 err = kvm_mips_callbacks->vcpu_init(vcpu); in kvm_arch_vcpu_create()
322 hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC, in kvm_arch_vcpu_create()
324 vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup; in kvm_arch_vcpu_create()
357 vcpu->arch.guest_ebase = gebase; in kvm_arch_vcpu_create()
384 vcpu->arch.vcpu_run = p; in kvm_arch_vcpu_create()
391 dump_handler("kvm_vcpu_run", vcpu->arch.vcpu_run, p); in kvm_arch_vcpu_create()
394 dump_handler("kvm_exit", gebase + 0x2000, vcpu->arch.vcpu_run); in kvm_arch_vcpu_create()
404 vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL); in kvm_arch_vcpu_create()
406 if (!vcpu->arch.kseg0_commpage) { in kvm_arch_vcpu_create()
411 kvm_debug("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage); in kvm_arch_vcpu_create()
412 kvm_mips_commpage_init(vcpu); in kvm_arch_vcpu_create()
415 vcpu->arch.last_sched_cpu = -1; in kvm_arch_vcpu_create()
416 vcpu->arch.last_exec_cpu = -1; in kvm_arch_vcpu_create()
419 err = kvm_mips_callbacks->vcpu_setup(vcpu); in kvm_arch_vcpu_create()
426 kfree(vcpu->arch.kseg0_commpage); in kvm_arch_vcpu_create()
430 kvm_mips_callbacks->vcpu_uninit(vcpu); in kvm_arch_vcpu_create()
434 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_destroy() argument
436 hrtimer_cancel(&vcpu->arch.comparecount_timer); in kvm_arch_vcpu_destroy()
438 kvm_mips_dump_stats(vcpu); in kvm_arch_vcpu_destroy()
440 kvm_mmu_free_memory_caches(vcpu); in kvm_arch_vcpu_destroy()
441 kfree(vcpu->arch.guest_ebase); in kvm_arch_vcpu_destroy()
442 kfree(vcpu->arch.kseg0_commpage); in kvm_arch_vcpu_destroy()
444 kvm_mips_callbacks->vcpu_uninit(vcpu); in kvm_arch_vcpu_destroy()
447 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_set_guest_debug() argument
453 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_ioctl_run() argument
457 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_run()
459 kvm_sigset_activate(vcpu); in kvm_arch_vcpu_ioctl_run()
461 if (vcpu->mmio_needed) { in kvm_arch_vcpu_ioctl_run()
462 if (!vcpu->mmio_is_write) in kvm_arch_vcpu_ioctl_run()
463 kvm_mips_complete_mmio_load(vcpu); in kvm_arch_vcpu_ioctl_run()
464 vcpu->mmio_needed = 0; in kvm_arch_vcpu_ioctl_run()
467 if (vcpu->run->immediate_exit) in kvm_arch_vcpu_ioctl_run()
474 trace_kvm_enter(vcpu); in kvm_arch_vcpu_ioctl_run()
482 smp_store_mb(vcpu->mode, IN_GUEST_MODE); in kvm_arch_vcpu_ioctl_run()
484 r = kvm_mips_callbacks->vcpu_run(vcpu); in kvm_arch_vcpu_ioctl_run()
486 trace_kvm_out(vcpu); in kvm_arch_vcpu_ioctl_run()
491 kvm_sigset_deactivate(vcpu); in kvm_arch_vcpu_ioctl_run()
493 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_run()
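
kvm_arch_vcpu_ioctl_run() is driven from userspace through the generic KVM fd interface. A hedged userspace sketch of that loop (error handling and guest memory setup elided), including the MMIO completion that pairs with the vcpu->mmio_needed logic above:

    #include <fcntl.h>
    #include <linux/kvm.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>

    int main(void)
    {
            int kvm  = open("/dev/kvm", O_RDWR);
            int vm   = ioctl(kvm, KVM_CREATE_VM, 0);
            int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);
            long sz  = ioctl(kvm, KVM_GET_VCPU_MMAP_SIZE, 0);
            struct kvm_run *run = mmap(NULL, sz, PROT_READ | PROT_WRITE,
                                       MAP_SHARED, vcpu, 0);

            for (;;) {
                    ioctl(vcpu, KVM_RUN, 0);  /* enters kvm_arch_vcpu_ioctl_run() */
                    if (run->exit_reason == KVM_EXIT_MMIO) {
                            /* emulate the access described by run->mmio; on
                             * the next KVM_RUN the kernel completes a pending
                             * load via kvm_mips_complete_mmio_load() */
                            continue;
                    }
                    break;  /* other exit reasons handled elsewhere */
            }
            return 0;
    }
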
497 int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_interrupt() argument
511 dvcpu = vcpu; in kvm_vcpu_ioctl_interrupt()
513 dvcpu = vcpu->kvm->vcpus[irq->cpu]; in kvm_vcpu_ioctl_interrupt()
533 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_get_mpstate() argument
539 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_set_mpstate() argument
596 static unsigned long kvm_mips_num_regs(struct kvm_vcpu *vcpu) in kvm_mips_num_regs() argument
601 if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) { in kvm_mips_num_regs()
607 if (kvm_mips_guest_can_have_msa(&vcpu->arch)) in kvm_mips_num_regs()
609 ret += kvm_mips_callbacks->num_regs(vcpu); in kvm_mips_num_regs()
614 static int kvm_mips_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices) in kvm_mips_copy_reg_indices() argument
624 if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) { in kvm_mips_copy_reg_indices()
647 if (kvm_mips_guest_can_have_msa(&vcpu->arch)) { in kvm_mips_copy_reg_indices()
661 return kvm_mips_callbacks->copy_reg_indices(vcpu, indices); in kvm_mips_copy_reg_indices()
664 static int kvm_mips_get_reg(struct kvm_vcpu *vcpu, in kvm_mips_get_reg() argument
667 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_mips_get_reg()
668 struct mips_fpu_struct *fpu = &vcpu->arch.fpu; in kvm_mips_get_reg()
677 v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0]; in kvm_mips_get_reg()
681 v = (long)vcpu->arch.hi; in kvm_mips_get_reg()
684 v = (long)vcpu->arch.lo; in kvm_mips_get_reg()
688 v = (long)vcpu->arch.pc; in kvm_mips_get_reg()
693 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) in kvm_mips_get_reg()
703 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) in kvm_mips_get_reg()
712 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) in kvm_mips_get_reg()
717 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) in kvm_mips_get_reg()
724 if (!kvm_mips_guest_has_msa(&vcpu->arch)) in kvm_mips_get_reg()
741 if (!kvm_mips_guest_has_msa(&vcpu->arch)) in kvm_mips_get_reg()
746 if (!kvm_mips_guest_has_msa(&vcpu->arch)) in kvm_mips_get_reg()
753 ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v); in kvm_mips_get_reg()
776 static int kvm_mips_set_reg(struct kvm_vcpu *vcpu, in kvm_mips_set_reg() argument
779 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_mips_set_reg()
780 struct mips_fpu_struct *fpu = &vcpu->arch.fpu; in kvm_mips_set_reg()
811 vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v; in kvm_mips_set_reg()
815 vcpu->arch.hi = v; in kvm_mips_set_reg()
818 vcpu->arch.lo = v; in kvm_mips_set_reg()
822 vcpu->arch.pc = v; in kvm_mips_set_reg()
827 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) in kvm_mips_set_reg()
837 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) in kvm_mips_set_reg()
846 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) in kvm_mips_set_reg()
851 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) in kvm_mips_set_reg()
858 if (!kvm_mips_guest_has_msa(&vcpu->arch)) in kvm_mips_set_reg()
872 if (!kvm_mips_guest_has_msa(&vcpu->arch)) in kvm_mips_set_reg()
877 if (!kvm_mips_guest_has_msa(&vcpu->arch)) in kvm_mips_set_reg()
884 return kvm_mips_callbacks->set_one_reg(vcpu, reg, v); in kvm_mips_set_reg()
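
Every case above is reachable from userspace through the ONE_REG interface. A sketch of setting the guest PC, assuming the MIPS register id KVM_REG_MIPS_PC from the uapi headers:

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    /* Sketch: write the guest program counter with KVM_SET_ONE_REG. */
    static int set_guest_pc(int vcpu_fd, __u64 pc)
    {
            struct kvm_one_reg reg = {
                    .id   = KVM_REG_MIPS_PC,
                    .addr = (__u64)(unsigned long)&pc,
            };

            return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
    }
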
889 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_enable_cap() argument
894 if (!kvm_vm_ioctl_check_extension(vcpu->kvm, cap->cap)) in kvm_vcpu_ioctl_enable_cap()
903 vcpu->arch.fpu_enabled = true; in kvm_vcpu_ioctl_enable_cap()
906 vcpu->arch.msa_enabled = true; in kvm_vcpu_ioctl_enable_cap()
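
Both flags are opted into per vCPU with KVM_ENABLE_CAP before the first KVM_RUN; a minimal sketch, assuming the capability names KVM_CAP_MIPS_FPU/KVM_CAP_MIPS_MSA from the uapi headers:

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    /* Sketch: let the guest use the FPU (sets fpu_enabled above). */
    static int enable_guest_fpu(int vcpu_fd)
    {
            struct kvm_enable_cap cap = { .cap = KVM_CAP_MIPS_FPU };

            return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
    }
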
919 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_async_ioctl() local
927 kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__, in kvm_arch_vcpu_async_ioctl()
930 return kvm_vcpu_ioctl_interrupt(vcpu, &irq); in kvm_arch_vcpu_async_ioctl()
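
The payload here is the MIPS-specific struct kvm_mips_interrupt, whose cpu field selects the destination vCPU as seen in kvm_vcpu_ioctl_interrupt() above. A sketch, with the line number and the raise/lower convention treated as assumptions rather than documented API:

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    /* Sketch: assert an interrupt line on this vCPU. */
    static int raise_guest_irq(int vcpu_fd)
    {
            struct kvm_mips_interrupt irq = {
                    .cpu = -1,  /* -1: target the issuing vCPU itself */
                    .irq = 2,   /* assumed line; negative values appear to
                                 * request de-assertion in this code */
            };

            return ioctl(vcpu_fd, KVM_INTERRUPT, &irq);
    }
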
939 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_ioctl() local
943 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl()
954 r = kvm_mips_set_reg(vcpu, &reg); in kvm_arch_vcpu_ioctl()
956 r = kvm_mips_get_reg(vcpu, &reg); in kvm_arch_vcpu_ioctl()
968 reg_list.n = kvm_mips_num_regs(vcpu); in kvm_arch_vcpu_ioctl()
974 r = kvm_mips_copy_reg_indices(vcpu, user_list->reg); in kvm_arch_vcpu_ioctl()
983 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); in kvm_arch_vcpu_ioctl()
990 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl()
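
KVM_GET_REG_LIST above follows the usual two-call protocol: the first call fails with E2BIG but writes back the register count, the second fills a correctly sized buffer. A hedged userspace sketch:

    #include <linux/kvm.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>

    /* Sketch: enumerate the ONE_REG ids this vCPU supports. */
    static struct kvm_reg_list *get_reg_list(int vcpu_fd)
    {
            struct kvm_reg_list probe = { .n = 0 }, *list;

            /* Undersized on purpose: fails with E2BIG but sets probe.n,
             * cf. reg_list.n = kvm_mips_num_regs(vcpu) above. */
            ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);

            list = malloc(sizeof(*list) + probe.n * sizeof(__u64));
            if (!list)
                    return NULL;
            list->n = probe.n;
            if (ioctl(vcpu_fd, KVM_GET_REG_LIST, list) < 0) {
                    free(list);
                    return NULL;
            }
            return list;  /* list->reg[0..n-1] holds the register ids */
    }
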
1033 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_get_sregs() argument
1039 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_set_sregs() argument
1045 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_postcreate() argument
1049 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) in kvm_arch_vcpu_ioctl_get_fpu() argument
1054 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) in kvm_arch_vcpu_ioctl_set_fpu() argument
1059 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) in kvm_arch_vcpu_fault() argument
1111 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) in kvm_cpu_has_pending_timer() argument
1113 return kvm_mips_pending_timer(vcpu) || in kvm_cpu_has_pending_timer()
1114 kvm_read_c0_guest_cause(vcpu->arch.cop0) & C_TI; in kvm_cpu_has_pending_timer()
1117 int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_dump_regs() argument
1122 if (!vcpu) in kvm_arch_vcpu_dump_regs()
1126 kvm_debug("\tpc = 0x%08lx\n", vcpu->arch.pc); in kvm_arch_vcpu_dump_regs()
1127 kvm_debug("\texceptions: %08lx\n", vcpu->arch.pending_exceptions); in kvm_arch_vcpu_dump_regs()
1131 vcpu->arch.gprs[i], in kvm_arch_vcpu_dump_regs()
1132 vcpu->arch.gprs[i + 1], in kvm_arch_vcpu_dump_regs()
1133 vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]); in kvm_arch_vcpu_dump_regs()
1135 kvm_debug("\thi: 0x%08lx\n", vcpu->arch.hi); in kvm_arch_vcpu_dump_regs()
1136 kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo); in kvm_arch_vcpu_dump_regs()
1138 cop0 = vcpu->arch.cop0; in kvm_arch_vcpu_dump_regs()
1148 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) in kvm_arch_vcpu_ioctl_set_regs() argument
1152 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_set_regs()
1154 for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++) in kvm_arch_vcpu_ioctl_set_regs()
1155 vcpu->arch.gprs[i] = regs->gpr[i]; in kvm_arch_vcpu_ioctl_set_regs()
1156 vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */ in kvm_arch_vcpu_ioctl_set_regs()
1157 vcpu->arch.hi = regs->hi; in kvm_arch_vcpu_ioctl_set_regs()
1158 vcpu->arch.lo = regs->lo; in kvm_arch_vcpu_ioctl_set_regs()
1159 vcpu->arch.pc = regs->pc; in kvm_arch_vcpu_ioctl_set_regs()
1161 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_set_regs()
1165 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) in kvm_arch_vcpu_ioctl_get_regs() argument
1169 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_get_regs()
1171 for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++) in kvm_arch_vcpu_ioctl_get_regs()
1172 regs->gpr[i] = vcpu->arch.gprs[i]; in kvm_arch_vcpu_ioctl_get_regs()
1174 regs->hi = vcpu->arch.hi; in kvm_arch_vcpu_ioctl_get_regs()
1175 regs->lo = vcpu->arch.lo; in kvm_arch_vcpu_ioctl_get_regs()
1176 regs->pc = vcpu->arch.pc; in kvm_arch_vcpu_ioctl_get_regs()
1178 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_get_regs()
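
The same state moves in one shot through KVM_GET_REGS/KVM_SET_REGS; on MIPS struct kvm_regs carries gpr[32], hi, lo and pc, matching the copies above. A sketch, with the field layout assumed from the uapi header:

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    /* Sketch: advance the guest PC past a trapping instruction. */
    static int skip_insn(int vcpu_fd)
    {
            struct kvm_regs regs;

            if (ioctl(vcpu_fd, KVM_GET_REGS, &regs) < 0)
                    return -1;
            regs.pc += 4;  /* one MIPS instruction word */
            return ioctl(vcpu_fd, KVM_SET_REGS, &regs);
    }
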
1182 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_translate() argument
1202 int kvm_mips_handle_exit(struct kvm_vcpu *vcpu) in kvm_mips_handle_exit() argument
1204 struct kvm_run *run = vcpu->run; in kvm_mips_handle_exit()
1205 u32 cause = vcpu->arch.host_cp0_cause; in kvm_mips_handle_exit()
1207 u32 __user *opc = (u32 __user *) vcpu->arch.pc; in kvm_mips_handle_exit()
1208 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; in kvm_mips_handle_exit()
1213 vcpu->mode = OUTSIDE_GUEST_MODE; in kvm_mips_handle_exit()
1232 cause, opc, run, vcpu); in kvm_mips_handle_exit()
1233 trace_kvm_exit(vcpu, exccode); in kvm_mips_handle_exit()
1241 er = kvm_mips_check_privilege(cause, opc, vcpu); in kvm_mips_handle_exit()
1253 kvm_debug("[%d]EXCCODE_INT @ %p\n", vcpu->vcpu_id, opc); in kvm_mips_handle_exit()
1255 ++vcpu->stat.int_exits; in kvm_mips_handle_exit()
1266 ++vcpu->stat.cop_unusable_exits; in kvm_mips_handle_exit()
1267 ret = kvm_mips_callbacks->handle_cop_unusable(vcpu); in kvm_mips_handle_exit()
1274 ++vcpu->stat.tlbmod_exits; in kvm_mips_handle_exit()
1275 ret = kvm_mips_callbacks->handle_tlb_mod(vcpu); in kvm_mips_handle_exit()
1280 cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc, in kvm_mips_handle_exit()
1283 ++vcpu->stat.tlbmiss_st_exits; in kvm_mips_handle_exit()
1284 ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu); in kvm_mips_handle_exit()
1291 ++vcpu->stat.tlbmiss_ld_exits; in kvm_mips_handle_exit()
1292 ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu); in kvm_mips_handle_exit()
1296 ++vcpu->stat.addrerr_st_exits; in kvm_mips_handle_exit()
1297 ret = kvm_mips_callbacks->handle_addr_err_st(vcpu); in kvm_mips_handle_exit()
1301 ++vcpu->stat.addrerr_ld_exits; in kvm_mips_handle_exit()
1302 ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu); in kvm_mips_handle_exit()
1306 ++vcpu->stat.syscall_exits; in kvm_mips_handle_exit()
1307 ret = kvm_mips_callbacks->handle_syscall(vcpu); in kvm_mips_handle_exit()
1311 ++vcpu->stat.resvd_inst_exits; in kvm_mips_handle_exit()
1312 ret = kvm_mips_callbacks->handle_res_inst(vcpu); in kvm_mips_handle_exit()
1316 ++vcpu->stat.break_inst_exits; in kvm_mips_handle_exit()
1317 ret = kvm_mips_callbacks->handle_break(vcpu); in kvm_mips_handle_exit()
1321 ++vcpu->stat.trap_inst_exits; in kvm_mips_handle_exit()
1322 ret = kvm_mips_callbacks->handle_trap(vcpu); in kvm_mips_handle_exit()
1326 ++vcpu->stat.msa_fpe_exits; in kvm_mips_handle_exit()
1327 ret = kvm_mips_callbacks->handle_msa_fpe(vcpu); in kvm_mips_handle_exit()
1331 ++vcpu->stat.fpe_exits; in kvm_mips_handle_exit()
1332 ret = kvm_mips_callbacks->handle_fpe(vcpu); in kvm_mips_handle_exit()
1336 ++vcpu->stat.msa_disabled_exits; in kvm_mips_handle_exit()
1337 ret = kvm_mips_callbacks->handle_msa_disabled(vcpu); in kvm_mips_handle_exit()
1342 ret = kvm_mips_callbacks->handle_guest_exit(vcpu); in kvm_mips_handle_exit()
1349 kvm_get_badinstr(opc, vcpu, &inst); in kvm_mips_handle_exit()
1352 kvm_read_c0_guest_status(vcpu->arch.cop0)); in kvm_mips_handle_exit()
1353 kvm_arch_vcpu_dump_regs(vcpu); in kvm_mips_handle_exit()
1364 kvm_vz_acquire_htimer(vcpu); in kvm_mips_handle_exit()
1367 kvm_mips_deliver_interrupts(vcpu, cause); in kvm_mips_handle_exit()
1374 ++vcpu->stat.signal_exits; in kvm_mips_handle_exit()
1375 trace_kvm_exit(vcpu, KVM_TRACE_EXIT_SIGNAL); in kvm_mips_handle_exit()
1380 trace_kvm_reenter(vcpu); in kvm_mips_handle_exit()
1388 smp_store_mb(vcpu->mode, IN_GUEST_MODE); in kvm_mips_handle_exit()
1390 kvm_mips_callbacks->vcpu_reenter(vcpu); in kvm_mips_handle_exit()
1401 if (kvm_mips_guest_has_fpu(&vcpu->arch) && in kvm_mips_handle_exit()
1403 __kvm_restore_fcsr(&vcpu->arch); in kvm_mips_handle_exit()
1405 if (kvm_mips_guest_has_msa(&vcpu->arch) && in kvm_mips_handle_exit()
1407 __kvm_restore_msacsr(&vcpu->arch); in kvm_mips_handle_exit()
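
The switch above relies on a simple handler convention: each callback either repairs state and asks for guest re-entry, or fills vcpu->run and bounces out to userspace. A sketch of that contract, with the predicate purely hypothetical:

    /* Sketch: shape of a kvm_mips_callbacks handler. RESUME_GUEST re-enters
     * the guest; RESUME_HOST returns to userspace with run->exit_reason set. */
    static int demo_handle_exit(struct kvm_vcpu *vcpu)
    {
            if (demo_can_emulate(vcpu))  /* hypothetical predicate */
                    return RESUME_GUEST;

            vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
            return RESUME_HOST;
    }
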
1418 void kvm_own_fpu(struct kvm_vcpu *vcpu) in kvm_own_fpu() argument
1420 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_own_fpu()
1438 vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) in kvm_own_fpu()
1439 kvm_lose_fpu(vcpu); in kvm_own_fpu()
1453 if (!(vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) { in kvm_own_fpu()
1454 __kvm_restore_fpu(&vcpu->arch); in kvm_own_fpu()
1455 vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU; in kvm_own_fpu()
1456 trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU); in kvm_own_fpu()
1458 trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_FPU); in kvm_own_fpu()
1466 void kvm_own_msa(struct kvm_vcpu *vcpu) in kvm_own_msa() argument
1468 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_own_msa()
1477 if (kvm_mips_guest_has_fpu(&vcpu->arch)) { in kvm_own_msa()
1485 (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU | in kvm_own_msa()
1487 kvm_lose_fpu(vcpu); in kvm_own_msa()
1500 switch (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA)) { in kvm_own_msa()
1505 __kvm_restore_msa_upper(&vcpu->arch); in kvm_own_msa()
1506 vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA; in kvm_own_msa()
1507 trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_MSA); in kvm_own_msa()
1511 __kvm_restore_msa(&vcpu->arch); in kvm_own_msa()
1512 vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA; in kvm_own_msa()
1513 if (kvm_mips_guest_has_fpu(&vcpu->arch)) in kvm_own_msa()
1514 vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU; in kvm_own_msa()
1515 trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, in kvm_own_msa()
1519 trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_MSA); in kvm_own_msa()
1528 void kvm_drop_fpu(struct kvm_vcpu *vcpu) in kvm_drop_fpu() argument
1531 if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) { in kvm_drop_fpu()
1533 trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_MSA); in kvm_drop_fpu()
1534 vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_MSA; in kvm_drop_fpu()
1536 if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) { in kvm_drop_fpu()
1538 trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_FPU); in kvm_drop_fpu()
1539 vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU; in kvm_drop_fpu()
1545 void kvm_lose_fpu(struct kvm_vcpu *vcpu) in kvm_lose_fpu() argument
1555 if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) { in kvm_lose_fpu()
1561 __kvm_save_msa(&vcpu->arch); in kvm_lose_fpu()
1562 trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU_MSA); in kvm_lose_fpu()
1566 if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) { in kvm_lose_fpu()
1570 vcpu->arch.aux_inuse &= ~(KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA); in kvm_lose_fpu()
1571 } else if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) { in kvm_lose_fpu()
1577 __kvm_save_fpu(&vcpu->arch); in kvm_lose_fpu()
1578 vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU; in kvm_lose_fpu()
1579 trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU); in kvm_lose_fpu()
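
The aux_inuse bits above implement lazy FPU/MSA context switching. Since the MSA vector registers overlay the scalar FPU registers, owning MSA implies the FPU context is live too, which is why __kvm_restore_msa_upper() suffices when the FPU is already owned and why __kvm_save_msa() saves everything at once. A small kernel-side sketch of the ownership test, as read from this code rather than any spec:

    /* Sketch: is any guest FPU/MSA state currently live in hardware? */
    static bool demo_guest_aux_live(struct kvm_vcpu *vcpu)
    {
            return vcpu->arch.aux_inuse &
                   (KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA);
    }
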