Lines Matching +full:reserved +full:- +full:ipi +full:- +full:vectors

8  * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
98 return !!(vcpu->arch.pending_exceptions); in kvm_arch_vcpu_runnable()
113 return kvm_mips_callbacks->hardware_enable(); in kvm_arch_hardware_enable()
118 kvm_mips_callbacks->hardware_disable(); in kvm_arch_hardware_disable()
144 return -EINVAL; in kvm_arch_init_vm()
147 /* Allocate page table to map GPA -> RPA */ in kvm_arch_init_vm()
148 kvm->arch.gpa_mm.pgd = kvm_pgd_alloc(); in kvm_arch_init_vm()
149 if (!kvm->arch.gpa_mm.pgd) in kvm_arch_init_vm()
150 return -ENOMEM; in kvm_arch_init_vm()
174 mutex_lock(&kvm->lock); in kvm_mips_free_vcpus()
176 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) in kvm_mips_free_vcpus()
177 kvm->vcpus[i] = NULL; in kvm_mips_free_vcpus()
179 atomic_set(&kvm->online_vcpus, 0); in kvm_mips_free_vcpus()
181 mutex_unlock(&kvm->lock); in kvm_mips_free_vcpus()
188 pgd_free(NULL, kvm->arch.gpa_mm.pgd); in kvm_mips_free_gpa_pt()
200 return -ENOIOCTLCMD; in kvm_arch_dev_ioctl()
215 kvm_mips_callbacks->flush_shadow_all(kvm); in kvm_arch_flush_shadow_all()
226 spin_lock(&kvm->mmu_lock); in kvm_arch_flush_shadow_memslot()
228 kvm_mips_flush_gpa_pt(kvm, slot->base_gfn, in kvm_arch_flush_shadow_memslot()
229 slot->base_gfn + slot->npages - 1); in kvm_arch_flush_shadow_memslot()
231 kvm_mips_callbacks->flush_shadow_memslot(kvm, slot); in kvm_arch_flush_shadow_memslot()
232 spin_unlock(&kvm->mmu_lock); in kvm_arch_flush_shadow_memslot()
252 __func__, kvm, mem->slot, mem->guest_phys_addr, in kvm_arch_commit_memory_region()
253 mem->memory_size, mem->userspace_addr); in kvm_arch_commit_memory_region()
265 (!(old->flags & KVM_MEM_LOG_DIRTY_PAGES) && in kvm_arch_commit_memory_region()
266 new->flags & KVM_MEM_LOG_DIRTY_PAGES)) { in kvm_arch_commit_memory_region()
267 spin_lock(&kvm->mmu_lock); in kvm_arch_commit_memory_region()
269 needs_flush = kvm_mips_mkclean_gpa_pt(kvm, new->base_gfn, in kvm_arch_commit_memory_region()
270 new->base_gfn + new->npages - 1); in kvm_arch_commit_memory_region()
273 kvm_mips_callbacks->flush_shadow_memslot(kvm, new); in kvm_arch_commit_memory_region()
274 spin_unlock(&kvm->mmu_lock); in kvm_arch_commit_memory_region()
304 err = -ENOMEM; in kvm_arch_vcpu_create()
327 err = -ENOMEM; in kvm_arch_vcpu_create()
341 err = -ENOMEM; in kvm_arch_vcpu_create()
346 vcpu->arch.guest_ebase = gebase; in kvm_arch_vcpu_create()
348 /* Build guest exception vectors dynamically in unmapped memory */ in kvm_arch_vcpu_create()
351 /* TLB refill (or XTLB refill on 64-bit VZ where KX=1) */ in kvm_arch_vcpu_create()
360 /* For vectored interrupts poke the exception code @ all offsets 0-7 */ in kvm_arch_vcpu_create()
373 vcpu->arch.vcpu_run = p; in kvm_arch_vcpu_create()
380 dump_handler("kvm_vcpu_run", vcpu->arch.vcpu_run, p); in kvm_arch_vcpu_create()
383 dump_handler("kvm_exit", gebase + 0x2000, vcpu->arch.vcpu_run); in kvm_arch_vcpu_create()
390 * Allocate comm page for guest kernel, a TLB will be reserved for in kvm_arch_vcpu_create()
393 vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL); in kvm_arch_vcpu_create()
395 if (!vcpu->arch.kseg0_commpage) { in kvm_arch_vcpu_create()
396 err = -ENOMEM; in kvm_arch_vcpu_create()
400 kvm_debug("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage); in kvm_arch_vcpu_create()
404 vcpu->arch.last_sched_cpu = -1; in kvm_arch_vcpu_create()
405 vcpu->arch.last_exec_cpu = -1; in kvm_arch_vcpu_create()
424 hrtimer_cancel(&vcpu->arch.comparecount_timer); in kvm_arch_vcpu_free()
431 kfree(vcpu->arch.guest_ebase); in kvm_arch_vcpu_free()
432 kfree(vcpu->arch.kseg0_commpage); in kvm_arch_vcpu_free()
444 return -ENOIOCTLCMD; in kvm_arch_vcpu_ioctl_set_guest_debug()
449 int r = -EINTR; in kvm_arch_vcpu_ioctl_run()
455 if (vcpu->mmio_needed) { in kvm_arch_vcpu_ioctl_run()
456 if (!vcpu->mmio_is_write) in kvm_arch_vcpu_ioctl_run()
458 vcpu->mmio_needed = 0; in kvm_arch_vcpu_ioctl_run()
461 if (run->immediate_exit) in kvm_arch_vcpu_ioctl_run()
472 * reordered ahead of the write to vcpu->mode, or we could miss a TLB in kvm_arch_vcpu_ioctl_run()
474 * mode and not needing an IPI. in kvm_arch_vcpu_ioctl_run()
476 smp_store_mb(vcpu->mode, IN_GUEST_MODE); in kvm_arch_vcpu_ioctl_run()
478 r = kvm_mips_callbacks->vcpu_run(run, vcpu); in kvm_arch_vcpu_ioctl_run()
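
The comment and the smp_store_mb() call above explain why entry to guest mode must publish vcpu->mode with a full barrier: a CPU that queues a TLB-flush request afterwards checks the mode to decide whether an IPI is needed, so if the store were reordered past the request check, a request could be missed by both sides. Below is a minimal, self-contained analogy of that handshake using C11 atomics; it is not the kernel code, only the ordering argument, and names such as vcpu_thread/requester_thread are invented for illustration.

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int mode;     /* 0 = outside guest, 1 = in guest (analogy of vcpu->mode) */
    static atomic_int request;  /* 1 = a TLB-flush request is pending (analogy)            */

    static void *vcpu_thread(void *arg)
    {
        (void)arg;
        /* Like smp_store_mb(vcpu->mode, IN_GUEST_MODE): publish the mode with
         * full ordering, then look for requests that arrived before entry. */
        atomic_store(&mode, 1);
        if (atomic_load(&request))
            printf("vcpu: request seen before entering guest, no IPI needed\n");
        return NULL;
    }

    static void *requester_thread(void *arg)
    {
        (void)arg;
        /* Post the request first, then check the mode to decide whether the
         * target has to be interrupted (the IPI in the comment above). */
        atomic_store(&request, 1);
        if (atomic_load(&mode) == 1)
            printf("requester: vcpu already in guest mode, send IPI\n");
        return NULL;
    }

    int main(void)
    {
        pthread_t a, b;

        pthread_create(&a, NULL, vcpu_thread, NULL);
        pthread_create(&b, NULL, requester_thread, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        return 0;
    }

With sequentially consistent stores and loads, at least one of the two threads always observes the other, so a request can never be lost silently; that is the property the kernel comment is protecting.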
494 int intr = (int)irq->irq; in kvm_vcpu_ioctl_interrupt()
497 if (intr == 3 || intr == -3 || intr == 4 || intr == -4) in kvm_vcpu_ioctl_interrupt()
498 kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu, in kvm_vcpu_ioctl_interrupt()
501 if (irq->cpu == -1) in kvm_vcpu_ioctl_interrupt()
504 dvcpu = vcpu->kvm->vcpus[irq->cpu]; in kvm_vcpu_ioctl_interrupt()
507 kvm_mips_callbacks->queue_io_int(dvcpu, irq); in kvm_vcpu_ioctl_interrupt()
509 } else if (intr == -2 || intr == -3 || intr == -4) { in kvm_vcpu_ioctl_interrupt()
510 kvm_mips_callbacks->dequeue_io_int(dvcpu, irq); in kvm_vcpu_ioctl_interrupt()
513 irq->cpu, irq->irq); in kvm_vcpu_ioctl_interrupt()
514 return -EINVAL; in kvm_vcpu_ioctl_interrupt()
517 dvcpu->arch.wait = 0; in kvm_vcpu_ioctl_interrupt()
519 if (swq_has_sleeper(&dvcpu->wq)) in kvm_vcpu_ioctl_interrupt()
520 swake_up_one(&dvcpu->wq); in kvm_vcpu_ioctl_interrupt()
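
kvm_vcpu_ioctl_interrupt() above treats a positive irq value (2, 3 or 4) as raising that line on the target VCPU and the matching negative value as lowering it, then clears arch.wait and wakes the VCPU if it is sleeping. A hedged userspace sketch of driving this through KVM_INTERRUPT on the VCPU file descriptor follows; KVM_INTERRUPT and struct kvm_mips_interrupt are the real UAPI names, but the field usage (cpu == -1 meaning "this VCPU") and the helper are written from memory and should be checked against the header.

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    /* Raise guest interrupt line 4 on this VCPU, then lower it again. */
    static int pulse_irq4(int vcpu_fd)
    {
        struct kvm_mips_interrupt irq = {
            .cpu = -1,   /* assumed: -1 routes to the VCPU the fd refers to */
            .irq = 4,    /* positive value: queue the I/O interrupt */
        };

        if (ioctl(vcpu_fd, KVM_INTERRUPT, &irq) < 0)
            return -1;

        irq.irq = -4;    /* negative value: dequeue the same line */
        return ioctl(vcpu_fd, KVM_INTERRUPT, &irq);
    }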
528 return -ENOIOCTLCMD; in kvm_arch_vcpu_ioctl_get_mpstate()
534 return -ENOIOCTLCMD; in kvm_arch_vcpu_ioctl_set_mpstate()
593 if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) { in kvm_mips_num_regs()
599 if (kvm_mips_guest_can_have_msa(&vcpu->arch)) in kvm_mips_num_regs()
601 ret += kvm_mips_callbacks->num_regs(vcpu); in kvm_mips_num_regs()
613 return -EFAULT; in kvm_mips_copy_reg_indices()
616 if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) { in kvm_mips_copy_reg_indices()
619 return -EFAULT; in kvm_mips_copy_reg_indices()
625 return -EFAULT; in kvm_mips_copy_reg_indices()
634 return -EFAULT; in kvm_mips_copy_reg_indices()
639 if (kvm_mips_guest_can_have_msa(&vcpu->arch)) { in kvm_mips_copy_reg_indices()
642 return -EFAULT; in kvm_mips_copy_reg_indices()
648 return -EFAULT; in kvm_mips_copy_reg_indices()
653 return kvm_mips_callbacks->copy_reg_indices(vcpu, indices); in kvm_mips_copy_reg_indices()
659 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_mips_get_reg()
660 struct mips_fpu_struct *fpu = &vcpu->arch.fpu; in kvm_mips_get_reg()
666 switch (reg->id) { in kvm_mips_get_reg()
669 v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0]; in kvm_mips_get_reg()
673 v = (long)vcpu->arch.hi; in kvm_mips_get_reg()
676 v = (long)vcpu->arch.lo; in kvm_mips_get_reg()
680 v = (long)vcpu->arch.pc; in kvm_mips_get_reg()
685 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) in kvm_mips_get_reg()
686 return -EINVAL; in kvm_mips_get_reg()
687 idx = reg->id - KVM_REG_MIPS_FPR_32(0); in kvm_mips_get_reg()
690 v = get_fpr32(&fpu->fpr[idx], 0); in kvm_mips_get_reg()
692 v = get_fpr32(&fpu->fpr[idx & ~1], idx & 1); in kvm_mips_get_reg()
695 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) in kvm_mips_get_reg()
696 return -EINVAL; in kvm_mips_get_reg()
697 idx = reg->id - KVM_REG_MIPS_FPR_64(0); in kvm_mips_get_reg()
700 return -EINVAL; in kvm_mips_get_reg()
701 v = get_fpr64(&fpu->fpr[idx], 0); in kvm_mips_get_reg()
704 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) in kvm_mips_get_reg()
705 return -EINVAL; in kvm_mips_get_reg()
709 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) in kvm_mips_get_reg()
710 return -EINVAL; in kvm_mips_get_reg()
711 v = fpu->fcr31; in kvm_mips_get_reg()
716 if (!kvm_mips_guest_has_msa(&vcpu->arch)) in kvm_mips_get_reg()
717 return -EINVAL; in kvm_mips_get_reg()
720 return -EINVAL; in kvm_mips_get_reg()
721 idx = reg->id - KVM_REG_MIPS_VEC_128(0); in kvm_mips_get_reg()
724 vs[0] = get_fpr64(&fpu->fpr[idx], 0); in kvm_mips_get_reg()
725 vs[1] = get_fpr64(&fpu->fpr[idx], 1); in kvm_mips_get_reg()
728 vs[0] = get_fpr64(&fpu->fpr[idx], 1); in kvm_mips_get_reg()
729 vs[1] = get_fpr64(&fpu->fpr[idx], 0); in kvm_mips_get_reg()
733 if (!kvm_mips_guest_has_msa(&vcpu->arch)) in kvm_mips_get_reg()
734 return -EINVAL; in kvm_mips_get_reg()
738 if (!kvm_mips_guest_has_msa(&vcpu->arch)) in kvm_mips_get_reg()
739 return -EINVAL; in kvm_mips_get_reg()
740 v = fpu->msacsr; in kvm_mips_get_reg()
745 ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v); in kvm_mips_get_reg()
750 if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) { in kvm_mips_get_reg()
751 u64 __user *uaddr64 = (u64 __user *)(long)reg->addr; in kvm_mips_get_reg()
754 } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) { in kvm_mips_get_reg()
755 u32 __user *uaddr32 = (u32 __user *)(long)reg->addr; in kvm_mips_get_reg()
759 } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) { in kvm_mips_get_reg()
760 void __user *uaddr = (void __user *)(long)reg->addr; in kvm_mips_get_reg()
762 return copy_to_user(uaddr, vs, 16) ? -EFAULT : 0; in kvm_mips_get_reg()
764 return -EINVAL; in kvm_mips_get_reg()
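
The KVM_REG_SIZE_MASK handling at the end of kvm_mips_get_reg() is what lets a single interface serve 32-bit, 64-bit and 128-bit registers: the width is encoded in reg->id and the value is copied to the user buffer at reg->addr. A hedged userspace sketch of the caller's side follows; struct kvm_one_reg, KVM_GET_ONE_REG and KVM_REG_MIPS_PC are real UAPI names, while the helper is illustrative.

    #include <linux/kvm.h>
    #include <stdint.h>
    #include <sys/ioctl.h>

    /* Read the guest PC via the ONE_REG interface; KVM_REG_MIPS_PC encodes a
     * 64-bit width, so the kernel copies 8 bytes to the address passed in. */
    static int read_guest_pc(int vcpu_fd, uint64_t *pc)
    {
        struct kvm_one_reg reg = {
            .id   = KVM_REG_MIPS_PC,
            .addr = (uintptr_t)pc,
        };

        return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
    }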
771 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_mips_set_reg()
772 struct mips_fpu_struct *fpu = &vcpu->arch.fpu; in kvm_mips_set_reg()
777 if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) { in kvm_mips_set_reg()
778 u64 __user *uaddr64 = (u64 __user *)(long)reg->addr; in kvm_mips_set_reg()
781 return -EFAULT; in kvm_mips_set_reg()
782 } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) { in kvm_mips_set_reg()
783 u32 __user *uaddr32 = (u32 __user *)(long)reg->addr; in kvm_mips_set_reg()
787 return -EFAULT; in kvm_mips_set_reg()
789 } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) { in kvm_mips_set_reg()
790 void __user *uaddr = (void __user *)(long)reg->addr; in kvm_mips_set_reg()
792 return copy_from_user(vs, uaddr, 16) ? -EFAULT : 0; in kvm_mips_set_reg()
794 return -EINVAL; in kvm_mips_set_reg()
797 switch (reg->id) { in kvm_mips_set_reg()
803 vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v; in kvm_mips_set_reg()
807 vcpu->arch.hi = v; in kvm_mips_set_reg()
810 vcpu->arch.lo = v; in kvm_mips_set_reg()
814 vcpu->arch.pc = v; in kvm_mips_set_reg()
819 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) in kvm_mips_set_reg()
820 return -EINVAL; in kvm_mips_set_reg()
821 idx = reg->id - KVM_REG_MIPS_FPR_32(0); in kvm_mips_set_reg()
824 set_fpr32(&fpu->fpr[idx], 0, v); in kvm_mips_set_reg()
826 set_fpr32(&fpu->fpr[idx & ~1], idx & 1, v); in kvm_mips_set_reg()
829 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) in kvm_mips_set_reg()
830 return -EINVAL; in kvm_mips_set_reg()
831 idx = reg->id - KVM_REG_MIPS_FPR_64(0); in kvm_mips_set_reg()
834 return -EINVAL; in kvm_mips_set_reg()
835 set_fpr64(&fpu->fpr[idx], 0, v); in kvm_mips_set_reg()
838 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) in kvm_mips_set_reg()
839 return -EINVAL; in kvm_mips_set_reg()
840 /* Read-only */ in kvm_mips_set_reg()
843 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) in kvm_mips_set_reg()
844 return -EINVAL; in kvm_mips_set_reg()
845 fpu->fcr31 = v; in kvm_mips_set_reg()
850 if (!kvm_mips_guest_has_msa(&vcpu->arch)) in kvm_mips_set_reg()
851 return -EINVAL; in kvm_mips_set_reg()
852 idx = reg->id - KVM_REG_MIPS_VEC_128(0); in kvm_mips_set_reg()
855 set_fpr64(&fpu->fpr[idx], 0, vs[0]); in kvm_mips_set_reg()
856 set_fpr64(&fpu->fpr[idx], 1, vs[1]); in kvm_mips_set_reg()
859 set_fpr64(&fpu->fpr[idx], 1, vs[0]); in kvm_mips_set_reg()
860 set_fpr64(&fpu->fpr[idx], 0, vs[1]); in kvm_mips_set_reg()
864 if (!kvm_mips_guest_has_msa(&vcpu->arch)) in kvm_mips_set_reg()
865 return -EINVAL; in kvm_mips_set_reg()
866 /* Read-only */ in kvm_mips_set_reg()
869 if (!kvm_mips_guest_has_msa(&vcpu->arch)) in kvm_mips_set_reg()
870 return -EINVAL; in kvm_mips_set_reg()
871 fpu->msacsr = v; in kvm_mips_set_reg()
876 return kvm_mips_callbacks->set_one_reg(vcpu, reg, v); in kvm_mips_set_reg()
886 if (!kvm_vm_ioctl_check_extension(vcpu->kvm, cap->cap)) in kvm_vcpu_ioctl_enable_cap()
887 return -EINVAL; in kvm_vcpu_ioctl_enable_cap()
888 if (cap->flags) in kvm_vcpu_ioctl_enable_cap()
889 return -EINVAL; in kvm_vcpu_ioctl_enable_cap()
890 if (cap->args[0]) in kvm_vcpu_ioctl_enable_cap()
891 return -EINVAL; in kvm_vcpu_ioctl_enable_cap()
893 switch (cap->cap) { in kvm_vcpu_ioctl_enable_cap()
895 vcpu->arch.fpu_enabled = true; in kvm_vcpu_ioctl_enable_cap()
898 vcpu->arch.msa_enabled = true; in kvm_vcpu_ioctl_enable_cap()
901 r = -EINVAL; in kvm_vcpu_ioctl_enable_cap()
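
kvm_vcpu_ioctl_enable_cap() above rejects any flags or arguments and simply sets the per-VCPU fpu_enabled or msa_enabled flag. A short userspace sketch of enabling the guest FPU through KVM_ENABLE_CAP on the VCPU fd; KVM_ENABLE_CAP and KVM_CAP_MIPS_FPU are real names, the helper is illustrative.

    #include <linux/kvm.h>
    #include <string.h>
    #include <sys/ioctl.h>

    /* Enable the guest FPU on one VCPU.  Per the checks above, flags and
     * args[0] must both be zero or the ioctl fails with EINVAL. */
    static int enable_guest_fpu(int vcpu_fd)
    {
        struct kvm_enable_cap cap;

        memset(&cap, 0, sizeof(cap));
        cap.cap = KVM_CAP_MIPS_FPU;

        return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
    }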
911 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_async_ioctl()
918 return -EFAULT; in kvm_arch_vcpu_async_ioctl()
919 kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__, in kvm_arch_vcpu_async_ioctl()
925 return -ENOIOCTLCMD; in kvm_arch_vcpu_async_ioctl()
931 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_ioctl()
942 r = -EFAULT; in kvm_arch_vcpu_ioctl()
956 r = -EFAULT; in kvm_arch_vcpu_ioctl()
963 r = -E2BIG; in kvm_arch_vcpu_ioctl()
966 r = kvm_mips_copy_reg_indices(vcpu, user_list->reg); in kvm_arch_vcpu_ioctl()
972 r = -EFAULT; in kvm_arch_vcpu_ioctl()
979 r = -ENOIOCTLCMD; in kvm_arch_vcpu_ioctl()
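
The -E2BIG branch above belongs to the KVM_GET_REG_LIST handler and follows the usual two-call pattern: if the caller's n is smaller than the number of registers, the kernel writes the real count back and fails with E2BIG; a second call with a large enough array then receives the ids that kvm_mips_copy_reg_indices() emits. A hedged userspace sketch of that pattern (struct kvm_reg_list and KVM_GET_REG_LIST are real; the helper is illustrative):

    #include <linux/kvm.h>
    #include <errno.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>

    /* Enumerate the ONE_REG ids this VCPU exposes. */
    static struct kvm_reg_list *get_reg_list(int vcpu_fd)
    {
        struct kvm_reg_list probe = { .n = 0 };
        struct kvm_reg_list *list;

        /* First call is expected to fail with E2BIG and fill in probe.n. */
        if (ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe) == 0 || errno != E2BIG)
            return NULL;

        list = calloc(1, sizeof(*list) + probe.n * sizeof(__u64));
        if (!list)
            return NULL;

        list->n = probe.n;
        if (ioctl(vcpu_fd, KVM_GET_REG_LIST, list) < 0) {
            free(list);
            return NULL;
        }
        return list;
    }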
987 * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
991 * Steps 1-4 below provide general overview of dirty page logging. See
994 * We call kvm_get_dirty_log_protect() to handle steps 1-3, upon return we
1012 mutex_lock(&kvm->slots_lock); in kvm_vm_ioctl_get_dirty_log()
1018 memslot = id_to_memslot(slots, log->slot); in kvm_vm_ioctl_get_dirty_log()
1021 kvm_mips_callbacks->flush_shadow_memslot(kvm, memslot); in kvm_vm_ioctl_get_dirty_log()
1024 mutex_unlock(&kvm->slots_lock); in kvm_vm_ioctl_get_dirty_log()
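
kvm_vm_ioctl_get_dirty_log() above hands steps 1-3 to the generic kvm_get_dirty_log_protect() and, if anything was dirty, flushes the shadow mappings of the memslot before returning. A hedged userspace sketch of consuming the log for one slot follows; KVM_GET_DIRTY_LOG and struct kvm_dirty_log are real, while the slot number, the sizing rule (one bit per page, rounded up to 64-bit units) and the helper are illustrative assumptions.

    #include <linux/kvm.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>

    /* Fetch the dirty bitmap for memslot 0; npages is the slot size in pages. */
    static void *get_dirty_bitmap(int vm_fd, unsigned long npages)
    {
        size_t len = ((npages + 63) / 64) * 8;   /* 1 bit per page, 64-bit aligned */
        void *bitmap = calloc(1, len);
        struct kvm_dirty_log log = {
            .slot         = 0,
            .dirty_bitmap = bitmap,
        };

        if (bitmap && ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log) < 0) {
            free(bitmap);
            return NULL;
        }
        return bitmap;
    }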
1034 r = -ENOIOCTLCMD; in kvm_arch_vm_ioctl()
1044 return -EEXIST; in kvm_arch_init()
1058 return -ENOIOCTLCMD; in kvm_arch_vcpu_ioctl_get_sregs()
1064 return -ENOIOCTLCMD; in kvm_arch_vcpu_ioctl_set_sregs()
1073 return -ENOIOCTLCMD; in kvm_arch_vcpu_ioctl_get_fpu()
1078 return -ENOIOCTLCMD; in kvm_arch_vcpu_ioctl_set_fpu()
1127 r = kvm_mips_callbacks->check_extension(kvm, ext); in kvm_vm_ioctl_check_extension()
1136 kvm_read_c0_guest_cause(vcpu->arch.cop0) & C_TI; in kvm_cpu_has_pending_timer()
1145 return -1; in kvm_arch_vcpu_dump_regs()
1148 kvm_debug("\tpc = 0x%08lx\n", vcpu->arch.pc); in kvm_arch_vcpu_dump_regs()
1149 kvm_debug("\texceptions: %08lx\n", vcpu->arch.pending_exceptions); in kvm_arch_vcpu_dump_regs()
1153 vcpu->arch.gprs[i], in kvm_arch_vcpu_dump_regs()
1154 vcpu->arch.gprs[i + 1], in kvm_arch_vcpu_dump_regs()
1155 vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]); in kvm_arch_vcpu_dump_regs()
1157 kvm_debug("\thi: 0x%08lx\n", vcpu->arch.hi); in kvm_arch_vcpu_dump_regs()
1158 kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo); in kvm_arch_vcpu_dump_regs()
1160 cop0 = vcpu->arch.cop0; in kvm_arch_vcpu_dump_regs()
1176 for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++) in kvm_arch_vcpu_ioctl_set_regs()
1177 vcpu->arch.gprs[i] = regs->gpr[i]; in kvm_arch_vcpu_ioctl_set_regs()
1178 vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */ in kvm_arch_vcpu_ioctl_set_regs()
1179 vcpu->arch.hi = regs->hi; in kvm_arch_vcpu_ioctl_set_regs()
1180 vcpu->arch.lo = regs->lo; in kvm_arch_vcpu_ioctl_set_regs()
1181 vcpu->arch.pc = regs->pc; in kvm_arch_vcpu_ioctl_set_regs()
1193 for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++) in kvm_arch_vcpu_ioctl_get_regs()
1194 regs->gpr[i] = vcpu->arch.gprs[i]; in kvm_arch_vcpu_ioctl_get_regs()
1196 regs->hi = vcpu->arch.hi; in kvm_arch_vcpu_ioctl_get_regs()
1197 regs->lo = vcpu->arch.lo; in kvm_arch_vcpu_ioctl_get_regs()
1198 regs->pc = vcpu->arch.pc; in kvm_arch_vcpu_ioctl_get_regs()
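
kvm_arch_vcpu_ioctl_get_regs()/..._set_regs() back the plain KVM_GET_REGS/KVM_SET_REGS ioctls, with $zero forced back to 0 on the set path. A hedged userspace sketch of reading a few registers follows; the MIPS struct kvm_regs layout (gpr[32], hi, lo, pc as 64-bit fields) is recalled from the UAPI header and should be treated as an assumption, as should the helper.

    #include <linux/kvm.h>
    #include <stdio.h>
    #include <sys/ioctl.h>

    /* Dump a few guest GPRs and the PC using KVM_GET_REGS. */
    static int dump_regs(int vcpu_fd)
    {
        struct kvm_regs regs;

        if (ioctl(vcpu_fd, KVM_GET_REGS, &regs) < 0)
            return -1;

        printf("pc=0x%llx sp=0x%llx ra=0x%llx\n",
               (unsigned long long)regs.pc,
               (unsigned long long)regs.gpr[29],   /* $sp */
               (unsigned long long)regs.gpr[31]);  /* $ra */
        return 0;
    }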
1208 kvm_mips_callbacks->queue_timer_int(vcpu); in kvm_mips_comparecount_func()
1210 vcpu->arch.wait = 0; in kvm_mips_comparecount_func()
1211 if (swq_has_sleeper(&vcpu->wq)) in kvm_mips_comparecount_func()
1212 swake_up_one(&vcpu->wq); in kvm_mips_comparecount_func()
1229 err = kvm_mips_callbacks->vcpu_init(vcpu); in kvm_arch_vcpu_init()
1233 hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC, in kvm_arch_vcpu_init()
1235 vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup; in kvm_arch_vcpu_init()
1241 kvm_mips_callbacks->vcpu_uninit(vcpu); in kvm_arch_vcpu_uninit()
1253 return kvm_mips_callbacks->vcpu_setup(vcpu); in kvm_arch_vcpu_setup()
1272 u32 cause = vcpu->arch.host_cp0_cause; in kvm_mips_handle_exit()
1274 u32 __user *opc = (u32 __user *) vcpu->arch.pc; in kvm_mips_handle_exit()
1275 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; in kvm_mips_handle_exit()
1280 vcpu->mode = OUTSIDE_GUEST_MODE; in kvm_mips_handle_exit()
1282 /* re-enable HTW before enabling interrupts */ in kvm_mips_handle_exit()
1287 run->exit_reason = KVM_EXIT_UNKNOWN; in kvm_mips_handle_exit()
1288 run->ready_for_interrupt_injection = 1; in kvm_mips_handle_exit()
1312 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvm_mips_handle_exit()
1320 kvm_debug("[%d]EXCCODE_INT @ %p\n", vcpu->vcpu_id, opc); in kvm_mips_handle_exit()
1322 ++vcpu->stat.int_exits; in kvm_mips_handle_exit()
1333 ++vcpu->stat.cop_unusable_exits; in kvm_mips_handle_exit()
1334 ret = kvm_mips_callbacks->handle_cop_unusable(vcpu); in kvm_mips_handle_exit()
1336 if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN) in kvm_mips_handle_exit()
1341 ++vcpu->stat.tlbmod_exits; in kvm_mips_handle_exit()
1342 ret = kvm_mips_callbacks->handle_tlb_mod(vcpu); in kvm_mips_handle_exit()
1347 cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc, in kvm_mips_handle_exit()
1350 ++vcpu->stat.tlbmiss_st_exits; in kvm_mips_handle_exit()
1351 ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu); in kvm_mips_handle_exit()
1358 ++vcpu->stat.tlbmiss_ld_exits; in kvm_mips_handle_exit()
1359 ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu); in kvm_mips_handle_exit()
1363 ++vcpu->stat.addrerr_st_exits; in kvm_mips_handle_exit()
1364 ret = kvm_mips_callbacks->handle_addr_err_st(vcpu); in kvm_mips_handle_exit()
1368 ++vcpu->stat.addrerr_ld_exits; in kvm_mips_handle_exit()
1369 ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu); in kvm_mips_handle_exit()
1373 ++vcpu->stat.syscall_exits; in kvm_mips_handle_exit()
1374 ret = kvm_mips_callbacks->handle_syscall(vcpu); in kvm_mips_handle_exit()
1378 ++vcpu->stat.resvd_inst_exits; in kvm_mips_handle_exit()
1379 ret = kvm_mips_callbacks->handle_res_inst(vcpu); in kvm_mips_handle_exit()
1383 ++vcpu->stat.break_inst_exits; in kvm_mips_handle_exit()
1384 ret = kvm_mips_callbacks->handle_break(vcpu); in kvm_mips_handle_exit()
1388 ++vcpu->stat.trap_inst_exits; in kvm_mips_handle_exit()
1389 ret = kvm_mips_callbacks->handle_trap(vcpu); in kvm_mips_handle_exit()
1393 ++vcpu->stat.msa_fpe_exits; in kvm_mips_handle_exit()
1394 ret = kvm_mips_callbacks->handle_msa_fpe(vcpu); in kvm_mips_handle_exit()
1398 ++vcpu->stat.fpe_exits; in kvm_mips_handle_exit()
1399 ret = kvm_mips_callbacks->handle_fpe(vcpu); in kvm_mips_handle_exit()
1403 ++vcpu->stat.msa_disabled_exits; in kvm_mips_handle_exit()
1404 ret = kvm_mips_callbacks->handle_msa_disabled(vcpu); in kvm_mips_handle_exit()
1409 ret = kvm_mips_callbacks->handle_guest_exit(vcpu); in kvm_mips_handle_exit()
1419 kvm_read_c0_guest_status(vcpu->arch.cop0)); in kvm_mips_handle_exit()
1421 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvm_mips_handle_exit()
1439 run->exit_reason = KVM_EXIT_INTR; in kvm_mips_handle_exit()
1440 ret = (-EINTR << 2) | RESUME_HOST; in kvm_mips_handle_exit()
1441 ++vcpu->stat.signal_exits; in kvm_mips_handle_exit()
1451 * callback is not reordered ahead of the write to vcpu->mode, in kvm_mips_handle_exit()
1453 * the VCPU as outside of guest mode and not needing an IPI. in kvm_mips_handle_exit()
1455 smp_store_mb(vcpu->mode, IN_GUEST_MODE); in kvm_mips_handle_exit()
1457 kvm_mips_callbacks->vcpu_reenter(run, vcpu); in kvm_mips_handle_exit()
1468 if (kvm_mips_guest_has_fpu(&vcpu->arch) && in kvm_mips_handle_exit()
1470 __kvm_restore_fcsr(&vcpu->arch); in kvm_mips_handle_exit()
1472 if (kvm_mips_guest_has_msa(&vcpu->arch) && in kvm_mips_handle_exit()
1474 __kvm_restore_msacsr(&vcpu->arch); in kvm_mips_handle_exit()
1487 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_own_fpu()
1496 * FR=0 FPU state, and we don't want to hit reserved instruction in kvm_own_fpu()
1505 vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) in kvm_own_fpu()
1520 if (!(vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) { in kvm_own_fpu()
1521 __kvm_restore_fpu(&vcpu->arch); in kvm_own_fpu()
1522 vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU; in kvm_own_fpu()
1535 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_own_msa()
1544 if (kvm_mips_guest_has_fpu(&vcpu->arch)) { in kvm_own_msa()
1552 (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU | in kvm_own_msa()
1567 switch (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA)) { in kvm_own_msa()
1572 __kvm_restore_msa_upper(&vcpu->arch); in kvm_own_msa()
1573 vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA; in kvm_own_msa()
1578 __kvm_restore_msa(&vcpu->arch); in kvm_own_msa()
1579 vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA; in kvm_own_msa()
1580 if (kvm_mips_guest_has_fpu(&vcpu->arch)) in kvm_own_msa()
1581 vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU; in kvm_own_msa()
1598 if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) { in kvm_drop_fpu()
1601 vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_MSA; in kvm_drop_fpu()
1603 if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) { in kvm_drop_fpu()
1606 vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU; in kvm_drop_fpu()
1618 * This is why we explicitly re-enable the hardware before saving. in kvm_lose_fpu()
1622 if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) { in kvm_lose_fpu()
1628 __kvm_save_msa(&vcpu->arch); in kvm_lose_fpu()
1633 if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) { in kvm_lose_fpu()
1637 vcpu->arch.aux_inuse &= ~(KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA); in kvm_lose_fpu()
1638 } else if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) { in kvm_lose_fpu()
1644 __kvm_save_fpu(&vcpu->arch); in kvm_lose_fpu()
1645 vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU; in kvm_lose_fpu()
1664 struct pt_regs *regs = args->regs; in kvm_mips_csr_die_notify()
1672 if (!(current->flags & PF_VCPU)) in kvm_mips_csr_die_notify()