
Lines Matching +full:is +full:- +full:wired

2  * This file is subject to the terms and conditions of the GNU General Public
62 * write_gc0_ebase_64() is no longer UNDEFINED since R6. in kvm_vz_write_gc0_ebase()
115 if (kvm_mips_guest_has_msa(&vcpu->arch)) in kvm_vz_config5_guest_wrmask()
119 * Permit guest FPU mode changes if FPU is enabled and the relevant in kvm_vz_config5_guest_wrmask()
122 if (kvm_mips_guest_has_fpu(&vcpu->arch)) { in kvm_vz_config5_guest_wrmask()
140 * Config1: M, [MMUSize-1, C2, MD, PC, WR, CA], FP
157 /* Permit FPU to be present if FPU is supported */ in kvm_vz_config1_user_wrmask()
158 if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) in kvm_vz_config1_user_wrmask()
174 /* Permit MSA to be present if MSA is supported */ in kvm_vz_config3_user_wrmask()
175 if (kvm_mips_guest_can_have_msa(&vcpu->arch)) in kvm_vz_config3_user_wrmask()
205 set_bit(priority, &vcpu->arch.pending_exceptions); in kvm_vz_queue_irq()
206 clear_bit(priority, &vcpu->arch.pending_exceptions_clr); in kvm_vz_queue_irq()
211 clear_bit(priority, &vcpu->arch.pending_exceptions); in kvm_vz_dequeue_irq()
212 set_bit(priority, &vcpu->arch.pending_exceptions_clr); in kvm_vz_dequeue_irq()
218 * timer expiry is asynchronous to vcpu execution, therefore defer guest in kvm_vz_queue_timer_int_cb()
227 * timer expiry is asynchronous to vcpu execution, therefore defer guest in kvm_vz_dequeue_timer_int_cb()
236 int intr = (int)irq->irq; in kvm_vz_queue_io_int_cb()
248 int intr = (int)irq->irq; in kvm_vz_dequeue_io_int_cb()
254 kvm_vz_dequeue_irq(vcpu, kvm_irq_to_priority(-intr)); in kvm_vz_dequeue_io_int_cb()
282 clear_bit(priority, &vcpu->arch.pending_exceptions); in kvm_vz_irq_deliver_cb()
325 clear_bit(priority, &vcpu->arch.pending_exceptions_clr); in kvm_vz_irq_clear_cb()
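The interrupt plumbing above keeps two bitmaps per vcpu: pending_exceptions holds interrupt lines to assert in the guest, pending_exceptions_clr lines to deassert; queuing in one direction cancels any outstanding request in the other, and the deliver/clear callbacks drop a bit once it has been propagated into guest CP0_Cause. A standalone model of the pairing (field and function names are ours):

    /* Model of the paired request bitmaps used above. */
    struct irq_requests {
        unsigned long pending;      /* interrupt lines to raise */
        unsigned long pending_clr;  /* interrupt lines to lower */
    };

    static void queue_irq(struct irq_requests *r, unsigned int priority)
    {
        r->pending     |=   1ul << priority;
        r->pending_clr &= ~(1ul << priority);  /* cancel the opposite request */
    }

    static void dequeue_irq(struct irq_requests *r, unsigned int priority)
    {
        r->pending     &= ~(1ul << priority);
        r->pending_clr |=   1ul << priority;
    }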
334 * kvm_vz_should_use_htimer() - Find whether to use the VZ hard guest timer.
347 if (mips_hpt_frequency != vcpu->arch.count_hz) in kvm_vz_should_use_htimer()
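The hard guest timer is only viable when the guest's chosen CP0_Count frequency equals the root timer frequency, because CP0_GTOffset can shift the guest's view of the count but cannot rescale it. A sketch of the decision (only the frequency test is confirmed by the matched line; further capability checks are elided):

    #include <stdint.h>

    /* Hard timer only works at matching frequencies: CP0_GTOffset can
     * offset the guest count, not rescale it. */
    static int should_use_htimer(uint32_t root_hz, uint32_t guest_hz)
    {
        if (root_hz != guest_hz)
            return 0;   /* fall back to hrtimer-based soft emulation */
        /* ... further hardware capability checks elided ... */
        return 1;
    }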
358 * _kvm_vz_restore_stimer() - Restore soft timer state.
373 write_c0_gtoffset(compare - read_c0_count()); in _kvm_vz_restore_stimer()
380 * _kvm_vz_restore_htimer() - Restore hard timer state.
396 * Freeze the soft-timer and sync the guest CP0_Count with it. We do in _kvm_vz_restore_htimer()
401 write_c0_gtoffset(start_count - read_c0_count()); in _kvm_vz_restore_htimer()
410 * interrupts if we're not careful. Detect if a timer interrupt is due in _kvm_vz_restore_htimer()
415 if (after_count - start_count > compare - start_count - 1) in _kvm_vz_restore_htimer()
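Restoring the hard timer is not atomic: between snapshotting the count (lines 396/401) and programming CP0_GTOffset and CP0_Compare, the counter keeps running, so a compare match could slip through unnoticed. Line 415 (mirrored at line 503 in _kvm_vz_save_htimer()) detects this with wrap-safe unsigned arithmetic: measuring both the elapsed distance and the distance to compare from the same snapshot keeps the test correct even across a 32-bit counter wrap. A runnable demonstration:

    #include <stdint.h>
    #include <stdio.h>

    /* True iff the counter passed 'compare' somewhere in (start, after],
     * computed modulo 2^32 so a wrapped counter is handled. */
    static int timer_irq_due(uint32_t start, uint32_t after, uint32_t compare)
    {
        return after - start > compare - start - 1u;
    }

    int main(void)
    {
        printf("%d\n", timer_irq_due(100, 200, 150));           /* 1: passed */
        printf("%d\n", timer_irq_due(0xfffffff0u, 0x10u, 4u));  /* 1: passed across wrap */
        printf("%d\n", timer_irq_due(100, 200, 300));           /* 0: not reached yet */
        return 0;
    }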
420 * kvm_vz_restore_timer() - Restore timer state.
427 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_vz_restore_timer()
438 * kvm_vz_acquire_htimer() - Switch to hard timer state.
461 * _kvm_vz_save_htimer() - Switch to software emulation of guest timer.
483 * at which no pending timer interrupt is missing. in _kvm_vz_save_htimer()
491 * Record a final CP0_Count which we will transfer to the soft-timer. in _kvm_vz_save_htimer()
492 * This is recorded *after* saving CP0_Cause, so we don't get any timer in _kvm_vz_save_htimer()
503 if (end_count - before_count > compare - before_count - 1) in _kvm_vz_save_htimer()
507 * Restore soft-timer, ignoring a small amount of negative drift due to in _kvm_vz_save_htimer()
510 kvm_mips_restore_hrtimer(vcpu, before_time, end_count, -0x10000); in _kvm_vz_save_htimer()
514 * kvm_vz_save_timer() - Save guest timer state.
522 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_vz_save_timer()
537 /* save timer-related state to VCPU context */ in kvm_vz_save_timer()
543 * kvm_vz_lose_htimer() - Ensure hard guest timer is not in use.
569 * is_eva_access() - Find whether an instruction is an EVA memory accessor.
570 * @inst: 32-bit instruction encoding.
608 * is_eva_am_mapped() - Find whether an access mode is mapped.
610 * @am: 3-bit encoded access mode.
634 * - 6 110 0 0 in is_eva_am_mapped()
667 opc = (u32 *)vcpu->arch.pc; in is_eva_am_mapped()
668 if (vcpu->arch.host_cp0_cause & CAUSEF_BD) in is_eva_am_mapped()
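is_eva_am_mapped() answers whether a segment's 3-bit access mode counts as mapped; the table row surviving above (am 6, binary 110, yielding 0 and 0) is one entry of that decision table. The opc/CAUSEF_BD lines show the extra wrinkle: the faulting instruction must be fetched (one word past opc when the fault hit a branch delay slot) to see whether it is an EVA accessor, which uses an alternate access mode. One compact way to hold such a table, as a hypothetical sketch (the packed-constant layout is invented for illustration, not the kernel's):

    #include <stdint.h>

    /* Hypothetical nibble-per-mode table: for each 3-bit access mode,
     * bit 0 of its nibble answers "mapped?" for SegCtl.EU=0 and bit 1
     * answers it for EU=1. */
    static int am_is_mapped(uint32_t table, unsigned int am, unsigned int eu)
    {
        return (table >> (am * 4 + eu)) & 1;
    }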
680 * kvm_vz_gva_to_gpa() - Convert valid GVA to GPA.
685 * Convert a guest virtual address (GVA) which is valid according to the guest
689 * -errno on failure.
698 /* Handle canonical 32-bit virtual address */ in kvm_vz_gva_to_gpa()
784 * kvm_vz_badvaddr_to_gpa() - Convert GVA BadVAddr from root exception to GPA.
796 * -errno on failure.
801 unsigned int gexccode = (vcpu->arch.host_cp0_guestctl0 & in kvm_vz_badvaddr_to_gpa()
804 /* If BadVAddr is GPA, then all is well in the world */ in kvm_vz_badvaddr_to_gpa()
813 return -EINVAL; in kvm_vz_badvaddr_to_gpa()
815 /* ... and we need to perform the GVA->GPA translation in software */ in kvm_vz_badvaddr_to_gpa()
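kvm_vz_badvaddr_to_gpa() classifies what root CP0_BadVAddr holds after a guest exit: for a GPA-type code in GuestCtl0.GExcCode the hardware already supplied a guest physical address; other exit types either do not leave a usable GVA at all (the -EINVAL path at line 813) or require pushing the GVA through kvm_vz_gva_to_gpa() in software. A paraphrased sketch of that decision, with the exact guard conditions elided (the GExcCode extraction matches the matched lines):

    /* Sketch, paraphrasing the flow above; error paths abbreviated. */
    static int badvaddr_to_gpa(struct kvm_vcpu *vcpu, unsigned long badvaddr,
                               unsigned long *gpa)
    {
        unsigned int gexccode = (vcpu->arch.host_cp0_guestctl0 &
                                 MIPS_GCTL0_GEXC) >> MIPS_GCTL0_GEXC_SHIFT;

        /* If BadVAddr is GPA, then all is well in the world */
        if (gexccode == MIPS_GCTL0_GEXC_GPA) {
            *gpa = badvaddr;
            return 0;
        }

        /* ... reject exit types whose BadVAddr is not a valid GVA ... */

        /* ... and we need to perform the GVA->GPA translation in software */
        return kvm_vz_gva_to_gpa(vcpu, badvaddr, gpa);
    }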
821 u32 *opc = (u32 *) vcpu->arch.pc; in kvm_trap_vz_no_handler()
822 u32 cause = vcpu->arch.host_cp0_cause; in kvm_trap_vz_no_handler()
824 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; in kvm_trap_vz_no_handler()
838 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvm_trap_vz_no_handler()
868 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_write_maari()
872 kvm_write_sw_gc0_maari(cop0, ARRAY_SIZE(vcpu->arch.maar) - 1); in kvm_write_maari()
873 else if (val < ARRAY_SIZE(vcpu->arch.maar)) in kvm_write_maari()
881 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_vz_gpsi_cop0()
888 * Update PC and hold onto current PC in case there is in kvm_vz_gpsi_cop0()
891 curr_pc = vcpu->arch.pc; in kvm_vz_gpsi_cop0()
913 cop0->stat[rd][sel]++; in kvm_vz_gpsi_cop0()
934 ARRAY_SIZE(vcpu->arch.maar)); in kvm_vz_gpsi_cop0()
935 val = vcpu->arch.maar[ in kvm_vz_gpsi_cop0()
953 val = cop0->reg[rd][sel]; in kvm_vz_gpsi_cop0()
957 val = cop0->reg[rd][sel]; in kvm_vz_gpsi_cop0()
968 vcpu->arch.gprs[rt] = val; in kvm_vz_gpsi_cop0()
979 cop0->stat[rd][sel]++; in kvm_vz_gpsi_cop0()
981 val = vcpu->arch.gprs[rt]; in kvm_vz_gpsi_cop0()
989 kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]); in kvm_vz_gpsi_cop0()
993 vcpu->arch.gprs[rt], in kvm_vz_gpsi_cop0()
1013 ARRAY_SIZE(vcpu->arch.maar)); in kvm_vz_gpsi_cop0()
1014 vcpu->arch.maar[kvm_read_sw_gc0_maari(cop0)] = in kvm_vz_gpsi_cop0()
1023 cop0->reg[rd][sel] = (int)val; in kvm_vz_gpsi_cop0()
1070 vcpu->arch.pc = curr_pc; in kvm_vz_gpsi_cop0()
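The pattern recurring through these emulation helpers is: snapshot vcpu->arch.pc, advance it past the trapping instruction up front, then roll back if emulation fails so the guest re-executes the instruction. A kernel-style sketch of the idiom (update_pc() and EMULATE_FAIL are the real MIPS KVM names; emulate_body() is a hypothetical stand-in):

    static enum emulation_result emulate_one(struct kvm_vcpu *vcpu, u32 cause)
    {
        unsigned long curr_pc = vcpu->arch.pc;  /* hold onto current PC */
        enum emulation_result er;

        /* Advance past the trapping instruction first (this also
         * resolves branch delay slots via CP0_Cause.BD). */
        er = update_pc(vcpu, cause);
        if (er == EMULATE_FAIL)
            return er;

        er = emulate_body(vcpu);                /* hypothetical helper */
        if (er == EMULATE_FAIL)
            vcpu->arch.pc = curr_pc;            /* roll back on error */
        return er;
    }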
1083 struct kvm_vcpu_arch *arch = &vcpu->arch; in kvm_vz_gpsi_cache()
1087 * Update PC and hold onto current PC in case there is in kvm_vz_gpsi_cache()
1090 curr_pc = vcpu->arch.pc; in kvm_vz_gpsi_cache()
1104 va = arch->gprs[base] + offset; in kvm_vz_gpsi_cache()
1107 cache, op, base, arch->gprs[base], offset); in kvm_vz_gpsi_cache()
1136 curr_pc, vcpu->arch.gprs[31], cache, op, base, arch->gprs[base], in kvm_vz_gpsi_cache()
1139 vcpu->arch.pc = curr_pc; in kvm_vz_gpsi_cache()
1155 * Update PC and hold onto current PC in case there is in kvm_vz_gpsi_lwc2()
1158 curr_pc = vcpu->arch.pc; in kvm_vz_gpsi_lwc2()
1167 ++vcpu->stat.vz_cpucfg_exits; in kvm_vz_gpsi_lwc2()
1168 hostcfg = read_cpucfg(vcpu->arch.gprs[rs]); in kvm_vz_gpsi_lwc2()
1170 switch (vcpu->arch.gprs[rs]) { in kvm_vz_gpsi_lwc2()
1172 vcpu->arch.gprs[rd] = 0x14c000; in kvm_vz_gpsi_lwc2()
1178 vcpu->arch.gprs[rd] = hostcfg; in kvm_vz_gpsi_lwc2()
1183 vcpu->arch.gprs[rd] = hostcfg; in kvm_vz_gpsi_lwc2()
1186 vcpu->arch.gprs[rd] = hostcfg; in kvm_vz_gpsi_lwc2()
1190 vcpu->arch.gprs[rd] = 0; in kvm_vz_gpsi_lwc2()
1197 inst.loongson3_lscsr_format.fr, vcpu->arch.gprs[rs], curr_pc); in kvm_vz_gpsi_lwc2()
1207 vcpu->arch.pc = curr_pc; in kvm_vz_gpsi_lwc2()
1218 struct kvm_vcpu_arch *arch = &vcpu->arch; in kvm_trap_vz_handle_gpsi()
1265 arch->gprs[rt] = in kvm_trap_vz_handle_gpsi()
1275 KVM_TRACE_HWR(rd, sel), arch->gprs[rt]); in kvm_trap_vz_handle_gpsi()
1300 struct kvm_vcpu_arch *arch = &vcpu->arch; in kvm_trap_vz_handle_gsfc()
1320 unsigned int val = arch->gprs[rt]; in kvm_trap_vz_handle_gsfc()
1328 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) in kvm_trap_vz_handle_gsfc()
1343 * FPU and Vector register state is made in kvm_trap_vz_handle_gsfc()
1351 * If MSA state is already live, it is undefined how it in kvm_trap_vz_handle_gsfc()
1358 vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) in kvm_trap_vz_handle_gsfc()
1394 * context is already loaded. in kvm_trap_vz_handle_gsfc()
1397 vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) in kvm_trap_vz_handle_gsfc()
1426 * Presumably this is due to MC (guest mode change), so let's trace some in kvm_trap_vz_handle_ghfc()
1449 * Update PC and hold onto current PC in case there is in kvm_trap_vz_handle_hc()
1452 curr_pc = vcpu->arch.pc; in kvm_trap_vz_handle_hc()
1459 vcpu->arch.pc = curr_pc; in kvm_trap_vz_handle_hc()
1486 u32 *opc = (u32 *) vcpu->arch.pc; in kvm_trap_vz_handle_guest_exit()
1487 u32 cause = vcpu->arch.host_cp0_cause; in kvm_trap_vz_handle_guest_exit()
1489 u32 gexccode = (vcpu->arch.host_cp0_guestctl0 & in kvm_trap_vz_handle_guest_exit()
1496 ++vcpu->stat.vz_gpsi_exits; in kvm_trap_vz_handle_guest_exit()
1500 ++vcpu->stat.vz_gsfc_exits; in kvm_trap_vz_handle_guest_exit()
1504 ++vcpu->stat.vz_hc_exits; in kvm_trap_vz_handle_guest_exit()
1508 ++vcpu->stat.vz_grr_exits; in kvm_trap_vz_handle_guest_exit()
1513 ++vcpu->stat.vz_gva_exits; in kvm_trap_vz_handle_guest_exit()
1518 ++vcpu->stat.vz_ghfc_exits; in kvm_trap_vz_handle_guest_exit()
1522 ++vcpu->stat.vz_gpa_exits; in kvm_trap_vz_handle_guest_exit()
1527 ++vcpu->stat.vz_resvd_exits; in kvm_trap_vz_handle_guest_exit()
1539 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvm_trap_vz_handle_guest_exit()
1546 * kvm_trap_vz_handle_cop_unusable() - Guest used unusable coprocessor.
1554 u32 cause = vcpu->arch.host_cp0_cause; in kvm_trap_vz_handle_cop_unusable()
1564 if (WARN_ON(!kvm_mips_guest_has_fpu(&vcpu->arch) || in kvm_trap_vz_handle_cop_unusable()
1565 vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) { in kvm_trap_vz_handle_cop_unusable()
1581 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvm_trap_vz_handle_cop_unusable()
1592 * kvm_trap_vz_handle_msa_disabled() - Guest used MSA while disabled in root.
1595 * Handle when the guest attempts to use MSA while it is disabled in the root
1606 if (!kvm_mips_guest_has_msa(&vcpu->arch) || in kvm_trap_vz_handle_msa_disabled()
1609 vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) { in kvm_trap_vz_handle_msa_disabled()
1610 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvm_trap_vz_handle_msa_disabled()
1621 struct kvm_run *run = vcpu->run; in kvm_trap_vz_handle_tlb_ld_miss()
1622 u32 *opc = (u32 *) vcpu->arch.pc; in kvm_trap_vz_handle_tlb_ld_miss()
1623 u32 cause = vcpu->arch.host_cp0_cause; in kvm_trap_vz_handle_tlb_ld_miss()
1624 ulong badvaddr = vcpu->arch.host_cp0_badvaddr; in kvm_trap_vz_handle_tlb_ld_miss()
1631 if (kvm_is_ifetch_fault(&vcpu->arch)) { in kvm_trap_vz_handle_tlb_ld_miss()
1632 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvm_trap_vz_handle_tlb_ld_miss()
1641 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvm_trap_vz_handle_tlb_ld_miss()
1650 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvm_trap_vz_handle_tlb_ld_miss()
1657 run->exit_reason = KVM_EXIT_MMIO; in kvm_trap_vz_handle_tlb_ld_miss()
1660 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvm_trap_vz_handle_tlb_ld_miss()
1668 struct kvm_run *run = vcpu->run; in kvm_trap_vz_handle_tlb_st_miss()
1669 u32 *opc = (u32 *) vcpu->arch.pc; in kvm_trap_vz_handle_tlb_st_miss()
1670 u32 cause = vcpu->arch.host_cp0_cause; in kvm_trap_vz_handle_tlb_st_miss()
1671 ulong badvaddr = vcpu->arch.host_cp0_badvaddr; in kvm_trap_vz_handle_tlb_st_miss()
1680 vcpu->arch.host_cp0_badvaddr = badvaddr; in kvm_trap_vz_handle_tlb_st_miss()
1688 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvm_trap_vz_handle_tlb_st_miss()
1697 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvm_trap_vz_handle_tlb_st_miss()
1704 run->exit_reason = KVM_EXIT_MMIO; in kvm_trap_vz_handle_tlb_st_miss()
1707 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvm_trap_vz_handle_tlb_st_miss()
1796 ret += 1 + ARRAY_SIZE(vcpu->arch.maar); in kvm_vz_num_regs()
1809 return -EFAULT; in kvm_vz_copy_reg_indices()
1815 return -EFAULT; in kvm_vz_copy_reg_indices()
1821 return -EFAULT; in kvm_vz_copy_reg_indices()
1827 return -EFAULT; in kvm_vz_copy_reg_indices()
1833 return -EFAULT; in kvm_vz_copy_reg_indices()
1839 return -EFAULT; in kvm_vz_copy_reg_indices()
1845 return -EFAULT; in kvm_vz_copy_reg_indices()
1849 for (i = 0; i < ARRAY_SIZE(vcpu->arch.maar); ++i) { in kvm_vz_copy_reg_indices()
1852 return -EFAULT; in kvm_vz_copy_reg_indices()
1858 return -EFAULT; in kvm_vz_copy_reg_indices()
1867 return -EFAULT; in kvm_vz_copy_reg_indices()
1880 * KVM API exposes 64-bit version of the register, so move the in entrylo_kvm_to_user()
1896 * KVM API exposes 64-bit version of the register, so move the in entrylo_user_to_kvm()
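On a 32-bit kernel the EntryLo registers hold the RI/XI bits at positions 31/30, while the KVM register API always presents a 64-bit register with RI/XI architecturally at bits 63/62, so these helpers relocate the two bits in each direction. A runnable standalone model of the outbound conversion:

    #include <stdint.h>

    #define ENTRYLO32_XI (UINT32_C(1) << 30)   /* 32-bit kernel position */
    #define ENTRYLO32_RI (UINT32_C(1) << 31)

    /* Move RI/XI from bits 31/30 up to bits 63/62 of the user view. */
    static uint64_t entrylo_kvm_to_user32(uint32_t v)
    {
        uint32_t mask = ENTRYLO32_RI | ENTRYLO32_XI;

        return (uint64_t)(v & ~mask) | ((uint64_t)(v & mask) << 32);
    }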
1910 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_vz_get_one_reg()
1913 switch (reg->id) { in kvm_vz_get_one_reg()
1928 return -EINVAL; in kvm_vz_get_one_reg()
1933 return -EINVAL; in kvm_vz_get_one_reg()
1939 return -EINVAL; in kvm_vz_get_one_reg()
1951 return -EINVAL; in kvm_vz_get_one_reg()
1956 return -EINVAL; in kvm_vz_get_one_reg()
1961 return -EINVAL; in kvm_vz_get_one_reg()
1966 return -EINVAL; in kvm_vz_get_one_reg()
1971 return -EINVAL; in kvm_vz_get_one_reg()
1976 return -EINVAL; in kvm_vz_get_one_reg()
1984 return -EINVAL; in kvm_vz_get_one_reg()
1995 return -EINVAL; in kvm_vz_get_one_reg()
2000 return -EINVAL; in kvm_vz_get_one_reg()
2027 /* Octeon III has a read-only guest.PRid */ in kvm_vz_get_one_reg()
2043 return -EINVAL; in kvm_vz_get_one_reg()
2048 return -EINVAL; in kvm_vz_get_one_reg()
2053 return -EINVAL; in kvm_vz_get_one_reg()
2058 return -EINVAL; in kvm_vz_get_one_reg()
2063 return -EINVAL; in kvm_vz_get_one_reg()
2071 return -EINVAL; in kvm_vz_get_one_reg()
2072 idx = reg->id - KVM_REG_MIPS_CP0_MAAR(0); in kvm_vz_get_one_reg()
2073 if (idx >= ARRAY_SIZE(vcpu->arch.maar)) in kvm_vz_get_one_reg()
2074 return -EINVAL; in kvm_vz_get_one_reg()
2075 *v = vcpu->arch.maar[idx]; in kvm_vz_get_one_reg()
2079 return -EINVAL; in kvm_vz_get_one_reg()
2080 *v = kvm_read_sw_gc0_maari(vcpu->arch.cop0); in kvm_vz_get_one_reg()
2091 idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2; in kvm_vz_get_one_reg()
2093 return -EINVAL; in kvm_vz_get_one_reg()
2116 *v = vcpu->arch.count_ctl; in kvm_vz_get_one_reg()
2119 *v = ktime_to_ns(vcpu->arch.count_resume); in kvm_vz_get_one_reg()
2122 *v = vcpu->arch.count_hz; in kvm_vz_get_one_reg()
2125 return -EINVAL; in kvm_vz_get_one_reg()
2134 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_vz_set_one_reg()
2139 switch (reg->id) { in kvm_vz_set_one_reg()
2154 return -EINVAL; in kvm_vz_set_one_reg()
2159 return -EINVAL; in kvm_vz_set_one_reg()
2165 return -EINVAL; in kvm_vz_set_one_reg()
2177 return -EINVAL; in kvm_vz_set_one_reg()
2182 return -EINVAL; in kvm_vz_set_one_reg()
2187 return -EINVAL; in kvm_vz_set_one_reg()
2192 return -EINVAL; in kvm_vz_set_one_reg()
2197 return -EINVAL; in kvm_vz_set_one_reg()
2202 return -EINVAL; in kvm_vz_set_one_reg()
2210 return -EINVAL; in kvm_vz_set_one_reg()
2221 return -EINVAL; in kvm_vz_set_one_reg()
2226 return -EINVAL; in kvm_vz_set_one_reg()
2246 * If the timer is stopped or started (DC bit) it must look in kvm_vz_set_one_reg()
2270 /* Octeon III has a guest.PRid, but it's read-only */ in kvm_vz_set_one_reg()
2348 return -EINVAL; in kvm_vz_set_one_reg()
2349 idx = reg->id - KVM_REG_MIPS_CP0_MAAR(0); in kvm_vz_set_one_reg()
2350 if (idx >= ARRAY_SIZE(vcpu->arch.maar)) in kvm_vz_set_one_reg()
2351 return -EINVAL; in kvm_vz_set_one_reg()
2352 vcpu->arch.maar[idx] = mips_process_maar(dmtc_op, v); in kvm_vz_set_one_reg()
2356 return -EINVAL; in kvm_vz_set_one_reg()
2368 idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2; in kvm_vz_set_one_reg()
2370 return -EINVAL; in kvm_vz_set_one_reg()
2402 return -EINVAL; in kvm_vz_set_one_reg()
2442 vcpu->arch.vzguestid[i] = 0; in kvm_vz_check_requests()
2459 unsigned int wired = read_gc0_wired(); in kvm_vz_vcpu_save_wired() local
2463 /* Expand the wired TLB array if necessary */ in kvm_vz_vcpu_save_wired()
2464 wired &= MIPSR6_WIRED_WIRED; in kvm_vz_vcpu_save_wired()
2465 if (wired > vcpu->arch.wired_tlb_limit) { in kvm_vz_vcpu_save_wired()
2466 tlbs = krealloc(vcpu->arch.wired_tlb, wired * in kvm_vz_vcpu_save_wired()
2467 sizeof(*vcpu->arch.wired_tlb), GFP_ATOMIC); in kvm_vz_vcpu_save_wired()
2470 wired = vcpu->arch.wired_tlb_limit; in kvm_vz_vcpu_save_wired()
2472 vcpu->arch.wired_tlb = tlbs; in kvm_vz_vcpu_save_wired()
2473 vcpu->arch.wired_tlb_limit = wired; in kvm_vz_vcpu_save_wired()
2477 if (wired) in kvm_vz_vcpu_save_wired()
2478 /* Save wired entries from the guest TLB */ in kvm_vz_vcpu_save_wired()
2479 kvm_vz_save_guesttlb(vcpu->arch.wired_tlb, 0, wired); in kvm_vz_vcpu_save_wired()
2481 for (i = wired; i < vcpu->arch.wired_tlb_used; ++i) { in kvm_vz_vcpu_save_wired()
2482 vcpu->arch.wired_tlb[i].tlb_hi = UNIQUE_GUEST_ENTRYHI(i); in kvm_vz_vcpu_save_wired()
2483 vcpu->arch.wired_tlb[i].tlb_lo[0] = 0; in kvm_vz_vcpu_save_wired()
2484 vcpu->arch.wired_tlb[i].tlb_lo[1] = 0; in kvm_vz_vcpu_save_wired()
2485 vcpu->arch.wired_tlb[i].tlb_mask = 0; in kvm_vz_vcpu_save_wired()
2487 vcpu->arch.wired_tlb_used = wired; in kvm_vz_vcpu_save_wired()
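Two details in this save path are worth spelling out: the backing array grows with krealloc(..., GFP_ATOMIC) and, on allocation failure, the wired count is simply clamped to the old limit rather than failing the save; and the loop at lines 2481-2486 "parks" slots that were in use last time but fall beyond the new wired count, giving each a unique EntryHi and zeroed lo/mask words so a stale entry can never match a lookup and no two parked entries collide. A hypothetical parking scheme in the spirit of the kernel's UNIQUE_ENTRYHI, one page pair of unmapped address space per slot:

    /* Hypothetical: give unused slot 'idx' its own EntryHi inside an
     * unmapped segment, one page pair apart, with invalid EntryLo0/1. */
    #define PARKED_ENTRYHI(idx) \
        (0x80000000ul + ((unsigned long)(idx) << (PAGE_SHIFT + 1)))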
2492 /* Load wired entries into the guest TLB */ in kvm_vz_vcpu_load_wired()
2493 if (vcpu->arch.wired_tlb) in kvm_vz_vcpu_load_wired()
2494 kvm_vz_load_guesttlb(vcpu->arch.wired_tlb, 0, in kvm_vz_vcpu_load_wired()
2495 vcpu->arch.wired_tlb_used); in kvm_vz_vcpu_load_wired()
2500 struct kvm *kvm = vcpu->kvm; in kvm_vz_vcpu_load_tlb()
2501 struct mm_struct *gpa_mm = &kvm->arch.gpa_mm; in kvm_vz_vcpu_load_tlb()
2508 migrated = (vcpu->arch.last_exec_cpu != cpu); in kvm_vz_vcpu_load_tlb()
2509 vcpu->arch.last_exec_cpu = cpu; in kvm_vz_vcpu_load_tlb()
2512 * A vcpu's GuestID is set in GuestCtl1.ID when the vcpu is loaded and in kvm_vz_vcpu_load_tlb()
2513 * remains set until another vcpu is loaded in. As a rule GuestRID in kvm_vz_vcpu_load_tlb()
2514 * remains zeroed when in root context unless the kernel is busy in kvm_vz_vcpu_load_tlb()
2519 * Check if our GuestID is of an older version and thus invalid. in kvm_vz_vcpu_load_tlb()
2526 (vcpu->arch.vzguestid[cpu] ^ guestid_cache(cpu)) & in kvm_vz_vcpu_load_tlb()
2529 vcpu->arch.vzguestid[cpu] = guestid_cache(cpu); in kvm_vz_vcpu_load_tlb()
2531 vcpu->arch.vzguestid[cpu]); in kvm_vz_vcpu_load_tlb()
2535 change_c0_guestctl1(GUESTID_MASK, vcpu->arch.vzguestid[cpu]); in kvm_vz_vcpu_load_tlb()
2552 if (cpumask_test_and_clear_cpu(cpu, &kvm->arch.asid_flush_mask)) in kvm_vz_vcpu_load_tlb()
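The GuestID check at line 2526 XORs the vcpu's cached ID with the per-CPU allocator state and masks with the version bits: IDs carry a generation number in their upper bits, so one counter bump invalidates every ID from an exhausted generation at once, the same scheme MIPS uses for ASIDs. A standalone model (names and field widths are ours):

    #include <stdint.h>

    #define ID_MASK      0xffull          /* bits actually presented to hardware */
    #define VERSION_MASK (~ID_MASK)       /* generation number in the upper bits */

    static uint64_t id_cache = 0x100;     /* per-CPU allocator state in the real code */

    static uint64_t refresh_id(uint64_t old_id)
    {
        /* Same generation: the cached ID is still valid. */
        if (!((old_id ^ id_cache) & VERSION_MASK))
            return old_id;

        /* Stale: hand out the next ID.  When the low bits wrap to zero,
         * the carry has already started a new generation: this is where
         * the real code flushes old guest TLB state, and ID 0 is skipped
         * because it belongs to the root. */
        if (!(++id_cache & ID_MASK))
            ++id_cache;
        return id_cache;
    }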
2561 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_vz_vcpu_load()
2568 migrated = (vcpu->arch.last_sched_cpu != cpu); in kvm_vz_vcpu_load()
2579 * restore wired guest TLB entries (while in guest context). in kvm_vz_vcpu_load()
2582 if (current->flags & PF_VCPU) { in kvm_vz_vcpu_load()
2607 * set. For example Status.CU1 cannot be set unless Config1.FP is set. in kvm_vz_vcpu_load()
2688 cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL]); in kvm_vz_vcpu_load()
2695 if (vcpu->kvm->created_vcpus > 1) in kvm_vz_vcpu_load()
2703 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_vz_vcpu_put()
2705 if (current->flags & PF_VCPU) in kvm_vz_vcpu_put()
2724 /* allow wired TLB entries to be overwritten */ in kvm_vz_vcpu_put()
2795 cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL] = in kvm_vz_vcpu_put()
2802 * kvm_vz_resize_guest_vtlb() - Attempt to resize guest VTLB.
2805 * Attempt to resize the guest VTLB by writing guest Config registers. This is
2806 * necessary for cores with a shared root/guest TLB to avoid overlap with wired
2815 /* Write MMUSize - 1 into guest Config registers */ in kvm_vz_resize_guest_vtlb()
2818 (size - 1) << MIPS_CONF1_TLBS_SHIFT); in kvm_vz_resize_guest_vtlb()
2824 config4 |= ((size - 1) >> MIPS_CONF1_TLBS_SIZE) << in kvm_vz_resize_guest_vtlb()
2829 config4 |= ((size - 1) >> MIPS_CONF1_TLBS_SIZE) << in kvm_vz_resize_guest_vtlb()
2836 * Set Guest.Wired.Limit = 0 (no limit up to Guest.MMUSize-1), unless it in kvm_vz_resize_guest_vtlb()
2837 * would exceed Root.Wired.Limit (clearing Guest.Wired.Wired so write in kvm_vz_resize_guest_vtlb()
2843 if (size - 1 <= limit) in kvm_vz_resize_guest_vtlb()
2848 /* Read back MMUSize - 1 */ in kvm_vz_resize_guest_vtlb()
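The requested size is encoded as MMUSize-1 split across two registers: the low 6 bits land in Config1.TLBS (MIPS_CONF1_TLBS_SIZE is the 6-bit field width) and the overflow goes into a Config4 extension field whose exact position depends on Config4.MMUExtDef (the two variants at lines 2824/2829); reading the fields back then tells whether the core accepted the request. A runnable check of the split for a 128-entry request:

    #include <stdio.h>

    #define CONF1_TLBS_SIZE 6             /* Config1.TLBS field width */

    int main(void)
    {
        unsigned int size = 128;          /* desired guest VTLB entries */
        unsigned int tlbs = (size - 1) & ((1u << CONF1_TLBS_SIZE) - 1);
        unsigned int ext  = (size - 1) >> CONF1_TLBS_SIZE;

        /* 127 splits as TLBS=63 plus an extension of 1: 1*64 + 63 + 1 = 128. */
        printf("TLBS=%u ext=%u -> %u entries\n",
               tlbs, ext, (ext << CONF1_TLBS_SIZE) + tlbs + 1);
        return 0;
    }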
2890 mmu_size -= guest_mmu_size; in kvm_vz_hardware_enable()
2892 cvmvmconfig |= mmu_size - 1; in kvm_vz_hardware_enable()
2906 * overlap of root wired and guest entries, the guest TLB may in kvm_vz_hardware_enable()
2910 ftlb_size = current_cpu_data.tlbsize - mmu_size; in kvm_vz_hardware_enable()
2918 * Reduce to make space for root wired entries and at least 2 in kvm_vz_hardware_enable()
2919 * root non-wired entries. This does assume that long-term wired in kvm_vz_hardware_enable()
2922 guest_mmu_size = mmu_size - num_wired_entries() - 2; in kvm_vz_hardware_enable()
2930 * of wired entries. in kvm_vz_hardware_enable()
2935 return -EINVAL; in kvm_vz_hardware_enable()
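To make the sizing at line 2922 concrete: on a core with a 64-entry shared VTLB and, say, 6 long-term root wired entries, the guest would be offered 64 - 6 - 2 = 56 entries, keeping the root wired entries plus two spare non-wired VTLB entries out of the guest's reach; if the core then refuses the resize, hardware enable fails with -EINVAL rather than risking overlap between root wired and guest entries.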
3001 cvmvmconfig |= mmu_size - 1; in kvm_vz_hardware_disable()
3031 /* We support 64-bit registers/operations and addresses */ in kvm_vz_check_extension()
3051 vcpu->arch.vzguestid[i] = 0; in kvm_vz_vcpu_init()
3061 * If the VCPU is freed and reused as another VCPU, we don't want the in kvm_vz_vcpu_uninit()
3075 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_vz_vcpu_setup()
3093 /* Wired */ in kvm_vz_vcpu_setup()
3107 kvm_write_sw_gc0_ebase(cop0, (s32)0x80000000 | vcpu->vcpu_id); in kvm_vz_vcpu_setup()
3180 /* bits SEGBITS-13+3:4 set */ in kvm_vz_vcpu_setup()
3182 ((1ull << (cpu_vmbits - 13)) - 1) << 4); in kvm_vz_vcpu_setup()
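Worked out for a typical cpu_vmbits (SEGBITS) of 40: ((1ull << (40 - 13)) - 1) << 4 = (2^27 - 1) << 4 = 0x7ffffff0, i.e. exactly bits 30:4 set, matching the "SEGBITS-13+3:4" in the comment (30 = 40 - 13 + 3).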
3206 cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL] = 0; in kvm_vz_vcpu_setup()
3209 vcpu->arch.pc = CKSEG1ADDR(0x1fc00000); in kvm_vz_vcpu_setup()
3221 * For each CPU there is a single GPA ASID used by all VCPUs in in kvm_vz_flush_shadow_all()
3229 cpumask_setall(&kvm->arch.asid_flush_mask); in kvm_vz_flush_shadow_all()
3269 r = vcpu->arch.vcpu_run(vcpu); in kvm_vz_vcpu_run()
3317 return -ENODEV; in kvm_mips_emulation_init()
3323 if (WARN(pgd_reg == -1, in kvm_mips_emulation_init()
3325 return -ENODEV; in kvm_mips_emulation_init()