• Home
  • Raw
  • Download

Lines Matching +full:mips +full:- +full:cdmm

6  * KVM/MIPS: Support for hardware virtualization extensions
8 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
112 if (kvm_mips_guest_has_msa(&vcpu->arch)) in kvm_vz_config5_guest_wrmask()
119 if (kvm_mips_guest_has_fpu(&vcpu->arch)) { in kvm_vz_config5_guest_wrmask()
132 * Config1: M, [MMUSize-1, C2, MD, PC, WR, CA], FP
135 * VInt, SP, CDMM, MT, SM, TL]
150 if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) in kvm_vz_config1_user_wrmask()
167 if (kvm_mips_guest_can_have_msa(&vcpu->arch)) in kvm_vz_config3_user_wrmask()
191 set_bit(priority, &vcpu->arch.pending_exceptions); in kvm_vz_queue_irq()
192 clear_bit(priority, &vcpu->arch.pending_exceptions_clr); in kvm_vz_queue_irq()
197 clear_bit(priority, &vcpu->arch.pending_exceptions); in kvm_vz_dequeue_irq()
198 set_bit(priority, &vcpu->arch.pending_exceptions_clr); in kvm_vz_dequeue_irq()
222 int intr = (int)irq->irq; in kvm_vz_queue_io_int_cb()
250 int intr = (int)irq->irq; in kvm_vz_dequeue_io_int_cb()
257 case -2: in kvm_vz_dequeue_io_int_cb()
261 case -3: in kvm_vz_dequeue_io_int_cb()
265 case -4: in kvm_vz_dequeue_io_int_cb()
306 clear_bit(priority, &vcpu->arch.pending_exceptions); in kvm_vz_irq_deliver_cb()
348 clear_bit(priority, &vcpu->arch.pending_exceptions_clr); in kvm_vz_irq_clear_cb()
357 * kvm_vz_should_use_htimer() - Find whether to use the VZ hard guest timer.
370 if (mips_hpt_frequency != vcpu->arch.count_hz) in kvm_vz_should_use_htimer()
381 * _kvm_vz_restore_stimer() - Restore soft timer state.
396 write_c0_gtoffset(compare - read_c0_count()); in _kvm_vz_restore_stimer()
403 * _kvm_vz_restore_htimer() - Restore hard timer state.
419 * Freeze the soft-timer and sync the guest CP0_Count with it. We do in _kvm_vz_restore_htimer()
424 write_c0_gtoffset(start_count - read_c0_count()); in _kvm_vz_restore_htimer()
438 if (after_count - start_count > compare - start_count - 1) in _kvm_vz_restore_htimer()
443 * kvm_vz_restore_timer() - Restore timer state.
450 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_vz_restore_timer()
461 * kvm_vz_acquire_htimer() - Switch to hard timer state.
484 * _kvm_vz_save_htimer() - Switch to software emulation of guest timer.
514 * Record a final CP0_Count which we will transfer to the soft-timer. in _kvm_vz_save_htimer()
526 if (end_count - before_count > compare - before_count - 1) in _kvm_vz_save_htimer()
530 * Restore soft-timer, ignoring a small amount of negative drift due to in _kvm_vz_save_htimer()
533 kvm_mips_restore_hrtimer(vcpu, before_time, end_count, -0x10000); in _kvm_vz_save_htimer()
537 * kvm_vz_save_timer() - Save guest timer state.
545 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_vz_save_timer()
560 /* save timer-related state to VCPU context */ in kvm_vz_save_timer()
566 * kvm_vz_lose_htimer() - Ensure hard guest timer is not in use.
592 * is_eva_access() - Find whether an instruction is an EVA memory accessor.
593 * @inst: 32-bit instruction encoding.
631 * is_eva_am_mapped() - Find whether an access mode is mapped.
633 * @am: 3-bit encoded access mode.
657 * - 6 110 0 0 in is_eva_am_mapped()
690 opc = (u32 *)vcpu->arch.pc; in is_eva_am_mapped()
691 if (vcpu->arch.host_cp0_cause & CAUSEF_BD) in is_eva_am_mapped()
703 * kvm_vz_gva_to_gpa() - Convert valid GVA to GPA.
712 * -errno on failure.
721 /* Handle canonical 32-bit virtual address */ in kvm_vz_gva_to_gpa()
807 * kvm_vz_badvaddr_to_gpa() - Convert GVA BadVAddr from root exception to GPA.
819 * -errno on failure.
824 unsigned int gexccode = (vcpu->arch.host_cp0_guestctl0 & in kvm_vz_badvaddr_to_gpa()
836 return -EINVAL; in kvm_vz_badvaddr_to_gpa()
838 /* ... and we need to perform the GVA->GPA translation in software */ in kvm_vz_badvaddr_to_gpa()
844 u32 *opc = (u32 *) vcpu->arch.pc; in kvm_trap_vz_no_handler()
845 u32 cause = vcpu->arch.host_cp0_cause; in kvm_trap_vz_no_handler()
847 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; in kvm_trap_vz_no_handler()
861 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvm_trap_vz_no_handler()
891 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_write_maari()
895 kvm_write_sw_gc0_maari(cop0, ARRAY_SIZE(vcpu->arch.maar) - 1); in kvm_write_maari()
896 else if (val < ARRAY_SIZE(vcpu->arch.maar)) in kvm_write_maari()
905 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_vz_gpsi_cop0()
915 curr_pc = vcpu->arch.pc; in kvm_vz_gpsi_cop0()
937 cop0->stat[rd][sel]++; in kvm_vz_gpsi_cop0()
958 ARRAY_SIZE(vcpu->arch.maar)); in kvm_vz_gpsi_cop0()
959 val = vcpu->arch.maar[ in kvm_vz_gpsi_cop0()
976 val = cop0->reg[rd][sel]; in kvm_vz_gpsi_cop0()
986 vcpu->arch.gprs[rt] = val; in kvm_vz_gpsi_cop0()
997 cop0->stat[rd][sel]++; in kvm_vz_gpsi_cop0()
999 val = vcpu->arch.gprs[rt]; in kvm_vz_gpsi_cop0()
1007 kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]); in kvm_vz_gpsi_cop0()
1011 vcpu->arch.gprs[rt], in kvm_vz_gpsi_cop0()
1031 ARRAY_SIZE(vcpu->arch.maar)); in kvm_vz_gpsi_cop0()
1032 vcpu->arch.maar[kvm_read_sw_gc0_maari(cop0)] = in kvm_vz_gpsi_cop0()
1057 vcpu->arch.pc = curr_pc; in kvm_vz_gpsi_cop0()
1071 struct kvm_vcpu_arch *arch = &vcpu->arch; in kvm_vz_gpsi_cache()
1078 curr_pc = vcpu->arch.pc; in kvm_vz_gpsi_cache()
1092 va = arch->gprs[base] + offset; in kvm_vz_gpsi_cache()
1095 cache, op, base, arch->gprs[base], offset); in kvm_vz_gpsi_cache()
1124 curr_pc, vcpu->arch.gprs[31], cache, op, base, arch->gprs[base], in kvm_vz_gpsi_cache()
1127 vcpu->arch.pc = curr_pc; in kvm_vz_gpsi_cache()
1136 struct kvm_vcpu_arch *arch = &vcpu->arch; in kvm_trap_vz_handle_gpsi()
1137 struct kvm_run *run = vcpu->run; in kvm_trap_vz_handle_gpsi()
1179 arch->gprs[rt] = in kvm_trap_vz_handle_gpsi()
1189 KVM_TRACE_HWR(rd, sel), arch->gprs[rt]); in kvm_trap_vz_handle_gpsi()
1214 struct kvm_vcpu_arch *arch = &vcpu->arch; in kvm_trap_vz_handle_gsfc()
1234 unsigned int val = arch->gprs[rt]; in kvm_trap_vz_handle_gsfc()
1242 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) in kvm_trap_vz_handle_gsfc()
1272 vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) in kvm_trap_vz_handle_gsfc()
1311 vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) in kvm_trap_vz_handle_gsfc()
1366 curr_pc = vcpu->arch.pc; in kvm_trap_vz_handle_hc()
1373 vcpu->arch.pc = curr_pc; in kvm_trap_vz_handle_hc()
1400 u32 *opc = (u32 *) vcpu->arch.pc; in kvm_trap_vz_handle_guest_exit()
1401 u32 cause = vcpu->arch.host_cp0_cause; in kvm_trap_vz_handle_guest_exit()
1403 u32 gexccode = (vcpu->arch.host_cp0_guestctl0 & in kvm_trap_vz_handle_guest_exit()
1410 ++vcpu->stat.vz_gpsi_exits; in kvm_trap_vz_handle_guest_exit()
1414 ++vcpu->stat.vz_gsfc_exits; in kvm_trap_vz_handle_guest_exit()
1418 ++vcpu->stat.vz_hc_exits; in kvm_trap_vz_handle_guest_exit()
1422 ++vcpu->stat.vz_grr_exits; in kvm_trap_vz_handle_guest_exit()
1427 ++vcpu->stat.vz_gva_exits; in kvm_trap_vz_handle_guest_exit()
1432 ++vcpu->stat.vz_ghfc_exits; in kvm_trap_vz_handle_guest_exit()
1436 ++vcpu->stat.vz_gpa_exits; in kvm_trap_vz_handle_guest_exit()
1441 ++vcpu->stat.vz_resvd_exits; in kvm_trap_vz_handle_guest_exit()
1453 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvm_trap_vz_handle_guest_exit()
1460 * kvm_trap_vz_handle_cop_unusable() - Guest used unusable coprocessor.
1468 struct kvm_run *run = vcpu->run; in kvm_trap_vz_handle_cop_unusable()
1469 u32 cause = vcpu->arch.host_cp0_cause; in kvm_trap_vz_handle_cop_unusable()
1479 if (WARN_ON(!kvm_mips_guest_has_fpu(&vcpu->arch) || in kvm_trap_vz_handle_cop_unusable()
1480 vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) { in kvm_trap_vz_handle_cop_unusable()
1496 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvm_trap_vz_handle_cop_unusable()
1507 * kvm_trap_vz_handle_msa_disabled() - Guest used MSA while disabled in root.
1515 struct kvm_run *run = vcpu->run; in kvm_trap_vz_handle_msa_disabled()
1523 if (!kvm_mips_guest_has_msa(&vcpu->arch) || in kvm_trap_vz_handle_msa_disabled()
1526 vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) { in kvm_trap_vz_handle_msa_disabled()
1527 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvm_trap_vz_handle_msa_disabled()
1538 struct kvm_run *run = vcpu->run; in kvm_trap_vz_handle_tlb_ld_miss()
1539 u32 *opc = (u32 *) vcpu->arch.pc; in kvm_trap_vz_handle_tlb_ld_miss()
1540 u32 cause = vcpu->arch.host_cp0_cause; in kvm_trap_vz_handle_tlb_ld_miss()
1541 ulong badvaddr = vcpu->arch.host_cp0_badvaddr; in kvm_trap_vz_handle_tlb_ld_miss()
1548 if (kvm_is_ifetch_fault(&vcpu->arch)) { in kvm_trap_vz_handle_tlb_ld_miss()
1549 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvm_trap_vz_handle_tlb_ld_miss()
1558 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvm_trap_vz_handle_tlb_ld_miss()
1567 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvm_trap_vz_handle_tlb_ld_miss()
1574 run->exit_reason = KVM_EXIT_MMIO; in kvm_trap_vz_handle_tlb_ld_miss()
1577 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvm_trap_vz_handle_tlb_ld_miss()
1585 struct kvm_run *run = vcpu->run; in kvm_trap_vz_handle_tlb_st_miss()
1586 u32 *opc = (u32 *) vcpu->arch.pc; in kvm_trap_vz_handle_tlb_st_miss()
1587 u32 cause = vcpu->arch.host_cp0_cause; in kvm_trap_vz_handle_tlb_st_miss()
1588 ulong badvaddr = vcpu->arch.host_cp0_badvaddr; in kvm_trap_vz_handle_tlb_st_miss()
1597 vcpu->arch.host_cp0_badvaddr = badvaddr; in kvm_trap_vz_handle_tlb_st_miss()
1605 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvm_trap_vz_handle_tlb_st_miss()
1614 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvm_trap_vz_handle_tlb_st_miss()
1621 run->exit_reason = KVM_EXIT_MMIO; in kvm_trap_vz_handle_tlb_st_miss()
1624 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvm_trap_vz_handle_tlb_st_miss()
1712 ret += 1 + ARRAY_SIZE(vcpu->arch.maar); in kvm_vz_num_regs()
1725 return -EFAULT; in kvm_vz_copy_reg_indices()
1731 return -EFAULT; in kvm_vz_copy_reg_indices()
1737 return -EFAULT; in kvm_vz_copy_reg_indices()
1743 return -EFAULT; in kvm_vz_copy_reg_indices()
1749 return -EFAULT; in kvm_vz_copy_reg_indices()
1755 return -EFAULT; in kvm_vz_copy_reg_indices()
1761 return -EFAULT; in kvm_vz_copy_reg_indices()
1765 for (i = 0; i < ARRAY_SIZE(vcpu->arch.maar); ++i) { in kvm_vz_copy_reg_indices()
1768 return -EFAULT; in kvm_vz_copy_reg_indices()
1774 return -EFAULT; in kvm_vz_copy_reg_indices()
1783 return -EFAULT; in kvm_vz_copy_reg_indices()
1796 * KVM API exposes 64-bit version of the register, so move the in entrylo_kvm_to_user()
1812 * KVM API exposes 64-bit version of the register, so move the in entrylo_user_to_kvm()
1826 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_vz_get_one_reg()
1829 switch (reg->id) { in kvm_vz_get_one_reg()
1844 return -EINVAL; in kvm_vz_get_one_reg()
1849 return -EINVAL; in kvm_vz_get_one_reg()
1855 return -EINVAL; in kvm_vz_get_one_reg()
1867 return -EINVAL; in kvm_vz_get_one_reg()
1872 return -EINVAL; in kvm_vz_get_one_reg()
1877 return -EINVAL; in kvm_vz_get_one_reg()
1882 return -EINVAL; in kvm_vz_get_one_reg()
1887 return -EINVAL; in kvm_vz_get_one_reg()
1892 return -EINVAL; in kvm_vz_get_one_reg()
1900 return -EINVAL; in kvm_vz_get_one_reg()
1911 return -EINVAL; in kvm_vz_get_one_reg()
1916 return -EINVAL; in kvm_vz_get_one_reg()
1943 /* Octeon III has a read-only guest.PRid */ in kvm_vz_get_one_reg()
1959 return -EINVAL; in kvm_vz_get_one_reg()
1964 return -EINVAL; in kvm_vz_get_one_reg()
1969 return -EINVAL; in kvm_vz_get_one_reg()
1974 return -EINVAL; in kvm_vz_get_one_reg()
1979 return -EINVAL; in kvm_vz_get_one_reg()
1984 return -EINVAL; in kvm_vz_get_one_reg()
1985 idx = reg->id - KVM_REG_MIPS_CP0_MAAR(0); in kvm_vz_get_one_reg()
1986 if (idx >= ARRAY_SIZE(vcpu->arch.maar)) in kvm_vz_get_one_reg()
1987 return -EINVAL; in kvm_vz_get_one_reg()
1988 *v = vcpu->arch.maar[idx]; in kvm_vz_get_one_reg()
1992 return -EINVAL; in kvm_vz_get_one_reg()
1993 *v = kvm_read_sw_gc0_maari(vcpu->arch.cop0); in kvm_vz_get_one_reg()
2004 idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2; in kvm_vz_get_one_reg()
2006 return -EINVAL; in kvm_vz_get_one_reg()
2029 *v = vcpu->arch.count_ctl; in kvm_vz_get_one_reg()
2032 *v = ktime_to_ns(vcpu->arch.count_resume); in kvm_vz_get_one_reg()
2035 *v = vcpu->arch.count_hz; in kvm_vz_get_one_reg()
2038 return -EINVAL; in kvm_vz_get_one_reg()
2047 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_vz_set_one_reg()
2052 switch (reg->id) { in kvm_vz_set_one_reg()
2067 return -EINVAL; in kvm_vz_set_one_reg()
2072 return -EINVAL; in kvm_vz_set_one_reg()
2078 return -EINVAL; in kvm_vz_set_one_reg()
2090 return -EINVAL; in kvm_vz_set_one_reg()
2095 return -EINVAL; in kvm_vz_set_one_reg()
2100 return -EINVAL; in kvm_vz_set_one_reg()
2105 return -EINVAL; in kvm_vz_set_one_reg()
2110 return -EINVAL; in kvm_vz_set_one_reg()
2115 return -EINVAL; in kvm_vz_set_one_reg()
2123 return -EINVAL; in kvm_vz_set_one_reg()
2134 return -EINVAL; in kvm_vz_set_one_reg()
2139 return -EINVAL; in kvm_vz_set_one_reg()
2183 /* Octeon III has a guest.PRid, but it's read-only */ in kvm_vz_set_one_reg()
2253 return -EINVAL; in kvm_vz_set_one_reg()
2254 idx = reg->id - KVM_REG_MIPS_CP0_MAAR(0); in kvm_vz_set_one_reg()
2255 if (idx >= ARRAY_SIZE(vcpu->arch.maar)) in kvm_vz_set_one_reg()
2256 return -EINVAL; in kvm_vz_set_one_reg()
2257 vcpu->arch.maar[idx] = mips_process_maar(dmtc_op, v); in kvm_vz_set_one_reg()
2261 return -EINVAL; in kvm_vz_set_one_reg()
2273 idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2; in kvm_vz_set_one_reg()
2275 return -EINVAL; in kvm_vz_set_one_reg()
2307 return -EINVAL; in kvm_vz_set_one_reg()
2347 vcpu->arch.vzguestid[i] = 0; in kvm_vz_check_requests()
2370 if (wired > vcpu->arch.wired_tlb_limit) { in kvm_vz_vcpu_save_wired()
2371 tlbs = krealloc(vcpu->arch.wired_tlb, wired * in kvm_vz_vcpu_save_wired()
2372 sizeof(*vcpu->arch.wired_tlb), GFP_ATOMIC); in kvm_vz_vcpu_save_wired()
2375 wired = vcpu->arch.wired_tlb_limit; in kvm_vz_vcpu_save_wired()
2377 vcpu->arch.wired_tlb = tlbs; in kvm_vz_vcpu_save_wired()
2378 vcpu->arch.wired_tlb_limit = wired; in kvm_vz_vcpu_save_wired()
2384 kvm_vz_save_guesttlb(vcpu->arch.wired_tlb, 0, wired); in kvm_vz_vcpu_save_wired()
2386 for (i = wired; i < vcpu->arch.wired_tlb_used; ++i) { in kvm_vz_vcpu_save_wired()
2387 vcpu->arch.wired_tlb[i].tlb_hi = UNIQUE_GUEST_ENTRYHI(i); in kvm_vz_vcpu_save_wired()
2388 vcpu->arch.wired_tlb[i].tlb_lo[0] = 0; in kvm_vz_vcpu_save_wired()
2389 vcpu->arch.wired_tlb[i].tlb_lo[1] = 0; in kvm_vz_vcpu_save_wired()
2390 vcpu->arch.wired_tlb[i].tlb_mask = 0; in kvm_vz_vcpu_save_wired()
2392 vcpu->arch.wired_tlb_used = wired; in kvm_vz_vcpu_save_wired()
2398 if (vcpu->arch.wired_tlb) in kvm_vz_vcpu_load_wired()
2399 kvm_vz_load_guesttlb(vcpu->arch.wired_tlb, 0, in kvm_vz_vcpu_load_wired()
2400 vcpu->arch.wired_tlb_used); in kvm_vz_vcpu_load_wired()
2405 struct kvm *kvm = vcpu->kvm; in kvm_vz_vcpu_load_tlb()
2406 struct mm_struct *gpa_mm = &kvm->arch.gpa_mm; in kvm_vz_vcpu_load_tlb()
2413 migrated = (vcpu->arch.last_exec_cpu != cpu); in kvm_vz_vcpu_load_tlb()
2414 vcpu->arch.last_exec_cpu = cpu; in kvm_vz_vcpu_load_tlb()
2431 (vcpu->arch.vzguestid[cpu] ^ guestid_cache(cpu)) & in kvm_vz_vcpu_load_tlb()
2434 vcpu->arch.vzguestid[cpu] = guestid_cache(cpu); in kvm_vz_vcpu_load_tlb()
2436 vcpu->arch.vzguestid[cpu]); in kvm_vz_vcpu_load_tlb()
2440 change_c0_guestctl1(GUESTID_MASK, vcpu->arch.vzguestid[cpu]); in kvm_vz_vcpu_load_tlb()
2457 if (cpumask_test_and_clear_cpu(cpu, &kvm->arch.asid_flush_mask) in kvm_vz_vcpu_load_tlb()
2466 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_vz_vcpu_load()
2473 migrated = (vcpu->arch.last_sched_cpu != cpu); in kvm_vz_vcpu_load()
2487 if (current->flags & PF_VCPU) { in kvm_vz_vcpu_load()
2593 cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL]); in kvm_vz_vcpu_load()
2608 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_vz_vcpu_put()
2610 if (current->flags & PF_VCPU) in kvm_vz_vcpu_put()
2700 cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL] = in kvm_vz_vcpu_put()
2707 * kvm_vz_resize_guest_vtlb() - Attempt to resize guest VTLB.
2720 /* Write MMUSize - 1 into guest Config registers */ in kvm_vz_resize_guest_vtlb()
2723 (size - 1) << MIPS_CONF1_TLBS_SHIFT); in kvm_vz_resize_guest_vtlb()
2729 config4 |= ((size - 1) >> MIPS_CONF1_TLBS_SIZE) << in kvm_vz_resize_guest_vtlb()
2734 config4 |= ((size - 1) >> MIPS_CONF1_TLBS_SIZE) << in kvm_vz_resize_guest_vtlb()
2741 * Set Guest.Wired.Limit = 0 (no limit up to Guest.MMUSize-1), unless it in kvm_vz_resize_guest_vtlb()
2748 if (size - 1 <= limit) in kvm_vz_resize_guest_vtlb()
2753 /* Read back MMUSize - 1 */ in kvm_vz_resize_guest_vtlb()
2795 mmu_size -= guest_mmu_size; in kvm_vz_hardware_enable()
2797 cvmvmconfig |= mmu_size - 1; in kvm_vz_hardware_enable()
2815 ftlb_size = current_cpu_data.tlbsize - mmu_size; in kvm_vz_hardware_enable()
2824 * root non-wired entries. This does assume that long-term wired in kvm_vz_hardware_enable()
2827 guest_mmu_size = mmu_size - num_wired_entries() - 2; in kvm_vz_hardware_enable()
2840 return -EINVAL; in kvm_vz_hardware_enable()
2896 cvmvmconfig |= mmu_size - 1; in kvm_vz_hardware_disable()
2926 /* We support 64-bit registers/operations and addresses */ in kvm_vz_check_extension()
2943 vcpu->arch.vzguestid[i] = 0; in kvm_vz_vcpu_init()
2967 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_vz_vcpu_setup()
2999 kvm_write_sw_gc0_ebase(cop0, (s32)0x80000000 | vcpu->vcpu_id); in kvm_vz_vcpu_setup()
3072 /* bits SEGBITS-13+3:4 set */ in kvm_vz_vcpu_setup()
3074 ((1ull << (cpu_vmbits - 13)) - 1) << 4); in kvm_vz_vcpu_setup()
3098 cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL] = 0; in kvm_vz_vcpu_setup()
3101 vcpu->arch.pc = CKSEG1ADDR(0x1fc00000); in kvm_vz_vcpu_setup()
3121 cpumask_setall(&kvm->arch.asid_flush_mask); in kvm_vz_flush_shadow_all()
3161 r = vcpu->arch.vcpu_run(run, vcpu); in kvm_vz_vcpu_run()
3209 return -ENODEV; in kvm_mips_emulation_init()
3215 if (WARN(pgd_reg == -1, in kvm_mips_emulation_init()
3217 return -ENODEV; in kvm_mips_emulation_init()
3219 pr_info("Starting KVM with MIPS VZ extensions\n"); in kvm_mips_emulation_init()