Lines matching references to mmu (each entry: source line number, matching code line, enclosing function):
372 return vcpu->arch.mmu == &vcpu->arch.guest_mmu; in kvm_vcpu_ad_need_write_protect()
2365 vcpu->arch.mmu->sync_page(vcpu, sp) == 0) { in __kvm_sync_page()
2570 role = vcpu->arch.mmu->mmu_role.base; in kvm_mmu_get_page()
2576 if (!vcpu->arch.mmu->direct_map in kvm_mmu_get_page()
2577 && vcpu->arch.mmu->root_level <= PT32_ROOT_LEVEL) { in kvm_mmu_get_page()
2651 iterator->level = vcpu->arch.mmu->shadow_root_level; in shadow_walk_init_using_root()
2654 vcpu->arch.mmu->root_level < PT64_ROOT_4LEVEL && in shadow_walk_init_using_root()
2655 !vcpu->arch.mmu->direct_map) in shadow_walk_init_using_root()
2663 BUG_ON(root != vcpu->arch.mmu->root_hpa); in shadow_walk_init_using_root()
2666 = vcpu->arch.mmu->pae_root[(addr >> 30) & 3]; in shadow_walk_init_using_root()
2677 shadow_walk_init_using_root(iterator, vcpu, vcpu->arch.mmu->root_hpa, in shadow_walk_init()
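The shadow_walk_init_using_root() entries above (2651-2677) show how a walk is seeded: the iterator starts at shadow_root_level, and when the effective root is the 4-entry PAE array, the starting entry is pae_root[(addr >> 30) & 3]. The fragment below is a minimal, self-contained illustration of that index computation only; PAE_ROOT_ENTRIES and pae_root_index are made-up names for this sketch, not kernel identifiers.

#include <stdio.h>
#include <stdint.h>

#define PAE_ROOT_ENTRIES 4

/*
 * For a 3-level (PAE) shadow root, the top level is not a full page table
 * but a 4-entry array; the walk starts from pae_root[(addr >> 30) & 3],
 * i.e. bits 31:30 of the guest address select the entry.
 */
static unsigned pae_root_index(uint64_t addr)
{
    return (addr >> 30) & (PAE_ROOT_ENTRIES - 1);
}

int main(void)
{
    uint64_t addrs[] = { 0x00000000, 0x7fffffff, 0x80000000, 0xc0001000 };

    for (unsigned i = 0; i < 4; i++)
        printf("addr 0x%08llx -> pae_root[%u]\n",
               (unsigned long long)addrs[i], pae_root_index(addrs[i]));
    return 0;
}
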
3340 if (!VALID_PAGE(vcpu->arch.mmu->root_hpa)) in __direct_map()
3550 if (!VALID_PAGE(vcpu->arch.mmu->root_hpa)) in fast_page_fault()
3722 void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, in kvm_mmu_free_roots() argument
3732 if (!(free_active_root && VALID_PAGE(mmu->root_hpa))) { in kvm_mmu_free_roots()
3735 VALID_PAGE(mmu->prev_roots[i].hpa)) in kvm_mmu_free_roots()
3746 mmu_free_root_page(vcpu->kvm, &mmu->prev_roots[i].hpa, in kvm_mmu_free_roots()
3750 if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL && in kvm_mmu_free_roots()
3751 (mmu->root_level >= PT64_ROOT_4LEVEL || mmu->direct_map)) { in kvm_mmu_free_roots()
3752 mmu_free_root_page(vcpu->kvm, &mmu->root_hpa, in kvm_mmu_free_roots()
3756 if (mmu->pae_root[i] != 0) in kvm_mmu_free_roots()
3758 &mmu->pae_root[i], in kvm_mmu_free_roots()
3760 mmu->root_hpa = INVALID_PAGE; in kvm_mmu_free_roots()
3762 mmu->root_cr3 = 0; in kvm_mmu_free_roots()
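kvm_mmu_free_roots() (3722-3762) drops whichever cached previous roots the caller selected and, when asked, the active root as well, leaving root_hpa invalid and root_cr3 cleared. The sketch below models only that bookkeeping in userspace; the types, the roots_to_free bit layout, and the absence of the real mmu_free_root_page() are simplifications of this model, not the kernel's definitions.

#include <stdio.h>
#include <stdint.h>

#define INVALID_PAGE        0ULL
#define VALID_PAGE(x)       ((x) != INVALID_PAGE)
#define NUM_PREV_ROOTS      3
#define MODEL_ROOT_CURRENT  (1u << NUM_PREV_ROOTS)  /* bit layout is this model's own */

struct root_info { uint64_t hpa, cr3; };

struct mmu_model {
    uint64_t root_hpa, root_cr3;
    struct root_info prev_roots[NUM_PREV_ROOTS];
};

/* Drop the cached roots selected in roots_to_free; the active root too if requested. */
static void model_free_roots(struct mmu_model *mmu, unsigned roots_to_free)
{
    int free_active_root = roots_to_free & MODEL_ROOT_CURRENT;

    for (unsigned i = 0; i < NUM_PREV_ROOTS; i++)
        if ((roots_to_free & (1u << i)) && VALID_PAGE(mmu->prev_roots[i].hpa))
            mmu->prev_roots[i].hpa = INVALID_PAGE;  /* stands in for mmu_free_root_page() */

    if (free_active_root && VALID_PAGE(mmu->root_hpa)) {
        mmu->root_hpa = INVALID_PAGE;               /* the real code frees the root page(s) */
        mmu->root_cr3 = 0;
    }
}

int main(void)
{
    struct mmu_model mmu = {
        .root_hpa = 0x1000, .root_cr3 = 0x5000,
        .prev_roots = { { 0x2000, 0x6000 } },
    };

    model_free_roots(&mmu, MODEL_ROOT_CURRENT | 1u);  /* current root + prev_roots[0] */
    printf("root_hpa=%#llx prev[0].hpa=%#llx\n",
           (unsigned long long)mmu.root_hpa,
           (unsigned long long)mmu.prev_roots[0].hpa);
    return 0;
}
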
3787 if (vcpu->arch.mmu->shadow_root_level >= PT64_ROOT_4LEVEL) { in mmu_alloc_direct_roots()
3794 vcpu->arch.mmu->shadow_root_level, 1, ACC_ALL); in mmu_alloc_direct_roots()
3797 vcpu->arch.mmu->root_hpa = __pa(sp->spt); in mmu_alloc_direct_roots()
3798 } else if (vcpu->arch.mmu->shadow_root_level == PT32E_ROOT_LEVEL) { in mmu_alloc_direct_roots()
3800 hpa_t root = vcpu->arch.mmu->pae_root[i]; in mmu_alloc_direct_roots()
3813 vcpu->arch.mmu->pae_root[i] = root | PT_PRESENT_MASK; in mmu_alloc_direct_roots()
3815 vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root); in mmu_alloc_direct_roots()
3818 vcpu->arch.mmu->root_cr3 = vcpu->arch.mmu->get_cr3(vcpu); in mmu_alloc_direct_roots()
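mmu_alloc_direct_roots() (3787-3818) either installs a single top-level shadow page when the shadow root has four or more levels, or, for a PT32E (3-level) root, marks all four pae_root entries present and points root_hpa at the pae_root array itself. A rough model of that branch structure follows; alloc_root_page() and to_phys() are invented stand-ins for the kernel's shadow-page allocation and __pa().

#include <stdint.h>
#include <stdio.h>

#define PT32E_ROOT_LEVEL  3
#define PT64_ROOT_4LEVEL  4
#define PT_PRESENT_MASK   1ULL

struct mmu_model {
    int      shadow_root_level;
    uint64_t root_hpa;
    uint64_t pae_root[4];
};

/* Illustrative stand-ins: the real code allocates shadow pages and converts
 * them to physical addresses; here we just hand back fake addresses. */
static uint64_t alloc_root_page(void) { static uint64_t next = 0x10000; return next += 0x1000; }
static uint64_t to_phys(void *p)      { return (uint64_t)(uintptr_t)p; }

static int model_alloc_direct_roots(struct mmu_model *mmu)
{
    if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL) {
        /* One top-level shadow page is the whole root. */
        mmu->root_hpa = alloc_root_page();
    } else if (mmu->shadow_root_level == PT32E_ROOT_LEVEL) {
        /* Four PAE entries, each pointing at a lower-level root. */
        for (int i = 0; i < 4; i++)
            mmu->pae_root[i] = alloc_root_page() | PT_PRESENT_MASK;
        mmu->root_hpa = to_phys(mmu->pae_root);
    } else {
        return -1;
    }
    return 0;
}

int main(void)
{
    struct mmu_model mmu = { .shadow_root_level = PT32E_ROOT_LEVEL };

    model_alloc_direct_roots(&mmu);
    printf("root_hpa=%#llx pae_root[0]=%#llx\n",
           (unsigned long long)mmu.root_hpa, (unsigned long long)mmu.pae_root[0]);
    return 0;
}
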
3830 root_cr3 = vcpu->arch.mmu->get_cr3(vcpu); in mmu_alloc_shadow_roots()
3840 if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) { in mmu_alloc_shadow_roots()
3841 hpa_t root = vcpu->arch.mmu->root_hpa; in mmu_alloc_shadow_roots()
3851 vcpu->arch.mmu->shadow_root_level, 0, ACC_ALL); in mmu_alloc_shadow_roots()
3855 vcpu->arch.mmu->root_hpa = root; in mmu_alloc_shadow_roots()
3865 if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL) in mmu_alloc_shadow_roots()
3869 hpa_t root = vcpu->arch.mmu->pae_root[i]; in mmu_alloc_shadow_roots()
3872 if (vcpu->arch.mmu->root_level == PT32E_ROOT_LEVEL) { in mmu_alloc_shadow_roots()
3873 pdptr = vcpu->arch.mmu->get_pdptr(vcpu, i); in mmu_alloc_shadow_roots()
3875 vcpu->arch.mmu->pae_root[i] = 0; in mmu_alloc_shadow_roots()
3893 vcpu->arch.mmu->pae_root[i] = root | pm_mask; in mmu_alloc_shadow_roots()
3895 vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root); in mmu_alloc_shadow_roots()
3901 if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL) { in mmu_alloc_shadow_roots()
3902 if (vcpu->arch.mmu->lm_root == NULL) { in mmu_alloc_shadow_roots()
3914 lm_root[0] = __pa(vcpu->arch.mmu->pae_root) | pm_mask; in mmu_alloc_shadow_roots()
3916 vcpu->arch.mmu->lm_root = lm_root; in mmu_alloc_shadow_roots()
3919 vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->lm_root); in mmu_alloc_shadow_roots()
3923 vcpu->arch.mmu->root_cr3 = root_cr3; in mmu_alloc_shadow_roots()
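Within mmu_alloc_shadow_roots() (3830-3923), a 3-level guest supplies its four PDPTRs: a non-present PDPTR leaves the corresponding pae_root slot zero, while a present one is installed with the extra permission bits (pm_mask) applied. The loop below models just that part; get_pdptr(), PDPTR_ADDR_MASK, and the pm_mask value used here are illustrative assumptions, not the kernel's values.

#include <stdint.h>
#include <stdio.h>

#define PT_PRESENT_MASK  1ULL
#define PDPTR_ADDR_MASK  0x000ffffffffff000ULL   /* illustrative address mask */

/* Stand-in for reading the guest's i-th PDPTR; here index 2 is "not present". */
static uint64_t get_pdptr(int i)
{
    return (i == 2) ? 0 : ((0x20000ULL + 0x1000 * i) | PT_PRESENT_MASK);
}

int main(void)
{
    uint64_t pae_root[4];
    uint64_t pm_mask = PT_PRESENT_MASK;          /* extra PDPTE bits, simplified */

    for (int i = 0; i < 4; i++) {
        uint64_t pdptr = get_pdptr(i);

        if (!(pdptr & PT_PRESENT_MASK)) {        /* guest left this slot empty */
            pae_root[i] = 0;
            continue;
        }
        /* The real code shadows the page the PDPTR points at; we just
         * record its address plus the permission mask. */
        pae_root[i] = (pdptr & PDPTR_ADDR_MASK) | pm_mask;
    }

    for (int i = 0; i < 4; i++)
        printf("pae_root[%d] = %#llx\n", i, (unsigned long long)pae_root[i]);
    return 0;
}
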
3930 if (vcpu->arch.mmu->direct_map) in mmu_alloc_roots()
3941 if (vcpu->arch.mmu->direct_map) in kvm_mmu_sync_roots()
3944 if (!VALID_PAGE(vcpu->arch.mmu->root_hpa)) in kvm_mmu_sync_roots()
3949 if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) { in kvm_mmu_sync_roots()
3950 hpa_t root = vcpu->arch.mmu->root_hpa; in kvm_mmu_sync_roots()
3981 hpa_t root = vcpu->arch.mmu->pae_root[i]; in kvm_mmu_sync_roots()
4021 static bool is_rsvd_bits_set(struct kvm_mmu *mmu, u64 gpte, int level) in is_rsvd_bits_set() argument
4023 return __is_rsvd_bits_set(&mmu->guest_rsvd_check, gpte, level); in is_rsvd_bits_set()
4026 static bool is_shadow_zero_bits_set(struct kvm_mmu *mmu, u64 spte, int level) in is_shadow_zero_bits_set() argument
4028 return __is_rsvd_bits_set(&mmu->shadow_zero_check, spte, level); in is_shadow_zero_bits_set()
4055 if (!VALID_PAGE(vcpu->arch.mmu->root_hpa)) in walk_shadow_page_get_mmio_spte()
4072 reserved |= is_shadow_zero_bits_set(vcpu->arch.mmu, spte, in walk_shadow_page_get_mmio_spte()
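is_rsvd_bits_set() and is_shadow_zero_bits_set() (4021-4028) are thin wrappers around one reserved-bits check applied to two different mask tables, and walk_shadow_page_get_mmio_spte() (4055-4072) accumulates the shadow-zero variant over a walk. The stand-in below captures the idea of a per-level mask selected by the PTE's large-page bit; the struct layout and the sample masks are this model's own, not the kernel's rsvd_bits_validate.

#include <stdint.h>
#include <stdio.h>

struct rsvd_check_model {
    /* [large][level-1]: bit 7 of the PTE (large page) selects the mask row. */
    uint64_t rsvd_bits_mask[2][5];
};

static int model_rsvd_bits_set(const struct rsvd_check_model *chk,
                               uint64_t pte, int level)
{
    int large = (pte >> 7) & 1;

    return (pte & chk->rsvd_bits_mask[large][level - 1]) != 0;
}

int main(void)
{
    struct rsvd_check_model chk = { { { 0 } } };

    /* Pretend bits 62:52 are reserved for non-large level-1 entries. */
    chk.rsvd_bits_mask[0][0] = 0x7ff0000000000000ULL;

    uint64_t good = 0x0000000000001003ULL;
    uint64_t bad  = good | (1ULL << 60);

    printf("good -> %d, bad -> %d\n",
           model_rsvd_bits_set(&chk, good, 1),
           model_rsvd_bits_set(&chk, bad, 1));
    return 0;
}
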
4151 if (!VALID_PAGE(vcpu->arch.mmu->root_hpa)) in shadow_page_table_clear_flood()
4179 MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)); in nonpaging_page_fault()
4193 arch.direct_map = vcpu->arch.mmu->direct_map; in kvm_arch_setup_async_pf()
4194 arch.cr3 = vcpu->arch.mmu->get_cr3(vcpu); in kvm_arch_setup_async_pf()
4297 MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)); in tdp_page_fault()
4371 struct kvm_mmu *mmu = vcpu->arch.mmu; in cached_root_available() local
4373 root.cr3 = mmu->root_cr3; in cached_root_available()
4374 root.hpa = mmu->root_hpa; in cached_root_available()
4377 swap(root, mmu->prev_roots[i]); in cached_root_available()
4385 mmu->root_hpa = root.hpa; in cached_root_available()
4386 mmu->root_cr3 = root.cr3; in cached_root_available()
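cached_root_available() (4371-4386) treats prev_roots as a small cache: the active root is swapped into the cache slot by slot while searching for an entry whose cr3 matches the new value, and whatever root ends the search is promoted back into root_hpa/root_cr3. A minimal model of that swap-and-search follows; the kernel additionally checks the cached page's role, which is omitted here.

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define NUM_PREV_ROOTS  3
#define INVALID_PAGE    0ULL

struct root_info { uint64_t hpa, cr3; };

struct mmu_model {
    uint64_t root_hpa, root_cr3;
    struct root_info prev_roots[NUM_PREV_ROOTS];
};

static void swap_roots(struct root_info *a, struct root_info *b)
{
    struct root_info t = *a; *a = *b; *b = t;
}

/* Look for new_cr3 in the cache; on the way, the current root is pushed
 * into the cache so nothing is lost.  Returns true if a cached root matched. */
static bool model_cached_root_available(struct mmu_model *mmu, uint64_t new_cr3)
{
    struct root_info root = { .hpa = mmu->root_hpa, .cr3 = mmu->root_cr3 };
    bool found = false;

    for (unsigned i = 0; i < NUM_PREV_ROOTS; i++) {
        swap_roots(&root, &mmu->prev_roots[i]);
        if (root.cr3 == new_cr3 && root.hpa != INVALID_PAGE) {
            found = true;
            break;
        }
    }

    mmu->root_hpa = root.hpa;
    mmu->root_cr3 = root.cr3;
    return found;
}

int main(void)
{
    struct mmu_model mmu = {
        .root_hpa = 0x1000, .root_cr3 = 0xa000,
        .prev_roots = { { 0x2000, 0xb000 }, { 0x3000, 0xc000 } },
    };

    printf("hit=%d root_cr3=%#llx\n",
           model_cached_root_available(&mmu, 0xc000),
           (unsigned long long)mmu.root_cr3);
    return 0;
}
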
4395 struct kvm_mmu *mmu = vcpu->arch.mmu; in fast_cr3_switch() local
4402 if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL && in fast_cr3_switch()
4403 mmu->root_level >= PT64_ROOT_4LEVEL) { in fast_cr3_switch()
4431 page_header(mmu->root_hpa)); in fast_cr3_switch()
4445 kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, in __kvm_mmu_new_cr3()
4464 vcpu->arch.mmu->inject_page_fault(vcpu, fault); in inject_page_fault()
4484 static inline bool is_last_gpte(struct kvm_mmu *mmu, in is_last_gpte() argument
4492 gpte &= level - mmu->last_nonleaf_level; in is_last_gpte()
4766 struct kvm_mmu *mmu, bool ept) in update_permission_bitmask() argument
4778 for (byte = 0; byte < ARRAY_SIZE(mmu->permissions); ++byte) { in update_permission_bitmask()
4802 if (!mmu->nx) in update_permission_bitmask()
4833 mmu->permissions[byte] = ff | uf | wf | smepf | smapf; in update_permission_bitmask()
4861 static void update_pkru_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, in update_pkru_bitmask() argument
4868 mmu->pkru_mask = 0; in update_pkru_bitmask()
4874 mmu->pkru_mask = 0; in update_pkru_bitmask()
4880 for (bit = 0; bit < ARRAY_SIZE(mmu->permissions); ++bit) { in update_pkru_bitmask()
4908 mmu->pkru_mask |= (pkey_bits & 3) << pfec; in update_pkru_bitmask()
4912 static void update_last_nonleaf_level(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu) in update_last_nonleaf_level() argument
4914 unsigned root_level = mmu->root_level; in update_last_nonleaf_level()
4916 mmu->last_nonleaf_level = root_level; in update_last_nonleaf_level()
4918 mmu->last_nonleaf_level++; in update_last_nonleaf_level()
5028 struct kvm_mmu *context = vcpu->arch.mmu; in init_kvm_tdp_mmu()
5099 struct kvm_mmu *context = vcpu->arch.mmu; in kvm_init_shadow_mmu()
5153 struct kvm_mmu *context = vcpu->arch.mmu; in kvm_init_shadow_ept_mmu()
5186 struct kvm_mmu *context = vcpu->arch.mmu; in init_kvm_softmmu()
5249 vcpu->arch.mmu->root_hpa = INVALID_PAGE; in kvm_init_mmu()
5252 vcpu->arch.mmu->prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID; in kvm_init_mmu()
5495 if (vcpu->arch.mmu->direct_map) in kvm_mmu_unprotect_page_virt()
5530 bool direct = vcpu->arch.mmu->direct_map; in kvm_mmu_page_fault()
5533 if (vcpu->arch.mmu->direct_map) { in kvm_mmu_page_fault()
5546 r = vcpu->arch.mmu->page_fault(vcpu, cr2_or_gpa, in kvm_mmu_page_fault()
5564 if (vcpu->arch.mmu->direct_map && in kvm_mmu_page_fault()
5603 struct kvm_mmu *mmu = vcpu->arch.mmu; in kvm_mmu_invlpg() local
5610 mmu->invlpg(vcpu, gva, mmu->root_hpa); in kvm_mmu_invlpg()
5624 if (VALID_PAGE(mmu->prev_roots[i].hpa)) in kvm_mmu_invlpg()
5625 mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa); in kvm_mmu_invlpg()
5634 struct kvm_mmu *mmu = vcpu->arch.mmu; in kvm_mmu_invpcid_gva() local
5639 mmu->invlpg(vcpu, gva, mmu->root_hpa); in kvm_mmu_invpcid_gva()
5644 if (VALID_PAGE(mmu->prev_roots[i].hpa) && in kvm_mmu_invpcid_gva()
5645 pcid == kvm_get_pcid(vcpu, mmu->prev_roots[i].cr3)) { in kvm_mmu_invpcid_gva()
5646 mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa); in kvm_mmu_invpcid_gva()
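kvm_mmu_invlpg() (5603-5625) invalidates a guest virtual address on the active root and on every valid cached previous root, whereas kvm_mmu_invpcid_gva() (5634-5646) only touches roots whose CR3 carries the requested PCID. The sketch below mirrors those two loops; model_invlpg() and cr3_pcid() are assumptions standing in for mmu->invlpg() and kvm_get_pcid().

#include <stdint.h>
#include <stdio.h>

#define NUM_PREV_ROOTS  3
#define INVALID_PAGE    0ULL

struct root_info { uint64_t hpa, cr3; };

struct mmu_model {
    uint64_t root_hpa, root_cr3;
    struct root_info prev_roots[NUM_PREV_ROOTS];
};

/* Stand-ins for mmu->invlpg() and kvm_get_pcid(). */
static void model_invlpg(uint64_t gva, uint64_t root_hpa)
{
    printf("invlpg gva=%#llx on root %#llx\n",
           (unsigned long long)gva, (unsigned long long)root_hpa);
}
static uint64_t cr3_pcid(uint64_t cr3) { return cr3 & 0xfff; }

static void model_mmu_invlpg(struct mmu_model *mmu, uint64_t gva)
{
    model_invlpg(gva, mmu->root_hpa);                 /* always the active root */
    for (unsigned i = 0; i < NUM_PREV_ROOTS; i++)
        if (mmu->prev_roots[i].hpa != INVALID_PAGE)   /* plus every valid cached root */
            model_invlpg(gva, mmu->prev_roots[i].hpa);
}

static void model_mmu_invpcid_gva(struct mmu_model *mmu, uint64_t gva, uint64_t pcid)
{
    if (pcid == cr3_pcid(mmu->root_cr3))              /* active root only if PCID matches */
        model_invlpg(gva, mmu->root_hpa);

    for (unsigned i = 0; i < NUM_PREV_ROOTS; i++)     /* likewise for each cached root */
        if (mmu->prev_roots[i].hpa != INVALID_PAGE &&
            pcid == cr3_pcid(mmu->prev_roots[i].cr3))
            model_invlpg(gva, mmu->prev_roots[i].hpa);
}

int main(void)
{
    struct mmu_model mmu = {
        .root_hpa = 0x1000, .root_cr3 = 0xa001,
        .prev_roots = { { 0x2000, 0xb002 } },
    };

    model_mmu_invlpg(&mmu, 0x7f0000001000ULL);
    model_mmu_invpcid_gva(&mmu, 0x7f0000001000ULL, 2);
    return 0;
}
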
5749 static void free_mmu_pages(struct kvm_mmu *mmu) in free_mmu_pages() argument
5751 free_page((unsigned long)mmu->pae_root); in free_mmu_pages()
5752 free_page((unsigned long)mmu->lm_root); in free_mmu_pages()
5755 static int alloc_mmu_pages(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu) in alloc_mmu_pages() argument
5776 mmu->pae_root = page_address(page); in alloc_mmu_pages()
5778 mmu->pae_root[i] = INVALID_PAGE; in alloc_mmu_pages()
5788 vcpu->arch.mmu = &vcpu->arch.root_mmu; in kvm_mmu_create()
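Finally, alloc_mmu_pages() (5755-5778) backs pae_root with a freshly allocated page whose first four entries start out as INVALID_PAGE, free_mmu_pages() (5749-5752) releases pae_root and lm_root, and kvm_mmu_create() (5788) starts the vCPU on root_mmu. The userspace sketch below pairs only the pae_root allocation and initialization with the matching free, using malloc/free in place of the kernel's page allocator.

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

#define INVALID_PAGE      0ULL
#define PAE_ROOT_ENTRIES  4

struct mmu_model {
    uint64_t *pae_root;
};

static int model_alloc_mmu_pages(struct mmu_model *mmu)
{
    /* The kernel allocates one page for this; malloc is our stand-in. */
    mmu->pae_root = malloc(PAE_ROOT_ENTRIES * sizeof(*mmu->pae_root));
    if (!mmu->pae_root)
        return -1;

    for (int i = 0; i < PAE_ROOT_ENTRIES; i++)
        mmu->pae_root[i] = INVALID_PAGE;     /* no roots installed yet */
    return 0;
}

static void model_free_mmu_pages(struct mmu_model *mmu)
{
    free(mmu->pae_root);
    mmu->pae_root = NULL;
}

int main(void)
{
    struct mmu_model mmu;

    if (model_alloc_mmu_pages(&mmu))
        return 1;
    printf("pae_root[0] = %#llx\n", (unsigned long long)mmu.pae_root[0]);
    model_free_mmu_pages(&mmu);
    return 0;
}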