Lines matching +full:tlb +full:-split (arch/x86/kvm/mmu.h)
/* SPDX-License-Identifier: GPL-2.0 */

#define PT32_DIR_PSE36_MASK \
	(((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)
return ((2ULL << (e - s)) - 1) << s; in rsvd_bits()
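The matched line is the body of rsvd_bits(), which builds a mask with bits s..e set, inclusive. Writing "2ULL << (e - s)" instead of "1ULL << (e - s + 1)" keeps the shift count below 64 even for the full 0..63 range. A standalone sketch of the idiom (the guard and the demo values in main() are ours, not from the file):

#include <stdio.h>
#include <stdint.h>

/* Mask of bits s..e inclusive, mirroring KVM's rsvd_bits(). */
static uint64_t rsvd_bits(int s, int e)
{
	if (e < s)		/* defensive guard, added for this sketch */
		return 0;
	return ((2ULL << (e - s)) - 1) << s;
}

int main(void)
{
	printf("%#llx\n", (unsigned long long)rsvd_bits(52, 62)); /* 0x7ff0000000000000 */
	printf("%#llx\n", (unsigned long long)rsvd_bits(0, 63));  /* 0xffffffffffffffff */
	return 0;
}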
if (likely(vcpu->arch.mmu->root_hpa != INVALID_PAGE)) in kvm_mmu_reload()
u64 root_hpa = vcpu->arch.mmu->root_hpa; in kvm_mmu_load_pgd()
vcpu->arch.mmu->shadow_root_level); in kvm_mmu_load_pgd()
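Only two lines of kvm_mmu_load_pgd() matched; between them, the full helper hands root_hpa to vendor code with the active PCID OR-ed into its low bits. A page-aligned root has its low 12 bits clear, so a PCID below 4096 fits there. A hedged sketch of that packing (the addresses and PCID value are illustrative):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t root_hpa = 0x123456000ULL;	/* 4 KiB-aligned root: low 12 bits are zero */
	uint64_t pcid = 0x5;			/* active PCID, fits in bits 0-11 */

	uint64_t pgd = root_hpa | pcid;		/* pack both into one u64 argument */

	printf("root=%#llx pcid=%#llx\n",
	       (unsigned long long)(pgd & ~0xfffULL),
	       (unsigned long long)(pgd & 0xfffULL));
	return 0;
}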
if (likely(vcpu->arch.mmu->page_fault == kvm_tdp_page_fault)) in kvm_mmu_do_page_fault()
return vcpu->arch.mmu->page_fault(vcpu, cr2_or_gpa, err, prefault); in kvm_mmu_do_page_fault()
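These two lines are kvm_mmu_do_page_fault()'s CONFIG_RETPOLINE fast path: when the handler pointer is known to be kvm_tdp_page_fault, it is called directly, skipping the retpoline-thunked indirect call. A generic sketch of the devirtualization pattern (handler_t, fast_handler, and dispatch are illustrative names, not kernel APIs):

typedef int (*handler_t)(void *ctx);

static int fast_handler(void *ctx)
{
	return 0;	/* common-case implementation */
}

static int dispatch(handler_t handler, void *ctx)
{
	/*
	 * Direct call for the predicted target; the compare-and-branch is
	 * cheaper than a retpoline-thunked indirect call.
	 */
	if (handler == fast_handler)
		return fast_handler(ctx);

	return handler(ctx);	/* rare targets still go indirect */
}

int main(void)
{
	return dispatch(fast_handler, 0);
}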
* Currently we have two sorts of write-protection: a) the first one
* write-protects guest pages to sync guest modifications, b) the other is
* used to sync the dirty bitmap for KVM_GET_DIRTY_LOG. The differences:
* 1) the first case clears the SPTE_MMU_WRITEABLE bit.
* 2) the first case requires flushing the TLB immediately, to avoid
*    corrupting the shadow page tables shared between all vCPUs, so it
*    must run under mmu-lock. The other case does not need to flush the
*    TLB until returning the dirty bitmap to userspace, since it only
*    write-protects pages logged in the bitmap; no such page can be
*    missed, so it can flush the TLB outside of mmu-lock.
*
* So there is a problem: the first case can meet stale writable TLB
* entries left by the other case, which write-protects pages without
* flushing the TLB immediately. To make the first case aware of this, we
* make it flush the TLB if it write-protects an SPTE whose
* SPTE_MMU_WRITEABLE bit is set; this works because the other case never
* touches that bit. Whenever an SPTE with SPTE_MMU_WRITEABLE set becomes
* read-only, the TLB must be flushed; fortunately, mmu_spte_update()
* already handles this. The rules for the two bits:
* - if we want to see if an SPTE has a writable TLB entry, or if it can
*   be made writable on the MMU mapping, check SPTE_MMU_WRITEABLE (the
*   common case); otherwise,
* - if we fix a page fault on the SPTE, or do write-protection by dirty
*   logging, check PT_WRITABLE_MASK.
*
* TODO: introduce APIs to split these two cases (sketched below).
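One way that TODO could look: a pair of hypothetical helpers that hide which bit each case must check, so callers cannot pick the wrong one. The helper names are invented here, and SPTE_MMU_WRITEABLE's bit position is an assumption; PT_WRITABLE_MASK is the architectural x86 writable bit:

#include <stdbool.h>
#include <stdint.h>

#define PT_WRITABLE_MASK	(1ULL << 1)	/* hardware-writable PTE bit */
#define SPTE_MMU_WRITEABLE	(1ULL << 11)	/* illustrative bit position */

/*
 * Case a), shadow-page sync: the SPTE may still have a writable TLB
 * entry, so check SPTE_MMU_WRITEABLE and flush the TLB under mmu-lock.
 */
static bool spte_can_be_writable(uint64_t spte)
{
	return spte & SPTE_MMU_WRITEABLE;
}

/*
 * Case b), dirty logging: only cares whether the mapping is currently
 * hardware-writable; the TLB flush is deferred until the dirty bitmap
 * is returned to userspace.
 */
static bool spte_is_writable(uint64_t spte)
{
	return spte & PT_WRITABLE_MASK;
}

int main(void)
{
	uint64_t spte = PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE;
	return spte_can_be_writable(spte) && spte_is_writable(spte) ? 0 : 1;
}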
* If CPL = 3, SMAP applies to all supervisor-mode data accesses, regardless of EFLAGS.AC in permission_fault()
unsigned long smap = (cpl - 3) & (rflags & X86_EFLAGS_AC); in permission_fault()
int index = (pfec >> 1) + (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1)); in permission_fault()
bool fault = (mmu->permissions[index] >> pte_access) & 1; in permission_fault()
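A standalone worked example of the branchless SMAP override computed in the lines above. The constants mirror the kernel's; the demo values (cpl, rflags, pfec) are ours:

#include <stdio.h>

#define X86_EFLAGS_AC		(1UL << 18)
#define X86_EFLAGS_AC_BIT	18
#define PFERR_RSVD_BIT		3

int main(void)
{
	unsigned long rflags = X86_EFLAGS_AC;	/* guest has EFLAGS.AC set */
	int cpl = 0;				/* supervisor mode */
	unsigned pfec = 1 << 1;			/* write fault (PFERR_WRITE) */

	/*
	 * (cpl - 3) is negative for cpl < 3, so bit 18 of its two's
	 * complement is set; for cpl == 3 it is zero. ANDing with
	 * (rflags & AC) yields X86_EFLAGS_AC exactly when SMAP checks
	 * are overridden - without any branch.
	 */
	unsigned long smap = (cpl - 3) & (rflags & X86_EFLAGS_AC);

	/*
	 * Shift the AC bit (18) down into the slot the RSVD bit (3)
	 * occupies after "pfec >> 1", producing the permissions[] index.
	 */
	int index = (pfec >> 1) +
		    (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));

	printf("smap=%#lx index=%d\n", smap, index);	/* smap=0x40000 index=5 */
	return 0;
}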
if (unlikely(mmu->pkru_mask)) { in permission_fault()
pkru_bits = (vcpu->arch.pkru >> (pte_pkey * 2)) & 3; in permission_fault()
offset = (pfec & ~1) + ((pte_access & PT_USER_MASK) << (PFERR_RSVD_BIT - PT_USER_SHIFT)); in permission_fault()
pkru_bits &= mmu->pkru_mask >> offset; in permission_fault()
errcode |= -pkru_bits & PFERR_PK_MASK; in permission_fault()
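PKRU packs two bits per protection key, access-disable (AD, bit 0) and write-disable (WD, bit 1), for 16 keys, so pte_pkey * 2 indexes the key's first bit. A minimal extraction sketch with illustrative values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t pkru = 0x2 << (5 * 2);	/* WD set for protection key 5 */
	unsigned pte_pkey = 5;		/* key taken from the PTE */

	/* pte_pkey * 2 is the index of this key's first PKRU bit. */
	uint32_t pkru_bits = (pkru >> (pte_pkey * 2)) & 3;

	printf("AD=%u WD=%u\n",
	       (unsigned)(pkru_bits & 1), (unsigned)((pkru_bits >> 1) & 1));
	return 0;
}

In the kernel, pkru_bits is then masked with mmu->pkru_mask >> offset so the key's bits only raise a fault for the access types they actually restrict.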
return -(u32)fault & errcode; in permission_fault()
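The return line is a branchless select: negating the bool in 32-bit arithmetic yields all-ones when fault is true and zero otherwise, so errcode survives only on a fault. A quick standalone check:

#include <stdio.h>
#include <stdint.h>

static uint32_t select_errcode(int fault, uint32_t errcode)
{
	return -(uint32_t)fault & errcode;	/* errcode if fault, else 0 */
}

int main(void)
{
	printf("%#x\n", select_errcode(1, 0x21));	/* 0x21 */
	printf("%#x\n", select_errcode(0, 0x21));	/* 0 */
	return 0;
}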