Lines matching refs: mmu (references to the mmu identifier inside the KVM FNAME() paging template)
131 static bool FNAME(is_rsvd_bits_set)(struct kvm_mmu *mmu, u64 gpte, int level) in FNAME()
135 return (gpte & mmu->rsvd_bits_mask[bit7][level-1]) | in FNAME()
136 ((mmu->bad_mt_xwr & (1ull << low6)) != 0); in FNAME()
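Lines 135-136 above are the body of the reserved-bit check: a gpte is rejected if it sets any bit in the rsvd_bits_mask row selected by its bit 7 for the current level, or if its low 6 bits index a set bit in bad_mt_xwr. Below is a minimal standalone sketch of that test; the toy_mmu struct and the sample mask are invented stand-ins, not the real struct kvm_mmu.

/*
 * Standalone sketch (not kernel code): toy_mmu and the constants below are
 * invented; only the two fields used on lines 135-136 are modeled.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_mmu {
	uint64_t rsvd_bits_mask[2][4];	/* row = gpte bit 7, column = level - 1 */
	uint64_t bad_mt_xwr;		/* bitmap of disallowed low-6-bit combinations */
};

static bool toy_is_rsvd_bits_set(const struct toy_mmu *mmu, uint64_t gpte, int level)
{
	int bit7 = (gpte >> 7) & 1;
	int low6 = gpte & 0x3f;

	/* Same shape as the listed return statement: either test tripping marks the gpte bad. */
	return (gpte & mmu->rsvd_bits_mask[bit7][level - 1]) |
	       ((mmu->bad_mt_xwr & (1ull << low6)) != 0);
}

int main(void)
{
	struct toy_mmu mmu = { 0 };

	mmu.rsvd_bits_mask[0][0] = 1ull << 62;	/* pretend bit 62 is reserved at level 1 */

	printf("%d\n", toy_is_rsvd_bits_set(&mmu, (1ull << 62) | 0x1, 1));	/* prints 1 */
	printf("%d\n", toy_is_rsvd_bits_set(&mmu, 0x1, 1));			/* prints 0 */
	return 0;
}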
148 static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, in FNAME()
175 if (FNAME(is_rsvd_bits_set)(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL)) in FNAME()
208 struct kvm_mmu *mmu, in FNAME()
255 ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index, orig_pte, pte); in FNAME()
269 struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, in FNAME()
288 walker->level = mmu->root_level; in FNAME()
289 pte = mmu->get_cr3(vcpu); in FNAME()
293 pte = mmu->get_pdptr(vcpu, (addr >> 30) & 3); in FNAME()
322 real_gfn = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn), in FNAME()
356 if (unlikely(FNAME(is_rsvd_bits_set)(mmu, pte, in FNAME()
366 } while (!is_last_gpte(mmu, walker->level, pte)); in FNAME()
368 if (unlikely(permission_fault(vcpu, mmu, pte_access, access))) { in FNAME()
379 real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(gfn), access, &walker->fault); in FNAME()
397 ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker, write_fault); in FNAME()
412 if (fetch_fault && (mmu->nx || in FNAME()
439 walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu; in FNAME()
448 return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.mmu, addr, in FNAME()
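Lines 269-448 show why the walker takes a struct kvm_mmu * explicitly instead of always using vcpu->arch.mmu: it goes through the hooks on that object (get_cr3, get_pdptr, translate_gpa, root_level), and the comparison against vcpu->arch.walk_mmu on line 439 implies the same loop can be run for more than one MMU context. The toy sketch below mirrors only that shape (a root level, a per-level translate hook, a do/while down the levels); every name and number in it is invented for illustration, and it does not reproduce walk_addr_generic().

/*
 * Toy sketch only: struct, hooks, and values are invented to mirror the
 * parameterized-walk shape of the listed lines, not the kernel function.
 */
#include <stdint.h>
#include <stdio.h>

struct toy_walk_mmu {
	int root_level;
	uint64_t (*translate_gpa)(uint64_t gpa);	/* cf. mmu->translate_gpa on lines 322/379 */
};

static uint64_t identity_gpa(uint64_t gpa) { return gpa; }		/* direct walk */
static uint64_t offset_gpa(uint64_t gpa)   { return gpa + 0x100000; }	/* stand-in for a nested step */

static uint64_t toy_walk(const struct toy_walk_mmu *mmu, uint64_t table_gfn)
{
	int level = mmu->root_level;		/* cf. walker->level = mmu->root_level */
	uint64_t gpa = table_gfn << 12;

	do {
		gpa = mmu->translate_gpa(gpa);	/* translate each table address on the way down */
		--level;
	} while (level > 0);			/* cf. while (!is_last_gpte(mmu, walker->level, pte)) */

	return gpa;
}

int main(void)
{
	struct toy_walk_mmu direct = { .root_level = 4, .translate_gpa = identity_gpa };
	struct toy_walk_mmu nested = { .root_level = 4, .translate_gpa = offset_gpa };

	printf("direct: %#llx\n", (unsigned long long)toy_walk(&direct, 0x10));
	printf("nested: %#llx\n", (unsigned long long)toy_walk(&nested, 0x10));
	return 0;
}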
572 top_level = vcpu->arch.mmu.root_level; in FNAME()
584 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) in FNAME()
845 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) { in FNAME()