
Searched refs:gpte (Results 1 – 7 of 7) sorted by relevance

/arch/powerpc/kvm/
book3s_64_mmu.c
206 struct kvmppc_pte *gpte, bool data, in kvmppc_mmu_book3s_64_xlate() argument
227 gpte->eaddr = eaddr; in kvmppc_mmu_book3s_64_xlate()
228 gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data); in kvmppc_mmu_book3s_64_xlate()
229 gpte->raddr = vcpu->arch.magic_page_pa | (gpte->raddr & 0xfff); in kvmppc_mmu_book3s_64_xlate()
230 gpte->raddr &= KVM_PAM; in kvmppc_mmu_book3s_64_xlate()
231 gpte->may_execute = true; in kvmppc_mmu_book3s_64_xlate()
232 gpte->may_read = true; in kvmppc_mmu_book3s_64_xlate()
233 gpte->may_write = true; in kvmppc_mmu_book3s_64_xlate()
234 gpte->page_size = MMU_PAGE_4K; in kvmppc_mmu_book3s_64_xlate()
235 gpte->wimg = HPTE_R_M; in kvmppc_mmu_book3s_64_xlate()
[all …]
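
The book3s_64_mmu.c hits above are the magic-page short cut in kvmppc_mmu_book3s_64_xlate(): the guest PTE is filled with a fixed real address and fully permissive 4K attributes. A minimal standalone sketch of that fill, assuming a simplified guest_pte struct and placeholder constants (FAKE_KVM_PAM, PAGE_MASK_4K) rather than the kernel's kvmppc_pte, KVM_PAM and HPTE_R_M:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_MASK_4K   0xfffULL                  /* offset within a 4K page */
#define FAKE_KVM_PAM   0x0fffffffffffffffULL     /* placeholder for KVM_PAM */

struct guest_pte {                /* simplified stand-in for kvmppc_pte */
	uint64_t eaddr;           /* guest effective address translated */
	uint64_t raddr;           /* resulting guest real address */
	bool may_read, may_write, may_execute;
	unsigned int page_shift;
};

/* Map the magic page 4K, fully permissive, at a fixed real address, keeping
 * the low 12 bits as the within-page offset, as the excerpt does. */
static void xlate_magic_page(struct guest_pte *gpte, uint64_t eaddr,
			     uint64_t magic_page_pa)
{
	gpte->eaddr = eaddr;
	gpte->raddr = (magic_page_pa | (eaddr & PAGE_MASK_4K)) & FAKE_KVM_PAM;
	gpte->may_read = gpte->may_write = gpte->may_execute = true;
	gpte->page_shift = 12;    /* 4K */
}

int main(void)
{
	struct guest_pte pte;

	xlate_magic_page(&pte, 0xc01234ULL, 0xdeadb000ULL);
	printf("eaddr=%#llx -> raddr=%#llx rwx=%d%d%d\n",
	       (unsigned long long)pte.eaddr, (unsigned long long)pte.raddr,
	       pte.may_read, pte.may_write, pte.may_execute);
	return 0;
}
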
book3s_64_mmu_radix.c
128 struct kvmppc_pte *gpte, u64 root, in kvmppc_mmu_walk_radix_tree() argument
193 gpte->page_size = ps; in kvmppc_mmu_walk_radix_tree()
194 gpte->page_shift = offset; in kvmppc_mmu_walk_radix_tree()
196 gpte->eaddr = eaddr; in kvmppc_mmu_walk_radix_tree()
197 gpte->raddr = gpa; in kvmppc_mmu_walk_radix_tree()
200 gpte->may_read = !!(pte & _PAGE_READ); in kvmppc_mmu_walk_radix_tree()
201 gpte->may_write = !!(pte & _PAGE_WRITE); in kvmppc_mmu_walk_radix_tree()
202 gpte->may_execute = !!(pte & _PAGE_EXEC); in kvmppc_mmu_walk_radix_tree()
204 gpte->rc = pte & (_PAGE_ACCESSED | _PAGE_DIRTY); in kvmppc_mmu_walk_radix_tree()
221 struct kvmppc_pte *gpte, u64 table, in kvmppc_mmu_radix_translate_table() argument
[all …]
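
In the radix walk above, a leaf PTE's access bits are decoded into the guest-PTE permission flags, and the referenced/changed bits are kept separately in gpte->rc. A sketch of that decode step, using made-up bit positions (FAKE_PAGE_*) instead of the kernel's _PAGE_* definitions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FAKE_PAGE_READ     (1ULL << 2)
#define FAKE_PAGE_WRITE    (1ULL << 1)
#define FAKE_PAGE_EXEC     (1ULL << 0)
#define FAKE_PAGE_ACCESSED (1ULL << 8)
#define FAKE_PAGE_DIRTY    (1ULL << 7)

struct guest_pte {
	uint64_t raddr;            /* guest real address of the page */
	bool may_read, may_write, may_execute;
	uint64_t rc;               /* referenced/changed (accessed/dirty) bits */
	unsigned int page_shift;
};

static void decode_radix_leaf(struct guest_pte *gpte, uint64_t pte,
			      uint64_t gpa, unsigned int page_shift)
{
	gpte->raddr = gpa;
	gpte->page_shift = page_shift;
	gpte->may_read    = !!(pte & FAKE_PAGE_READ);
	gpte->may_write   = !!(pte & FAKE_PAGE_WRITE);
	gpte->may_execute = !!(pte & FAKE_PAGE_EXEC);
	/* Keep only the referenced/changed tracking bits, as the excerpt does. */
	gpte->rc = pte & (FAKE_PAGE_ACCESSED | FAKE_PAGE_DIRTY);
}

int main(void)
{
	struct guest_pte g;

	decode_radix_leaf(&g, FAKE_PAGE_READ | FAKE_PAGE_EXEC | FAKE_PAGE_ACCESSED,
			  0x1000000ULL, 16 /* 64K page */);
	printf("r=%d w=%d x=%d rc=%#llx\n", g.may_read, g.may_write,
	       g.may_execute, (unsigned long long)g.rc);
	return 0;
}
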
book3s_hv_nested.c
1198 struct kvmppc_pte gpte, in kvmhv_handle_nested_set_rc() argument
1210 if (pgflags & ~gpte.rc) in kvmhv_handle_nested_set_rc()
1216 gpte.raddr, kvm->arch.lpid); in kvmhv_handle_nested_set_rc()
1267 struct kvmppc_pte gpte; in __kvmhv_nested_page_fault() local
1290 ret = kvmhv_translate_addr_nested(vcpu, gp, n_gpa, dsisr, &gpte); in __kvmhv_nested_page_fault()
1306 ret = kvmhv_handle_nested_set_rc(vcpu, gp, n_gpa, gpte, dsisr); in __kvmhv_nested_page_fault()
1323 l1_shift = gpte.page_shift; in __kvmhv_nested_page_fault()
1330 gpa = gpte.raddr; in __kvmhv_nested_page_fault()
1388 perm |= gpte.may_read ? 0UL : _PAGE_READ; in __kvmhv_nested_page_fault()
1389 perm |= gpte.may_write ? 0UL : _PAGE_WRITE; in __kvmhv_nested_page_fault()
[all …]
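
In the nested fault path above, the guest PTE's may_read/may_write flags are turned into a mask of permissions the L1 mapping withholds, which is then used to clamp the shadow mapping. A sketch of just that mask construction, with FAKE_PAGE_READ/FAKE_PAGE_WRITE standing in for the kernel's _PAGE_READ/_PAGE_WRITE:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FAKE_PAGE_READ  (1ULL << 2)
#define FAKE_PAGE_WRITE (1ULL << 1)

struct guest_pte {
	bool may_read, may_write;
};

/* Collect the permissions the L1 mapping does NOT grant, so the shadow (L0)
 * PTE can have those permission bits removed. */
static uint64_t denied_perms(const struct guest_pte *gpte)
{
	uint64_t perm = 0;

	perm |= gpte->may_read  ? 0ULL : FAKE_PAGE_READ;
	perm |= gpte->may_write ? 0ULL : FAKE_PAGE_WRITE;
	return perm;
}

int main(void)
{
	struct guest_pte g = { .may_read = true, .may_write = false };

	printf("denied mask = %#llx\n", (unsigned long long)denied_perms(&g));
	return 0;
}
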
book3s_64_mmu_hv.c
341 struct kvmppc_pte *gpte, bool data, bool iswrite) in kvmppc_mmu_book3s_64_hv_xlate() argument
353 return kvmppc_mmu_radix_xlate(vcpu, eaddr, gpte, data, iswrite); in kvmppc_mmu_book3s_64_hv_xlate()
383 gpte->eaddr = eaddr; in kvmppc_mmu_book3s_64_hv_xlate()
384 gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff); in kvmppc_mmu_book3s_64_hv_xlate()
392 gpte->may_read = hpte_read_permission(pp, key); in kvmppc_mmu_book3s_64_hv_xlate()
393 gpte->may_write = hpte_write_permission(pp, key); in kvmppc_mmu_book3s_64_hv_xlate()
394 gpte->may_execute = gpte->may_read && !(gr & (HPTE_R_N | HPTE_R_G)); in kvmppc_mmu_book3s_64_hv_xlate()
400 gpte->may_read = 0; in kvmppc_mmu_book3s_64_hv_xlate()
402 gpte->may_write = 0; in kvmppc_mmu_book3s_64_hv_xlate()
406 gpte->raddr = kvmppc_mmu_get_real_addr(v, gr, eaddr); in kvmppc_mmu_book3s_64_hv_xlate()
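
The HPT-based xlate above derives execute permission from read permission plus the HPTE's No-execute and Guarded bits. A sketch of that rule, with FAKE_HPTE_R_N/FAKE_HPTE_R_G as placeholder bit values standing in for HPTE_R_N/HPTE_R_G:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FAKE_HPTE_R_N 0x4ULL   /* no-execute */
#define FAKE_HPTE_R_G 0x8ULL   /* guarded */

/* Executable only if readable and the HPTE's second doubleword has neither
 * the No-execute nor the Guarded bit set. */
static bool may_execute(bool may_read, uint64_t hpte_r)
{
	return may_read && !(hpte_r & (FAKE_HPTE_R_N | FAKE_HPTE_R_G));
}

int main(void)
{
	printf("readable, plain   -> exec=%d\n", may_execute(true, 0));
	printf("readable, guarded -> exec=%d\n", may_execute(true, FAKE_HPTE_R_G));
	printf("not readable      -> exec=%d\n", may_execute(false, 0));
	return 0;
}
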
/arch/x86/kvm/
paging_tmpl.h
99 static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl) in gpte_to_gfn_lvl() argument
101 return (gpte & PT_LVL_ADDR_MASK(lvl)) >> PAGE_SHIFT; in gpte_to_gfn_lvl()
105 unsigned gpte) in FNAME()
117 mask |= (gpte >> (PT_GUEST_DIRTY_SHIFT - PT_WRITABLE_SHIFT)) & in FNAME()
176 u64 gpte) in FNAME()
178 if (is_rsvd_bits_set(vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL)) in FNAME()
181 if (!FNAME(is_present_gpte)(gpte)) in FNAME()
186 !(gpte & PT_GUEST_ACCESSED_MASK)) in FNAME()
202 static inline unsigned FNAME(gpte_access)(u64 gpte) in FNAME()
206 access = ((gpte & VMX_EPT_WRITABLE_MASK) ? ACC_WRITE_MASK : 0) | in FNAME()
[all …]
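
gpte_to_gfn_lvl() in the paging_tmpl.h hit masks off the frame bits a guest PTE contributes at a given paging level and shifts them down to a guest frame number. A standalone approximation for 64-bit paging, with lvl_addr_mask() as a simplified stand-in for the kernel's PT_LVL_ADDR_MASK():

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT      12
#define PT64_LEVEL_BITS 9
#define PHYS_ADDR_BITS  52

/* Address bits a PTE contributes at 'level' (1 = 4K leaf, 2 = 2M, 3 = 1G). */
static uint64_t lvl_addr_mask(int level)
{
	uint64_t low  = 1ULL << (PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS);
	uint64_t high = 1ULL << PHYS_ADDR_BITS;

	return (high - 1) & ~(low - 1);
}

static uint64_t gpte_to_gfn_lvl(uint64_t gpte, int level)
{
	return (gpte & lvl_addr_mask(level)) >> PAGE_SHIFT;
}

int main(void)
{
	uint64_t gpte = 0x00000001234567e7ULL;   /* frame bits plus flag bits */

	printf("4K gfn = %#llx\n", (unsigned long long)gpte_to_gfn_lvl(gpte, 1));
	printf("2M gfn = %#llx\n", (unsigned long long)gpte_to_gfn_lvl(gpte, 2));
	return 0;
}
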
mmu.c
633 static gfn_t pse36_gfn_delta(u32 gpte) in pse36_gfn_delta() argument
637 return (gpte & PT32_DIR_PSE36_MASK) << shift; in pse36_gfn_delta()
4010 static bool is_rsvd_bits_set(struct kvm_mmu *mmu, u64 gpte, int level) in is_rsvd_bits_set() argument
4012 return __is_rsvd_bits_set(&mmu->guest_rsvd_check, gpte, level); in is_rsvd_bits_set()
4465 unsigned level, unsigned gpte) in is_last_gpte() argument
4472 gpte &= level - mmu->last_nonleaf_level; in is_last_gpte()
4479 gpte |= level - PT_PAGE_TABLE_LEVEL - 1; in is_last_gpte()
4481 return gpte & PT_PAGE_SIZE_MASK; in is_last_gpte()
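
pse36_gfn_delta() in the mmu.c hit recovers the extra physical-address bits that PSE-36 stores in a 32-bit 4M PDE. A standalone sketch using the standard PSE-36 layout written out locally rather than the kernel's PT32_DIR_PSE36_* macros:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT   12
#define PSE36_MASK   0x001fe000u   /* PDE bits 13..20 hold phys bits 32..39 */
#define PSE36_SHIFT  13

static uint64_t pse36_gfn_delta(uint32_t gpte)
{
	/* Move PDE bits 13..20 up to gfn bits 20..27 (phys bits 32..39). */
	int shift = 32 - PSE36_SHIFT - PAGE_SHIFT;

	return (uint64_t)(gpte & PSE36_MASK) << shift;
}

int main(void)
{
	/* present + writable + 4M page size, PSE-36 field = 0xab */
	uint32_t pde = (0xabu << PSE36_SHIFT) | 0x83u;

	printf("gfn delta = %#llx\n", (unsigned long long)pse36_gfn_delta(pde));
	return 0;
}
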
/arch/powerpc/include/asm/
kvm_book3s.h
188 struct kvmppc_pte *gpte, u64 root,
191 struct kvmppc_pte *gpte, u64 table,
194 struct kvmppc_pte *gpte, bool data, bool iswrite);