/arch/powerpc/kvm/ |
D | book3s_64_mmu_radix.c |
  144  u64 pte, base, gpa;  in kvmppc_mmu_walk_radix_tree() local
  199  gpa = pte & 0x01fffffffffff000ul;  in kvmppc_mmu_walk_radix_tree()
  200  if (gpa & ((1ul << offset) - 1))  in kvmppc_mmu_walk_radix_tree()
  202  gpa |= eaddr & ((1ul << offset) - 1);  in kvmppc_mmu_walk_radix_tree()
  210  gpte->raddr = gpa;  in kvmppc_mmu_walk_radix_tree()
  418  void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,  in kvmppc_unmap_pte() argument
  425  unsigned long gfn = gpa >> PAGE_SHIFT;  in kvmppc_unmap_pte()
  429  old = kvmppc_radix_update_pte(kvm, pte, ~0UL, 0, gpa, shift);  in kvmppc_unmap_pte()
  430  kvmppc_radix_tlbie_page(kvm, gpa, shift, lpid);  in kvmppc_unmap_pte()
  449  gpa &= ~(page_size - 1);  in kvmppc_unmap_pte()
  [all …]
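A minimal, standalone sketch of the bit manipulation visible at lines 199-202 of book3s_64_mmu_radix.c above: the page-frame bits of a leaf radix PTE are combined with the low bits of the effective address to form the guest physical address. The mask constant is taken from the snippet; the variable values, the macro name, and the 4 KiB offset width are made-up examples, not kernel code.

    #include <stdint.h>
    #include <stdio.h>

    /* Page-frame mask copied from line 199 above (PTE bits 12..52). */
    #define RADIX_PTE_RPN_MASK 0x01fffffffffff000ULL

    int main(void)
    {
        uint64_t pte    = 0x0000000123456003ULL; /* made-up leaf PTE: frame plus low flag bits */
        uint64_t eaddr  = 0x0000000000000abcULL; /* made-up guest effective address            */
        unsigned offset = 12;                    /* log2 of the page size mapped at this level */

        uint64_t gpa = pte & RADIX_PTE_RPN_MASK; /* line 199: keep only the page frame     */
        if (gpa & ((1ULL << offset) - 1))        /* line 200: frame must be size-aligned   */
            return 1;
        gpa |= eaddr & ((1ULL << offset) - 1);   /* line 202: fold in the in-page offset   */

        printf("gpa = 0x%llx\n", (unsigned long long)gpa);
        return 0;
    }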
|
D | book3s_hv_uvmem.c |
  234  unsigned long gpa;  member
  516  struct kvm *kvm, unsigned long gpa, struct page *fault_page)  in __kvmppc_svm_page_out() argument
  536  if (!kvmppc_gfn_is_uvmem_pfn(gpa >> page_shift, kvm, NULL))  in __kvmppc_svm_page_out()
  569  gpa, 0, page_shift);  in __kvmppc_svm_page_out()
  589  struct kvm *kvm, unsigned long gpa,  in kvmppc_svm_page_out() argument
  595  ret = __kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa,  in kvmppc_svm_page_out()
  645  PAGE_SHIFT, kvm, pvt->gpa, NULL))  in kvmppc_uvmem_drop_pages()
  647  pvt->gpa, addr);  in kvmppc_uvmem_drop_pages()
  695  static struct page *kvmppc_uvmem_get_page(unsigned long gpa, struct kvm *kvm)  in kvmppc_uvmem_get_page() argument
  719  kvmppc_gfn_secure_uvmem_pfn(gpa >> PAGE_SHIFT, uvmem_pfn, kvm);  in kvmppc_uvmem_get_page()
  [all …]
|
D | book3s_hv_nested.c |
  920  unsigned long gpa;  in kvmhv_update_nest_rmap_rc() local
  924  gpa = n_rmap & RMAP_NESTED_GPA_MASK;  in kvmhv_update_nest_rmap_rc()
  928  ptep = find_kvm_nested_guest_pte(kvm, lpid, gpa, &shift);  in kvmhv_update_nest_rmap_rc()
  937  kvmppc_radix_tlbie_page(kvm, gpa, shift, lpid);  in kvmhv_update_nest_rmap_rc()
  967  unsigned long gpa;  in kvmhv_remove_nest_rmap() local
  971  gpa = n_rmap & RMAP_NESTED_GPA_MASK;  in kvmhv_remove_nest_rmap()
  978  ptep = find_kvm_nested_guest_pte(kvm, lpid, gpa, &shift);  in kvmhv_remove_nest_rmap()
  981  kvmppc_unmap_pte(kvm, ptep, gpa, shift, NULL, gp->shadow_lpid);  in kvmhv_remove_nest_rmap()
  1000  unsigned long gpa, unsigned long hpa,  in kvmhv_remove_nest_rmap_range() argument
  1008  gfn = (gpa >> PAGE_SHIFT) - memslot->base_gfn;  in kvmhv_remove_nest_rmap_range()
  [all …]
|
D | e500.h |
  255  gpa_t gpa;  in tlbe_is_host_safe() local
  267  gpa = get_tlb_raddr(tlbe);  in tlbe_is_host_safe()
  268  if (!gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT))  in tlbe_is_host_safe()
|
/arch/s390/kvm/ |
D | gaccess.h |
  152  unsigned long gpa = gra + kvm_s390_get_prefix(vcpu);  in write_guest_lc() local
  154  return kvm_write_guest(vcpu->kvm, gpa, data, len);  in write_guest_lc()
  178  unsigned long gpa = gra + kvm_s390_get_prefix(vcpu);  in read_guest_lc() local
  180  return kvm_read_guest(vcpu->kvm, gpa, data, len);  in read_guest_lc()
  190  unsigned long *gpa, enum gacc_mode mode,
  196  int check_gpa_range(struct kvm *kvm, unsigned long gpa, unsigned long length,
  199  int access_guest_abs_with_key(struct kvm *kvm, gpa_t gpa, void *data,
  209  int cmpxchg_guest_abs_with_key(struct kvm *kvm, gpa_t gpa, int len, __uint128_t *old,
  371  int write_guest_abs(struct kvm_vcpu *vcpu, unsigned long gpa, void *data,  in write_guest_abs() argument
  374  return kvm_write_guest(vcpu->kvm, gpa, data, len);  in write_guest_abs()
  [all …]
|
D | vsie.c |
  658  static int pin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t *hpa)  in pin_guest_page() argument
  662  page = gfn_to_page(kvm, gpa_to_gfn(gpa));  in pin_guest_page()
  665  *hpa = (hpa_t)page_to_phys(page) + (gpa & ~PAGE_MASK);  in pin_guest_page()
  670  static void unpin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t hpa)  in unpin_guest_page() argument
  674  mark_page_dirty(kvm, gpa_to_gfn(gpa));  in unpin_guest_page()
  739  gpa_t gpa;  in pin_blocks() local
  742  gpa = READ_ONCE(scb_o->scaol) & ~0xfUL;  in pin_blocks()
  744  gpa |= (u64) READ_ONCE(scb_o->scaoh) << 32;  in pin_blocks()
  745  if (gpa) {  in pin_blocks()
  746  if (gpa < 2 * PAGE_SIZE)  in pin_blocks()
  [all …]
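The pin_guest_page()/unpin_guest_page() lines above rely on the split of a guest physical address into a frame number and an in-page byte offset, a pattern that recurs throughout this listing (gpa >> PAGE_SHIFT, gpa & ~PAGE_MASK). A standalone sketch of just that arithmetic, assuming 4 KiB pages and made-up values; in the kernel the equivalent work is done by gpa_to_gfn() and PAGE_MASK:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12                        /* assumed 4 KiB pages */
    #define PAGE_SIZE  (1ULL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    int main(void)
    {
        uint64_t gpa = 0x0000000012345678ULL;    /* made-up guest physical address */
        uint64_t gfn = gpa >> PAGE_SHIFT;        /* guest frame number             */
        uint64_t off = gpa & ~PAGE_MASK;         /* byte offset inside that page   */

        printf("gfn = 0x%llx, offset = 0x%llx\n",
               (unsigned long long)gfn, (unsigned long long)off);
        return 0;
    }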
|
D | gaccess.c |
  606  static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val)  in deref_table() argument
  608  return kvm_read_guest(kvm, gpa, val, sizeof(*val));  in deref_table()
  633  unsigned long *gpa, const union asce asce,  in guest_translate() argument
  792  *gpa = raddr.addr;  in guest_translate()
  816  enum gacc_mode mode, gpa_t gpa)  in vm_check_access_key() argument
  826  hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));  in vm_check_access_key()
  879  enum gacc_mode mode, union asce asce, gpa_t gpa,  in vcpu_check_access_key() argument
  893  hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(gpa));  in vcpu_check_access_key()
  962  unsigned long gpa;  in guest_range_to_gpas() local
  972  rc = guest_translate(vcpu, ga, &gpa, asce, mode, &prot);  in guest_range_to_gpas()
  [all …]
|
/arch/x86/kvm/mmu/ |
D | page_track.h |
  30  void __kvm_page_track_write(struct kvm *kvm, gpa_t gpa, const u8 *new, int bytes);
  41  static inline void __kvm_page_track_write(struct kvm *kvm, gpa_t gpa,  in __kvm_page_track_write() argument
  50  static inline void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,  in kvm_page_track_write() argument
  53  __kvm_page_track_write(vcpu->kvm, gpa, new, bytes);  in kvm_page_track_write()
  55  kvm_mmu_track_write(vcpu, gpa, new, bytes);  in kvm_page_track_write()
|
/arch/x86/include/asm/uv/ |
D | uv_hub.h |
  461  uv_gpa_in_mmr_space(unsigned long gpa)  in uv_gpa_in_mmr_space() argument
  463  return (gpa >> 62) == 0x3UL;  in uv_gpa_in_mmr_space()
  467  static inline unsigned long uv_gpa_to_soc_phys_ram(unsigned long gpa)  in uv_gpa_to_soc_phys_ram() argument
  475  gpa = ((gpa << uv_hub_info->m_shift) >> uv_hub_info->m_shift) |  in uv_gpa_to_soc_phys_ram()
  476  ((gpa >> uv_hub_info->n_lshift) << uv_hub_info->m_val);  in uv_gpa_to_soc_phys_ram()
  478  paddr = gpa & uv_hub_info->gpa_mask;  in uv_gpa_to_soc_phys_ram()
  485  static inline unsigned long uv_gpa_to_gnode(unsigned long gpa)  in uv_gpa_to_gnode() argument
  490  return gpa >> n_lshift;  in uv_gpa_to_gnode()
  492  return uv_gam_range(gpa)->nasid >> 1;  in uv_gpa_to_gnode()
  496  static inline int uv_gpa_to_pnode(unsigned long gpa)  in uv_gpa_to_pnode() argument
  [all …]
|
/arch/riscv/kvm/ |
D | tlb.c |
  21  gpa_t gpa, gpa_t gpsz,  in kvm_riscv_local_hfence_gvma_vmid_gpa() argument
  33  for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))  in kvm_riscv_local_hfence_gvma_vmid_gpa()
  38  for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))  in kvm_riscv_local_hfence_gvma_vmid_gpa()
  49  void kvm_riscv_local_hfence_gvma_gpa(gpa_t gpa, gpa_t gpsz,  in kvm_riscv_local_hfence_gvma_gpa() argument
  61  for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))  in kvm_riscv_local_hfence_gvma_gpa()
  66  for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))  in kvm_riscv_local_hfence_gvma_gpa()
  334  gpa_t gpa, gpa_t gpsz,  in kvm_riscv_hfence_gvma_vmid_gpa() argument
  341  data.addr = gpa;  in kvm_riscv_hfence_gvma_vmid_gpa()
|
D | mmu.c |
  179  gpa_t gpa, phys_addr_t hpa,  in gstage_map_page() argument
  219  return gstage_set_pte(kvm, level, pcache, gpa, &new_pte);  in gstage_map_page()
  346  int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa,  in kvm_riscv_gstage_ioremap() argument
  359  end = (gpa + size + PAGE_SIZE - 1) & PAGE_MASK;  in kvm_riscv_gstage_ioremap()
  362  for (addr = gpa; addr < end; addr += PAGE_SIZE) {  in kvm_riscv_gstage_ioremap()
  386  void kvm_riscv_gstage_iounmap(struct kvm *kvm, gpa_t gpa, unsigned long size)  in kvm_riscv_gstage_iounmap() argument
  389  gstage_unmap_range(kvm, gpa, size, false);  in kvm_riscv_gstage_iounmap()
  425  gpa_t gpa = slot->base_gfn << PAGE_SHIFT;  in kvm_arch_flush_shadow_memslot() local
  429  gstage_unmap_range(kvm, gpa, size, false);  in kvm_arch_flush_shadow_memslot()
  510  gpa_t gpa = base_gpa + (vm_start - hva);  in kvm_arch_prepare_memory_region() local
  [all …]
|
/arch/x86/kvm/ |
D | cpuid.h |
  45  static inline bool kvm_vcpu_is_legal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)  in kvm_vcpu_is_legal_gpa() argument
  47  return !(gpa & vcpu->arch.reserved_gpa_bits);  in kvm_vcpu_is_legal_gpa()
  50  static inline bool kvm_vcpu_is_illegal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)  in kvm_vcpu_is_illegal_gpa() argument
  52  return !kvm_vcpu_is_legal_gpa(vcpu, gpa);  in kvm_vcpu_is_illegal_gpa()
  56  gpa_t gpa, gpa_t alignment)  in kvm_vcpu_is_legal_aligned_gpa() argument
  58  return IS_ALIGNED(gpa, alignment) && kvm_vcpu_is_legal_gpa(vcpu, gpa);  in kvm_vcpu_is_legal_aligned_gpa()
  61  static inline bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa)  in page_address_valid() argument
  63  return kvm_vcpu_is_legal_aligned_gpa(vcpu, gpa, PAGE_SIZE);  in page_address_valid()
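The cpuid.h helpers above boil down to two checks on a guest physical address: no reserved high bits set, and the required alignment. The standalone sketch below mirrors that pattern; the 48-bit guest physical address width and all names in it are assumptions for illustration, whereas KVM computes vcpu->arch.reserved_gpa_bits from the guest's reported physical address width.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE        4096ULL
    #define GUEST_PHYS_BITS  48       /* assumed guest physical address width */

    /* Every bit at or above the guest's physical address width is reserved. */
    static uint64_t reserved_gpa_bits(void)
    {
        return ~((1ULL << GUEST_PHYS_BITS) - 1);
    }

    static bool gpa_is_legal(uint64_t gpa)
    {
        return !(gpa & reserved_gpa_bits());                 /* cf. kvm_vcpu_is_legal_gpa()         */
    }

    static bool gpa_is_legal_aligned(uint64_t gpa, uint64_t alignment)
    {
        return (gpa % alignment) == 0 && gpa_is_legal(gpa);  /* cf. kvm_vcpu_is_legal_aligned_gpa() */
    }

    int main(void)
    {
        uint64_t ok  = 0x0000000123456000ULL;  /* page aligned, below 2^48      */
        uint64_t bad = 0x0001000000000000ULL;  /* bit 48 set: reserved, illegal */

        printf("ok : %d\n", gpa_is_legal_aligned(ok,  PAGE_SIZE));
        printf("bad: %d\n", gpa_is_legal_aligned(bad, PAGE_SIZE));
        return 0;
    }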
|
D | mmu.h |
  124  void kvm_mmu_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
  295  gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u64 access,
  300  gpa_t gpa, u64 access,  in kvm_translate_gpa() argument
  304  return gpa;  in kvm_translate_gpa()
  305  return translate_nested_gpa(vcpu, gpa, access, exception);  in kvm_translate_gpa()
|
D | xen.c |
  41  gpa_t gpa = gfn_to_gpa(gfn);  in kvm_xen_shared_info_init() local
  54  ret = kvm_gpc_activate(gpc, gpa, PAGE_SIZE);  in kvm_xen_shared_info_init()
  264  if ((gpc1->gpa & ~PAGE_MASK) + user_len >= PAGE_SIZE) {  in kvm_xen_update_runstate_guest()
  265  user_len1 = PAGE_SIZE - (gpc1->gpa & ~PAGE_MASK);  in kvm_xen_update_runstate_guest()
  346  if (kvm_gpc_activate(gpc2, gpc1->gpa + user_len1,  in kvm_xen_update_runstate_guest()
  438  mark_page_dirty_in_slot(v->kvm, gpc1->memslot, gpc1->gpa >> PAGE_SHIFT);  in kvm_xen_update_runstate_guest()
  440  mark_page_dirty_in_slot(v->kvm, gpc2->memslot, gpc2->gpa >> PAGE_SHIFT);  in kvm_xen_update_runstate_guest()
  552  mark_page_dirty_in_slot(v->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);  in kvm_xen_inject_pending_events()
  681  data->u.shared_info.gfn = gpa_to_gfn(kvm->arch.xen.shinfo_cache.gpa);  in kvm_xen_hvm_get_attr()
  729  if (data->u.gpa == KVM_XEN_INVALID_GPA) {  in kvm_xen_vcpu_set_attr()
  [all …]
|
/arch/riscv/include/asm/ |
D | kvm_host.h |
  257  gpa_t gpa, gpa_t gpsz,
  260  void kvm_riscv_local_hfence_gvma_gpa(gpa_t gpa, gpa_t gpsz,
  286  gpa_t gpa, gpa_t gpsz,
  304  int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa,
  307  void kvm_riscv_gstage_iounmap(struct kvm *kvm, gpa_t gpa,
  311  gpa_t gpa, unsigned long hva, bool is_write);
|
/arch/mips/kvm/ |
D | mmu.c |
  449  gpa_t gpa = range->start << PAGE_SHIFT;  in kvm_set_spte_gfn() local
  451  pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa);  in kvm_set_spte_gfn()
  484  gpa_t gpa = range->start << PAGE_SHIFT;  in kvm_test_age_gfn() local
  485  pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa);  in kvm_test_age_gfn()
  510  static int _kvm_mips_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa,  in _kvm_mips_map_page_fast() argument
  515  gfn_t gfn = gpa >> PAGE_SHIFT;  in _kvm_mips_map_page_fast()
  524  ptep = kvm_mips_pte_for_gpa(kvm, NULL, gpa);  in _kvm_mips_map_page_fast()
  586  static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,  in kvm_mips_map_page() argument
  592  gfn_t gfn = gpa >> PAGE_SHIFT;  in kvm_mips_map_page()
  602  err = _kvm_mips_map_page_fast(vcpu, gpa, write_fault, out_entry,  in kvm_mips_map_page()
  [all …]
|
/arch/x86/xen/ |
D | mmu_hvm.c |
  42  a.gpa = __pa(mm->pgd);  in xen_hvm_exit_mmap()
  53  a.gpa = 0x00;  in is_pagetable_dying_supported()
|
/arch/x86/kvm/vmx/ |
D | sgx.c |
  74  gpa_t *gpa)  in sgx_gva_to_gpa() argument
  79  *gpa = kvm_mmu_gva_to_gpa_write(vcpu, gva, &ex);  in sgx_gva_to_gpa()
  81  *gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, &ex);  in sgx_gva_to_gpa()
  83  if (*gpa == INVALID_GPA) {  in sgx_gva_to_gpa()
  91  static int sgx_gpa_to_hva(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned long *hva)  in sgx_gpa_to_hva() argument
  93  *hva = kvm_vcpu_gfn_to_hva(vcpu, PFN_DOWN(gpa));  in sgx_gpa_to_hva()
  95  sgx_handle_emulation_failure(vcpu, gpa, 1);  in sgx_gpa_to_hva()
  99  *hva |= gpa & ~PAGE_MASK;  in sgx_gpa_to_hva()
|
D | vmx_ops.h |
  18  void invept_error(unsigned long ext, u64 eptp, gpa_t gpa);
  315  static inline void __invept(unsigned long ext, u64 eptp, gpa_t gpa)  in __invept() argument
  318  u64 eptp, gpa;  in __invept() member
  319  } operand = {eptp, gpa};  in __invept()
  321  vmx_asm2(invept, "r"(ext), "m"(operand), ext, eptp, gpa);  in __invept()
|
/arch/x86/coco/tdx/ |
D | tdx.c |
  426  if (!mmio_write(size, ve->gpa, val))  in handle_mmio()
  431  if (!mmio_write(size, ve->gpa, val))  in handle_mmio()
  453  if (!mmio_read(size, ve->gpa, &val))  in handle_mmio()
  604  ve->gpa = out.r9;  in tdx_get_ve_info()
  626  static inline bool is_private_gpa(u64 gpa)  in is_private_gpa() argument
  628  return gpa == cc_mkenc(gpa);  in is_private_gpa()
  649  if (is_private_gpa(ve->gpa))  in virt_exception_kernel()
|
/arch/arm64/include/asm/ |
D | kvm_mmu.h |
  265  gpa_t gpa, void *data, unsigned long len)  in kvm_read_guest_lock() argument
  268  int ret = kvm_read_guest(kvm, gpa, data, len);  in kvm_read_guest_lock()
  275  static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,  in kvm_write_guest_lock() argument
  279  int ret = kvm_write_guest(kvm, gpa, data, len);  in kvm_write_guest_lock()
|
/arch/powerpc/include/asm/ |
D | ultravisor.h |
  75  static inline int uv_page_inval(u64 lpid, u64 gpa, u64 page_shift)  in uv_page_inval() argument
  77  return ucall_norets(UV_PAGE_INVAL, lpid, gpa, page_shift);  in uv_page_inval()
|
D | kvm_book3s.h |
  163  unsigned long gpa, gva_t ea, int is_store);
  195  extern void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
  200  bool writing, unsigned long gpa,
  203  unsigned long gpa,
  236  extern kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa,
  249  unsigned long gpa, bool dirty);
|
/arch/ia64/include/asm/uv/ |
D | uv_hub.h |
  166  static inline void *uv_va(unsigned long gpa)  in uv_va() argument
  168  return __va(gpa & uv_hub_info->gpa_mask);  in uv_va()
|
/arch/arm64/kvm/vgic/ |
D | vgic-its.c |
  905  static bool __is_visible_gfn_locked(struct vgic_its *its, gpa_t gpa)  in __is_visible_gfn_locked() argument
  907  gfn_t gfn = gpa >> PAGE_SHIFT;  in __is_visible_gfn_locked()
  1000  gpa_t gpa;  in vgic_its_check_event_id() local
  1006  gpa = device->itt_addr + event_id * ite_esz;  in vgic_its_check_event_id()
  1007  return __is_visible_gfn_locked(its, gpa);  in vgic_its_check_event_id()
  2175  gpa_t gpa = base;  in scan_its_table() local
  2185  ret = kvm_read_guest_lock(kvm, gpa, entry, esz);  in scan_its_table()
  2198  gpa += byte_offset;  in scan_its_table()
  2208  struct its_ite *ite, gpa_t gpa, int ite_esz)  in vgic_its_save_ite() argument
  2219  return vgic_write_guest_lock(kvm, gpa, &val, ite_esz);  in vgic_its_save_ite()
  [all …]
|