/arch/powerpc/kvm/

book3s_64_mmu_radix.c
  135  u64 pte, base, gpa;    in kvmppc_mmu_walk_radix_tree() local
  187  gpa = pte & 0x01fffffffffff000ul;    in kvmppc_mmu_walk_radix_tree()
  188  if (gpa & ((1ul << offset) - 1))    in kvmppc_mmu_walk_radix_tree()
  190  gpa |= eaddr & ((1ul << offset) - 1);    in kvmppc_mmu_walk_radix_tree()
  198  gpte->raddr = gpa;    in kvmppc_mmu_walk_radix_tree()
  388  void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,    in kvmppc_unmap_pte() argument
  395  unsigned long gfn = gpa >> PAGE_SHIFT;    in kvmppc_unmap_pte()
  399  old = kvmppc_radix_update_pte(kvm, pte, ~0UL, 0, gpa, shift);    in kvmppc_unmap_pte()
  400  kvmppc_radix_tlbie_page(kvm, gpa, shift, lpid);    in kvmppc_unmap_pte()
  419  gpa &= ~(page_size - 1);    in kvmppc_unmap_pte()
  [all …]

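The walk in kvmppc_mmu_walk_radix_tree() (lines 187-190) assembles a guest physical address from two sources: the real-address bits of the leaf PTE and the page offset of the faulting effective address. A minimal user-space sketch of that idiom, with the mask taken from the listing and the sample values invented:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical leaf PTE and faulting effective address. */
        uint64_t pte    = 0x0000000012345003ull;
        uint64_t eaddr  = 0x0000000000000a5cull;
        unsigned offset = 12;                 /* page shift of the leaf level */

        /* Real-address bits come from the PTE (mask as on line 187)... */
        uint64_t gpa = pte & 0x01fffffffffff000ull;
        /* ...and the byte offset within the page from the effective address. */
        gpa |= eaddr & ((1ull << offset) - 1);

        printf("gpa = 0x%llx\n", (unsigned long long)gpa);   /* 0x12345a5c */
        return 0;
    }
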
book3s_hv_nested.c
  817  unsigned long gpa;    in kvmhv_update_nest_rmap_rc() local
  821  gpa = n_rmap & RMAP_NESTED_GPA_MASK;    in kvmhv_update_nest_rmap_rc()
  828  ptep = __find_linux_pte(gp->shadow_pgtable, gpa, NULL, &shift);    in kvmhv_update_nest_rmap_rc()
  837  kvmppc_radix_tlbie_page(kvm, gpa, shift, lpid);    in kvmhv_update_nest_rmap_rc()
  867  unsigned long gpa;    in kvmhv_remove_nest_rmap() local
  871  gpa = n_rmap & RMAP_NESTED_GPA_MASK;    in kvmhv_remove_nest_rmap()
  878  ptep = __find_linux_pte(gp->shadow_pgtable, gpa, NULL, &shift);    in kvmhv_remove_nest_rmap()
  881  kvmppc_unmap_pte(kvm, ptep, gpa, shift, NULL, gp->shadow_lpid);    in kvmhv_remove_nest_rmap()
  900  unsigned long gpa, unsigned long hpa,    in kvmhv_remove_nest_rmap_range() argument
  908  gfn = (gpa >> PAGE_SHIFT) - memslot->base_gfn;    in kvmhv_remove_nest_rmap_range()
  [all …]

e500.h
  255  gpa_t gpa;    in tlbe_is_host_safe() local
  267  gpa = get_tlb_raddr(tlbe);    in tlbe_is_host_safe()
  268  if (!gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT))    in tlbe_is_host_safe()

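tlbe_is_host_safe() (line 268) uses the most common idiom in this listing: shifting a gpa right by PAGE_SHIFT to get a guest frame number for the memslot lookup. A small sketch of the gpa/gfn/offset relationship, assuming 4 KiB pages:

    #include <assert.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1ull << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    int main(void)
    {
        uint64_t gpa = 0x12345a5cull;       /* arbitrary guest physical address */
        uint64_t gfn = gpa >> PAGE_SHIFT;   /* guest frame number: 0x12345 */
        uint64_t off = gpa & ~PAGE_MASK;    /* offset within the page: 0xa5c */

        /* The split is lossless: frame number plus offset rebuilds the gpa. */
        assert(((gfn << PAGE_SHIFT) | off) == gpa);
        return 0;
    }
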
book3s_64_mmu_hv.c
  429  unsigned long gpa, gva_t ea, int is_store)    in kvmppc_hv_emulate_mmio() argument
  442  ret = kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, (gpa_t) gpa, 0,    in kvmppc_hv_emulate_mmio()
  486  vcpu->arch.paddr_accessed = gpa;    in kvmppc_hv_emulate_mmio()
  500  unsigned long gpa, gfn, hva, pfn;    in kvmppc_book3s_hv_page_fault() local
  532  gpa = gpa_base | (ea & (psize - 1));    in kvmppc_book3s_hv_page_fault()
  533  return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,    in kvmppc_book3s_hv_page_fault()
  561  gpa = gpa_base | (ea & (psize - 1));    in kvmppc_book3s_hv_page_fault()
  562  gfn = gpa >> PAGE_SHIFT;    in kvmppc_book3s_hv_page_fault()
  569  return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,    in kvmppc_book3s_hv_page_fault()
  1137 gfn = vpa->gpa >> PAGE_SHIFT;    in kvmppc_harvest_vpa_dirty()
  [all …]

/arch/s390/kvm/

gaccess.h
  152  unsigned long gpa = gra + kvm_s390_get_prefix(vcpu);    in write_guest_lc() local
  154  return kvm_write_guest(vcpu->kvm, gpa, data, len);    in write_guest_lc()
  178  unsigned long gpa = gra + kvm_s390_get_prefix(vcpu);    in read_guest_lc() local
  180  return kvm_read_guest(vcpu->kvm, gpa, data, len);    in read_guest_lc()
  190  u8 ar, unsigned long *gpa, enum gacc_mode mode);
  310  int write_guest_abs(struct kvm_vcpu *vcpu, unsigned long gpa, void *data,    in write_guest_abs() argument
  313  return kvm_write_guest(vcpu->kvm, gpa, data, len);    in write_guest_abs()
  333  int read_guest_abs(struct kvm_vcpu *vcpu, unsigned long gpa, void *data,    in read_guest_abs() argument
  336  return kvm_read_guest(vcpu->kvm, gpa, data, len);    in read_guest_abs()

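write_guest_lc() and read_guest_lc() (lines 152 and 178) turn a guest real address into a guest absolute one by adding the VCPU's prefix, which works here because these helpers only touch the relocated 8 KiB lowcore. A hedged sketch of that step, with an invented prefix value:

    #include <assert.h>
    #include <stdint.h>

    #define LOWCORE_SIZE 0x2000ull    /* the 8 KiB s390 lowcore */

    /* Guest real -> guest absolute for lowcore addresses, as in the
     * _lc helpers above: add the per-VCPU prefix. */
    static uint64_t lc_real_to_abs(uint64_t gra, uint64_t prefix)
    {
        assert(gra < LOWCORE_SIZE);   /* the _lc helpers are lowcore-only */
        return gra + prefix;          /* the gpa handed to kvm_*_guest() */
    }

    int main(void)
    {
        assert(lc_real_to_abs(0x1000, 0x80000) == 0x81000);
        return 0;
    }
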
vsie.c
  647  static int pin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t *hpa)    in pin_guest_page() argument
  651  page = gfn_to_page(kvm, gpa_to_gfn(gpa));    in pin_guest_page()
  654  *hpa = (hpa_t) page_to_virt(page) + (gpa & ~PAGE_MASK);    in pin_guest_page()
  659  static void unpin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t hpa)    in unpin_guest_page() argument
  663  mark_page_dirty(kvm, gpa_to_gfn(gpa));    in unpin_guest_page()
  728  gpa_t gpa;    in pin_blocks() local
  731  gpa = READ_ONCE(scb_o->scaol) & ~0xfUL;    in pin_blocks()
  733  gpa |= (u64) READ_ONCE(scb_o->scaoh) << 32;    in pin_blocks()
  734  if (gpa) {    in pin_blocks()
  735  if (gpa < 2 * PAGE_SIZE)    in pin_blocks()
  [all …]

gaccess.c
  588  static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val)    in deref_table() argument
  590  return kvm_read_guest(kvm, gpa, val, sizeof(*val));    in deref_table()
  615  unsigned long *gpa, const union asce asce,    in guest_translate() argument
  774  *gpa = raddr.addr;    in guest_translate()
  834  unsigned long _len, nr_pages, gpa, idx;    in access_guest() local
  858  gpa = *(pages + idx) + (ga & ~PAGE_MASK);    in access_guest()
  859  _len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len);    in access_guest()
  861  rc = kvm_write_guest(vcpu->kvm, gpa, data, _len);    in access_guest()
  863  rc = kvm_read_guest(vcpu->kvm, gpa, data, _len);    in access_guest()
  878  unsigned long _len, gpa;    in access_guest_real() local
  [all …]

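access_guest() (lines 858-859) shows how a guest access that may cross page boundaries is cut into per-page pieces: each chunk is the smaller of the bytes left in the current page and the bytes still to copy. A standalone sketch of just that arithmetic, with the copy itself stubbed out as a printf:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096ull

    int main(void)
    {
        uint64_t gpa = 0x1f40ull;   /* hypothetical start, not page aligned */
        uint64_t len = 10000;       /* spans several pages */

        while (len) {
            /* Bytes left in the current page, capped by what remains. */
            uint64_t _len = PAGE_SIZE - (gpa & (PAGE_SIZE - 1));
            if (_len > len)
                _len = len;
            printf("copy %5llu bytes at gpa 0x%llx\n",
                   (unsigned long long)_len, (unsigned long long)gpa);
            gpa += _len;
            len -= _len;
        }
        return 0;
    }
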
/arch/x86/include/asm/uv/

uv_hub.h
  524  uv_gpa_in_mmr_space(unsigned long gpa)    in uv_gpa_in_mmr_space() argument
  526  return (gpa >> 62) == 0x3UL;    in uv_gpa_in_mmr_space()
  530  static inline unsigned long uv_gpa_to_soc_phys_ram(unsigned long gpa)    in uv_gpa_to_soc_phys_ram() argument
  538  gpa = ((gpa << uv_hub_info->m_shift) >> uv_hub_info->m_shift) |    in uv_gpa_to_soc_phys_ram()
  539  ((gpa >> uv_hub_info->n_lshift) << uv_hub_info->m_val);    in uv_gpa_to_soc_phys_ram()
  541  paddr = gpa & uv_hub_info->gpa_mask;    in uv_gpa_to_soc_phys_ram()
  548  static inline unsigned long uv_gpa_to_gnode(unsigned long gpa)    in uv_gpa_to_gnode() argument
  553  return gpa >> n_lshift;    in uv_gpa_to_gnode()
  555  return uv_gam_range(gpa)->nasid >> 1;    in uv_gpa_to_gnode()
  559  static inline int uv_gpa_to_pnode(unsigned long gpa)    in uv_gpa_to_pnode() argument
  [all …]

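uv_gpa_to_soc_phys_ram() (line 538) clears the node bits at the top of a UV global address with a left-then-right shift by m_shift rather than a mask constant. A sketch of that trick with an arbitrary shift count (the real one comes from the hub hardware info):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        unsigned m_shift = 26;                   /* invented; hardware-dependent */
        uint64_t gpa = 0xabc0000012345000ull;

        /* Shifting an unsigned value left then right by the same amount
         * clears that many high bits. */
        uint64_t low = (gpa << m_shift) >> m_shift;
        assert(low == (gpa & ((1ull << (64 - m_shift)) - 1)));
        return 0;
    }
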
/arch/x86/xen/

mmu_hvm.c
  53   a.gpa = __pa(mm->pgd);    in xen_hvm_exit_mmap()
  64   a.gpa = 0x00;    in is_pagetable_dying_supported()

/arch/x86/include/asm/

kvm_page_track.h
  35   void (*track_write)(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
  72   void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,

/arch/x86/kvm/vmx/

ops.h
  22   void invept_error(unsigned long ext, u64 eptp, gpa_t gpa);
  262  static inline void __invept(unsigned long ext, u64 eptp, gpa_t gpa)    in __invept() argument
  265  u64 eptp, gpa;    in __invept() member
  266  } operand = {eptp, gpa};    in __invept()
  268  vmx_asm2(invept, "r"(ext), "m"(operand), ext, eptp, gpa);    in __invept()

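__invept() (lines 262-268) builds a two-quadword operand in memory, {eptp, gpa}, and hands its address to the INVEPT instruction. The instruction is privileged, so the sketch below only models the 128-bit descriptor layout; the field comments follow my reading of the code above:

    #include <assert.h>
    #include <stdint.h>

    struct invept_operand {
        uint64_t eptp;   /* EPT pointer, low quadword of the descriptor */
        uint64_t gpa;    /* unused by the currently defined INVEPT types */
    };

    int main(void)
    {
        struct invept_operand operand = { 0x1234500000001eull, 0 };
        assert(sizeof(operand) == 16);   /* must be exactly 128 bits */
        (void)operand;
        return 0;
    }
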
/arch/mips/kvm/

mmu.c
  527  gpa_t gpa = gfn << PAGE_SHIFT;    in kvm_set_spte_handler() local
  529  pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa);    in kvm_set_spte_handler()
  575  gpa_t gpa = gfn << PAGE_SHIFT;    in kvm_test_age_hva_handler() local
  576  pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa);    in kvm_test_age_hva_handler()
  611  static int _kvm_mips_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa,    in _kvm_mips_map_page_fast() argument
  616  gfn_t gfn = gpa >> PAGE_SHIFT;    in _kvm_mips_map_page_fast()
  625  ptep = kvm_mips_pte_for_gpa(kvm, NULL, gpa);    in _kvm_mips_map_page_fast()
  687  static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,    in kvm_mips_map_page() argument
  693  gfn_t gfn = gpa >> PAGE_SHIFT;    in kvm_mips_map_page()
  703  err = _kvm_mips_map_page_fast(vcpu, gpa, write_fault, out_entry,    in kvm_mips_map_page()
  [all …]

/arch/ia64/include/asm/uv/

uv_hub.h
  166  static inline void *uv_va(unsigned long gpa)    in uv_va() argument
  168  return __va(gpa & uv_hub_info->gpa_mask);    in uv_va()

/arch/arm/include/asm/

kvm_mmu.h
  362  gpa_t gpa, void *data, unsigned long len)    in kvm_read_guest_lock() argument
  365  int ret = kvm_read_guest(kvm, gpa, data, len);    in kvm_read_guest_lock()
  372  static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,    in kvm_write_guest_lock() argument
  376  int ret = kvm_write_guest(kvm, gpa, data, len);    in kvm_write_guest_lock()

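kvm_read_guest_lock() and kvm_write_guest_lock() (here and again on arm64 below) are thin wrappers that bracket the unlocked guest-access primitive with a lock held across the memslot walk. A user-space analogy of that wrapper pattern, with a pthread rwlock standing in for the kernel's kvm->srcu read-side lock; none of these names are the kernel API:

    #include <pthread.h>
    #include <string.h>

    static pthread_rwlock_t memslots_lock = PTHREAD_RWLOCK_INITIALIZER;
    static char guest_mem[4096];              /* stand-in for guest memory */

    static int read_guest(unsigned long gpa, void *data, unsigned long len)
    {
        memcpy(data, guest_mem + gpa, len);   /* assumes gpa+len in range */
        return 0;
    }

    static int read_guest_lock(unsigned long gpa, void *data, unsigned long len)
    {
        pthread_rwlock_rdlock(&memslots_lock);   /* like srcu_read_lock() */
        int ret = read_guest(gpa, data, len);
        pthread_rwlock_unlock(&memslots_lock);   /* like srcu_read_unlock() */
        return ret;
    }

    int main(void)
    {
        char buf[16];
        return read_guest_lock(0x100, buf, sizeof(buf));
    }
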
/arch/powerpc/include/asm/

kvm_book3s.h
  164  unsigned long gpa, gva_t ea, int is_store);
  197  extern void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
  202  bool writing, unsigned long gpa,
  205  unsigned long gpa,
  238  extern kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa,
  251  unsigned long gpa, bool dirty);

iommu.h
  300  unsigned long gpa);
  306  #define iommu_tce_put_param_check(tbl, ioba, gpa) \    argument
  310  iommu_tce_check_gpa((tbl)->it_page_shift, (gpa)))

/arch/arm/boot/dts/

s3c64xx-pinctrl.dtsi
  19   gpa: gpa-gpio-bank {    label
  135  samsung,pins = "gpa-0", "gpa-1";
  141  samsung,pins = "gpa-2", "gpa-3";
  147  samsung,pins = "gpa-4", "gpa-5";
  153  samsung,pins = "gpa-6", "gpa-7";

s3c2416-pinctrl.dtsi
  15   gpa: gpa {    label

/arch/arm64/include/asm/

kvm_mmu.h
  427  gpa_t gpa, void *data, unsigned long len)    in kvm_read_guest_lock() argument
  430  int ret = kvm_read_guest(kvm, gpa, data, len);    in kvm_read_guest_lock()
  437  static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,    in kvm_write_guest_lock() argument
  441  int ret = kvm_write_guest(kvm, gpa, data, len);    in kvm_write_guest_lock()

/arch/x86/kvm/

x86.c
  2629 gpa_t gpa = data & ~0x3f;    in kvm_pv_enable_async_pf() local
  2643 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,    in kvm_pv_enable_async_pf()
  5403 gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,    in translate_nested_gpa() argument
  5412 t_gpa = vcpu->arch.mmu->gva_to_gpa(vcpu, gpa, access, exception);    in translate_nested_gpa()
  5455 gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access,    in kvm_read_guest_virt_helper() local
  5461 if (gpa == UNMAPPED_GVA)    in kvm_read_guest_virt_helper()
  5463 ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, data,    in kvm_read_guest_virt_helper()
  5489 gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access|PFERR_FETCH_MASK,    in kvm_fetch_guest_virt() local
  5491 if (unlikely(gpa == UNMAPPED_GVA))    in kvm_fetch_guest_virt()
  5497 ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, val,    in kvm_fetch_guest_virt()
  [all …]

page_track.c
  223  void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,    in kvm_page_track_write() argument
  238  n->track_write(vcpu, gpa, new, bytes, n);    in kvm_page_track_write()

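kvm_page_track_write() (line 238) fans a guest write out to registered notifiers through the track_write function pointer declared in kvm_page_track.h above. A simplified, self-contained sketch of that notifier pattern; the struct and helper names are stand-ins, not the kernel's:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t gpa_t;

    struct track_notifier {
        void (*track_write)(gpa_t gpa, const uint8_t *new, int bytes,
                            struct track_notifier *n);
    };

    static void on_write(gpa_t gpa, const uint8_t *new, int bytes,
                         struct track_notifier *n)
    {
        (void)new; (void)n;
        printf("write of %d bytes at gpa 0x%llx\n", bytes,
               (unsigned long long)gpa);
    }

    /* Walk the listener list and hand each one the written gpa and bytes. */
    static void page_track_write(struct track_notifier **list, int count,
                                 gpa_t gpa, const uint8_t *new, int bytes)
    {
        for (int i = 0; i < count; i++)
            list[i]->track_write(gpa, new, bytes, list[i]);
    }

    int main(void)
    {
        struct track_notifier n = { .track_write = on_write };
        struct track_notifier *list[] = { &n };
        uint8_t data[4] = { 0 };
        page_track_write(list, 1, 0x1000, data, sizeof(data));
        return 0;
    }
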
trace.h
  136  TP_PROTO(u64 gpa),
  137  TP_ARGS(gpa),
  140  __field(u64, gpa)
  144  __entry->gpa = gpa;
  147  TP_printk("fast mmio at gpa 0x%llx", __entry->gpa)
  772  TP_PROTO(gva_t gva, gpa_t gpa, bool write, bool gpa_match),
  773  TP_ARGS(gva, gpa, write, gpa_match),
  777  __field(gpa_t, gpa)
  784  __entry->gpa = gpa;
  789  TP_printk("gva %#lx gpa %#llx %s %s", __entry->gva, __entry->gpa,

mmu.c
  472  u64 gpa = gfn << PAGE_SHIFT;    in mark_mmio_spte() local
  476  mask |= gpa | shadow_nonpresent_or_rsvd_mask;    in mark_mmio_spte()
  477  mask |= (gpa & shadow_nonpresent_or_rsvd_mask)    in mark_mmio_spte()
  486  u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask;    in get_mmio_spte_gfn() local
  488  gpa |= (spte >> shadow_nonpresent_or_rsvd_mask_len)    in get_mmio_spte_gfn()
  491  return gpa >> PAGE_SHIFT;    in get_mmio_spte_gfn()
  3330 static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, int write,    in __direct_map() argument
  3337 gfn_t gfn = gpa >> PAGE_SHIFT;    in __direct_map()
  3343 trace_kvm_mmu_spte_requested(gpa, level, pfn);    in __direct_map()
  3344 for_each_shadow_entry(vcpu, gpa, it) {    in __direct_map()
  [all …]

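mark_mmio_spte() and get_mmio_spte_gfn() (lines 472-491) hide a gpa inside an MMIO shadow PTE: the gpa bits that would collide with the reserved-bit mask are parked a few bits higher on encode and pulled back down on decode. A round-trip sketch with invented mask and shift values (the kernel derives the real ones from the CPU's physical address width):

    #include <assert.h>
    #include <stdint.h>

    #define RSVD_SHIFT 48
    #define RSVD_LEN   4
    #define RSVD_MASK  (((1ull << RSVD_LEN) - 1) << RSVD_SHIFT)  /* bits 48..51 */
    #define LOWER_MASK ((1ull << RSVD_SHIFT) - 1)                /* bits 0..47  */

    static uint64_t encode(uint64_t gpa)
    {
        uint64_t spte = (gpa & LOWER_MASK) | RSVD_MASK;  /* set reserved bits */
        spte |= (gpa & RSVD_MASK) << RSVD_LEN;           /* park clashing bits */
        return spte;
    }

    static uint64_t decode(uint64_t spte)
    {
        uint64_t gpa = spte & LOWER_MASK;
        gpa |= (spte >> RSVD_LEN) & RSVD_MASK;           /* pull them back */
        return gpa;
    }

    int main(void)
    {
        uint64_t gpa = 0x0003000012345000ull;   /* has bits under RSVD_MASK */
        assert(decode(encode(gpa)) == gpa);     /* lossless round trip */
        return 0;
    }
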
mmutrace.h
  106  __field(__u64, gpa)
  110  __entry->gpa = ((u64)table_gfn << PAGE_SHIFT)
  114  TP_printk("gpa %llx", __entry->gpa)

paging_tmpl.h
  955  gpa_t gpa = UNMAPPED_GVA;    in FNAME() local
  961  gpa = gfn_to_gpa(walker.gfn);    in FNAME()
  962  gpa |= addr & ~PAGE_MASK;    in FNAME()
  966  return gpa;    in FNAME()
  976  gpa_t gpa = UNMAPPED_GVA;    in FNAME() local
  987  gpa = gfn_to_gpa(walker.gfn);    in FNAME()
  988  gpa |= vaddr & ~PAGE_MASK;    in FNAME()
  992  return gpa;    in FNAME()