Searched refs:gfn (Results 1 – 25 of 29) sorted by relevance


/arch/x86/kvm/
mmutrace.h
13 __field(__u64, gfn) \
20 __entry->gfn = sp->gfn; \
37 __entry->gfn, role.level, \
205 TP_PROTO(u64 *sptep, gfn_t gfn, unsigned access, unsigned int gen),
206 TP_ARGS(sptep, gfn, access, gen),
210 __field(gfn_t, gfn)
217 __entry->gfn = gfn;
223 __entry->gfn, __entry->access, __entry->gen)
228 TP_PROTO(u64 addr, gfn_t gfn, unsigned access),
229 TP_ARGS(addr, gfn, access),
[all …]
page_track.c
64 static void update_gfn_track(struct kvm_memory_slot *slot, gfn_t gfn, in update_gfn_track() argument
69 index = gfn_to_index(gfn, slot->base_gfn, PT_PAGE_TABLE_LEVEL); in update_gfn_track()
92 struct kvm_memory_slot *slot, gfn_t gfn, in kvm_slot_page_track_add_page() argument
99 update_gfn_track(slot, gfn, mode, 1); in kvm_slot_page_track_add_page()
105 kvm_mmu_gfn_disallow_lpage(slot, gfn); in kvm_slot_page_track_add_page()
108 if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn)) in kvm_slot_page_track_add_page()
127 struct kvm_memory_slot *slot, gfn_t gfn, in kvm_slot_page_track_remove_page() argument
133 update_gfn_track(slot, gfn, mode, -1); in kvm_slot_page_track_remove_page()
139 kvm_mmu_gfn_allow_lpage(slot, gfn); in kvm_slot_page_track_remove_page()
146 bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn, in kvm_page_track_is_active() argument
[all …]
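
Read together, these page_track hits outline the mechanism: each memslot carries an array of per-gfn counters, update_gfn_track() moves a counter by +1 or -1, and a gfn counts as write-tracked while its counter is nonzero. A minimal self-contained model of that bookkeeping (the struct layout and demo_* names are illustrative, not the kernel's):

    #include <assert.h>
    #include <stdint.h>

    /* Hypothetical, simplified memslot: one 16-bit counter per 4K guest frame. */
    struct demo_slot {
        uint64_t  base_gfn;
        uint64_t  npages;
        uint16_t *track_count;
    };

    static void demo_update_gfn_track(struct demo_slot *slot, uint64_t gfn, short delta)
    {
        uint64_t index = gfn - slot->base_gfn;  /* 4K level: direct offset */

        assert(index < slot->npages);
        slot->track_count[index] += delta;      /* +1 on add, -1 on remove */
    }

    /* Tracking stays active while any registration is outstanding, echoing
     * kvm_page_track_is_active(). */
    static int demo_gfn_is_tracked(const struct demo_slot *slot, uint64_t gfn)
    {
        return slot->track_count[gfn - slot->base_gfn] != 0;
    }
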
mmu.c
360 static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn, in mark_mmio_spte() argument
365 u64 gpa = gfn << PAGE_SHIFT; in mark_mmio_spte()
373 trace_mark_mmio_spte(sptep, gfn, access, gen); in mark_mmio_spte()
398 static bool set_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn, in set_mmio_spte() argument
402 mark_mmio_spte(vcpu, sptep, gfn, access); in set_mmio_spte()
1038 return sp->gfn + (index << ((sp->role.level - 1) * PT64_LEVEL_BITS)); in kvm_mmu_page_get_gfn()
1041 static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn) in kvm_mmu_page_set_gfn() argument
1044 sp->gfns[index] = gfn; in kvm_mmu_page_set_gfn()
1048 if (WARN_ON(gfn != kvm_mmu_page_get_gfn(sp, index))) in kvm_mmu_page_set_gfn()
1051 sp->gfn, in kvm_mmu_page_set_gfn()
[all …]
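
mark_mmio_spte() and kvm_mmu_page_get_gfn() above both rest on the same identity: a gfn is a guest physical address shifted right by PAGE_SHIFT. A tiny standalone illustration, assuming x86's 4K pages:

    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_PAGE_SHIFT 12  /* 4K pages */

    static uint64_t demo_gfn_to_gpa(uint64_t gfn) { return gfn << DEMO_PAGE_SHIFT; }
    static uint64_t demo_gpa_to_gfn(uint64_t gpa) { return gpa >> DEMO_PAGE_SHIFT; }

    int main(void)
    {
        uint64_t gpa = 0x12345678;  /* gfn 0x12345, in-page offset 0x678 */

        /* prints "gfn=12345 page_base=12345000": the round trip drops
         * the offset and lands on the page boundary */
        printf("gfn=%llx page_base=%llx\n",
               (unsigned long long)demo_gpa_to_gfn(gpa),
               (unsigned long long)demo_gfn_to_gpa(demo_gpa_to_gfn(gpa)));
        return 0;
    }
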
mmu_audit.c
99 gfn_t gfn; in audit_mappings() local
116 gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt); in audit_mappings()
117 pfn = kvm_vcpu_gfn_to_pfn_atomic(vcpu, gfn); in audit_mappings()
136 gfn_t gfn; in inspect_spte_has_rmap() local
139 gfn = kvm_mmu_page_get_gfn(rev_sp, sptep - rev_sp->spt); in inspect_spte_has_rmap()
142 slot = __gfn_to_memslot(slots, gfn); in inspect_spte_has_rmap()
146 audit_printk(kvm, "no memslot for gfn %llx\n", gfn); in inspect_spte_has_rmap()
148 (long int)(sptep - rev_sp->spt), rev_sp->gfn); in inspect_spte_has_rmap()
153 rmap_head = __gfn_to_rmap(gfn, rev_sp->role.level, slot); in inspect_spte_has_rmap()
205 slot = __gfn_to_memslot(slots, sp->gfn); in audit_write_protection()
[all …]
paging_tmpl.h
98 gfn_t gfn; member
298 gfn_t gfn; in FNAME() local
403 gfn = gpte_to_gfn_lvl(pte, walker->level); in FNAME()
404 gfn += (addr & PT_LVL_OFFSET_MASK(walker->level)) >> PAGE_SHIFT; in FNAME()
407 gfn += pse36_gfn_delta(pte); in FNAME()
409 real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(gfn), access, &walker->fault); in FNAME()
413 walker->gfn = real_gpa >> PAGE_SHIFT; in FNAME()
502 gfn_t gfn; in FNAME() local
510 gfn = gpte_to_gfn(gpte); in FNAME()
513 pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn, in FNAME()
[all …]
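
The walker hits show how the final gfn of a large-page mapping is assembled: take the frame base from the gpte at the terminal level, then add the 4K-page offset carried by the low bits of the address. A worked sketch for a 2M (level 2) mapping; the masks assume x86's 9-bits-per-level layout and the demo_* names are illustrative:

    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_PAGE_SHIFT        12
    #define DEMO_LVL_SHIFT(level)  (DEMO_PAGE_SHIFT + ((level) - 1) * 9)

    /* Base gfn of the naturally aligned frame this gpte maps at the level. */
    static uint64_t demo_gpte_to_gfn_lvl(uint64_t pte, int level)
    {
        return (pte >> DEMO_LVL_SHIFT(level)) << ((level - 1) * 9);
    }

    int main(void)
    {
        int level = 2;               /* 2M large page */
        uint64_t pte  = 0x40200000;  /* maps the 2M frame at 1G + 2M */
        uint64_t addr = 0x123456;    /* low bits of the faulting address */
        uint64_t gfn  = demo_gpte_to_gfn_lvl(pte, level);

        /* as in "gfn += (addr & PT_LVL_OFFSET_MASK(level)) >> PAGE_SHIFT" */
        gfn += (addr & ((1ull << DEMO_LVL_SHIFT(level)) - 1)) >> DEMO_PAGE_SHIFT;
        printf("gfn=%llx\n", (unsigned long long)gfn);  /* gfn=40323 */
        return 0;
    }
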
mmu.h
193 void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
194 void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
196 struct kvm_memory_slot *slot, u64 gfn);
x86.h
142 gva_t gva, gfn_t gfn, unsigned access) in vcpu_cache_mmio_info() argument
155 vcpu->arch.mmio_gfn = gfn; in vcpu_cache_mmio_info()
235 u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
239 bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
hyperv.c
877 u64 gfn; in kvm_hv_setup_tsc_page() local
889 gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT; in kvm_hv_setup_tsc_page()
894 if (unlikely(kvm_read_guest(kvm, gfn_to_gpa(gfn), in kvm_hv_setup_tsc_page()
903 if (kvm_write_guest(kvm, gfn_to_gpa(gfn), in kvm_hv_setup_tsc_page()
912 if (kvm_write_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref))) in kvm_hv_setup_tsc_page()
926 kvm_write_guest(kvm, gfn_to_gpa(gfn), in kvm_hv_setup_tsc_page()
946 u64 gfn; in kvm_hv_set_msr_pw() local
957 gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT; in kvm_hv_set_msr_pw()
958 addr = gfn_to_hva(kvm, gfn); in kvm_hv_set_msr_pw()
966 mark_page_dirty(kvm, gfn); in kvm_hv_set_msr_pw()
[all …]
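
Both Hyper-V hits decode an overlay-page MSR the same way: the low bits hold control flags and the remaining bits are the gfn of the page, recovered with a single right shift. A toy decoder under the assumption that the enable flag is bit 0 and the address shift is 12, as the HV_X64_MSR_*_ADDRESS_SHIFT uses above suggest:

    #include <stdint.h>

    #define DEMO_HV_MSR_ENABLE      0x1ull  /* assumed enable bit */
    #define DEMO_HV_MSR_ADDR_SHIFT  12      /* assumed address shift */

    struct demo_overlay {
        int      enabled;
        uint64_t gfn;
    };

    static struct demo_overlay demo_decode_overlay_msr(uint64_t data)
    {
        struct demo_overlay ov = {
            .enabled = (data & DEMO_HV_MSR_ENABLE) != 0,
            .gfn     = data >> DEMO_HV_MSR_ADDR_SHIFT,
        };
        return ov;
    }
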
mtrr.c
629 u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_mtrr_get_guest_memory_type() argument
638 start = gfn_to_gpa(gfn); in kvm_mtrr_get_guest_memory_type()
705 bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn, in kvm_mtrr_check_gfn_range_consistency() argument
713 start = gfn_to_gpa(gfn); in kvm_mtrr_check_gfn_range_consistency()
714 end = gfn_to_gpa(gfn + page_num); in kvm_mtrr_check_gfn_range_consistency()
x86.c
565 static int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, in kvm_read_nested_guest_page() argument
568 return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn, in kvm_read_nested_guest_page()
621 gfn_t gfn; in pdptrs_changed() local
631 gfn = (kvm_read_cr3(vcpu) & 0xffffffe0ul) >> PAGE_SHIFT; in pdptrs_changed()
633 r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte), in pdptrs_changed()
8898 static inline u32 kvm_async_pf_hash_fn(gfn_t gfn) in kvm_async_pf_hash_fn() argument
8900 return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU)); in kvm_async_pf_hash_fn()
8908 static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_add_async_pf_gfn() argument
8910 u32 key = kvm_async_pf_hash_fn(gfn); in kvm_add_async_pf_gfn()
8915 vcpu->arch.apf.gfns[key] = gfn; in kvm_add_async_pf_gfn()
[all …]
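
The async page fault hits implement a small open-addressed table of pending gfns, indexed by hash_32() truncated to the table size. A userspace approximation (demo_hash_32() reuses the kernel's 32-bit golden-ratio constant; the 64-entry size stands in for ASYNC_PF_PER_VCPU by assumption, 0 marks a free slot instead of the kernel's sentinel, and the table is assumed never to fill):

    #include <stdint.h>

    #define DEMO_GOLDEN_RATIO_32  0x61C88647u  /* multiplicative hash constant */
    #define DEMO_APF_SLOTS        64

    static uint32_t demo_hash_32(uint32_t val, unsigned int bits)
    {
        return (val * DEMO_GOLDEN_RATIO_32) >> (32 - bits);
    }

    static uint32_t demo_apf_hash_fn(uint64_t gfn)
    {
        return demo_hash_32((uint32_t)gfn, 6);  /* order_base_2(64) == 6 */
    }

    /* Linear probing on collision, in the spirit of kvm_add_async_pf_gfn(). */
    static void demo_add_apf_gfn(uint64_t gfns[DEMO_APF_SLOTS], uint64_t gfn)
    {
        uint32_t key = demo_apf_hash_fn(gfn);

        while (gfns[key] != 0)
            key = (key + 1) & (DEMO_APF_SLOTS - 1);
        gfns[key] = gfn;
    }
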
/arch/powerpc/kvm/
book3s_64_mmu_radix.c
23 unsigned long gfn, unsigned int order);
275 unsigned long gfn = lgpa >> PAGE_SHIFT; in kvmppc_create_pte() local
277 memslot = gfn_to_memslot(kvm, gfn); in kvmppc_create_pte()
279 mark_pages_dirty(kvm, memslot, gfn, in kvmppc_create_pte()
329 unsigned long gpa, gfn, hva, pfn; in kvmppc_book3s_radix_page_fault() local
355 gfn = gpa >> PAGE_SHIFT; in kvmppc_book3s_radix_page_fault()
358 memslot = gfn_to_memslot(kvm, gfn); in kvmppc_book3s_radix_page_fault()
380 hva = gfn_to_hva_memslot(memslot, gfn); in kvmppc_book3s_radix_page_fault()
484 pfn |= gfn & ((PMD_SIZE >> PAGE_SHIFT) - 1); in kvmppc_book3s_radix_page_fault()
501 unsigned long gfn, unsigned int order) in mark_pages_dirty() argument
[all …]
book3s_64_mmu_hv.c
488 unsigned long gpa, gfn, hva, pfn; in kvmppc_book3s_hv_page_fault() local
549 gfn = gpa >> PAGE_SHIFT; in kvmppc_book3s_hv_page_fault()
550 memslot = gfn_to_memslot(kvm, gfn); in kvmppc_book3s_hv_page_fault()
578 hva = gfn_to_hva_memslot(memslot, gfn); in kvmppc_book3s_hv_page_fault()
751 unsigned long gfn);
766 gfn_t gfn, gfn_end; in kvm_handle_hva_range() local
777 gfn = hva_to_gfn_memslot(hva_start, memslot); in kvm_handle_hva_range()
780 for (; gfn < gfn_end; ++gfn) { in kvm_handle_hva_range()
781 ret = handler(kvm, memslot, gfn); in kvm_handle_hva_range()
797 unsigned long *rmapp, unsigned long gfn) in kvmppc_unmap_hpte() argument
[all …]
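
kvm_handle_hva_range() above follows a pattern that recurs across architectures: clamp a host virtual address range to a memslot, convert both ends to gfns, and call a handler once per frame. A schematic standalone version over a simplified slot (demo_* names are illustrative):

    #include <stdint.h>

    #define DEMO_PAGE_SHIFT 12

    struct demo_memslot {
        uint64_t base_gfn;
        uint64_t npages;
        uint64_t userspace_addr;  /* hva backing the slot's first page */
    };

    /* Counterpart of hva_to_gfn_memslot(): which guest frame does this
     * host address back within the slot? */
    static uint64_t demo_hva_to_gfn(uint64_t hva, const struct demo_memslot *slot)
    {
        return slot->base_gfn + ((hva - slot->userspace_addr) >> DEMO_PAGE_SHIFT);
    }

    typedef int (*demo_handler_t)(const struct demo_memslot *slot, uint64_t gfn);

    static int demo_handle_hva_range(const struct demo_memslot *slot,
                                     uint64_t hva_start, uint64_t hva_end,
                                     demo_handler_t handler)
    {
        uint64_t gfn = demo_hva_to_gfn(hva_start, slot);
        /* round the exclusive end up to the next page boundary */
        uint64_t gfn_end = demo_hva_to_gfn(hva_end + (1 << DEMO_PAGE_SHIFT) - 1, slot);
        int ret = 0;

        for (; gfn < gfn_end; ++gfn)
            ret |= handler(slot, gfn);
        return ret;
    }
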
e500_mmu_host.c
326 u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe, in kvmppc_e500_shadow_map() argument
356 slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn); in kvmppc_e500_shadow_map()
357 hva = gfn_to_hva_memslot(slot, gfn); in kvmppc_e500_shadow_map()
384 slot_start = pfn - (gfn - slot->base_gfn); in kvmppc_e500_shadow_map()
412 gfn_start = gfn & ~(tsize_pages - 1); in kvmppc_e500_shadow_map()
415 if (gfn_start + pfn - gfn < start) in kvmppc_e500_shadow_map()
417 if (gfn_end + pfn - gfn > end) in kvmppc_e500_shadow_map()
419 if ((gfn & (tsize_pages - 1)) != in kvmppc_e500_shadow_map()
452 pfn = gfn_to_pfn_memslot(slot, gfn); in kvmppc_e500_shadow_map()
456 __func__, (long)gfn); in kvmppc_e500_shadow_map()
[all …]
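
The e500 shadow-map hits only allow a large shadow TLB entry when the guest and host frames are congruent modulo the entry size, which is what the mask comparison at line 419 checks. The same test in isolation (tsize_pages must be a power of two):

    #include <stdint.h>

    /* A tsize_pages-sized mapping from gfn to pfn is only valid when both
     * frames sit at the same offset inside the naturally aligned block. */
    static int demo_large_map_ok(uint64_t gfn, uint64_t pfn, uint64_t tsize_pages)
    {
        return (gfn & (tsize_pages - 1)) == (pfn & (tsize_pages - 1));
    }
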
book3s_64_vio_hv.c
170 unsigned long gfn = gpa >> PAGE_SHIFT; in kvmppc_gpa_to_ua() local
173 memslot = search_memslots(kvm_memslots(kvm), gfn); in kvmppc_gpa_to_ua()
177 *ua = __gfn_to_hva_memslot(memslot, gfn) | in kvmppc_gpa_to_ua()
182 *prmap = &memslot->arch.rmap[gfn - memslot->base_gfn]; in kvmppc_gpa_to_ua()
book3s_hv_rm_mmu.c
130 unsigned long gfn; in revmap_for_hpte() local
132 gfn = hpte_rpn(hpte_gr, hpte_page_size(hpte_v, hpte_gr)); in revmap_for_hpte()
133 memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn); in revmap_for_hpte()
137 rmap = real_vmalloc_addr(&memslot->arch.rmap[gfn - memslot->base_gfn]); in revmap_for_hpte()
180 unsigned long i, pa, gpa, gfn, psize; in kvmppc_do_h_enter() local
210 gfn = gpa >> PAGE_SHIFT; in kvmppc_do_h_enter()
211 memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn); in kvmppc_do_h_enter()
225 slot_fn = gfn - memslot->base_gfn; in kvmppc_do_h_enter()
229 hva = __gfn_to_hva_memslot(memslot, gfn); in kvmppc_do_h_enter()
book3s_64_mmu_host.c
100 unsigned long gfn = orig_pte->raddr >> PAGE_SHIFT; in kvmppc_mmu_map_page() local
139 mark_page_dirty(vcpu->kvm, gfn); in kvmppc_mmu_map_page()
booke.c
1240 gfn_t gfn; in kvmppc_handle_exit() local
1269 gfn = gpaddr >> PAGE_SHIFT; in kvmppc_handle_exit()
1271 if (kvm_is_visible_gfn(vcpu->kvm, gfn)) { in kvmppc_handle_exit()
1297 gfn_t gfn; in kvmppc_handle_exit() local
1317 gfn = gpaddr >> PAGE_SHIFT; in kvmppc_handle_exit()
1319 if (kvm_is_visible_gfn(vcpu->kvm, gfn)) { in kvmppc_handle_exit()
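
Both exit paths above use kvm_is_visible_gfn() to decide whether a guest TLB miss hits RAM (the gfn falls inside some memslot) or should be handled as MMIO. A minimal version of that visibility test over a toy slot array (demo_* names are hypothetical):

    #include <stdint.h>

    struct demo_memslot {
        uint64_t base_gfn;
        uint64_t npages;
    };

    /* Is the frame backed by any slot? If not, the access is MMIO. */
    static int demo_is_visible_gfn(const struct demo_memslot *slots, int nslots,
                                   uint64_t gfn)
    {
        for (int i = 0; i < nslots; i++)
            if (gfn >= slots[i].base_gfn &&
                gfn - slots[i].base_gfn < slots[i].npages)
                return 1;
        return 0;
    }
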
/arch/x86/xen/
mmu.c
94 xen_pfn_t *gfn, int nr, in do_remap_gfn() argument
107 rmd.mfn = gfn; in do_remap_gfn()
169 xen_pfn_t gfn, int nr, in xen_remap_domain_gfn_range() argument
173 return do_remap_gfn(vma, addr, &gfn, nr, NULL, prot, domid, pages); in xen_remap_domain_gfn_range()
179 xen_pfn_t *gfn, int nr, in xen_remap_domain_gfn_array() argument
188 return do_remap_gfn(vma, addr, gfn, nr, err_ptr, prot, domid, pages); in xen_remap_domain_gfn_array()
/arch/x86/include/asm/
kvm_page_track.h
58 struct kvm_memory_slot *slot, gfn_t gfn,
61 struct kvm_memory_slot *slot, gfn_t gfn,
63 bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn,
kvm_host.h
109 static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level) in gfn_to_index() argument
112 return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) - in gfn_to_index()
286 gfn_t gfn; member
1013 u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
1098 gfn_t gfn; member
1238 gfn_t gfn, void *data, int offset, int len,
1260 int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
1423 extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
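
gfn_to_index() at line 109 maps a gfn to a slot-relative index in per-level metadata by shifting both the gfn and the slot base down to that level's granularity. A worked standalone version, assuming x86's 9 address bits per page-table level:

    #include <stdint.h>
    #include <stdio.h>

    /* 4K pages at level 1; each level above adds 9 bits. */
    #define DEMO_HPAGE_GFN_SHIFT(level)  (((level) - 1) * 9)

    static uint64_t demo_gfn_to_index(uint64_t gfn, uint64_t base_gfn, int level)
    {
        return (gfn >> DEMO_HPAGE_GFN_SHIFT(level)) -
               (base_gfn >> DEMO_HPAGE_GFN_SHIFT(level));
    }

    int main(void)
    {
        /* gfn 0x3ff in a slot based at 0: 4K index 1023, but only
         * large-page index 1 at the 2M level. Prints "1023 1". */
        printf("%llu %llu\n",
               (unsigned long long)demo_gfn_to_index(0x3ff, 0, 1),
               (unsigned long long)demo_gfn_to_index(0x3ff, 0, 2));
        return 0;
    }
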
/arch/mips/kvm/
mmu.c
471 int (*handler)(struct kvm *kvm, gfn_t gfn, in handle_hva_to_gpa() argument
486 gfn_t gfn, gfn_end; in handle_hva_to_gpa() local
498 gfn = hva_to_gfn_memslot(hva_start, memslot); in handle_hva_to_gpa()
501 ret |= handler(kvm, gfn, gfn_end, memslot, data); in handle_hva_to_gpa()
508 static int kvm_unmap_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end, in kvm_unmap_hva_handler() argument
511 kvm_mips_flush_gpa_pt(kvm, gfn, gfn_end); in kvm_unmap_hva_handler()
533 static int kvm_set_spte_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end, in kvm_set_spte_handler() argument
536 gpa_t gpa = gfn << PAGE_SHIFT; in kvm_set_spte_handler()
574 static int kvm_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end, in kvm_age_hva_handler() argument
577 return kvm_mips_mkold_gpa_pt(kvm, gfn, gfn_end); in kvm_age_hva_handler()
[all …]
/arch/x86/include/asm/xen/
page.h
224 static inline unsigned long gfn_to_pfn(unsigned long gfn) in gfn_to_pfn() argument
227 return gfn; in gfn_to_pfn()
229 return mfn_to_pfn(gfn); in gfn_to_pfn()
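
The Xen gfn_to_pfn() hit encodes the two addressing modes: auto-translated (HVM/PVH) guests see gfn == pfn, while classic PV guests hold machine frames that must go through the machine-to-physical table. A schematic version with the Xen plumbing replaced by toy stand-ins:

    #include <stdint.h>

    static int demo_auto_translated;  /* XENFEAT_auto_translated_physmap */
    static uint64_t demo_m2p[16];     /* toy machine-to-physical table */

    static uint64_t demo_mfn_to_pfn(uint64_t mfn)
    {
        return demo_m2p[mfn];         /* the real code walks the M2P map */
    }

    /* Identity for auto-translated guests; M2P lookup for classic PV,
     * where the "gfn" is really a machine frame number. */
    static uint64_t demo_gfn_to_pfn(uint64_t gfn)
    {
        if (demo_auto_translated)
            return gfn;
        return demo_mfn_to_pfn(gfn);
    }
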
/arch/arm/xen/
enlighten.c
64 xen_pfn_t *gfn, int nr, in xen_remap_domain_gfn_array() argument
69 return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr, in xen_remap_domain_gfn_array()
77 xen_pfn_t gfn, int nr, in xen_remap_domain_gfn_range() argument
/arch/powerpc/include/asm/
kvm_book3s.h
196 unsigned long gfn);
198 unsigned long gfn);
200 unsigned long gfn);
/arch/s390/kvm/
priv.c
965 unsigned long gfn, hva, res, pgstev, ptev; in do_essa() local
974 gfn = vcpu->run->s.regs.gprs[r2] >> PAGE_SHIFT; in do_essa()
975 hva = gfn_to_hva(vcpu->kvm, gfn); in do_essa()
1010 cbrlo[entries] = gfn << PAGE_SHIFT; in do_essa()
1013 if (orc && gfn < ms->bitmap_size) { in do_essa()
1015 if (!test_and_set_bit(gfn, ms->pgste_bitmap)) in do_essa()
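
do_essa() records each processed frame twice: as a guest address in the CBRL buffer (gfn << PAGE_SHIFT) and as a bit in a migration bitmap, where test_and_set_bit() keeps repeats from being double-counted. A toy model of that bitmap step (the kernel's test_and_set_bit() is atomic; this stand-in is not):

    #include <stdint.h>

    #define DEMO_BITS_PER_LONG (8 * sizeof(unsigned long))

    static int demo_test_and_set_bit(unsigned long nr, unsigned long *map)
    {
        unsigned long mask = 1ul << (nr % DEMO_BITS_PER_LONG);
        unsigned long *word = &map[nr / DEMO_BITS_PER_LONG];
        int old = (*word & mask) != 0;

        *word |= mask;
        return old;
    }

    /* Count a gfn only on first sighting, echoing
     * "if (!test_and_set_bit(gfn, ms->pgste_bitmap))". */
    static void demo_mark_gfn(unsigned long *bitmap, uint64_t *dirty_count,
                              uint64_t gfn)
    {
        if (!demo_test_and_set_bit(gfn, bitmap))
            (*dirty_count)++;
    }
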
