
Searched refs:gfn (Results 1 – 25 of 32) sorted by relevance

/arch/x86/kvm/
mmutrace.h
13 __field(__u64, gfn) \
20 __entry->gfn = sp->gfn; \
37 __entry->gfn, role.level, \
205 TP_PROTO(u64 *sptep, gfn_t gfn, unsigned access, unsigned int gen),
206 TP_ARGS(sptep, gfn, access, gen),
210 __field(gfn_t, gfn)
217 __entry->gfn = gfn;
223 __entry->gfn, __entry->access, __entry->gen)
228 TP_PROTO(u64 addr, gfn_t gfn, unsigned access),
229 TP_ARGS(addr, gfn, access),
[all …]
page_track.c
63 static void update_gfn_track(struct kvm_memory_slot *slot, gfn_t gfn, in update_gfn_track() argument
68 index = gfn_to_index(gfn, slot->base_gfn, PT_PAGE_TABLE_LEVEL); in update_gfn_track()
91 struct kvm_memory_slot *slot, gfn_t gfn, in kvm_slot_page_track_add_page() argument
98 update_gfn_track(slot, gfn, mode, 1); in kvm_slot_page_track_add_page()
104 kvm_mmu_gfn_disallow_lpage(slot, gfn); in kvm_slot_page_track_add_page()
107 if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn)) in kvm_slot_page_track_add_page()
126 struct kvm_memory_slot *slot, gfn_t gfn, in kvm_slot_page_track_remove_page() argument
132 update_gfn_track(slot, gfn, mode, -1); in kvm_slot_page_track_remove_page()
138 kvm_mmu_gfn_allow_lpage(slot, gfn); in kvm_slot_page_track_remove_page()
145 bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn, in kvm_page_track_is_active() argument
[all …]
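
The page_track.c hits above outline a reference-counted write-tracking scheme: kvm_slot_page_track_add_page() and kvm_slot_page_track_remove_page() both funnel into update_gfn_track() with counts of +1 and -1, and a gfn stays write-protected while its count is nonzero. A minimal userspace model of that counting idea, assuming a simplified slot layout in place of the kernel's per-slot gfn_track arrays:

    /*
     * Userspace model of KVM's reference-counted gfn write tracking
     * (page_track.c). The slot struct and main() harness are simplified
     * stand-ins, not kernel code.
     */
    #include <assert.h>
    #include <stdio.h>

    #define SLOT_PAGES 512

    struct memory_slot {
        unsigned long base_gfn;
        short write_track[SLOT_PAGES];  /* one counter per gfn in the slot */
    };

    /* +1 when a tracker registers a gfn, -1 when it deregisters;
     * mirrors update_gfn_track() minus the gfn_to_index() level math */
    static void update_gfn_track(struct memory_slot *slot,
                                 unsigned long gfn, short count)
    {
        unsigned long index = gfn - slot->base_gfn;

        assert(index < SLOT_PAGES);
        slot->write_track[index] += count;
        assert(slot->write_track[index] >= 0);
    }

    int main(void)
    {
        struct memory_slot slot = { .base_gfn = 0x1000 };

        update_gfn_track(&slot, 0x1005, 1);   /* first tracker: write-protect */
        update_gfn_track(&slot, 0x1005, 1);   /* second tracker, same gfn */
        update_gfn_track(&slot, 0x1005, -1);  /* one tracker drops out... */
        /* ...count is still 1, so the gfn remains write-protected */
        printf("gfn 0x1005 trackers: %d\n", slot.write_track[5]);
        return 0;
    }
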
mmu_audit.c
96 gfn_t gfn; in audit_mappings() local
113 gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt); in audit_mappings()
114 pfn = kvm_vcpu_gfn_to_pfn_atomic(vcpu, gfn); in audit_mappings()
133 gfn_t gfn; in inspect_spte_has_rmap() local
136 gfn = kvm_mmu_page_get_gfn(rev_sp, sptep - rev_sp->spt); in inspect_spte_has_rmap()
139 slot = __gfn_to_memslot(slots, gfn); in inspect_spte_has_rmap()
143 audit_printk(kvm, "no memslot for gfn %llx\n", gfn); in inspect_spte_has_rmap()
145 (long int)(sptep - rev_sp->spt), rev_sp->gfn); in inspect_spte_has_rmap()
150 rmap_head = __gfn_to_rmap(gfn, rev_sp->role.level, slot); in inspect_spte_has_rmap()
202 slot = __gfn_to_memslot(slots, sp->gfn); in audit_write_protection()
[all …]
mmu.c
454 static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn, in mark_mmio_spte() argument
459 u64 gpa = gfn << PAGE_SHIFT; in mark_mmio_spte()
467 trace_mark_mmio_spte(sptep, gfn, access, gen); in mark_mmio_spte()
486 static bool set_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn, in set_mmio_spte() argument
490 mark_mmio_spte(vcpu, sptep, gfn, access); in set_mmio_spte()
1155 return sp->gfn + (index << ((sp->role.level - 1) * PT64_LEVEL_BITS)); in kvm_mmu_page_get_gfn()
1158 static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn) in kvm_mmu_page_set_gfn() argument
1161 sp->gfns[index] = gfn; in kvm_mmu_page_set_gfn()
1165 if (WARN_ON(gfn != kvm_mmu_page_get_gfn(sp, index))) in kvm_mmu_page_set_gfn()
1168 sp->gfn, in kvm_mmu_page_set_gfn()
[all …]
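
Two things stand out in the mmu.c hits: MMIO sptes are tagged by converting the gfn to a gpa with a plain shift (gpa = gfn << PAGE_SHIFT), and kvm_mmu_page_get_gfn() (line 1155) recovers the gfn covered by a shadow-page entry from shift arithmetic alone. A sketch of that arithmetic under the usual 9-bits-per-level x86-64 layout; the struct is a stand-in, only the shift mirrors the kernel:

    /*
     * Userspace model of kvm_mmu_page_get_gfn() (mmu.c line 1155):
     * entry `index` of a shadow page at `level` covers a gfn range
     * starting at sp->gfn + index * 2^((level-1) * 9).
     */
    #include <stdio.h>

    #define PT64_LEVEL_BITS 9   /* 512 entries per page table */

    struct mmu_page { unsigned long gfn; int level; };

    static unsigned long mmu_page_get_gfn(const struct mmu_page *sp, int index)
    {
        return sp->gfn +
               ((unsigned long)index << ((sp->level - 1) * PT64_LEVEL_BITS));
    }

    int main(void)
    {
        struct mmu_page pde_page = { .gfn = 0x40000, .level = 2 };

        /* at level 2 each entry spans 512 gfns (a 2 MiB region) */
        printf("entry 0 -> gfn %#lx\n", mmu_page_get_gfn(&pde_page, 0));
        printf("entry 3 -> gfn %#lx\n", mmu_page_get_gfn(&pde_page, 3)); /* 0x40600 */
        return 0;
    }
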
paging_tmpl.h
95 gfn_t gfn; member
316 gfn_t gfn; in FNAME() local
421 gfn = gpte_to_gfn_lvl(pte, walker->level); in FNAME()
422 gfn += (addr & PT_LVL_OFFSET_MASK(walker->level)) >> PAGE_SHIFT; in FNAME()
425 gfn += pse36_gfn_delta(pte); in FNAME()
427 real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(gfn), access, &walker->fault); in FNAME()
431 walker->gfn = real_gpa >> PAGE_SHIFT; in FNAME()
520 gfn_t gfn; in FNAME() local
528 gfn = gpte_to_gfn(gpte); in FNAME()
531 pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn, in FNAME()
[all …]
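
The paging_tmpl.h walker (lines 421-422 above) computes the final gfn of a large-page mapping as the gpte's base gfn plus the guest-address bits that fall inside the large page. A self-contained restatement of that step, with the level masks rebuilt here rather than taken from kernel headers:

    /*
     * Model of the final-gfn computation in the guest page-table walk:
     * for a large-page gpte, the mapped gfn is the gpte's base gfn plus
     * the low bits of the guest address inside the large page.
     */
    #include <stdio.h>

    #define PAGE_SHIFT    12
    #define PT_LEVEL_BITS 9

    /* bits of `addr` covered below a gpte at `level` (level 1 = 4 KiB PTE) */
    static unsigned long lvl_offset_mask(int level)
    {
        return (1UL << (PAGE_SHIFT + (level - 1) * PT_LEVEL_BITS)) - 1;
    }

    int main(void)
    {
        int level = 2;                       /* 2 MiB guest page */
        unsigned long gpte_gfn = 0x80000;    /* base gfn from the gpte */
        unsigned long addr = 0x123456789UL;  /* guest virtual address */

        unsigned long gfn = gpte_gfn +
            ((addr & lvl_offset_mask(level)) >> PAGE_SHIFT);
        printf("addr %#lx at level %d maps to gfn %#lx\n", addr, level, gfn);
        return 0;
    }
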
mmu.h
208 void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
209 void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
211 struct kvm_memory_slot *slot, u64 gfn);
x86.h
187 gva_t gva, gfn_t gfn, unsigned access) in vcpu_cache_mmio_info() argument
200 vcpu->arch.mmio_gfn = gfn; in vcpu_cache_mmio_info()
282 u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
286 bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
mtrr.c
615 u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_mtrr_get_guest_memory_type() argument
624 start = gfn_to_gpa(gfn); in kvm_mtrr_get_guest_memory_type()
691 bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn, in kvm_mtrr_check_gfn_range_consistency() argument
699 start = gfn_to_gpa(gfn); in kvm_mtrr_check_gfn_range_consistency()
700 end = gfn_to_gpa(gfn + page_num); in kvm_mtrr_check_gfn_range_consistency()
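
mtrr.c converts the gfn range to a byte-addressed gpa range before scanning the MTRRs. The gfn <-> gpa conversions used throughout these results are plain shifts by PAGE_SHIFT; a minimal restatement, with types simplified to unsigned long long:

    #include <stdio.h>

    #define PAGE_SHIFT 12

    static unsigned long long gfn_to_gpa(unsigned long long gfn)
    {
        return gfn << PAGE_SHIFT;
    }

    static unsigned long long gpa_to_gfn(unsigned long long gpa)
    {
        return gpa >> PAGE_SHIFT;
    }

    int main(void)
    {
        /* the [start, end) gpa range checked for a 512-page gfn range,
         * as in kvm_mtrr_check_gfn_range_consistency() */
        unsigned long long gfn = 0x100, page_num = 512;
        printf("start=%#llx end=%#llx\n",
               gfn_to_gpa(gfn), gfn_to_gpa(gfn + page_num));
        printf("round trip: %#llx\n", gpa_to_gfn(gfn_to_gpa(gfn)));
        return 0;
    }
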
hyperv.c
937 u64 gfn; in kvm_hv_setup_tsc_page() local
949 gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT; in kvm_hv_setup_tsc_page()
954 if (unlikely(kvm_read_guest(kvm, gfn_to_gpa(gfn), in kvm_hv_setup_tsc_page()
963 if (kvm_write_guest(kvm, gfn_to_gpa(gfn), in kvm_hv_setup_tsc_page()
972 if (kvm_write_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref))) in kvm_hv_setup_tsc_page()
986 kvm_write_guest(kvm, gfn_to_gpa(gfn), in kvm_hv_setup_tsc_page()
1006 u64 gfn; in kvm_hv_set_msr_pw() local
1017 gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT; in kvm_hv_set_msr_pw()
1018 addr = gfn_to_hva(kvm, gfn); in kvm_hv_set_msr_pw()
1026 mark_page_dirty(kvm, gfn); in kvm_hv_set_msr_pw()
[all …]
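
In hyperv.c the gfn comes from a guest-written MSR: bit 0 enables the overlay page and bits 63:12 hold its gfn (both ADDRESS_SHIFT constants above are 12). A small decode sketch; the bit layout follows the Hyper-V TLFS, while the helper and constant names below are hypothetical:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define HV_MSR_ENABLE        0x1ULL
    #define HV_MSR_ADDRESS_SHIFT 12

    /* returns true and fills *gfn if the overlay page is enabled */
    static bool hv_msr_decode(uint64_t data, uint64_t *gfn)
    {
        if (!(data & HV_MSR_ENABLE))
            return false;                    /* page disabled; nothing to map */
        *gfn = data >> HV_MSR_ADDRESS_SHIFT; /* as in kvm_hv_set_msr_pw() */
        return true;
    }

    int main(void)
    {
        uint64_t gfn;

        if (hv_msr_decode(0x12345001ULL, &gfn))
            printf("hypercall page at gfn %#llx\n", (unsigned long long)gfn);
        return 0;
    }
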
/arch/x86/include/asm/
kvm_page_track.h
58 struct kvm_memory_slot *slot, gfn_t gfn,
61 struct kvm_memory_slot *slot, gfn_t gfn,
63 bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn,
kvm_host.h
120 static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level) in gfn_to_index() argument
123 return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) - in gfn_to_index()
327 gfn_t gfn; member
1106 u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
1223 gfn_t gfn; member
1404 gfn_t gfn, void *data, int offset, int len,
1430 int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
1588 extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
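
gfn_to_index() (kvm_host.h line 120 above) returns the index of a gfn within a slot's per-level metadata array: the distance, in level-sized pages, from the slot's base gfn. A userspace restatement, with KVM_HPAGE_GFN_SHIFT rebuilt from the 9-bits-per-level assumption:

    #include <stdio.h>

    #define PT_LEVEL_BITS 9

    /* level 1 = 4 KiB, level 2 = 2 MiB, level 3 = 1 GiB pages */
    static int hpage_gfn_shift(int level)
    {
        return (level - 1) * PT_LEVEL_BITS;
    }

    static unsigned long gfn_to_index(unsigned long gfn,
                                      unsigned long base_gfn, int level)
    {
        return (gfn >> hpage_gfn_shift(level)) -
               (base_gfn >> hpage_gfn_shift(level));
    }

    int main(void)
    {
        /* gfn 0x1234 in a slot based at 0x1000: its 2 MiB-page index is 1 */
        printf("index = %lu\n", gfn_to_index(0x1234, 0x1000, 2));
        return 0;
    }
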
/arch/powerpc/kvm/
e500_mmu_host.c
323 u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe, in kvmppc_e500_shadow_map() argument
353 slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn); in kvmppc_e500_shadow_map()
354 hva = gfn_to_hva_memslot(slot, gfn); in kvmppc_e500_shadow_map()
381 slot_start = pfn - (gfn - slot->base_gfn); in kvmppc_e500_shadow_map()
409 gfn_start = gfn & ~(tsize_pages - 1); in kvmppc_e500_shadow_map()
412 if (gfn_start + pfn - gfn < start) in kvmppc_e500_shadow_map()
414 if (gfn_end + pfn - gfn > end) in kvmppc_e500_shadow_map()
416 if ((gfn & (tsize_pages - 1)) != in kvmppc_e500_shadow_map()
449 pfn = gfn_to_pfn_memslot(slot, gfn); in kvmppc_e500_shadow_map()
453 __func__, (long)gfn); in kvmppc_e500_shadow_map()
[all …]
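
The e500 shadow-map hits include a size check (line 416) requiring gfn and pfn to be congruent modulo the TLB entry size before a large entry can be used. A sketch that searches downward for the largest usable power-of-two entry; the function name, limit parameter, and harness are hypothetical:

    #include <stdio.h>

    /* largest power-of-two page count usable to map gfn -> pfn: a TLB
     * entry of tsize_pages pages requires the low bits of gfn and pfn
     * to match, as in the check at e500_mmu_host.c line 416 */
    static unsigned long max_tsize_pages(unsigned long gfn, unsigned long pfn,
                                         unsigned long limit)
    {
        unsigned long tsize_pages;

        for (tsize_pages = limit; tsize_pages > 1; tsize_pages >>= 1)
            if ((gfn & (tsize_pages - 1)) == (pfn & (tsize_pages - 1)))
                break;
        return tsize_pages;   /* a single page always works */
    }

    int main(void)
    {
        printf("%lu\n", max_tsize_pages(0x1040, 0x9040, 256)); /* aligned: 256 */
        printf("%lu\n", max_tsize_pages(0x1042, 0x9040, 256)); /* bit 1 differs: 2 */
        return 0;
    }
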
book3s_64_mmu_hv.c
500 unsigned long gpa, gfn, hva, pfn; in kvmppc_book3s_hv_page_fault() local
562 gfn = gpa >> PAGE_SHIFT; in kvmppc_book3s_hv_page_fault()
563 memslot = gfn_to_memslot(kvm, gfn); in kvmppc_book3s_hv_page_fault()
591 hva = gfn_to_hva_memslot(memslot, gfn); in kvmppc_book3s_hv_page_fault()
767 unsigned long gfn);
782 gfn_t gfn, gfn_end; in kvm_handle_hva_range() local
793 gfn = hva_to_gfn_memslot(hva_start, memslot); in kvm_handle_hva_range()
796 for (; gfn < gfn_end; ++gfn) { in kvm_handle_hva_range()
797 ret = handler(kvm, memslot, gfn); in kvm_handle_hva_range()
814 unsigned long *rmapp, unsigned long gfn) in kvmppc_unmap_hpte() argument
[all …]
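
kvm_handle_hva_range() above shows the generic shape of host-address notifiers: convert both ends of the hva range to gfns within a memslot, then call the handler once per gfn. A simplified single-slot model (the kernel additionally clamps against slot boundaries and iterates all slots); hva_to_gfn mirrors hva_to_gfn_memslot's arithmetic:

    #include <stdio.h>

    #define PAGE_SHIFT 12

    struct memslot {
        unsigned long base_gfn;
        unsigned long userspace_addr;  /* hva of the slot's first page */
        unsigned long npages;
    };

    static unsigned long hva_to_gfn(unsigned long hva, const struct memslot *ms)
    {
        return ms->base_gfn + ((hva - ms->userspace_addr) >> PAGE_SHIFT);
    }

    static void handle_hva_range(const struct memslot *ms,
                                 unsigned long hva_start, unsigned long hva_end,
                                 void (*handler)(unsigned long gfn))
    {
        unsigned long gfn = hva_to_gfn(hva_start, ms);
        /* round the end up so a partial last page is still visited */
        unsigned long gfn_end = hva_to_gfn(hva_end + (1UL << PAGE_SHIFT) - 1, ms);

        for (; gfn < gfn_end; ++gfn)
            handler(gfn);
    }

    static void print_gfn(unsigned long gfn) { printf("handler(gfn=%#lx)\n", gfn); }

    int main(void)
    {
        struct memslot ms = { 0x1000, 0x7f0000000000UL, 256 };

        handle_hva_range(&ms, 0x7f0000001000UL, 0x7f0000003000UL, print_gfn);
        return 0;
    }
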
book3s_hv_rm_mmu.c
110 unsigned long gfn, unsigned long psize) in kvmppc_update_dirty_map() argument
117 gfn -= memslot->base_gfn; in kvmppc_update_dirty_map()
118 set_dirty_bits_atomic(memslot->dirty_bitmap, gfn, npages); in kvmppc_update_dirty_map()
126 unsigned long gfn; in kvmppc_set_dirty_from_hpte() local
130 gfn = hpte_rpn(hpte_gr, psize); in kvmppc_set_dirty_from_hpte()
131 memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn); in kvmppc_set_dirty_from_hpte()
133 kvmppc_update_dirty_map(memslot, gfn, psize); in kvmppc_set_dirty_from_hpte()
144 unsigned long gfn; in revmap_for_hpte() local
146 gfn = hpte_rpn(hpte_gr, kvmppc_actual_pgsz(hpte_v, hpte_gr)); in revmap_for_hpte()
147 memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn); in revmap_for_hpte()
[all …]
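
kvmppc_update_dirty_map() treats the dirty bitmap as one bit per 4 KiB gfn, so a large HPTE marks psize >> PAGE_SHIFT consecutive bits starting at the slot-relative gfn. A plain-C model, with a non-atomic stand-in for set_dirty_bits_atomic():

    #include <stdio.h>

    #define PAGE_SHIFT 12

    static unsigned long dirty_bitmap[4];   /* covers a 256-page slot */

    static void set_dirty_bits(unsigned long *map, unsigned long i,
                               unsigned long npages)
    {
        for (; npages--; i++)
            map[i / (8 * sizeof(long))] |= 1UL << (i % (8 * sizeof(long)));
    }

    static void update_dirty_map(unsigned long base_gfn, unsigned long gfn,
                                 unsigned long psize)
    {
        unsigned long npages = psize >> PAGE_SHIFT;  /* pages in this HPTE */

        gfn -= base_gfn;                             /* slot-relative index */
        set_dirty_bits(dirty_bitmap, gfn, npages);
    }

    int main(void)
    {
        update_dirty_map(0x1000, 0x1010, 1UL << 16); /* one 64 KiB page */
        printf("bitmap[0] = %#lx\n", dirty_bitmap[0]); /* bits 16..31 set */
        return 0;
    }
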
book3s_64_mmu_radix.c
382 unsigned long gfn = gpa >> PAGE_SHIFT; in kvmppc_unmap_pte() local
394 memslot = gfn_to_memslot(kvm, gfn); in kvmppc_unmap_pte()
411 kvmppc_update_dirty_map(memslot, gfn, page_size); in kvmppc_unmap_pte()
775 unsigned long hva, gfn = gpa >> PAGE_SHIFT; in kvmppc_book3s_instantiate_page() local
793 hva = gfn_to_hva_memslot(memslot, gfn); in kvmppc_book3s_instantiate_page()
800 pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL, in kvmppc_book3s_instantiate_page()
893 unsigned long gpa, gfn; in kvmppc_book3s_radix_page_fault() local
914 gfn = gpa >> PAGE_SHIFT; in kvmppc_book3s_radix_page_fault()
919 memslot = gfn_to_memslot(kvm, gfn); in kvmppc_book3s_radix_page_fault()
969 unsigned long gfn) in kvm_unmap_radix() argument
[all …]
book3s_hv_nested.c
882 unsigned long gfn, end_gfn; in kvmhv_remove_nest_rmap_range() local
887 gfn = (gpa >> PAGE_SHIFT) - memslot->base_gfn; in kvmhv_remove_nest_rmap_range()
888 end_gfn = gfn + (nbytes >> PAGE_SHIFT); in kvmhv_remove_nest_rmap_range()
893 for (; gfn < end_gfn; gfn++) { in kvmhv_remove_nest_rmap_range()
894 unsigned long *rmap = &memslot->arch.rmap[gfn]; in kvmhv_remove_nest_rmap_range()
1273 unsigned long n_gpa, gpa, gfn, perm = 0UL; in __kvmhv_nested_page_fault() local
1331 gfn = gpa >> PAGE_SHIFT; in __kvmhv_nested_page_fault()
1335 memslot = gfn_to_memslot(kvm, gfn); in __kvmhv_nested_page_fault()
1383 gfn = (gpa & ~((1UL << shift) - 1)) >> PAGE_SHIFT; in __kvmhv_nested_page_fault()
1416 rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn]; in __kvmhv_nested_page_fault()
book3s_64_vio_hv.c
81 unsigned long gfn = tce >> PAGE_SHIFT; in kvmppc_rm_tce_to_ua() local
84 memslot = search_memslots(kvm_memslots_raw(kvm), gfn); in kvmppc_rm_tce_to_ua()
88 *ua = __gfn_to_hva_memslot(memslot, gfn) | in kvmppc_rm_tce_to_ua()
92 *prmap = &memslot->arch.rmap[gfn - memslot->base_gfn]; in kvmppc_rm_tce_to_ua()
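
kvmppc_rm_tce_to_ua() above converts a TCE (a guest physical address) to a userspace address: take the gfn from the high bits, look up its memslot, and OR the slot's hva for that gfn with the in-page offset. A single-slot model standing in for search_memslots():

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_MASK  (~((1UL << PAGE_SHIFT) - 1))

    struct memslot { unsigned long base_gfn, npages, userspace_addr; };

    static int tce_to_ua(const struct memslot *ms, unsigned long tce,
                         unsigned long *ua)
    {
        unsigned long gfn = tce >> PAGE_SHIFT;

        if (gfn < ms->base_gfn || gfn >= ms->base_gfn + ms->npages)
            return -1;   /* no memslot contains this gfn */
        *ua = (ms->userspace_addr + ((gfn - ms->base_gfn) << PAGE_SHIFT))
              | (tce & ~PAGE_MASK);
        return 0;
    }

    int main(void)
    {
        struct memslot ms = { 0x1000, 256, 0x7f0000000000UL };
        unsigned long ua;

        if (!tce_to_ua(&ms, (0x1005UL << PAGE_SHIFT) | 0x2a0, &ua))
            printf("ua = %#lx\n", ua);   /* slot hva + page 5, offset 0x2a0 */
        return 0;
    }
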
book3s_64_mmu_host.c
88 unsigned long gfn = orig_pte->raddr >> PAGE_SHIFT; in kvmppc_mmu_map_page() local
127 mark_page_dirty(vcpu->kvm, gfn); in kvmppc_mmu_map_page()
book3s_64_vio.c
336 unsigned long gfn = tce >> PAGE_SHIFT; in kvmppc_tce_to_ua() local
339 memslot = search_memslots(kvm_memslots(kvm), gfn); in kvmppc_tce_to_ua()
343 *ua = __gfn_to_hva_memslot(memslot, gfn) | in kvmppc_tce_to_ua()
book3s_xive_native.c
565 gfn_t gfn; in kvmppc_xive_native_set_queue_config() local
639 gfn = gpa_to_gfn(kvm_eq.qaddr); in kvmppc_xive_native_set_queue_config()
641 page_size = kvm_host_page_size(kvm, gfn); in kvmppc_xive_native_set_queue_config()
648 page = gfn_to_page(kvm, gfn); in kvmppc_xive_native_set_queue_config()
booke.c
1239 gfn_t gfn; in kvmppc_handle_exit() local
1268 gfn = gpaddr >> PAGE_SHIFT; in kvmppc_handle_exit()
1270 if (kvm_is_visible_gfn(vcpu->kvm, gfn)) { in kvmppc_handle_exit()
1296 gfn_t gfn; in kvmppc_handle_exit() local
1316 gfn = gpaddr >> PAGE_SHIFT; in kvmppc_handle_exit()
1318 if (kvm_is_visible_gfn(vcpu->kvm, gfn)) { in kvmppc_handle_exit()
/arch/mips/kvm/
mmu.c
471 int (*handler)(struct kvm *kvm, gfn_t gfn, in handle_hva_to_gpa() argument
486 gfn_t gfn, gfn_end; in handle_hva_to_gpa() local
498 gfn = hva_to_gfn_memslot(hva_start, memslot); in handle_hva_to_gpa()
501 ret |= handler(kvm, gfn, gfn_end, memslot, data); in handle_hva_to_gpa()
508 static int kvm_unmap_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end, in kvm_unmap_hva_handler() argument
511 kvm_mips_flush_gpa_pt(kvm, gfn, gfn_end); in kvm_unmap_hva_handler()
523 static int kvm_set_spte_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end, in kvm_set_spte_handler() argument
526 gpa_t gpa = gfn << PAGE_SHIFT; in kvm_set_spte_handler()
565 static int kvm_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end, in kvm_age_hva_handler() argument
568 return kvm_mips_mkold_gpa_pt(kvm, gfn, gfn_end); in kvm_age_hva_handler()
[all …]
/arch/x86/include/asm/xen/
page.h
260 static inline unsigned long gfn_to_pfn(unsigned long gfn) in gfn_to_pfn() argument
263 return gfn; in gfn_to_pfn()
265 return mfn_to_pfn(gfn); in gfn_to_pfn()
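
Note that Xen's gfn_to_pfn() above is unrelated to the KVM hits: for an auto-translated (HVM/PVH) guest the gfn already is a pfn, while a classic PV guest's gfn is a machine frame number that must go through the M2P table. A sketch with the feature check modeled as a flag and a stubbed M2P lookup:

    #include <stdio.h>

    static int auto_translated_physmap = 0;   /* 1 for HVM/PVH guests */

    static unsigned long mfn_to_pfn(unsigned long mfn)
    {
        /* stand-in for the machine-to-physical table lookup */
        static const unsigned long m2p[] = { 7, 3, 0, 5 };
        return m2p[mfn % 4];
    }

    static unsigned long gfn_to_pfn(unsigned long gfn)
    {
        if (auto_translated_physmap)
            return gfn;               /* HVM/PVH: gfn == pfn */
        return mfn_to_pfn(gfn);       /* PV: gfn is really an mfn */
    }

    int main(void)
    {
        printf("PV:  gfn 1 -> pfn %lu\n", gfn_to_pfn(1));
        auto_translated_physmap = 1;
        printf("HVM: gfn 1 -> pfn %lu\n", gfn_to_pfn(1));
        return 0;
    }
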
/arch/powerpc/include/asm/
kvm_book3s.h
216 unsigned long gfn);
218 unsigned long gfn);
220 unsigned long gfn);
243 unsigned long gfn, unsigned long psize);
/arch/s390/kvm/
priv.c
1123 unsigned long gfn, hva, res, pgstev, ptev; in __do_essa() local
1132 gfn = vcpu->run->s.regs.gprs[r2] >> PAGE_SHIFT; in __do_essa()
1133 hva = gfn_to_hva(vcpu->kvm, gfn); in __do_essa()
1168 cbrlo[entries] = gfn << PAGE_SHIFT; in __do_essa()
1172 struct kvm_memory_slot *ms = gfn_to_memslot(vcpu->kvm, gfn); in __do_essa()
1175 if (ms && !test_and_set_bit(gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms))) in __do_essa()
