Searched refs: base_gfn (Results 1 – 22 of 22) sorted by relevance

/arch/arm64/kvm/
mmu.c
185 phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT; in stage2_flush_memslot()
490 phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT; in stage2_unmap_memslot()
650 start = memslot->base_gfn << PAGE_SHIFT; in kvm_mmu_wp_memory_region()
651 end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT; in kvm_mmu_wp_memory_region()
674 phys_addr_t base_gfn = slot->base_gfn + gfn_offset; in kvm_mmu_write_protect_pt_masked() local
675 phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT; in kvm_mmu_write_protect_pt_masked()
676 phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT; in kvm_mmu_write_protect_pt_masked()
724 gpa_start = memslot->base_gfn << PAGE_SHIFT; in fault_supports_stage2_huge_mapping()
1410 if ((memslot->base_gfn + memslot->npages) > (kvm_phys_size(kvm) >> PAGE_SHIFT)) in kvm_arch_prepare_memory_region()
1492 gpa_t gpa = slot->base_gfn << PAGE_SHIFT; in kvm_arch_flush_shadow_memslot()
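Note: the arm64 hits above all turn a memslot's base_gfn into a guest physical (IPA) range by shifting by PAGE_SHIFT, either for the whole slot or for a dirty-bitmap chunk. A minimal stand-alone sketch of that arithmetic follows; the struct layout and the 4 KiB PAGE_SHIFT are simplifying assumptions, not the kernel definitions.

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12                /* assumed 4 KiB pages */
    typedef uint64_t gfn_t;
    typedef uint64_t phys_addr_t;

    /* Simplified stand-in for struct kvm_memory_slot. */
    struct memslot {
        gfn_t base_gfn;                  /* first guest frame number in the slot */
        unsigned long npages;            /* number of guest pages in the slot */
    };

    int main(void)
    {
        struct memslot slot = { .base_gfn = 0x80000, .npages = 256 };

        /* Whole-slot range, as in stage2_flush_memslot()/kvm_mmu_wp_memory_region(). */
        phys_addr_t start = slot.base_gfn << PAGE_SHIFT;
        phys_addr_t end   = (slot.base_gfn + slot.npages) << PAGE_SHIFT;

        printf("slot covers IPA [0x%llx, 0x%llx)\n",
               (unsigned long long)start, (unsigned long long)end);
        return 0;
    }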
/arch/powerpc/kvm/
trace_hv.h
285 __field(u64, base_gfn)
297 __entry->base_gfn = memslot ? memslot->base_gfn : -1UL;
305 __entry->base_gfn, __entry->slot_flags)
book3s_hv_uvmem.c
259 p->base_pfn = slot->base_gfn; in kvmppc_uvmem_slot_init()
277 if (p->base_pfn == slot->base_gfn) { in kvmppc_uvmem_slot_free()
392 unsigned long gfn = memslot->base_gfn; in kvmppc_memslot_page_merge()
443 memslot->base_gfn << PAGE_SHIFT, in __kvmppc_uvmem_memslot_create()
614 gfn = slot->base_gfn; in kvmppc_uvmem_drop_pages()
788 unsigned long gfn = memslot->base_gfn; in kvmppc_uv_migrate_mem_slot()
book3s_64_mmu_hv.c
568 if (gfn_base < memslot->base_gfn) in kvmppc_book3s_hv_page_fault()
682 rmap = &memslot->arch.rmap[gfn_base - memslot->base_gfn]; in kvmppc_book3s_hv_page_fault()
849 rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn]; in kvm_unmap_rmapp()
895 gfn = memslot->base_gfn; in kvmppc_core_flush_memslot_hv()
924 rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn]; in kvm_age_rmapp()
987 rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn]; in kvm_test_age_rmapp()
1126 if (gfn < memslot->base_gfn || in kvmppc_harvest_vpa_dirty()
1127 gfn >= memslot->base_gfn + memslot->npages) in kvmppc_harvest_vpa_dirty()
1132 __set_bit_le(gfn - memslot->base_gfn, map); in kvmppc_harvest_vpa_dirty()
1207 set_bit_le(gfn - memslot->base_gfn, memslot->dirty_bitmap); in kvmppc_unpin_guest_page()
[all …]
book3s_64_mmu_radix.c
1036 rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn]; in kvm_age_radix()
1067 unsigned long gfn = memslot->base_gfn + pagenum; in kvm_radix_test_clear_dirty()
1110 rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn]; in kvm_radix_test_clear_dirty()
1158 gpa = memslot->base_gfn << PAGE_SHIFT; in kvmppc_radix_flush_memslot()
book3s_hv_rm_mmu.c
117 gfn -= memslot->base_gfn; in kvmppc_update_dirty_map()
155 rmap = real_vmalloc_addr(&memslot->arch.rmap[gfn - memslot->base_gfn]); in revmap_for_hpte()
246 slot_fn = gfn - memslot->base_gfn; in kvmppc_do_h_enter()
e500_mmu_host.c
381 slot_start = pfn - (gfn - slot->base_gfn); in kvmppc_e500_shadow_map()
book3s_hv_nested.c
928 gfn = (gpa >> PAGE_SHIFT) - memslot->base_gfn; in kvmhv_remove_nest_rmap_range()
1456 rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn]; in __kvmhv_nested_page_fault()
book3s_pr.c
1899 ga = memslot->base_gfn << PAGE_SHIFT; in kvm_vm_ioctl_get_dirty_log_pr()
book3s_hv.c
812 if ((from + len) >= ((from_memslot->base_gfn + from_memslot->npages) in kvmppc_copy_guest()
824 if ((to + len) >= ((to_memslot->base_gfn + to_memslot->npages) in kvmppc_copy_guest()
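Note: most of the powerpc hits index per-slot arrays (arch.rmap, the dirty bitmap) by the offset gfn - memslot->base_gfn after checking that the gfn lies inside the slot. A hedged sketch of that pattern; the rmap array here is a plain calloc'd buffer standing in for memslot->arch.rmap.

    #include <stdio.h>
    #include <stdint.h>
    #include <stdlib.h>

    typedef uint64_t gfn_t;

    struct memslot {
        gfn_t base_gfn;
        unsigned long npages;
        unsigned long *rmap;             /* one entry per guest page, like arch.rmap */
    };

    /* Return the rmap entry for a gfn, or NULL if the gfn is outside the slot,
     * mirroring the gfn - memslot->base_gfn indexing in book3s_64_mmu_hv.c. */
    static unsigned long *gfn_to_rmap(struct memslot *slot, gfn_t gfn)
    {
        if (gfn < slot->base_gfn || gfn >= slot->base_gfn + slot->npages)
            return NULL;
        return &slot->rmap[gfn - slot->base_gfn];
    }

    int main(void)
    {
        struct memslot slot = { .base_gfn = 0x1000, .npages = 16 };
        slot.rmap = calloc(slot.npages, sizeof(*slot.rmap));

        unsigned long *entry = gfn_to_rmap(&slot, 0x1005);
        printf("gfn 0x1005 -> rmap index %ld\n",
               entry ? (long)(entry - slot.rmap) : -1L);
        free(slot.rmap);
        return 0;
    }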
/arch/x86/kvm/mmu/
page_track.c
64 index = gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_4K); in update_gfn_track()
154 index = gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_4K); in kvm_page_track_is_active()
tdp_mmu.c
880 spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn, in kvm_tdp_mmu_wrprot_slot()
881 slot->base_gfn + slot->npages, min_level); in kvm_tdp_mmu_wrprot_slot()
944 spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn, in kvm_tdp_mmu_clear_dirty_slot()
945 slot->base_gfn + slot->npages); in kvm_tdp_mmu_clear_dirty_slot()
1060 spte_set |= set_dirty_gfn_range(kvm, root, slot->base_gfn, in kvm_tdp_mmu_slot_set_dirty()
1061 slot->base_gfn + slot->npages); in kvm_tdp_mmu_slot_set_dirty()
1118 zap_collapsible_spte_range(kvm, root, slot->base_gfn, in kvm_tdp_mmu_zap_collapsible_sptes()
1119 slot->base_gfn + slot->npages); in kvm_tdp_mmu_zap_collapsible_sptes()
paging_tmpl.h
639 gfn_t base_gfn = gw->gfn; in FNAME() local
701 base_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1); in FNAME()
710 sp = kvm_mmu_get_page(vcpu, base_gfn, addr, in FNAME()
719 it.level, base_gfn, pfn, prefault, map_writable); in FNAME()
mmu.c
729 idx = gfn_to_index(gfn, slot->base_gfn, level); in lpage_info_slot()
942 idx = gfn_to_index(gfn, slot->base_gfn, level); in __gfn_to_rmap()
1226 slot->base_gfn + gfn_offset, mask, true); in kvm_mmu_write_protect_pt_masked()
1228 rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask), in kvm_mmu_write_protect_pt_masked()
1255 slot->base_gfn + gfn_offset, mask, false); in kvm_mmu_clear_dirty_pt_masked()
1257 rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask), in kvm_mmu_clear_dirty_pt_masked()
2862 gfn_t base_gfn = gfn; in __direct_map() local
2880 base_gfn = gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1); in __direct_map()
2886 sp = kvm_mmu_get_page(vcpu, base_gfn, it.addr, in __direct_map()
2897 write, level, base_gfn, pfn, prefault, in __direct_map()
[all …]
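Note: in __direct_map() and the paging_tmpl.h walker, the local base_gfn is the fault gfn rounded down to the first page of the huge page at the current mapping level. A small sketch of that rounding; the 512-pages-per-level factor is an assumption matching 4 KiB pages with 9-bit page-table indexes, not the kernel's KVM_PAGES_PER_HPAGE macro.

    #include <stdio.h>
    #include <stdint.h>

    typedef uint64_t gfn_t;

    /* Assumed approximation of KVM_PAGES_PER_HPAGE(level): 1, 512, 512*512, ... */
    static gfn_t pages_per_hpage(int level)
    {
        gfn_t pages = 1;
        while (--level > 0)
            pages *= 512;
        return pages;
    }

    int main(void)
    {
        gfn_t gfn = 0x12345;

        for (int level = 1; level <= 3; level++) {
            /* Same idea as base_gfn = gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1). */
            gfn_t base_gfn = gfn & ~(pages_per_hpage(level) - 1);
            printf("level %d: base_gfn = 0x%llx\n",
                   level, (unsigned long long)base_gfn);
        }
        return 0;
    }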
/arch/mips/kvm/
mmu.c
419 gfn_t base_gfn = slot->base_gfn + gfn_offset; in kvm_arch_mmu_enable_log_dirty_pt_masked() local
420 gfn_t start = base_gfn + __ffs(mask); in kvm_arch_mmu_enable_log_dirty_pt_masked()
421 gfn_t end = base_gfn + __fls(mask); in kvm_arch_mmu_enable_log_dirty_pt_masked()
mips.c
222 kvm_mips_flush_gpa_pt(kvm, slot->base_gfn, in kvm_arch_flush_shadow_memslot()
223 slot->base_gfn + slot->npages - 1); in kvm_arch_flush_shadow_memslot()
263 needs_flush = kvm_mips_mkclean_gpa_pt(kvm, new->base_gfn, in kvm_arch_commit_memory_region()
264 new->base_gfn + new->npages - 1); in kvm_arch_commit_memory_region()
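Note: the MIPS (and arm64) dirty-log helpers convert one 64-page bitmap chunk into a gfn range: the chunk starts at slot->base_gfn + gfn_offset, and __ffs()/__fls() pick the lowest and highest set bits of the mask. The sketch below uses GCC builtins in place of the kernel's bit helpers, which is an assumption about semantics, not the kernel code.

    #include <stdio.h>
    #include <stdint.h>

    typedef uint64_t gfn_t;

    int main(void)
    {
        gfn_t base_gfn = 0x4000;             /* slot->base_gfn + gfn_offset */
        unsigned long mask = 0x00f0UL;       /* dirty pages 4..7 of this 64-page chunk */

        /* __builtin_ctzl/__builtin_clzl stand in for the kernel's __ffs()/__fls(). */
        gfn_t start = base_gfn + __builtin_ctzl(mask);
        gfn_t end   = base_gfn + (sizeof(mask) * 8 - 1 - __builtin_clzl(mask));

        printf("write-protect gfns [0x%llx, 0x%llx]\n",
               (unsigned long long)start, (unsigned long long)end);
        return 0;
    }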
/arch/s390/kvm/
kvm-s390.c
601 cur_gfn = memslot->base_gfn; in kvm_arch_sync_dirty_log()
602 last_gfn = memslot->base_gfn + memslot->npages; in kvm_arch_sync_dirty_log()
1945 if (gfn >= memslots[slot].base_gfn && in gfn_to_memslot_approx()
1946 gfn < memslots[slot].base_gfn + memslots[slot].npages) in gfn_to_memslot_approx()
1952 if (gfn >= memslots[slot].base_gfn) in gfn_to_memslot_approx()
1961 if (gfn >= memslots[start].base_gfn && in gfn_to_memslot_approx()
1962 gfn < memslots[start].base_gfn + memslots[start].npages) { in gfn_to_memslot_approx()
1997 unsigned long ofs = cur_gfn - ms->base_gfn; in kvm_s390_next_dirty_cmma()
1999 if (ms->base_gfn + ms->npages <= cur_gfn) { in kvm_s390_next_dirty_cmma()
2009 if (cur_gfn < ms->base_gfn) in kvm_s390_next_dirty_cmma()
[all …]
pv.c
134 npages = memslot->base_gfn + memslot->npages; in kvm_s390_pv_alloc_vm()
priv.c
1196 if (ms && !test_and_set_bit(gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms))) in __do_essa()
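Note: gfn_to_memslot_approx() above is a binary search over memslots using the standard containment test gfn >= base_gfn && gfn < base_gfn + npages. The sketch below keeps only that test over a linearly scanned array; the sorted-slot binary search and the "approx" fallback are left out.

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    typedef uint64_t gfn_t;

    struct memslot {
        gfn_t base_gfn;
        unsigned long npages;
    };

    /* Return the slot containing gfn, or NULL if no slot covers it. */
    static const struct memslot *find_memslot(const struct memslot *slots,
                                              size_t n, gfn_t gfn)
    {
        for (size_t i = 0; i < n; i++)
            if (gfn >= slots[i].base_gfn &&
                gfn < slots[i].base_gfn + slots[i].npages)
                return &slots[i];
        return NULL;
    }

    int main(void)
    {
        const struct memslot slots[] = {
            { .base_gfn = 0x100000, .npages = 0x800 },
            { .base_gfn = 0x0,      .npages = 0x1000 },
        };
        const struct memslot *s = find_memslot(slots, 2, 0x100123);
        printf("gfn 0x100123 -> slot base 0x%llx\n",
               s ? (unsigned long long)s->base_gfn : ~0ULL);
        return 0;
    }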
/arch/powerpc/include/asm/
kvm_book3s_64.h
494 return !(memslot->base_gfn & mask) && !(memslot->npages & mask); in slot_is_aligned()
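Note: slot_is_aligned() requires both the slot's base_gfn and its npages to be multiples of the large-page size; mask is pages-per-large-page minus one. A tiny worked example, where 0xfff corresponds to a 16 MiB page built from 4 KiB pages (illustrative values only).

    #include <stdio.h>
    #include <stdbool.h>
    #include <stdint.h>

    typedef uint64_t gfn_t;

    /* mask = (large_page_size / PAGE_SIZE) - 1, e.g. 0xfff for 16 MiB / 4 KiB. */
    static bool slot_is_aligned(gfn_t base_gfn, unsigned long npages, unsigned long mask)
    {
        return !(base_gfn & mask) && !(npages & mask);
    }

    int main(void)
    {
        printf("%d\n", slot_is_aligned(0x4000, 0x2000, 0xfff));   /* 1: aligned */
        printf("%d\n", slot_is_aligned(0x4100, 0x2000, 0xfff));   /* 0: start misaligned */
        return 0;
    }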
/arch/x86/include/asm/
kvm_host.h
123 static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level) in gfn_to_index() argument
127 (base_gfn >> KVM_HPAGE_GFN_SHIFT(level)); in gfn_to_index()
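Note: gfn_to_index() gives the zero-based index of a gfn's large page within its slot: both gfn and base_gfn are shifted right by the level's huge-page shift before subtracting. A worked example, assuming the usual x86 shifts of 0/9/18 for 4 KiB/2 MiB/1 GiB; the shift helper name below is made up for the sketch.

    #include <stdio.h>
    #include <stdint.h>

    typedef uint64_t gfn_t;

    /* Assumed x86 values of KVM_HPAGE_GFN_SHIFT(level): 0, 9, 18 for levels 1..3. */
    static int hpage_gfn_shift(int level)
    {
        return (level - 1) * 9;
    }

    static gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
    {
        return (gfn >> hpage_gfn_shift(level)) -
               (base_gfn >> hpage_gfn_shift(level));
    }

    int main(void)
    {
        gfn_t base_gfn = 0x1000, gfn = 0x1345;

        for (int level = 1; level <= 3; level++)
            printf("level %d: index %llu\n", level,
                   (unsigned long long)gfn_to_index(gfn, base_gfn, level));
        return 0;
    }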
/arch/x86/kvm/
x86.c
10825 lpages = gfn_to_index(slot->base_gfn + npages - 1, in kvm_alloc_memslot_metadata()
10826 slot->base_gfn, level) + 1; in kvm_alloc_memslot_metadata()
10842 if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1)) in kvm_alloc_memslot_metadata()
10844 if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1)) in kvm_alloc_memslot_metadata()
10851 if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1)) { in kvm_alloc_memslot_metadata()
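Note: kvm_alloc_memslot_metadata() sizes each level's lpage_info array as gfn_to_index(base_gfn + npages - 1, base_gfn, level) + 1, then flags the first and last large page when the slot's start or end is not large-page aligned. A short worked calculation of the sizing under the same assumed 9-bit-per-level shift as the sketch above.

    #include <stdio.h>
    #include <stdint.h>

    typedef uint64_t gfn_t;

    static int hpage_gfn_shift(int level)
    {
        return (level - 1) * 9;              /* assumed: 9 for 2 MiB, 18 for 1 GiB */
    }

    static gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
    {
        return (gfn >> hpage_gfn_shift(level)) -
               (base_gfn >> hpage_gfn_shift(level));
    }

    int main(void)
    {
        gfn_t base_gfn = 0x1001;             /* deliberately not 2 MiB aligned */
        unsigned long npages = 0x800;        /* 8 MiB worth of 4 KiB pages */

        for (int level = 2; level <= 3; level++) {
            unsigned long lpages =
                (unsigned long)gfn_to_index(base_gfn + npages - 1, base_gfn, level) + 1;
            unsigned long hpages = 1UL << hpage_gfn_shift(level);

            printf("level %d: %lu lpage_info entries, slot start %saligned\n",
                   level, lpages, (base_gfn & (hpages - 1)) ? "mis" : "");
        }
        return 0;
    }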