Lines Matching +full:gpa +full:- +full:1

1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
8 #include <linux/page-flags.h>
17 * KVM_MMU_CACHE_MIN_PAGES is the number of GPA page table translation levels
21 #define KVM_MMU_CACHE_MIN_PAGES 1
37 static int kvm_tlb_flush_gpa(struct kvm_vcpu *vcpu, unsigned long gpa) in kvm_tlb_flush_gpa() argument
40 gpa &= (PAGE_MASK << 1); in kvm_tlb_flush_gpa()
41 invtlb(INVTLB_GID_ADDR, kvm_read_csr_gstat() & KVM_GSTAT_GID, gpa); in kvm_tlb_flush_gpa()
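The mask applied at line 40 drops one extra low bit because a LoongArch TLB entry, like a MIPS one, maps an even/odd pair of pages, so the invalidation address is aligned to a two-page boundary. A minimal standalone sketch of that arithmetic, assuming 16 KiB base pages (the actual value is configuration dependent):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Assumed page size for illustration; LoongArch commonly uses 16 KiB. */
	const uint64_t page_size = 0x4000;
	const uint64_t page_mask = ~(page_size - 1);   /* PAGE_MASK */
	uint64_t gpa = 0x1234abcd;

	/*
	 * (PAGE_MASK << 1) clears one extra low bit, aligning the GPA to the
	 * even/odd page pair that a single TLB entry covers.
	 */
	uint64_t pair_base = gpa & (page_mask << 1);
	printf("gpa %#llx -> pair base %#llx\n",
	       (unsigned long long)gpa, (unsigned long long)pair_base);
	return 0;
}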
90 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache); in kvm_mmu_free_memory_caches()
94 * kvm_pgd_alloc() - Allocate and initialise a KVM GPA page directory.
96 * Allocate a blank KVM GPA page directory (PGD) for representing guest physical
99 * Returns: Pointer to new KVM GPA page directory.
118 * kvm_walk_pgd() - Walk page table with optional allocation.
174 /* Caller must hold kvm->mmu_lock */
179 return kvm_walk_pgd(kvm->arch.gpa_mm.pgd, cache, addr); in kvm_pte_for_gpa()
184 * Flush a range of guest physical address space from the VM's GPA page tables.
192 bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PTE - 1); in kvm_flush_gpa_pte()
201 *data = *data + 1; in kvm_flush_gpa_pte()
213 bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PMD - 1); in kvm_flush_gpa_pmd()
248 bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PUD - 1); in kvm_flush_gpa_pud()
277 bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PGD - 1); in kvm_flush_gpa_pgd()
300 * kvm_flush_gpa_pt() - Flush a range of guest physical addresses.
302 * @start_gfn: Guest frame number of first page in GPA range to flush.
303 * @end_gfn: Guest frame number of last page in GPA range to flush.
305 * Flushes a range of GPA mappings from the GPA page tables.
307 * The caller must hold the @kvm->mmu_lock spinlock.
314 return kvm_flush_gpa_pgd(kvm->arch.gpa_mm.pgd, in kvm_flush_gpa_pt()
322 * GPA page table to allow dirty page tracking.
337 ret = 1; in kvm_mkclean_pte()
363 ret = 1; in kvm_mkclean_pmd()
424 * kvm_mkclean_gpa_pt() - Make a range of guest physical addresses clean.
426 * @start_gfn: Guest frame number of first page in GPA range to flush.
427 * @end_gfn: Guest frame number of last page in GPA range to flush.
429 * Make a range of GPA mappings clean so that guest writes will fault and
432 * The caller must hold the @kvm->mmu_lock spinlock.
434 * Returns: Whether any GPA mappings were modified, which would require
440 return kvm_mkclean_pgd(kvm->arch.gpa_mm.pgd, start_gfn << PAGE_SHIFT, in kvm_mkclean_gpa_pt()
445 * kvm_arch_mmu_enable_log_dirty_pt_masked() - write protect dirty pages
453 * acquire @kvm->mmu_lock.
459 gfn_t base_gfn = slot->base_gfn + gfn_offset; in kvm_arch_mmu_enable_log_dirty_pt_masked()
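The body of this hook is not shown in the listing; the sketch below assumes the __ffs/__fls pattern used by other KVM ports, with GCC builtins standing in for the kernel helpers. Bit i of @mask marks page base_gfn + i as dirty, and the cleaning pass only needs to span the first and last set bits:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical values: bit i of mask marks page base_gfn + i dirty. */
	uint64_t slot_base_gfn = 0x1000, gfn_offset = 64;
	uint64_t base_gfn = slot_base_gfn + gfn_offset;
	uint64_t mask = 0x00f0000000000f00ULL;

	uint64_t first = __builtin_ctzll(mask);        /* __ffs(mask) */
	uint64_t last  = 63 - __builtin_clzll(mask);   /* __fls(mask) */

	/* The write-protect pass covers only the first..last dirty pages. */
	printf("write-protect gfns [%#llx, %#llx]\n",
	       (unsigned long long)(base_gfn + first),
	       (unsigned long long)(base_gfn + last));
	return 0;
}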
474 kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n", in kvm_arch_commit_memory_region()
475 __func__, kvm, mem->slot, mem->guest_phys_addr, in kvm_arch_commit_memory_region()
476 mem->memory_size, mem->userspace_addr); in kvm_arch_commit_memory_region()
488 (!(old->flags & KVM_MEM_LOG_DIRTY_PAGES) && in kvm_arch_commit_memory_region()
489 new->flags & KVM_MEM_LOG_DIRTY_PAGES)) { in kvm_arch_commit_memory_region()
490 spin_lock(&kvm->mmu_lock); in kvm_arch_commit_memory_region()
491 /* Write protect GPA page table entries */ in kvm_arch_commit_memory_region()
492 needs_flush = kvm_mkclean_gpa_pt(kvm, new->base_gfn, in kvm_arch_commit_memory_region()
493 new->base_gfn + new->npages - 1); in kvm_arch_commit_memory_region()
497 spin_unlock(&kvm->mmu_lock); in kvm_arch_commit_memory_region()
503 /* Flush whole GPA */ in kvm_arch_flush_shadow_all()
521 spin_lock(&kvm->mmu_lock); in kvm_arch_flush_shadow_memslot()
522 /* Flush slot from GPA */ in kvm_arch_flush_shadow_memslot()
523 kvm_flush_gpa_pt(kvm, slot->base_gfn, in kvm_arch_flush_shadow_memslot()
524 slot->base_gfn + slot->npages - 1, &npages); in kvm_arch_flush_shadow_memslot()
528 spin_unlock(&kvm->mmu_lock); in kvm_arch_flush_shadow_memslot()
535 pgd_free(NULL, kvm->arch.gpa_mm.pgd); in _kvm_destroy_mm()
536 kvm->arch.gpa_mm.pgd = NULL; in _kvm_destroy_mm()
541 * VM's GPA page table to allow detection of commonly used pages.
562 ret = 1; in kvm_mkold_pte()
588 ret = 1; in kvm_mkold_pmd()
670 hva_start = max(start, memslot->userspace_addr); in handle_hva_to_gpa()
671 hva_end = min(end, memslot->userspace_addr + in handle_hva_to_gpa()
672 (memslot->npages << PAGE_SHIFT)); in handle_hva_to_gpa()
678 * {gfn_start, gfn_start+1, ..., gfn_end-1}. in handle_hva_to_gpa()
681 gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot); in handle_hva_to_gpa()
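Adding PAGE_SIZE - 1 before the conversion makes gfn_end exclusive while still covering a partial trailing page. A standalone sketch of that computation with a hypothetical memslot, where hva_to_gfn() stands in for hva_to_gfn_memslot() written out as base_gfn + ((hva - userspace_addr) >> PAGE_SHIFT), assuming 16 KiB pages:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 14
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

struct memslot { uint64_t base_gfn, userspace_addr, npages; };

static uint64_t hva_to_gfn(uint64_t hva, const struct memslot *s)
{
	return s->base_gfn + ((hva - s->userspace_addr) >> PAGE_SHIFT);
}

int main(void)
{
	struct memslot s = { .base_gfn = 0x100,
			     .userspace_addr = 0x40000000, .npages = 64 };
	uint64_t hva_start = 0x40001000;   /* inside the slot's first page */
	uint64_t hva_end   = 0x40009000;   /* partway into its third page */

	uint64_t gfn_start = hva_to_gfn(hva_start, &s);
	/* Round up so the partial trailing page is still covered. */
	uint64_t gfn_end = hva_to_gfn(hva_end + PAGE_SIZE - 1, &s);

	/* Handlers then operate on {gfn_start, ..., gfn_end - 1}. */
	printf("gfn range [%#llx, %#llx)\n",
	       (unsigned long long)gfn_start, (unsigned long long)gfn_end);
	return 0;
}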
695 kvm_flush_gpa_pt(kvm, gfn, gfn_end - 1, &npages); in kvm_unmap_hva_handler()
712 gpa_t gpa = gfn << PAGE_SHIFT; in kvm_set_spte_handler() local
714 pte_t *gpa_pte = kvm_pte_for_gpa(kvm, NULL, gpa); in kvm_set_spte_handler()
722 if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES && !pte_dirty(old_pte)) in kvm_set_spte_handler()
724 else if (memslot->flags & KVM_MEM_READONLY) in kvm_set_spte_handler()
755 return kvm_mkold_pgd(kvm->arch.gpa_mm.pgd, gfn << PAGE_SHIFT, in kvm_age_hva_handler()
762 gpa_t gpa = gfn << PAGE_SHIFT; in kvm_test_age_hva_handler() local
763 pte_t *gpa_pte = kvm_pte_for_gpa(kvm, NULL, gpa); in kvm_test_age_hva_handler()
785 pgd = kvm->arch.gpa_mm.pgd + pgd_index(addr); in kvm_get_pud()
823 pmd = kvm_get_pmd(vcpu->kvm, cache, addr); in kvm_set_pmd_huge()
830 * same value. Following the break-before-make in kvm_set_pmd_huge()
856 ++vcpu->stat.huge_merge_exits; in kvm_set_pmd_huge()
857 kvm_flush_gpa_pt(vcpu->kvm, in kvm_set_pmd_huge()
859 ((addr & PMD_MASK) + PMD_SIZE - 1) >> PAGE_SHIFT, NULL); in kvm_set_pmd_huge()
871 * and mapped back in on-demand. in kvm_set_pmd_huge()
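The flush at lines 857-859 covers every gfn inside the PMD-sized block containing @addr. A standalone sketch of that range computation, assuming 16 KiB base pages and the resulting 32 MiB PMD_SIZE (both values are configuration dependent):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint64_t page_shift = 14;            /* assumed PAGE_SHIFT */
	const uint64_t pmd_size = 1ULL << 25;      /* assumed PMD_SIZE (32 MiB) */
	const uint64_t pmd_mask = ~(pmd_size - 1); /* PMD_MASK */

	uint64_t addr = 0x12345678;
	/* First and last gfn of the PMD block that addr falls in. */
	uint64_t first_gfn = (addr & pmd_mask) >> page_shift;
	uint64_t last_gfn  = ((addr & pmd_mask) + pmd_size - 1) >> page_shift;

	printf("flush gfns [%#llx, %#llx]\n",
	       (unsigned long long)first_gfn, (unsigned long long)last_gfn);
	return 0;
}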
899 (atomic_read(&page->_mapcount) < 0)) { in transparent_hugepage_adjust()
919 mask = PTRS_PER_PMD - 1; in transparent_hugepage_adjust()
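Only the mask setup is visible here; the adjustment is assumed to follow the usual pattern of aligning both the pfn and the GPA down to the huge-page boundary so one PMD entry can map the whole block, which relies on the gfn and pfn sharing the same offset within that block. A standalone sketch of just the mask arithmetic (reference counting omitted), assuming PTRS_PER_PMD = 2048 for 16 KiB pages:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint64_t ptrs_per_pmd = 2048;        /* assumed PTRS_PER_PMD */
	const uint64_t mask = ptrs_per_pmd - 1;

	uint64_t pfn = 0x80123;                    /* somewhere inside a THP */
	uint64_t gfn = 0x40123;                    /* same offset within the block */

	if ((gfn & mask) == (pfn & mask)) {        /* offsets must agree */
		pfn &= ~mask;                      /* head page of the THP */
		gfn &= ~mask;                      /* matching GPA block base */
	}
	printf("adjusted pfn %#llx gfn %#llx\n",
	       (unsigned long long)pfn, (unsigned long long)gfn);
	return 0;
}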
945 if ((memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) && write) in fault_supports_huge_mapping()
948 size = memslot->npages * PAGE_SIZE; in fault_supports_huge_mapping()
949 gpa_start = memslot->base_gfn << PAGE_SHIFT; in fault_supports_huge_mapping()
950 uaddr_start = memslot->userspace_addr; in fault_supports_huge_mapping()
955 * within a PMD/PUD for userspace and GPA cannot be mapped with stage-2 in fault_supports_huge_mapping()
960 * memslot->userspace_addr: in fault_supports_huge_mapping()
961 * +-----+--------------------+--------------------+---+ in fault_supports_huge_mapping()
962 * |abcde|fgh Stage-1 block | Stage-1 block tv|xyz| in fault_supports_huge_mapping()
963 * +-----+--------------------+--------------------+---+ in fault_supports_huge_mapping()
966 * memslot->base_gfn << PAGE_SHIFT: in fault_supports_huge_mapping()
966 * +---+--------------------+--------------------+-----+ in fault_supports_huge_mapping()
967 * |abc|def Stage-2 block | Stage-2 block |tvxyz| in fault_supports_huge_mapping()
968 * +---+--------------------+--------------------+-----+ in fault_supports_huge_mapping()
970 * If we create those stage-2 blocks, we'll end up with this incorrect in fault_supports_huge_mapping()
972 * d -> f in fault_supports_huge_mapping()
973 * e -> g in fault_supports_huge_mapping()
974 * f -> h in fault_supports_huge_mapping()
976 if ((gpa_start & (map_size - 1)) != (uaddr_start & (map_size - 1))) in fault_supports_huge_mapping()
982 * for the beginning and end of a non-block aligned and non-block sized in fault_supports_huge_mapping()
991 return (hva & ~(map_size - 1)) >= uaddr_start && in fault_supports_huge_mapping()
992 (hva & ~(map_size - 1)) + map_size <= uaddr_end; in fault_supports_huge_mapping()
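A standalone sketch of the alignment test at line 976 with hypothetical addresses: the GPA side is block aligned but the HVA side is off by one page, so the huge mapping has to be refused and the range falls back to ordinary pages. A 32 MiB map_size (an assumed PMD_SIZE) is used for the numbers:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

int main(void)
{
	const uint64_t map_size = 1ULL << 25;      /* assumed PMD_SIZE */
	uint64_t gpa_start   = 0x10000000;         /* block aligned in GPA space */
	uint64_t uaddr_start = 0x7f0000004000;     /* off by one page in HVA space */

	/* Same offset-within-block test as the check above. */
	bool misaligned = (gpa_start & (map_size - 1)) !=
			  (uaddr_start & (map_size - 1));
	printf("huge mapping %s\n", misaligned ? "rejected" : "allowed");
	return 0;
}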
996 * kvm_map_page_fast() - Fast path GPA fault handler.
998 * @gpa: Guest physical address of fault.
1001 * Perform fast path GPA fault handling, doing all that can be done without
1007 * -EFAULT on failure due to absent GPA mapping or write to
1008 * read-only page, in which case KVM must be consulted.
1010 static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, in kvm_map_page_fast() argument
1013 struct kvm *kvm = vcpu->kvm; in kvm_map_page_fast()
1014 gfn_t gfn = gpa >> PAGE_SHIFT; in kvm_map_page_fast()
1021 spin_lock(&kvm->mmu_lock); in kvm_map_page_fast()
1023 /* Fast path - just check GPA page table for an existing entry */ in kvm_map_page_fast()
1024 ptep = kvm_pte_for_gpa(kvm, NULL, gpa); in kvm_map_page_fast()
1026 ret = -EFAULT; in kvm_map_page_fast()
1039 ret = -EFAULT; in kvm_map_page_fast()
1049 if (slot->flags & KVM_MEM_LOG_DIRTY_PAGES) { in kvm_map_page_fast()
1050 ret = -EFAULT; in kvm_map_page_fast()
1060 gfn_t base_gfn = (gpa & PMD_MASK) >> PAGE_SHIFT; in kvm_map_page_fast()
1070 spin_unlock(&kvm->mmu_lock); in kvm_map_page_fast()
1086 memcache = &vcpu->arch.mmu_page_cache; in kvm_split_huge()
1097 return child + (gfn & (PTRS_PER_PTE - 1)); in kvm_split_huge()
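The returned pointer is the PTE slot for @gfn inside the freshly allocated child table; only the low bits of the gfn index into it. A standalone sketch of that index computation, assuming PTRS_PER_PTE = 2048 for 16 KiB pages with 8-byte entries:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint64_t ptrs_per_pte = 2048;        /* assumed PTRS_PER_PTE */
	uint64_t gfn = 0x40123;

	/* Low bits of the gfn select the slot within the split-out table. */
	uint64_t idx = gfn & (ptrs_per_pte - 1);
	printf("gfn %#llx -> PTE slot %llu in the child table\n",
	       (unsigned long long)gfn, (unsigned long long)idx);
	return 0;
}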
1101 * kvm_map_page() - Map a guest physical page.
1103 * @gpa: Guest physical address of fault.
1106 * Handle GPA faults by creating a new GPA mapping (or updating an existing
1110 * asking KVM for the corresponding PFN, and creating a mapping in the GPA page
1117 * -EFAULT if there is no memory region at @gpa or a write was
1118 * attempted to a read-only memory region. This is usually handled
1121 static int kvm_map_page(struct kvm_vcpu *vcpu, unsigned long gpa, in kvm_map_page() argument
1133 gfn_t gfn = gpa >> PAGE_SHIFT; in kvm_map_page()
1135 struct kvm *kvm = vcpu->kvm; in kvm_map_page()
1137 struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache; in kvm_map_page()
1140 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_map_page()
1141 err = kvm_map_page_fast(vcpu, gpa, write); in kvm_map_page()
1151 mmap_read_lock(current->mm); in kvm_map_page()
1152 vma = find_vma_intersection(current->mm, hva, hva + 1); in kvm_map_page()
1155 mmap_read_unlock(current->mm); in kvm_map_page()
1156 err = -EFAULT; in kvm_map_page()
1165 ++vcpu->stat.huge_dec_exits; in kvm_map_page()
1170 gfn = (gpa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT; in kvm_map_page()
1172 mmap_read_unlock(current->mm); in kvm_map_page()
1184 mmu_seq = kvm->mmu_notifier_seq; in kvm_map_page()
1198 /* Slow path - ask KVM core whether we can access this GPA */ in kvm_map_page()
1201 err = -EFAULT; in kvm_map_page()
1205 spin_lock(&kvm->mmu_lock); in kvm_map_page()
1213 spin_unlock(&kvm->mmu_lock); in kvm_map_page()
1233 ++vcpu->stat.huge_thp_exits; in kvm_map_page()
1235 transparent_hugepage_adjust(&pfn, &gpa)) { in kvm_map_page()
1236 ++vcpu->stat.huge_adjust_exits; in kvm_map_page()
1243 if (vma->vm_flags & (VM_IO | VM_PFNMAP)) in kvm_map_page()
1261 gfn_t base_gfn = (gpa & PMD_MASK) >> PAGE_SHIFT; in kvm_map_page()
1266 ++vcpu->stat.huge_set_exits; in kvm_map_page()
1267 kvm_set_pmd_huge(vcpu, memcache, gpa, &new_pmd); in kvm_map_page()
1274 ptep = kvm_pte_for_gpa(kvm, memcache, gpa); in kvm_map_page()
1282 spin_unlock(&kvm->mmu_lock); in kvm_map_page()
1286 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_map_page()
1304 * kvm_flush_tlb_all() - Flush all root TLB entries for
1307 * Invalidate all entries including GVA-->GPA and GPA-->HPA mappings.