Lines Matching +full:gpa +full:- +full:1
19 * KVM_MMU_CACHE_MIN_PAGES is the number of GPA page table translation levels
23 #define KVM_MMU_CACHE_MIN_PAGES 1
30 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache); in kvm_mmu_free_memory_caches()
34 * kvm_pgd_init() - Initialise KVM GPA page directory.
35 * @page: Pointer to page directory (PGD) for KVM GPA.
37 * Initialise a KVM GPA page directory with pointers to the invalid table, i.e.
59 p[1] = entry; in kvm_pgd_init()
64 p[-3] = entry; in kvm_pgd_init()
65 p[-2] = entry; in kvm_pgd_init()
66 p[-1] = entry; in kvm_pgd_init()
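The p[1] and p[-3]..p[-1] matches above come from the unrolled fill loop in kvm_pgd_init(), which writes the address of the invalid lower-level table into every PGD slot, eight entries per iteration. A minimal user-space sketch of that pattern, assuming a hypothetical NENTRIES in place of PTRS_PER_PGD and a dummy invalid_table in place of invalid_pte_table/invalid_pmd_table:

#include <stdio.h>

#define NENTRIES 512                    /* stand-in for PTRS_PER_PGD (multiple of 8) */
static unsigned long invalid_table[1];  /* stand-in for the invalid lower-level table */

/* Point every directory entry at the invalid table, eight slots per pass. */
static void pgd_init_sketch(unsigned long *page)
{
        unsigned long entry = (unsigned long)invalid_table;
        unsigned long *p = page, *end = page + NENTRIES;

        do {
                p[0] = entry;
                p[1] = entry;
                p[2] = entry;
                p[3] = entry;
                p += 8;
                p[-4] = entry;
                p[-3] = entry;
                p[-2] = entry;
                p[-1] = entry;
        } while (p != end);
}

int main(void)
{
        unsigned long pgd[NENTRIES];

        pgd_init_sketch(pgd);
        printf("pgd[0]=%#lx pgd[%d]=%#lx\n", pgd[0], NENTRIES - 1, pgd[NENTRIES - 1]);
        return 0;
}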
71 * kvm_pgd_alloc() - Allocate and initialise a KVM GPA page directory.
73 * Allocate a blank KVM GPA page directory (PGD) for representing guest physical
76 * Returns: Pointer to new KVM GPA page directory.
91 * kvm_mips_walk_pgd() - Walk page table with optional allocation.
142 /* Caller must hold kvm->mmu_lock */
147 return kvm_mips_walk_pgd(kvm->arch.gpa_mm.pgd, cache, addr); in kvm_mips_pte_for_gpa()
152 * Flush a range of guest physical address space from the VM's GPA page tables.
160 bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PTE - 1); in kvm_mips_flush_gpa_pte()
179 bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PMD - 1); in kvm_mips_flush_gpa_pmd()
207 bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PUD - 1); in kvm_mips_flush_gpa_pud()
236 bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PGD - 1); in kvm_mips_flush_gpa_pgd()
259 * kvm_mips_flush_gpa_pt() - Flush a range of guest physical addresses.
261 * @start_gfn: Guest frame number of first page in GPA range to flush.
262 * @end_gfn: Guest frame number of last page in GPA range to flush.
264 * Flushes a range of GPA mappings from the GPA page tables.
266 * The caller must hold the @kvm->mmu_lock spinlock.
273 return kvm_mips_flush_gpa_pgd(kvm->arch.gpa_mm.pgd, in kvm_mips_flush_gpa_pt()
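Each flush helper above derives i_min/i_max from the start and end addresses at its level and reports safe_to_remove only when the range spans the whole table, so the caller knows it may free the child table and clear its own entry. A compact two-level, compile-only sketch of that contract, with hypothetical NPTE/NPGD sizes and plain malloc()ed arrays standing in for real page-table types:

#include <stdbool.h>
#include <stdlib.h>

#define NPTE 64   /* stand-in for PTRS_PER_PTE */
#define NPGD 64   /* stand-in for PTRS_PER_PGD */

/* Clear leaves in [i_min, i_max]; the table may be freed only if fully covered. */
static bool flush_pte_sketch(unsigned long *pte, int i_min, int i_max)
{
        bool safe_to_remove = (i_min == 0 && i_max == NPTE - 1);

        for (int i = i_min; i <= i_max; i++)
                pte[i] = 0;
        return safe_to_remove;
}

/* Two-level walk: recurse into present child tables, free the ones that became
 * entirely unused, and report whether this level is now removable as well. */
static bool flush_pgd_sketch(unsigned long **pgd, int i_min, int i_max,
                             int lo, int hi)
{
        bool safe_to_remove = (i_min == 0 && i_max == NPGD - 1);

        for (int i = i_min; i <= i_max; i++) {
                if (!pgd[i])
                        continue;
                /* Only the first/last entries may be partially covered (lo/hi). */
                if (flush_pte_sketch(pgd[i], i == i_min ? lo : 0,
                                     i == i_max ? hi : NPTE - 1)) {
                        free(pgd[i]);       /* child tables assumed malloc()ed */
                        pgd[i] = NULL;
                } else {
                        safe_to_remove = false;
                }
        }
        return safe_to_remove;
}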
297 ret = 1; \
377 * GPA page table to allow dirty page tracking.
383 * kvm_mips_mkclean_gpa_pt() - Make a range of guest physical addresses clean. in BUILD_PTE_RANGE_OP()
385 * @start_gfn: Guest frame number of first page in GPA range to flush. in BUILD_PTE_RANGE_OP()
386 * @end_gfn: Guest frame number of last page in GPA range to flush. in BUILD_PTE_RANGE_OP()
388 * Make a range of GPA mappings clean so that guest writes will fault and in BUILD_PTE_RANGE_OP()
391 * The caller must hold the @kvm->mmu_lock spinlock. in BUILD_PTE_RANGE_OP()
393 * Returns: Whether any GPA mappings were modified, which would require in BUILD_PTE_RANGE_OP()
399 return kvm_mips_mkclean_pgd(kvm->arch.gpa_mm.pgd, in BUILD_PTE_RANGE_OP()
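The mkclean walkers that kvm_mips_mkclean_gpa_pt() calls are generated by BUILD_PTE_RANGE_OP() around pte_mkclean(), so the write-protect pass amounts to clearing write permission on every present PTE in the gfn range and reporting whether anything changed (the ret = 1 match above), which tells the caller a guest TLB flush is needed. A rough stand-alone sketch using an invented SK_PTE_WRITE bit rather than the real MIPS PTE layout:

#include <stdbool.h>

#define SK_NPTE       64           /* stand-in for PTRS_PER_PTE */
#define SK_PTE_WRITE  (1ul << 0)   /* invented write-enable bit */

/*
 * Clear the write bit on every present PTE in [i_min, i_max] so the next
 * guest store faults and can be recorded in the dirty log.  Returns whether
 * anything was modified, i.e. whether the guest TLB must be flushed.
 */
static bool mkclean_pte_range_sketch(unsigned long *pte, int i_min, int i_max)
{
        bool ret = false;

        for (int i = i_min; i <= i_max; i++) {
                if (!pte[i])
                        continue;
                if (pte[i] & SK_PTE_WRITE) {
                        pte[i] &= ~SK_PTE_WRITE;
                        ret = true;
                }
        }
        return ret;
}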
405 * kvm_arch_mmu_enable_log_dirty_pt_masked() - write protect dirty pages
413 * acquire @kvm->mmu_lock.
419 gfn_t base_gfn = slot->base_gfn + gfn_offset; in kvm_arch_mmu_enable_log_dirty_pt_masked()
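kvm_arch_mmu_enable_log_dirty_pt_masked() turns one word of the dirty bitmap into a gfn range: base_gfn is the slot base plus the word's offset, and the first and last set bits of the mask (__ffs()/__fls() in the MIPS implementation) bound the mkclean pass. A user-space illustration of the same arithmetic, with GCC builtins in place of __ffs()/__fls() and an invented mask_to_gfn_range() helper:

#include <stdio.h>

typedef unsigned long long gfn_t;   /* stand-in for the kernel's gfn_t */

/* mask must be non-zero: the ctz/clz builtins are undefined for 0. */
static void mask_to_gfn_range(gfn_t base_gfn, unsigned long mask,
                              gfn_t *start, gfn_t *end)
{
        *start = base_gfn + __builtin_ctzl(mask);                          /* __ffs(mask) */
        *end   = base_gfn + (8 * sizeof(mask) - 1 - __builtin_clzl(mask)); /* __fls(mask) */
}

int main(void)
{
        gfn_t start, end;

        mask_to_gfn_range(0x1000, 0x00f0UL, &start, &end);
        printf("write-protect gfns %#llx..%#llx\n", start, end);  /* 0x1004..0x1007 */
        return 0;
}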
429 * VM's GPA page table to allow detection of commonly used pages.
437 return kvm_mips_mkold_pgd(kvm->arch.gpa_mm.pgd, in BUILD_PTE_RANGE_OP()
462 hva_start = max(start, memslot->userspace_addr); in handle_hva_to_gpa()
463 hva_end = min(end, memslot->userspace_addr + in handle_hva_to_gpa()
464 (memslot->npages << PAGE_SHIFT)); in handle_hva_to_gpa()
470 * {gfn_start, gfn_start+1, ..., gfn_end-1}. in handle_hva_to_gpa()
473 gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot); in handle_hva_to_gpa()
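handle_hva_to_gpa() clips the incoming HVA range against each memslot's userspace mapping and converts the surviving part to a gfn range; the + PAGE_SIZE - 1 on hva_end makes the conversion cover a trailing partial page, matching the {gfn_start, ..., gfn_end-1} comment above. A self-contained model of that clamping, with sk_-prefixed stand-ins for the memslot structure and hva_to_gfn_memslot():

#include <stdio.h>

#define SK_PAGE_SHIFT 12
#define SK_PAGE_SIZE  (1ul << SK_PAGE_SHIFT)

/* Minimal memslot model: HVA of the mapping, base gfn, size in pages. */
struct sk_memslot {
        unsigned long userspace_addr;
        unsigned long long base_gfn;
        unsigned long npages;
};

/* hva_to_gfn_memslot() equivalent under the model above. */
static unsigned long long sk_hva_to_gfn(unsigned long hva,
                                        const struct sk_memslot *slot)
{
        return slot->base_gfn + ((hva - slot->userspace_addr) >> SK_PAGE_SHIFT);
}

/*
 * Clamp [start, end) to the part backed by the slot and convert it to a gfn
 * range with inclusive start and exclusive end, as handle_hva_to_gpa() does
 * before calling its per-range handler.  Returns 0 if there is no overlap.
 */
static int sk_hva_range_to_gfns(unsigned long start, unsigned long end,
                                const struct sk_memslot *slot,
                                unsigned long long *gfn_start,
                                unsigned long long *gfn_end)
{
        unsigned long hva_start, hva_end;

        hva_start = start > slot->userspace_addr ? start : slot->userspace_addr;
        hva_end = slot->userspace_addr + (slot->npages << SK_PAGE_SHIFT);
        if (end < hva_end)
                hva_end = end;
        if (hva_start >= hva_end)
                return 0;

        *gfn_start = sk_hva_to_gfn(hva_start, slot);
        /* {gfn_start, ..., gfn_end - 1}: also covers a partial last page. */
        *gfn_end = sk_hva_to_gfn(hva_end + SK_PAGE_SIZE - 1, slot);
        return 1;
}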
486 return 1; in kvm_unmap_hva_handler()
494 kvm_mips_callbacks->flush_shadow_all(kvm); in kvm_unmap_hva_range()
501 gpa_t gpa = gfn << PAGE_SHIFT; in kvm_set_spte_handler() local
503 pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa); in kvm_set_spte_handler()
511 if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES && !pte_dirty(old_pte)) in kvm_set_spte_handler()
513 else if (memslot->flags & KVM_MEM_READONLY) in kvm_set_spte_handler()
536 kvm_mips_callbacks->flush_shadow_all(kvm); in kvm_set_spte_hva()
549 gpa_t gpa = gfn << PAGE_SHIFT; in kvm_test_age_hva_handler() local
550 pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa); in kvm_test_age_hva_handler()
568 * _kvm_mips_map_page_fast() - Fast path GPA fault handler.
570 * @gpa: Guest physical address of fault.
572 * @out_entry: New PTE for @gpa (written on success unless NULL).
573 * @out_buddy: New PTE for @gpa's buddy (written on success unless
576 * Perform fast path GPA fault handling, doing all that can be done without
582 * -EFAULT on failure due to absent GPA mapping or write to
583 * read-only page, in which case KVM must be consulted.
585 static int _kvm_mips_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, in _kvm_mips_map_page_fast() argument
589 struct kvm *kvm = vcpu->kvm; in _kvm_mips_map_page_fast()
590 gfn_t gfn = gpa >> PAGE_SHIFT; in _kvm_mips_map_page_fast()
596 spin_lock(&kvm->mmu_lock); in _kvm_mips_map_page_fast()
598 /* Fast path - just check GPA page table for an existing entry */ in _kvm_mips_map_page_fast()
599 ptep = kvm_mips_pte_for_gpa(kvm, NULL, gpa); in _kvm_mips_map_page_fast()
601 ret = -EFAULT; in _kvm_mips_map_page_fast()
614 ret = -EFAULT; in _kvm_mips_map_page_fast()
631 spin_unlock(&kvm->mmu_lock); in _kvm_mips_map_page_fast()
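The fast path above resolves a GPA fault entirely from the existing GPA page table under mmu_lock: a missing mapping returns -EFAULT, and so does a write fault on a clean PTE that is not writable, in both cases deferring to the slow path; otherwise it marks the PTE young (and dirty for writes) and hands back the PTE together with its even/odd buddy. A toy model of that decision flow, with invented SK_* bits instead of the real MIPS PTE layout and a flat array indexed by gfn instead of a real page table:

#include <errno.h>
#include <stdbool.h>

/* Invented PTE bits for the sketch; not the MIPS layout. */
#define SK_PRESENT  (1u << 0)
#define SK_WRITE    (1u << 1)   /* writes allowed in principle */
#define SK_DIRTY    (1u << 2)   /* a write has already been tracked */
#define SK_ACCESSED (1u << 3)   /* page has been referenced */

/*
 * Decision flow of the fast path: everything that can be answered from the
 * GPA page table alone.  -EFAULT means "fall back to the slow path".
 */
static int map_page_fast_sketch(unsigned int *gpa_pt, unsigned long gfn,
                                bool write_fault,
                                unsigned int *out_entry, unsigned int *out_buddy)
{
        unsigned int *ptep = &gpa_pt[gfn];   /* flat toy table, one word per gfn */

        if (!(*ptep & SK_PRESENT))
                return -EFAULT;              /* no mapping yet: consult KVM */

        *ptep |= SK_ACCESSED;                /* track access to old pages */

        if (write_fault && !(*ptep & SK_DIRTY)) {
                if (!(*ptep & SK_WRITE))
                        return -EFAULT;      /* read-only page: consult KVM */
                *ptep |= SK_DIRTY;           /* track dirtying of writeable pages */
        }

        if (out_entry)
                *out_entry = ptep[0];
        if (out_buddy)
                *out_buddy = ptep[(gfn & 1) ? -1 : 1];  /* other half of the TLB pair */
        return 0;
}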
638 * kvm_mips_map_page() - Map a guest physical page.
640 * @gpa: Guest physical address of fault.
642 * @out_entry: New PTE for @gpa (written on success unless NULL).
643 * @out_buddy: New PTE for @gpa's buddy (written on success unless
646 * Handle GPA faults by creating a new GPA mapping (or updating an existing
650 * asking KVM for the corresponding PFN, and creating a mapping in the GPA page
657 * -EFAULT if there is no memory region at @gpa or a write was
658 * attempted to a read-only memory region. This is usually handled
661 static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa, in kvm_mips_map_page() argument
665 struct kvm *kvm = vcpu->kvm; in kvm_mips_map_page()
666 struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache; in kvm_mips_map_page()
667 gfn_t gfn = gpa >> PAGE_SHIFT; in kvm_mips_map_page()
676 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_mips_map_page()
677 err = _kvm_mips_map_page_fast(vcpu, gpa, write_fault, out_entry, in kvm_mips_map_page()
692 mmu_seq = kvm->mmu_notifier_seq; in kvm_mips_map_page()
706 /* Slow path - ask KVM core whether we can access this GPA */ in kvm_mips_map_page()
709 err = -EFAULT; in kvm_mips_map_page()
713 spin_lock(&kvm->mmu_lock); in kvm_mips_map_page()
721 spin_unlock(&kvm->mmu_lock); in kvm_mips_map_page()
727 ptep = kvm_mips_pte_for_gpa(kvm, memcache, gpa); in kvm_mips_map_page()
750 spin_unlock(&kvm->mmu_lock); in kvm_mips_map_page()
754 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_mips_map_page()
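The slow path in kvm_mips_map_page() must call the generic gfn-to-pfn machinery without holding mmu_lock, so it snapshots kvm->mmu_notifier_seq first and, after re-taking the lock, retries the whole lookup if the sequence moved (an MMU notifier invalidation ran in between). A stripped-down, user-space rendering of that retry pattern; the sk_ structures and helpers are stand-ins, and the real code additionally needs the smp_rmb() and locking that are only hinted at in comments here:

#include <errno.h>
#include <stdbool.h>

/* Toy stand-in for the kvm->mmu_notifier_seq bookkeeping. */
struct sk_kvm {
        unsigned long mmu_notifier_seq;   /* bumped whenever an invalidation ran */
};

static long sk_gfn_to_pfn(struct sk_kvm *kvm, unsigned long gfn)
{
        (void)kvm;
        return (long)gfn;   /* placeholder: resolve the gfn, possibly sleeping */
}

static bool sk_mmu_notifier_retry(struct sk_kvm *kvm, unsigned long seq)
{
        /* Retry if an invalidation raced with the unlocked lookup. */
        return kvm->mmu_notifier_seq != seq;
}

/* The lock-drop/recheck pattern used by the kvm_mips_map_page() slow path. */
static int map_page_slow_sketch(struct sk_kvm *kvm, unsigned long gfn,
                                long *out_pfn)
{
        unsigned long mmu_seq;
        long pfn;

retry:
        mmu_seq = kvm->mmu_notifier_seq;    /* snapshot before the lookup */
        pfn = sk_gfn_to_pfn(kvm, gfn);      /* done without holding mmu_lock */
        if (pfn < 0)
                return -EFAULT;

        /* ... take mmu_lock here in the real code ... */
        if (sk_mmu_notifier_retry(kvm, mmu_seq)) {
                /* ... drop mmu_lock, release the pfn, and try again ... */
                goto retry;
        }

        *out_pfn = pfn;                     /* safe to install into the GPA page table */
        return 0;
}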
761 struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache; in kvm_trap_emul_pte_for_gva()
771 pgdp = vcpu->arch.guest_kernel_mm.pgd; in kvm_trap_emul_pte_for_gva()
773 pgdp = vcpu->arch.guest_user_mm.pgd; in kvm_trap_emul_pte_for_gva()
784 addr &= PAGE_MASK << 1; in kvm_trap_emul_invalidate_gva()
786 pgdp = vcpu->arch.guest_kernel_mm.pgd; in kvm_trap_emul_invalidate_gva()
790 ptep[1] = pfn_pte(0, __pgprot(0)); in kvm_trap_emul_invalidate_gva()
794 pgdp = vcpu->arch.guest_user_mm.pgd; in kvm_trap_emul_invalidate_gva()
798 ptep[1] = pfn_pte(0, __pgprot(0)); in kvm_trap_emul_invalidate_gva()
805 * Flush a range of guest virtual address space from the VM's GVA page tables.
813 bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PTE - 1); in kvm_mips_flush_gva_pte()
839 bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PMD - 1); in kvm_mips_flush_gva_pmd()
867 bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PUD - 1); in kvm_mips_flush_gva_pud()
896 bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PGD - 1); in kvm_mips_flush_gva_pgd()
941 * Don't leak writeable but clean entries from GPA page tables. We don't in kvm_mips_gpa_pte_to_gva_unmapped()
981 unsigned long gpa; in kvm_mips_handle_kseg0_tlb_fault() local
988 return -1; in kvm_mips_handle_kseg0_tlb_fault()
991 /* Get the GPA page table entry */ in kvm_mips_handle_kseg0_tlb_fault()
992 gpa = KVM_GUEST_CPHYSADDR(badvaddr); in kvm_mips_handle_kseg0_tlb_fault()
993 idx = (badvaddr >> PAGE_SHIFT) & 1; in kvm_mips_handle_kseg0_tlb_fault()
994 if (kvm_mips_map_page(vcpu, gpa, write_fault, &pte_gpa[idx], in kvm_mips_handle_kseg0_tlb_fault()
996 return -1; in kvm_mips_handle_kseg0_tlb_fault()
1002 return -1; in kvm_mips_handle_kseg0_tlb_fault()
1005 /* Copy a pair of entries from GPA page table to GVA page table */ in kvm_mips_handle_kseg0_tlb_fault()
1007 ptep_gva[1] = kvm_mips_gpa_pte_to_gva_unmapped(pte_gpa[1]); in kvm_mips_handle_kseg0_tlb_fault()
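A MIPS TLB entry maps an even/odd pair of pages, which is why the fault handlers above work on buddies: idx = (badvaddr >> PAGE_SHIFT) & 1 selects the faulting half, and masks like PAGE_MASK << 1 (see the GVA invalidation lines earlier) align an address down to the base of the pair. A tiny demonstration of that arithmetic, assuming a 4 KiB page size:

#include <stdio.h>

#define SK_PAGE_SHIFT 12
#define SK_PAGE_MASK  (~((1ul << SK_PAGE_SHIFT) - 1))

int main(void)
{
        unsigned long badvaddr = 0x40005abcUL;    /* example faulting GVA */

        /* Which half of the even/odd TLB pair faulted? */
        int idx = (badvaddr >> SK_PAGE_SHIFT) & 1;

        /* Base of the pair: clear one extra bit above the page offset,
         * the same effect as "addr &= PAGE_MASK << 1" in the handlers. */
        unsigned long pair_base = badvaddr & (SK_PAGE_MASK << 1);

        printf("fault at %#lx -> pair base %#lx, half %d\n",
               badvaddr, pair_base, idx);
        return 0;
}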
1019 struct kvm *kvm = vcpu->kvm; in kvm_mips_handle_mapped_seg_tlb_fault()
1025 tlb_lo[0] = tlb->tlb_lo[0]; in kvm_mips_handle_mapped_seg_tlb_fault()
1026 tlb_lo[1] = tlb->tlb_lo[1]; in kvm_mips_handle_mapped_seg_tlb_fault()
1032 if (!((gva ^ KVM_GUEST_COMMPAGE_ADDR) & VPN2_MASK & (PAGE_MASK << 1))) in kvm_mips_handle_mapped_seg_tlb_fault()
1035 /* Get the GPA page table entry */ in kvm_mips_handle_mapped_seg_tlb_fault()
1038 return -1; in kvm_mips_handle_mapped_seg_tlb_fault()
1040 /* And its GVA buddy's GPA page table entry if it also exists */ in kvm_mips_handle_mapped_seg_tlb_fault()
1043 spin_lock(&kvm->mmu_lock); in kvm_mips_handle_mapped_seg_tlb_fault()
1048 spin_unlock(&kvm->mmu_lock); in kvm_mips_handle_mapped_seg_tlb_fault()
1055 return -1; in kvm_mips_handle_mapped_seg_tlb_fault()
1058 /* Copy a pair of entries from GPA page table to GVA page table */ in kvm_mips_handle_mapped_seg_tlb_fault()
1060 ptep_gva[1] = kvm_mips_gpa_pte_to_gva_mapped(pte_gpa[1], tlb_lo[1]); in kvm_mips_handle_mapped_seg_tlb_fault()
1065 kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc, in kvm_mips_handle_mapped_seg_tlb_fault()
1066 tlb->tlb_lo[0], tlb->tlb_lo[1]); in kvm_mips_handle_mapped_seg_tlb_fault()
1080 return -1; in kvm_mips_handle_commpage_tlb_fault()
1083 pfn = PFN_DOWN(virt_to_phys(vcpu->arch.kseg0_commpage)); in kvm_mips_handle_commpage_tlb_fault()
1093 * kvm_mips_migrate_count() - Migrate timer.
1105 if (hrtimer_cancel(&vcpu->arch.comparecount_timer)) in kvm_mips_migrate_count()
1106 hrtimer_restart(&vcpu->arch.comparecount_timer); in kvm_mips_migrate_count()
1118 vcpu->cpu = cpu; in kvm_arch_vcpu_load()
1119 if (vcpu->arch.last_sched_cpu != cpu) { in kvm_arch_vcpu_load()
1120 kvm_debug("[%d->%d]KVM VCPU[%d] switch\n", in kvm_arch_vcpu_load()
1121 vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id); in kvm_arch_vcpu_load()
1131 kvm_mips_callbacks->vcpu_load(vcpu, cpu); in kvm_arch_vcpu_load()
1145 vcpu->arch.last_sched_cpu = cpu; in kvm_arch_vcpu_put()
1146 vcpu->cpu = -1; in kvm_arch_vcpu_put()
1149 kvm_mips_callbacks->vcpu_put(vcpu, cpu); in kvm_arch_vcpu_put()
1155 * kvm_trap_emul_gva_fault() - Safely attempt to handle a GVA access fault.
1174 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_trap_emul_gva_fault()
1188 tlb = &vcpu->arch.guest_tlb[index]; in kvm_trap_emul_gva_fault()
1211 return -EINVAL; in kvm_get_inst()
1228 return -EFAULT; in kvm_get_inst()