Lines Matching refs:memslot
70 static bool memslot_is_logging(struct kvm_memory_slot *memslot) in memslot_is_logging() argument
72 return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY); in memslot_is_logging()
145 struct kvm_memory_slot *memslot) in stage2_flush_memslot() argument
147 phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT; in stage2_flush_memslot()
148 phys_addr_t end = addr + PAGE_SIZE * memslot->npages; in stage2_flush_memslot()
163 struct kvm_memory_slot *memslot; in stage2_flush_vm() local
170 kvm_for_each_memslot(memslot, slots) in stage2_flush_vm()
171 stage2_flush_memslot(kvm, memslot); in stage2_flush_vm()
404 struct kvm_memory_slot *memslot) in stage2_unmap_memslot() argument
406 hva_t hva = memslot->userspace_addr; in stage2_unmap_memslot()
407 phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT; in stage2_unmap_memslot()
408 phys_addr_t size = PAGE_SIZE * memslot->npages; in stage2_unmap_memslot()
437 gpa_t gpa = addr + (vm_start - memslot->userspace_addr); in stage2_unmap_memslot()
454 struct kvm_memory_slot *memslot; in stage2_unmap_vm() local
462 kvm_for_each_memslot(memslot, slots) in stage2_unmap_vm()
463 stage2_unmap_memslot(kvm, memslot); in stage2_unmap_vm()
561 struct kvm_memory_slot *memslot = id_to_memslot(slots, slot); in kvm_mmu_wp_memory_region() local
564 if (WARN_ON_ONCE(!memslot)) in kvm_mmu_wp_memory_region()
567 start = memslot->base_gfn << PAGE_SHIFT; in kvm_mmu_wp_memory_region()
568 end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT; in kvm_mmu_wp_memory_region()
627 static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot, in fault_supports_stage2_huge_mapping() argument
639 size = memslot->npages * PAGE_SIZE; in fault_supports_stage2_huge_mapping()
641 gpa_start = memslot->base_gfn << PAGE_SHIFT; in fault_supports_stage2_huge_mapping()
643 uaddr_start = memslot->userspace_addr; in fault_supports_stage2_huge_mapping()
697 transparent_hugepage_adjust(struct kvm_memory_slot *memslot, in transparent_hugepage_adjust() argument
709 fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE)) { in transparent_hugepage_adjust()
742 struct kvm_memory_slot *memslot, unsigned long hva, in user_mem_abort() argument
756 bool logging_active = memslot_is_logging(memslot); in user_mem_abort()
795 if (fault_supports_stage2_huge_mapping(memslot, hva, PUD_SIZE)) in user_mem_abort()
803 if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE)) in user_mem_abort()
880 vma_pagesize = transparent_hugepage_adjust(memslot, hva, in user_mem_abort()
955 struct kvm_memory_slot *memslot; in kvm_handle_guest_abort() local
994 memslot = gfn_to_memslot(vcpu->kvm, gfn); in kvm_handle_guest_abort()
995 hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable); in kvm_handle_guest_abort()
1051 ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status); in kvm_handle_guest_abort()
1073 struct kvm_memory_slot *memslot; in handle_hva_to_gpa() local
1079 kvm_for_each_memslot(memslot, slots) { in handle_hva_to_gpa()
1083 hva_start = max(start, memslot->userspace_addr); in handle_hva_to_gpa()
1084 hva_end = min(end, memslot->userspace_addr + in handle_hva_to_gpa()
1085 (memslot->npages << PAGE_SHIFT)); in handle_hva_to_gpa()
1089 gpa = hva_to_gfn_memslot(hva_start, memslot) << PAGE_SHIFT; in handle_hva_to_gpa()
1295 struct kvm_memory_slot *memslot, in kvm_arch_prepare_memory_region() argument
1312 if ((memslot->base_gfn + memslot->npages) > (kvm_phys_size(kvm) >> PAGE_SHIFT)) in kvm_arch_prepare_memory_region()
1350 if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) { in kvm_arch_prepare_memory_region()
1371 stage2_flush_memslot(kvm, memslot); in kvm_arch_prepare_memory_region()
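
The matches above share the same address arithmetic: a memslot's guest-physical range is derived from base_gfn and npages (as in stage2_flush_memslot and kvm_mmu_wp_memory_region), and its host-virtual range from userspace_addr (as in stage2_unmap_memslot and handle_hva_to_gpa). The stand-alone C sketch below restates that arithmetic with a simplified demo struct; it is an illustration only, not the kernel's struct kvm_memory_slot or its helpers, and the field values are made up.

/*
 * Illustrative sketch only: a simplified stand-in for struct
 * kvm_memory_slot, restating the address arithmetic visible in the
 * matches above. Field names mirror the kernel's, but the struct and
 * the example values are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

struct demo_memslot {
	uint64_t base_gfn;        /* first guest frame number of the slot */
	uint64_t npages;          /* slot size in pages */
	uint64_t userspace_addr;  /* host virtual address backing the slot */
};

int main(void)
{
	struct demo_memslot ms = {
		.base_gfn       = 0x100,
		.npages         = 256,
		.userspace_addr = 0x7f0000000000UL,
	};

	/* Guest-physical range, as computed in stage2_flush_memslot() */
	uint64_t gpa_start = ms.base_gfn << PAGE_SHIFT;
	uint64_t gpa_end   = gpa_start + PAGE_SIZE * ms.npages;

	/* Host-virtual range, as computed in stage2_unmap_memslot() and
	 * clamped against in handle_hva_to_gpa() */
	uint64_t hva_start = ms.userspace_addr;
	uint64_t hva_end   = hva_start + (ms.npages << PAGE_SHIFT);

	printf("GPA [0x%llx, 0x%llx)  HVA [0x%llx, 0x%llx)\n",
	       (unsigned long long)gpa_start, (unsigned long long)gpa_end,
	       (unsigned long long)hva_start, (unsigned long long)hva_end);
	return 0;
}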