Searched refs:pkvm (Results 1 – 10 of 10) sorted by relevance
158 kvm->arch.pkvm.shadow_handle = shadow_handle; in __create_el2_shadow()
200 mutex_lock(&kvm->arch.pkvm.shadow_lock); in create_el2_shadow()
201 if (!kvm->arch.pkvm.shadow_handle) in create_el2_shadow()
203 mutex_unlock(&kvm->arch.pkvm.shadow_lock); in create_el2_shadow()
214 if (kvm->arch.pkvm.shadow_handle) in kvm_shadow_destroy()
216 kvm->arch.pkvm.shadow_handle)); in kvm_shadow_destroy()
218 free_hyp_memcache(&kvm->arch.pkvm.teardown_mc); in kvm_shadow_destroy()
220 ppages = &kvm->arch.pkvm.pinned_pages; in kvm_shadow_destroy()
298 mutex_lock(&kvm->arch.pkvm.shadow_lock); in pkvm_vm_ioctl_set_fw_ipa()
299 if (kvm->arch.pkvm.shadow_handle) { in pkvm_vm_ioctl_set_fw_ipa()
[all …]
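
The hits in this result cluster around one pattern: paths that create, tear down, or reconfigure the EL2 shadow take shadow_lock and test shadow_handle first (pkvm_vm_ioctl_set_fw_ipa() shows the same lock-then-check shape). A minimal sketch of the create-once pattern follows; the helper name and error handling are invented for illustration, and only the field accesses come from the hits above.

	/* Illustrative sketch, not the in-tree code: serialise on shadow_lock and
	 * treat a non-zero shadow_handle as "the EL2 shadow already exists". */
	static int create_el2_shadow_sketch(struct kvm *kvm)
	{
		int ret = 0;

		mutex_lock(&kvm->arch.pkvm.shadow_lock);
		if (!kvm->arch.pkvm.shadow_handle)
			ret = __create_el2_shadow(kvm); /* sets kvm->arch.pkvm.shadow_handle on success */
		mutex_unlock(&kvm->arch.pkvm.shadow_lock);

		return ret;
	}
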
18 vgic-sys-reg-v3.o fpsimd.o pmu.o pkvm.o \
203 list_for_each_entry(ppage, &kvm->arch.pkvm.pinned_pages, link) { in pkvm_stage2_flush()
693 INIT_LIST_HEAD(&kvm->arch.pkvm.pinned_pages); in kvm_init_stage2_mmu()
1250 list_add(&ppage->link, &kvm->arch.pkvm.pinned_pages); in pkvm_mem_abort()
1829 kvm->arch.pkvm.shadow_handle) { in kvm_arch_prepare_memory_region()
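
These hits sketch the lifecycle of the pinned_pages list: initialised when the stage-2 MMU is set up, appended to when a memory abort pins a page, and walked when stage-2 is flushed. Grouped together for orientation (only the list operations come from the hits; the surrounding function bodies are omitted):

	INIT_LIST_HEAD(&kvm->arch.pkvm.pinned_pages);          /* kvm_init_stage2_mmu() */
	list_add(&ppage->link, &kvm->arch.pkvm.pinned_pages);  /* pkvm_mem_abort() */
	list_for_each_entry(ppage, &kvm->arch.pkvm.pinned_pages, link) {
		/* ... per-page flush work in pkvm_stage2_flush() ... */
	}
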
539 vcpu->kvm->arch.pkvm.shadow_handle, in kvm_arch_vcpu_load()
66 return vcpu->arch.pkvm.shadow_vm->arch.pkvm.enabled; in vcpu_is_protected()
95 return vm->arch.pkvm.pvmfw_load_addr != PVMFW_INVALID_LOAD_ADDR; in pvm_has_pvmfw()
100 struct kvm_protected_vm *pkvm = &vm->arch.pkvm; in ipa_in_pvmfw_region() local
105 return ipa - pkvm->pvmfw_load_addr < pvmfw_size; in ipa_in_pvmfw_region()
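
The hits in this result are small predicate helpers. Reassembled from the snippets they appear to reduce to the following; the signatures and return types are guesses, only the bodies come from the hits, and pvmfw_size is assumed to be defined elsewhere in the hyp code.

	/* Reconstruction from the snippets above, not the literal header contents. */
	static inline bool vcpu_is_protected(struct kvm_vcpu *vcpu)
	{
		return vcpu->arch.pkvm.shadow_vm->arch.pkvm.enabled;
	}

	static inline bool pvm_has_pvmfw(struct kvm_shadow_vm *vm)
	{
		return vm->arch.pkvm.pvmfw_load_addr != PVMFW_INVALID_LOAD_ADDR;
	}

	static inline bool ipa_in_pvmfw_region(struct kvm_shadow_vm *vm, u64 ipa)
	{
		struct kvm_protected_vm *pkvm = &vm->arch.pkvm;

		return ipa - pkvm->pvmfw_load_addr < pvmfw_size;
	}
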
284 if (unlikely(vcpu->arch.pkvm.loaded_on_cpu)) { in get_shadow_vcpu()300 vcpu->arch.pkvm.loaded_on_cpu = true; in get_shadow_vcpu()315 struct kvm_shadow_vm *vm = vcpu->arch.pkvm.shadow_vm; in put_shadow_vcpu()318 vcpu->arch.pkvm.loaded_on_cpu = false; in put_shadow_vcpu()371 struct kvm_vcpu *host_vcpu = shadow_vcpu->vcpu.arch.pkvm.host_vcpu; in unpin_host_vcpu()407 vm->arch.pkvm.pvmfw_load_addr = kvm->arch.pkvm.pvmfw_load_addr; in init_shadow_vm()408 vm->arch.pkvm.enabled = READ_ONCE(kvm->arch.pkvm.enabled); in init_shadow_vm()427 shadow_vcpu->arch.pkvm.host_vcpu = host_vcpu; in init_shadow_vcpu()462 if (vm->arch.pkvm.enabled) in init_shadow_vcpu()470 shadow_vcpu->arch.pkvm.shadow_vm = vm; in init_shadow_vcpu()[all …]
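
The get/put pair in this result guards a shadow vCPU against being loaded on two CPUs at once via the loaded_on_cpu flag, while init_shadow_vm()/init_shadow_vcpu() copy the host's pkvm settings into the shadow and link the two vCPU views. A hypothetical reduction of the load/put pairing (the _sketch names and types are invented; the flag accesses come from the hits):

	/* Illustrative only: the real functions take hyp-side types and do more work. */
	static struct kvm_vcpu *get_shadow_vcpu_sketch(struct kvm_vcpu *vcpu)
	{
		if (unlikely(vcpu->arch.pkvm.loaded_on_cpu))
			return NULL;			/* already loaded somewhere else */

		vcpu->arch.pkvm.loaded_on_cpu = true;
		return vcpu;
	}

	static void put_shadow_vcpu_sketch(struct kvm_vcpu *vcpu)
	{
		vcpu->arch.pkvm.loaded_on_cpu = false;
	}
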
64 nr_pages = VTCR_EL2_LVLS(shadow_vcpu->arch.pkvm.shadow_vm->arch.vtcr) - 1; in pkvm_refill_memcache()
85 struct kvm_shadow_vm *vm = shadow_vcpu->arch.pkvm.shadow_vm; in handle_pvm_entry_psci()
89 if (vcpu && READ_ONCE(vcpu->arch.pkvm.power_state) == PSCI_0_2_AFFINITY_LEVEL_ON_PENDING) in handle_pvm_entry_psci()
90 WRITE_ONCE(vcpu->arch.pkvm.power_state, PSCI_0_2_AFFINITY_LEVEL_OFF); in handle_pvm_entry_psci()
510 struct kvm_vcpu *host_vcpu = shadow_vcpu->arch.pkvm.host_vcpu; in __sync_vcpu_state()
517 struct kvm_vcpu *host_vcpu = shadow_vcpu->arch.pkvm.host_vcpu; in __flush_vcpu_state()
525 struct kvm_vcpu *host_vcpu = shadow_vcpu->arch.pkvm.host_vcpu; in flush_shadow_state()
529 if (READ_ONCE(shadow_vcpu->arch.pkvm.power_state) == PSCI_0_2_AFFINITY_LEVEL_ON_PENDING) in flush_shadow_state()
548 switch (ARM_EXCEPTION_CODE(shadow_vcpu->arch.pkvm.exit_code)) { in flush_shadow_state()
568 shadow_vcpu->arch.pkvm.exit_code = 0; in flush_shadow_state()
[all …]
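
Two things stand out in this result: power_state is always accessed through READ_ONCE()/WRITE_ONCE(), and handle_pvm_entry_psci() moves a vCPU parked in AFFINITY_LEVEL_ON_PENDING to OFF, the same pending state that flush_shadow_state() checks before running a shadow vCPU. A compressed sketch of that transition (function name hypothetical, accesses from the hits):

	/* Sketch: the hits suggest power_state is published with WRITE_ONCE() and
	 * sampled with READ_ONCE(), presumably because host and hyp code read it
	 * concurrently without a shared lock. */
	static void pvm_psci_complete_cpu_on_sketch(struct kvm_vcpu *vcpu)
	{
		if (vcpu &&
		    READ_ONCE(vcpu->arch.pkvm.power_state) == PSCI_0_2_AFFINITY_LEVEL_ON_PENDING)
			WRITE_ONCE(vcpu->arch.pkvm.power_state, PSCI_0_2_AFFINITY_LEVEL_OFF);
	}
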
75 __guest_lock(vcpu->arch.pkvm.shadow_vm); in guest_lock_component()
80 __guest_unlock(vcpu->arch.pkvm.shadow_vm); in guest_unlock_component()
1080 struct kvm_shadow_vm *vm = vcpu->arch.pkvm.shadow_vm; in __guest_check_page_state_range()
1114 struct kvm_shadow_vm *vm = vcpu->arch.pkvm.shadow_vm; in guest_complete_share()
1127 struct kvm_shadow_vm *vm = vcpu->arch.pkvm.shadow_vm; in guest_complete_donation()
1168 struct kvm_protected_vcpu *pkvm = &vcpu->arch.pkvm; in __guest_request_page_transition() local
1169 struct kvm_shadow_vm *vm = pkvm->shadow_vm; in __guest_request_page_transition()
1225 struct kvm_protected_vcpu *pkvm = &vcpu->arch.pkvm; in __guest_initiate_page_transition() local
1226 struct kvm_shadow_vm *vm = pkvm->shadow_vm; in __guest_initiate_page_transition()
1982 struct kvm_shadow_vm *vm = vcpu->arch.pkvm.shadow_vm; in __check_ioguard_page()
[all …]
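
The page-transition helpers in this result all resolve the shadow VM from the faulting vCPU, and guest_lock_component()/guest_unlock_component() bracket that work by locking the shadow VM itself. A minimal rendering of the bracketing wrappers (bodies taken from the hits; the signatures and __guest_lock()/__guest_unlock() internals are assumed):

	static void guest_lock_component(struct kvm_vcpu *vcpu)
	{
		__guest_lock(vcpu->arch.pkvm.shadow_vm);
	}

	static void guest_unlock_component(struct kvm_vcpu *vcpu)
	{
		__guest_unlock(vcpu->arch.pkvm.shadow_vm);
	}
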
22 cache.o ffa.o setup.o mm.o mem_protect.o sys_regs.o pkvm.o iommu.o
213 struct kvm_protected_vm pkvm; member
508 struct kvm_protected_vcpu pkvm; member
925 #define kvm_vm_is_protected(kvm) ((kvm)->arch.pkvm.enabled)
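
Taken together with the earlier results, these kvm_host.h hits imply one pkvm member per VM and one per vCPU. The sketch below pulls the fields seen across all ten results into one place; it is a reconstruction for orientation only, with types, ordering, and anything not matched by the search guessed.

	/* Reconstruction from the search hits, not the in-tree definitions. */
	struct kvm_protected_vm {
		unsigned int shadow_handle;		/* set by __create_el2_shadow() */
		struct mutex shadow_lock;		/* serialises shadow create/destroy vs. ioctls */
		struct list_head pinned_pages;		/* pages pinned on behalf of the guest */
		struct kvm_hyp_memcache teardown_mc;	/* freed in kvm_shadow_destroy() */
		u64 pvmfw_load_addr;			/* or PVMFW_INVALID_LOAD_ADDR */
		bool enabled;				/* read by kvm_vm_is_protected() */
	};

	struct kvm_protected_vcpu {
		struct kvm_shadow_vm *shadow_vm;	/* hyp-side VM backing this vCPU */
		struct kvm_vcpu *host_vcpu;		/* back-pointer from the shadow vCPU */
		bool loaded_on_cpu;			/* guarded by get/put_shadow_vcpu() */
		int power_state;			/* PSCI_0_2_AFFINITY_LEVEL_* */
		int exit_code;				/* consumed by flush_shadow_state() */
	};
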