Lines Matching +full:architecturally +full:defined
1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2012,2013 - ARM Ltd
7 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
128 return -EINVAL; in kvm_vcpu_enable_sve()
132 return -EINVAL; in kvm_vcpu_enable_sve()
134 vcpu->arch.sve_max_vl = kvm_sve_max_vl; in kvm_vcpu_enable_sve()
141 vcpu->arch.flags |= KVM_ARM64_GUEST_HAS_SVE; in kvm_vcpu_enable_sve()
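
For context, a minimal sketch of the SVE enable path these fragments belong to, assuming a kernel of roughly this vintage; the exact guard conditions (system_supports_sve(), the WARN_ON(!has_vhe()) check) are taken from nearby releases and may differ here:

static int kvm_vcpu_enable_sve(struct kvm_vcpu *vcpu)
{
        /* SVE must be present on the host before it can be offered to a guest. */
        if (!system_supports_sve())
                return -EINVAL;

        /* Verify that KVM startup enforced this when SVE was detected: */
        if (WARN_ON(!has_vhe()))
                return -EINVAL;

        vcpu->arch.sve_max_vl = kvm_sve_max_vl;

        /*
         * Userspace can still customise the vector lengths by writing
         * KVM_REG_ARM64_SVE_VLS.  Allocation is deferred until
         * kvm_arm_vcpu_finalize(), which freezes the configuration.
         */
        vcpu->arch.flags |= KVM_ARM64_GUEST_HAS_SVE;

        return 0;
}
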
148 * vcpu->arch.sve_state as necessary.
155 vl = vcpu->arch.sve_max_vl; in kvm_vcpu_finalize_sve()
160 * set_sve_vls(). Double-check here just to be sure: in kvm_vcpu_finalize_sve()
164 return -EIO; in kvm_vcpu_finalize_sve()
168 return -ENOMEM; in kvm_vcpu_finalize_sve()
170 vcpu->arch.sve_state = buf; in kvm_vcpu_finalize_sve()
171 vcpu->arch.flags |= KVM_ARM64_VCPU_SVE_FINALIZED; in kvm_vcpu_finalize_sve()
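
A sketch of the finalization path that allocates vcpu->arch.sve_state; the size macro and validity checks are assumptions based on comparable kernel versions:

static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu)
{
        void *buf;
        unsigned int vl;

        vl = vcpu->arch.sve_max_vl;

        /*
         * The configuration has already been validated by
         * kvm_vcpu_enable_sve() and set_sve_vls(). Double-check here
         * just to be sure:
         */
        if (WARN_ON(!sve_vl_valid(vl) || vl > SVE_VL_ARCH_MAX))
                return -EIO;

        /* Size the backing store for the largest permitted vector length. */
        buf = kzalloc(SVE_SIG_REGS_SIZE(sve_vq_from_vl(vl)), GFP_KERNEL_ACCOUNT);
        if (!buf)
                return -ENOMEM;

        vcpu->arch.sve_state = buf;
        vcpu->arch.flags |= KVM_ARM64_VCPU_SVE_FINALIZED;
        return 0;
}
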
180 return -EINVAL; in kvm_arm_vcpu_finalize()
183 return -EPERM; in kvm_arm_vcpu_finalize()
188 return -EINVAL; in kvm_arm_vcpu_finalize()
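
The -EINVAL/-EPERM returns above fit the per-feature finalization dispatcher; a sketch, assuming SVE is the only feature that needs explicit finalization here:

int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature)
{
        switch (feature) {
        case KVM_ARM_VCPU_SVE:
                /* The feature must have been requested at vcpu init time. */
                if (!vcpu_has_sve(vcpu))
                        return -EINVAL;

                /* Finalization is a one-shot operation. */
                if (kvm_arm_vcpu_sve_finalized(vcpu))
                        return -EPERM;

                return kvm_vcpu_finalize_sve(vcpu);
        }

        /* Unknown feature index. */
        return -EINVAL;
}
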
201 kfree(vcpu->arch.sve_state); in kvm_arm_vcpu_destroy()
207 memset(vcpu->arch.sve_state, 0, vcpu_sve_state_size(vcpu)); in kvm_vcpu_reset_sve()
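
The teardown and reset fragments above plausibly correspond to the following sketch (later kernels also unshare the buffer from the hypervisor before freeing it):

void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        /* Free the SVE register storage allocated at finalize time. */
        kfree(vcpu->arch.sve_state);
}

static void kvm_vcpu_reset_sve(struct kvm_vcpu *vcpu)
{
        /* Architecturally, the SVE registers reset to zero. */
        if (vcpu_has_sve(vcpu))
                memset(vcpu->arch.sve_state, 0, vcpu_sve_state_size(vcpu));
}
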
217 if (!test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) || in kvm_vcpu_enable_ptrauth()
218 !test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features) || in kvm_vcpu_enable_ptrauth()
220 return -EINVAL; in kvm_vcpu_enable_ptrauth()
222 vcpu->arch.flags |= KVM_ARM64_GUEST_HAS_PTRAUTH; in kvm_vcpu_enable_ptrauth()
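
A sketch of the pointer-authentication enable check: address and generic authentication must be requested together and the host must support both (system_has_full_ptr_auth() is the helper used in comparable releases):

static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
{
        /*
         * Address and generic pointer authentication must be requested
         * together, and only when the host can provide both.
         */
        if (!test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) ||
            !test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features) ||
            !system_has_full_ptr_auth())
                return -EINVAL;

        vcpu->arch.flags |= KVM_ARM64_GUEST_HAS_PTRAUTH;
        return 0;
}
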
237 kvm_for_each_vcpu(i, tmp, vcpu->kvm) { in vcpu_allowed_register_width()
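
The kvm_for_each_vcpu() loop above enforces that all vcpus of a VM use the same register width; a sketch, with helper names assumed from a nearby kernel version:

static bool vcpu_allowed_register_width(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu *tmp;
        bool is32bit;
        int i;

        is32bit = vcpu_has_feature(vcpu, KVM_ARM_VCPU_EL1_32BIT);

        /* 32-bit EL1 guests need host support for 32-bit EL1. */
        if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1) && is32bit)
                return false;

        /* Check that the vcpus are either all 32bit or all 64bit */
        kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
                if (vcpu_has_feature(tmp, KVM_ARM_VCPU_EL1_32BIT) != is32bit)
                        return false;
        }

        return true;
}
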
246 * kvm_reset_vcpu - sets core registers and sys_regs to reset value
250 * the virtual CPU struct to their architecturally defined reset
258 * on the memory-backed values of system registers, we want to do a full put if
271 mutex_lock(&vcpu->kvm->lock); in kvm_reset_vcpu()
272 reset_state = vcpu->arch.reset_state; in kvm_reset_vcpu()
273 WRITE_ONCE(vcpu->arch.reset_state.reset, false); in kvm_reset_vcpu()
274 mutex_unlock(&vcpu->kvm->lock); in kvm_reset_vcpu()
276 /* Reset PMU outside of the non-preemptible section */ in kvm_reset_vcpu()
280 loaded = (vcpu->cpu != -1); in kvm_reset_vcpu()
285 if (test_bit(KVM_ARM_VCPU_SVE, vcpu->arch.features)) { in kvm_reset_vcpu()
294 if (test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) || in kvm_reset_vcpu()
295 test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features)) { in kvm_reset_vcpu()
297 ret = -EINVAL; in kvm_reset_vcpu()
303 ret = -EINVAL; in kvm_reset_vcpu()
307 switch (vcpu->arch.target) { in kvm_reset_vcpu()
309 if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) { in kvm_reset_vcpu()
320 memset(&vcpu->arch.ctxt.fp_regs, 0, sizeof(vcpu->arch.ctxt.fp_regs)); in kvm_reset_vcpu()
321 vcpu->arch.ctxt.spsr_abt = 0; in kvm_reset_vcpu()
322 vcpu->arch.ctxt.spsr_und = 0; in kvm_reset_vcpu()
323 vcpu->arch.ctxt.spsr_irq = 0; in kvm_reset_vcpu()
324 vcpu->arch.ctxt.spsr_fiq = 0; in kvm_reset_vcpu()
325 vcpu_gp_regs(vcpu)->pstate = pstate; in kvm_reset_vcpu()
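
The documentation fragment above (source lines 246-258) describes the put/reset/load bracketing under disabled preemption; a condensed sketch of that flow, with the per-feature setup, target checks and core-register reset elided, and helper names assumed from a comparable release:

int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
{
        struct vcpu_reset_state reset_state;
        bool loaded;
        int ret;

        /* Snapshot and clear any PSCI-requested reset state. */
        mutex_lock(&vcpu->kvm->lock);
        reset_state = vcpu->arch.reset_state;
        WRITE_ONCE(vcpu->arch.reset_state.reset, false);
        mutex_unlock(&vcpu->kvm->lock);

        /* Reset PMU outside of the non-preemptible section */
        kvm_pmu_vcpu_reset(vcpu);

        /*
         * If the vcpu is currently loaded, put it so that the reset
         * operates purely on the memory-backed register values, and keep
         * preemption disabled to avoid racing with preempt notifiers.
         */
        preempt_disable();
        loaded = (vcpu->cpu != -1);
        if (loaded)
                kvm_arch_vcpu_put(vcpu);

        ret = 0;
        /*
         * ... SVE/ptrauth setup, register-width and target checks,
         * fp_regs/spsr/pstate reset, and reset_state handling elided ...
         */

        if (loaded)
                kvm_arch_vcpu_load(vcpu, smp_processor_id());
        preempt_enable();
        return ret;
}
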
383 * Check with ARMv8.5-GTG that our PAGE_SIZE is supported at in kvm_set_ipa_limit()
384 * Stage-2. If not, things will stop very quickly. in kvm_set_ipa_limit()
402 kvm_err("PAGE_SIZE not supported at Stage-2, giving up\n"); in kvm_set_ipa_limit()
403 return -EINVAL; in kvm_set_ipa_limit()
405 kvm_debug("PAGE_SIZE supported at Stage-2 (default)\n"); in kvm_set_ipa_limit()
408 kvm_debug("PAGE_SIZE supported at Stage-2 (advertised)\n"); in kvm_set_ipa_limit()
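
The Stage-2 granule messages above come from a switch on the ID_AA64MMFR0_EL1 TGRAN*_2 field mandated by ARMv8.5-GTG; a sketch of that check, with the field and constant names assumed from a nearby kernel version:

int kvm_set_ipa_limit(void)
{
        unsigned int parange, tgran_2;
        u64 mmfr0;

        mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
        parange = cpuid_feature_extract_unsigned_field(mmfr0,
                                ID_AA64MMFR0_PARANGE_SHIFT);

        /*
         * Check with ARMv8.5-GTG that our PAGE_SIZE is supported at
         * Stage-2. If not, things will stop very quickly.
         */
        tgran_2 = cpuid_feature_extract_unsigned_field(mmfr0,
                                ID_AA64MMFR0_TGRAN_2_SHIFT);
        switch (tgran_2) {
        case ID_AA64MMFR0_TGRAN_2_SUPPORTED_NONE:
                kvm_err("PAGE_SIZE not supported at Stage-2, giving up\n");
                return -EINVAL;
        case ID_AA64MMFR0_TGRAN_2_SUPPORTED_DEFAULT:
                kvm_debug("PAGE_SIZE supported at Stage-2 (default)\n");
                break;
        case ID_AA64MMFR0_TGRAN_2_SUPPORTED_MIN ... ID_AA64MMFR0_TGRAN_2_SUPPORTED_MAX:
                kvm_debug("PAGE_SIZE supported at Stage-2 (advertised)\n");
                break;
        default:
                kvm_err("Unsupported value for TGRAN_2, giving up\n");
                return -EINVAL;
        }

        /* Derive the IPA limit from the supported physical address range. */
        kvm_ipa_limit = id_aa64mmfr0_parange_to_phys_shift(parange);
        kvm_info("IPA Size Limit: %d bits\n", kvm_ipa_limit);

        return 0;
}
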
435 return -EINVAL; in kvm_arm_setup_stage2()
441 return -EINVAL; in kvm_arm_setup_stage2()
446 current->comm); in kvm_arm_setup_stage2()
447 return -EINVAL; in kvm_arm_setup_stage2()
479 kvm->arch.vtcr = vtcr; in kvm_arm_setup_stage2()
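
The -EINVAL returns and the current->comm warning above belong to the IPA-size validation in the Stage-2 setup path; a condensed sketch, with the VTCR_EL2 field computation elided and macro names assumed from a comparable release:

int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type)
{
        u32 phys_shift;
        u64 vtcr = 0;

        /* Only the IPA-size bits of the machine type are defined. */
        if (type & ~KVM_VM_TYPE_ARM_IPA_SIZE_MASK)
                return -EINVAL;

        phys_shift = KVM_VM_TYPE_ARM_IPA_SIZE(type);
        if (phys_shift) {
                /* An explicitly requested IPA size must be within limits. */
                if (phys_shift > kvm_ipa_limit || phys_shift < 32)
                        return -EINVAL;
        } else {
                /* Legacy VMMs get the historical default, if still supported. */
                phys_shift = KVM_PHYS_SHIFT;
                if (phys_shift > kvm_ipa_limit) {
                        WARN_ONCE(1,
                                  "%s using unsupported default IPA limit, upgrade your VMM\n",
                                  current->comm);
                        return -EINVAL;
                }
        }

        /* ... compute the VTCR_EL2 fields (T0SZ, SL0, PS, VMID width) here ... */

        kvm->arch.vtcr = vtcr;
        return 0;
}
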