// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/reset.c
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/hw_breakpoint.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

#include <kvm/arm_arch_timer.h>

#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/fpsimd.h>
#include <asm/ptrace.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_nested.h>
#include <asm/virt.h>

/* Maximum phys_shift supported for any VM on this host */
static u32 __ro_after_init kvm_ipa_limit;

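/*
 * SVE vector lengths, in bytes: the largest VL offered to guests
 * (kvm_sve_max_vl) and the host's own maximum VL (kvm_host_sve_max_vl).
 */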
unsigned int __ro_after_init kvm_sve_max_vl;
unsigned int __ro_after_init kvm_host_sve_max_vl;

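/*
 * Set up the SVE vector length limits for guests, based on what the host
 * supports and what can safely be virtualised across all CPUs.
 */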
int __init kvm_arm_init_sve(void)
{
	if (system_supports_sve()) {
		kvm_sve_max_vl = sve_max_virtualisable_vl();
		kvm_host_sve_max_vl = sve_max_vl();

		/*
		 * The get_sve_reg()/set_sve_reg() ioctl interface will need
		 * to be extended with multiple register slice support in
		 * order to support vector lengths greater than
		 * VL_ARCH_MAX:
		 */
		if (WARN_ON(kvm_sve_max_vl > VL_ARCH_MAX))
			kvm_sve_max_vl = VL_ARCH_MAX;

		/*
		 * Don't even try to make use of vector lengths that
		 * aren't available on all CPUs, for now:
		 */
		if (kvm_sve_max_vl < sve_max_vl())
			pr_warn("KVM: SVE vector length for guests limited to %u bytes\n",
				kvm_sve_max_vl);
	}

	return 0;
}

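/*
 * Mark the vcpu as SVE-capable and record the maximum vector length it may
 * use. The backing storage is allocated later, when the vcpu is finalized.
 */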
static int kvm_vcpu_enable_sve(struct kvm_vcpu *vcpu)
{
	if (!system_supports_sve())
		return -EINVAL;

	vcpu->arch.sve_max_vl = kvm_sve_max_vl;

	/*
	 * Userspace can still customize the vector lengths by writing
	 * KVM_REG_ARM64_SVE_VLS.  Allocation is deferred until
	 * kvm_arm_vcpu_finalize(), which freezes the configuration.
	 */
	vcpu_set_flag(vcpu, GUEST_HAS_SVE);

	return 0;
}

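/*
 * Allocate page-aligned backing storage for the vcpu's SVE registers and
 * share it with the hypervisor. Protected VMs are skipped here: their SVE
 * state is allocated within the hypervisor itself.
 */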
static int alloc_sve_state(struct kvm_vcpu *vcpu)
{
	size_t reg_sz = PAGE_ALIGN(vcpu_sve_state_size(vcpu));
	void *buf;
	int ret;

	if (kvm_vm_is_protected(vcpu->kvm))
		return 0;

	buf = alloc_pages_exact(reg_sz, GFP_KERNEL_ACCOUNT);
	if (!buf)
		return -ENOMEM;

	ret = kvm_share_hyp(buf, buf + reg_sz);
	if (ret) {
		free_pages_exact(buf, reg_sz);
		return ret;
	}

	vcpu->arch.sve_state = buf;

	return 0;
}

/*
 * Finalize vcpu's maximum SVE vector length, allocating
 * vcpu->arch.sve_state as necessary.
 */
static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu)
{
	unsigned int vl;
	int ret;

	vl = vcpu->arch.sve_max_vl;

	/*
	 * Responsibility for these properties is shared between
	 * kvm_arm_init_sve(), kvm_vcpu_enable_sve() and
	 * set_sve_vls().  Double-check here just to be sure:
	 */
	if (WARN_ON(!sve_vl_valid(vl) || vl > sve_max_virtualisable_vl() ||
		    vl > VL_ARCH_MAX))
		return -EIO;

	ret = alloc_sve_state(vcpu);
	if (ret)
		return ret;

	vcpu_set_flag(vcpu, VCPU_SVE_FINALIZED);
	return 0;
}

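/*
 * Handle the KVM_ARM_VCPU_FINALIZE ioctl: SVE is currently the only feature
 * that requires finalization, and it may only be finalized once.
 */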
int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature)
{
	switch (feature) {
	case KVM_ARM_VCPU_SVE:
		if (!vcpu_has_sve(vcpu))
			return -EINVAL;

		if (kvm_arm_vcpu_sve_finalized(vcpu))
			return -EPERM;

		return kvm_vcpu_finalize_sve(vcpu);
	}

	return -EINVAL;
}

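/*
 * A vcpu is considered finalized unless it has SVE enabled but has not yet
 * been finalized via KVM_ARM_VCPU_FINALIZE.
 */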
bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu)
{
	if (vcpu_has_sve(vcpu) && !kvm_arm_vcpu_sve_finalized(vcpu))
		return false;

	return true;
}

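/*
 * Tear down the vcpu's hypervisor sharing and free the SVE state allocated
 * at finalization time, along with any cached CCSIDR values.
 */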
void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	void *sve_state = vcpu->arch.sve_state;

	kvm_unshare_hyp(vcpu, vcpu + 1);

	if (sve_state) {
		size_t reg_sz = PAGE_ALIGN(vcpu_sve_state_size(vcpu));

		/* sve_state is allocated within the hypervisor when protected */
		BUG_ON(kvm_vm_is_protected(vcpu->kvm));

		kvm_unshare_hyp(sve_state, sve_state + reg_sz);
		free_pages_exact(sve_state, reg_sz);
	}

	kfree(vcpu->arch.ccsidr);
}

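/*
 * Zero the SVE register state. For protected VMs the hypervisor owns that
 * state, so nothing is done here.
 */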
static void kvm_vcpu_reset_sve(struct kvm_vcpu *vcpu)
{
	if (!kvm_vm_is_protected(vcpu->kvm) && vcpu_has_sve(vcpu))
		memset(vcpu->arch.sve_state, 0, vcpu_sve_state_size(vcpu));
}

/**
 * kvm_reset_vcpu - sets core registers and sys_regs to reset value
 * @vcpu: The VCPU pointer
 *
 * This function sets the registers on the virtual CPU struct to their
 * architecturally defined reset values, except for registers whose reset is
 * deferred until kvm_arm_vcpu_finalize().
 *
 * Note: This function can be called from two paths: The KVM_ARM_VCPU_INIT
 * ioctl or as part of handling a request issued by another VCPU in the PSCI
 * handling code.  In the first case, the VCPU will not be loaded, and in the
 * second case the VCPU will be loaded.  Because this function operates purely
 * on the memory-backed values of system registers, we want to do a full put if
 * we were loaded (handling a request) and load the values back at the end of
 * the function.  Otherwise we leave the state alone.  In both cases, we
 * disable preemption around the vcpu reset as we would otherwise race with
 * preempt notifiers which also call put/load.
 */
int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
{
	struct vcpu_reset_state reset_state;
	int ret;
	bool loaded;

	spin_lock(&vcpu->arch.mp_state_lock);
	reset_state = vcpu->arch.reset_state;
	vcpu->arch.reset_state.reset = false;
	spin_unlock(&vcpu->arch.mp_state_lock);

	/* Reset PMU outside of the non-preemptible section */
	kvm_pmu_vcpu_reset(vcpu);

	preempt_disable();
	loaded = (vcpu->cpu != -1);
	if (loaded)
		kvm_arch_vcpu_put(vcpu);

	/* Disallow NV+SVE for the time being */
	if (vcpu_has_nv(vcpu) && vcpu_has_feature(vcpu, KVM_ARM_VCPU_SVE)) {
		ret = -EINVAL;
		goto out;
	}

	if (!kvm_arm_vcpu_sve_finalized(vcpu)) {
		if (test_bit(KVM_ARM_VCPU_SVE, vcpu->arch.features)) {
			ret = kvm_vcpu_enable_sve(vcpu);
			if (ret)
				goto out;
		}
	} else {
		kvm_vcpu_reset_sve(vcpu);
	}

	if (test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) ||
	    test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features)) {
		if (kvm_vcpu_enable_ptrauth(vcpu)) {
			ret = -EINVAL;
			goto out;
		}
	}

	if (kvm_vcpu_has_pmu(vcpu) && !kvm_arm_support_pmu_v3()) {
		ret = -EINVAL;
		goto out;
	}

	/* Reset core registers */
	kvm_reset_vcpu_core(vcpu);

	/* Reset system registers */
	kvm_reset_sys_regs(vcpu);

	/*
	 * Additional reset state handling that PSCI may have imposed on us.
	 * Must be done after all the sys_reg reset.
	 */
	if (reset_state.reset)
		kvm_reset_vcpu_psci(vcpu, &reset_state);

	/* Reset timer */
	ret = kvm_timer_vcpu_reset(vcpu);
out:
	if (loaded)
		kvm_arch_vcpu_load(vcpu, smp_processor_id());
	preempt_enable();
	return ret;
}

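/* Report the maximum IPA size, in bits, that this host can offer to a VM. */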
u32 get_kvm_ipa_limit(void)
{
	return kvm_ipa_limit;
}

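/*
 * Derive the host's IPA size limit from ID_AA64MMFR0_EL1 and verify that the
 * current page size is usable at Stage-2.
 */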
int __init kvm_set_ipa_limit(void)
{
	unsigned int parange;
	u64 mmfr0;

	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	parange = cpuid_feature_extract_unsigned_field(mmfr0,
				ID_AA64MMFR0_EL1_PARANGE_SHIFT);
	/*
	 * An IPA size beyond 48 bits cannot be supported with either 4K or
	 * 16K pages, so cap it at 48 bits in case the system reports a
	 * larger value.
	 */
	if (PAGE_SIZE != SZ_64K)
		parange = min(parange, (unsigned int)ID_AA64MMFR0_EL1_PARANGE_48);

	/*
	 * Check with ARMv8.5-GTG that our PAGE_SIZE is supported at
	 * Stage-2. If not, things will stop very quickly.
	 */
	switch (cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_EL1_TGRAN_2_SHIFT)) {
	case ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_NONE:
		kvm_err("PAGE_SIZE not supported at Stage-2, giving up\n");
		return -EINVAL;
	case ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_DEFAULT:
		kvm_debug("PAGE_SIZE supported at Stage-2 (default)\n");
		break;
	case ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MIN ... ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MAX:
		kvm_debug("PAGE_SIZE supported at Stage-2 (advertised)\n");
		break;
	default:
		kvm_err("Unsupported value for TGRAN_2, giving up\n");
		return -EINVAL;
	}

	kvm_ipa_limit = id_aa64mmfr0_parange_to_phys_shift(parange);
	kvm_info("IPA Size Limit: %d bits%s\n", kvm_ipa_limit,
		 ((kvm_ipa_limit < KVM_PHYS_SHIFT) ?
		  " (Reduced IPA size, limited VM/VMM compatibility)" : ""));

	return 0;
}