1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2012 - ARM Ltd
4 * Author: Marc Zyngier <marc.zyngier@arm.com>
5 */
6
7 #include <linux/arm-smccc.h>
8 #include <linux/preempt.h>
9 #include <linux/kvm_host.h>
10 #include <linux/uaccess.h>
11 #include <linux/wait.h>
12
13 #include <asm/cputype.h>
14 #include <asm/kvm_emulate.h>
15
16 #include <kvm/arm_psci.h>
17 #include <kvm/arm_hypercalls.h>
18
19 /*
20 * This is an implementation of the Power State Coordination Interface
21 * as described in ARM document number ARM DEN 0022A.
22 */
23
kvm_psci_vcpu_suspend(struct kvm_vcpu * vcpu)24 static unsigned long kvm_psci_vcpu_suspend(struct kvm_vcpu *vcpu)
25 {
26 /*
27 * NOTE: For simplicity, we make VCPU suspend emulation to be
28 * same-as WFI (Wait-for-interrupt) emulation.
29 *
30 * This means for KVM the wakeup events are interrupts and
31 * this is consistent with intended use of StateID as described
32 * in section 5.4.1 of PSCI v0.2 specification (ARM DEN 0022A).
33 *
34 * Further, we also treat power-down request to be same as
35 * stand-by request as-per section 5.4.2 clause 3 of PSCI v0.2
36 * specification (ARM DEN 0022A). This means all suspend states
37 * for KVM will preserve the register state.
38 */
39 kvm_vcpu_wfi(vcpu);
40
41 return PSCI_RET_SUCCESS;
42 }
43
kvm_psci_vcpu_on(struct kvm_vcpu * source_vcpu)44 static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
45 {
46 struct vcpu_reset_state *reset_state;
47 struct kvm *kvm = source_vcpu->kvm;
48 struct kvm_vcpu *vcpu = NULL;
49 int ret = PSCI_RET_SUCCESS;
50 unsigned long cpu_id;
51
52 cpu_id = smccc_get_arg1(source_vcpu);
53 if (!kvm_psci_valid_affinity(source_vcpu, cpu_id))
54 return PSCI_RET_INVALID_PARAMS;
55
56 vcpu = kvm_mpidr_to_vcpu(kvm, cpu_id);
57
58 /*
59 * Make sure the caller requested a valid CPU and that the CPU is
60 * turned off.
61 */
62 if (!vcpu)
63 return PSCI_RET_INVALID_PARAMS;
64
65 spin_lock(&vcpu->arch.mp_state_lock);
66 if (!kvm_arm_vcpu_stopped(vcpu)) {
67 if (kvm_psci_version(source_vcpu) != KVM_ARM_PSCI_0_1)
68 ret = PSCI_RET_ALREADY_ON;
69 else
70 ret = PSCI_RET_INVALID_PARAMS;
71
72 goto out_unlock;
73 }
74
75 reset_state = &vcpu->arch.reset_state;
76
77 reset_state->pc = smccc_get_arg2(source_vcpu);
78
79 /* Propagate caller endianness */
80 reset_state->be = kvm_vcpu_is_be(source_vcpu);
81
82 /*
83 * NOTE: We always update r0 (or x0) because for PSCI v0.1
84 * the general purpose registers are undefined upon CPU_ON.
85 */
86 reset_state->r0 = smccc_get_arg3(source_vcpu);
87
88 reset_state->reset = true;
89 kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);
90
91 /*
92 * Make sure the reset request is observed if the RUNNABLE mp_state is
93 * observed.
94 */
95 smp_wmb();
96
97 WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_RUNNABLE);
98 kvm_vcpu_wake_up(vcpu);
99
100 out_unlock:
101 spin_unlock(&vcpu->arch.mp_state_lock);
102 return ret;
103 }
104
kvm_psci_vcpu_affinity_info(struct kvm_vcpu * vcpu)105 static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
106 {
107 int matching_cpus = 0;
108 unsigned long i, mpidr;
109 unsigned long target_affinity;
110 unsigned long target_affinity_mask;
111 unsigned long lowest_affinity_level;
112 struct kvm *kvm = vcpu->kvm;
113 struct kvm_vcpu *tmp;
114
115 target_affinity = smccc_get_arg1(vcpu);
116 lowest_affinity_level = smccc_get_arg2(vcpu);
117
118 if (!kvm_psci_valid_affinity(vcpu, target_affinity))
119 return PSCI_RET_INVALID_PARAMS;
120
121 /* Determine target affinity mask */
122 target_affinity_mask = psci_affinity_mask(lowest_affinity_level);
123 if (!target_affinity_mask)
124 return PSCI_RET_INVALID_PARAMS;
125
126 /* Ignore other bits of target affinity */
127 target_affinity &= target_affinity_mask;
128
129 /*
130 * If one or more VCPU matching target affinity are running
131 * then ON else OFF
132 */
133 kvm_for_each_vcpu(i, tmp, kvm) {
134 mpidr = kvm_vcpu_get_mpidr_aff(tmp);
135 if ((mpidr & target_affinity_mask) == target_affinity) {
136 matching_cpus++;
137 if (!kvm_arm_vcpu_stopped(tmp))
138 return PSCI_0_2_AFFINITY_LEVEL_ON;
139 }
140 }
141
142 if (!matching_cpus)
143 return PSCI_RET_INVALID_PARAMS;
144
145 return PSCI_0_2_AFFINITY_LEVEL_OFF;
146 }
147
static void kvm_prepare_system_event(struct kvm_vcpu *vcpu, u32 type, u64 flags)
{
	struct kvm_vcpu *tmp;
	unsigned long i;

	/*
	 * The KVM ABI specifies that a system event exit may call KVM_RUN
	 * again and may perform shutdown/reboot at a later time than when
	 * the actual request is made. Since we are implementing PSCI and a
	 * caller of PSCI reboot and shutdown expects that the system shuts
	 * down or reboots immediately, make sure that no VCPU runs after
	 * this call is handled and before the VCPUs have been
	 * re-initialized.
	 */
	kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
		spin_lock(&tmp->arch.mp_state_lock);
		WRITE_ONCE(tmp->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED);
		spin_unlock(&tmp->arch.mp_state_lock);
	}
	kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);

	/* Hand the event (and its single data word) to userspace. */
	memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
	vcpu->run->system_event.type = type;
	vcpu->run->system_event.ndata = 1;
	vcpu->run->system_event.data[0] = flags;
	vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
}
175
kvm_psci_system_off(struct kvm_vcpu * vcpu)176 static void kvm_psci_system_off(struct kvm_vcpu *vcpu)
177 {
178 kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_SHUTDOWN, 0);
179 }
180
kvm_psci_system_reset(struct kvm_vcpu * vcpu)181 static void kvm_psci_system_reset(struct kvm_vcpu *vcpu)
182 {
183 kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_RESET, 0);
184 }
185
kvm_psci_system_reset2(struct kvm_vcpu * vcpu)186 static void kvm_psci_system_reset2(struct kvm_vcpu *vcpu)
187 {
188 kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_RESET,
189 KVM_SYSTEM_EVENT_RESET_FLAG_PSCI_RESET2);
190 }
191
kvm_psci_system_suspend(struct kvm_vcpu * vcpu)192 static void kvm_psci_system_suspend(struct kvm_vcpu *vcpu)
193 {
194 struct kvm_run *run = vcpu->run;
195
196 memset(&run->system_event, 0, sizeof(vcpu->run->system_event));
197 run->system_event.type = KVM_SYSTEM_EVENT_SUSPEND;
198 run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
199 }
200
/*
 * Returns 0 if @fn may be invoked by @vcpu, or a PSCI error code if not.
 * 32-bit guests must not call the SMC64 flavour of PSCI functions.
 */
static unsigned long kvm_psci_check_allowed_function(struct kvm_vcpu *vcpu, u32 fn)
{
	if (vcpu_mode_is_32bit(vcpu) && (fn & PSCI_0_2_64BIT))
		return PSCI_RET_NOT_SUPPORTED;

	return 0;
}
211
kvm_psci_0_2_call(struct kvm_vcpu * vcpu)212 static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
213 {
214 u32 psci_fn = smccc_get_function(vcpu);
215 unsigned long val;
216 int ret = 1;
217
218 switch (psci_fn) {
219 case PSCI_0_2_FN_PSCI_VERSION:
220 /*
221 * Bits[31:16] = Major Version = 0
222 * Bits[15:0] = Minor Version = 2
223 */
224 val = KVM_ARM_PSCI_0_2;
225 break;
226 case PSCI_0_2_FN_CPU_SUSPEND:
227 case PSCI_0_2_FN64_CPU_SUSPEND:
228 val = kvm_psci_vcpu_suspend(vcpu);
229 break;
230 case PSCI_0_2_FN_CPU_OFF:
231 kvm_arm_vcpu_power_off(vcpu);
232 val = PSCI_RET_SUCCESS;
233 break;
234 case PSCI_0_2_FN_CPU_ON:
235 kvm_psci_narrow_to_32bit(vcpu);
236 fallthrough;
237 case PSCI_0_2_FN64_CPU_ON:
238 val = kvm_psci_vcpu_on(vcpu);
239 break;
240 case PSCI_0_2_FN_AFFINITY_INFO:
241 kvm_psci_narrow_to_32bit(vcpu);
242 fallthrough;
243 case PSCI_0_2_FN64_AFFINITY_INFO:
244 val = kvm_psci_vcpu_affinity_info(vcpu);
245 break;
246 case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
247 /*
248 * Trusted OS is MP hence does not require migration
249 * or
250 * Trusted OS is not present
251 */
252 val = PSCI_0_2_TOS_MP;
253 break;
254 case PSCI_0_2_FN_SYSTEM_OFF:
255 kvm_psci_system_off(vcpu);
256 /*
257 * We shouldn't be going back to guest VCPU after
258 * receiving SYSTEM_OFF request.
259 *
260 * If user space accidentally/deliberately resumes
261 * guest VCPU after SYSTEM_OFF request then guest
262 * VCPU should see internal failure from PSCI return
263 * value. To achieve this, we preload r0 (or x0) with
264 * PSCI return value INTERNAL_FAILURE.
265 */
266 val = PSCI_RET_INTERNAL_FAILURE;
267 ret = 0;
268 break;
269 case PSCI_0_2_FN_SYSTEM_RESET:
270 kvm_psci_system_reset(vcpu);
271 /*
272 * Same reason as SYSTEM_OFF for preloading r0 (or x0)
273 * with PSCI return value INTERNAL_FAILURE.
274 */
275 val = PSCI_RET_INTERNAL_FAILURE;
276 ret = 0;
277 break;
278 default:
279 val = PSCI_RET_NOT_SUPPORTED;
280 break;
281 }
282
283 smccc_set_retval(vcpu, val, 0, 0, 0);
284 return ret;
285 }
286
/*
 * Handle a PSCI v1.0/v1.1 call (@minor selects the revision). Anything
 * not specific to v1.x falls back to the v0.2 handler. Returns 1 to
 * resume the guest, 0 to exit to userspace.
 */
static int kvm_psci_1_x_call(struct kvm_vcpu *vcpu, u32 minor)
{
	unsigned long retval = PSCI_RET_NOT_SUPPORTED;
	u32 psci_fn = smccc_get_function(vcpu);
	struct kvm *kvm = vcpu->kvm;
	u32 fn_arg;
	int ret = 1;

	switch (psci_fn) {
	case PSCI_0_2_FN_PSCI_VERSION:
		retval = minor == 0 ? KVM_ARM_PSCI_1_0 : KVM_ARM_PSCI_1_1;
		break;
	case PSCI_1_0_FN_PSCI_FEATURES:
		fn_arg = smccc_get_arg1(vcpu);

		/* The probed function must itself be callable by the guest. */
		retval = kvm_psci_check_allowed_function(vcpu, fn_arg);
		if (retval)
			break;

		retval = PSCI_RET_NOT_SUPPORTED;

		switch (fn_arg) {
		/* Unconditionally supported functions. */
		case PSCI_0_2_FN_PSCI_VERSION:
		case PSCI_0_2_FN_CPU_SUSPEND:
		case PSCI_0_2_FN64_CPU_SUSPEND:
		case PSCI_0_2_FN_CPU_OFF:
		case PSCI_0_2_FN_CPU_ON:
		case PSCI_0_2_FN64_CPU_ON:
		case PSCI_0_2_FN_AFFINITY_INFO:
		case PSCI_0_2_FN64_AFFINITY_INFO:
		case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
		case PSCI_0_2_FN_SYSTEM_OFF:
		case PSCI_0_2_FN_SYSTEM_RESET:
		case PSCI_1_0_FN_PSCI_FEATURES:
		case ARM_SMCCC_VERSION_FUNC_ID:
			retval = 0;
			break;
		/* Only if userspace opted in to SYSTEM_SUSPEND exits. */
		case PSCI_1_0_FN_SYSTEM_SUSPEND:
		case PSCI_1_0_FN64_SYSTEM_SUSPEND:
			if (test_bit(KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED, &kvm->arch.flags))
				retval = 0;
			break;
		/* SYSTEM_RESET2 only exists from PSCI v1.1 on. */
		case PSCI_1_1_FN_SYSTEM_RESET2:
		case PSCI_1_1_FN64_SYSTEM_RESET2:
			if (minor >= 1)
				retval = 0;
			break;
		}
		break;
	case PSCI_1_0_FN_SYSTEM_SUSPEND:
		kvm_psci_narrow_to_32bit(vcpu);
		fallthrough;
	case PSCI_1_0_FN64_SYSTEM_SUSPEND:
		/*
		 * Return directly to userspace without changing the vCPU's
		 * registers. Userspace depends on reading the SMCCC
		 * parameters to implement SYSTEM_SUSPEND.
		 */
		if (test_bit(KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED, &kvm->arch.flags)) {
			kvm_psci_system_suspend(vcpu);
			return 0;
		}
		break;
	case PSCI_1_1_FN_SYSTEM_RESET2:
		kvm_psci_narrow_to_32bit(vcpu);
		fallthrough;
	case PSCI_1_1_FN64_SYSTEM_RESET2:
		if (minor >= 1) {
			fn_arg = smccc_get_arg1(vcpu);

			/*
			 * Accept architectural resets up to WARM_RESET and
			 * the entire vendor-defined range; everything in
			 * between is an invalid reset type.
			 */
			if (fn_arg <= PSCI_1_1_RESET_TYPE_SYSTEM_WARM_RESET ||
			    fn_arg >= PSCI_1_1_RESET_TYPE_VENDOR_START) {
				kvm_psci_system_reset2(vcpu);
				/* See SYSTEM_RESET: preload INTERNAL_FAILURE. */
				vcpu_set_reg(vcpu, 0, PSCI_RET_INTERNAL_FAILURE);
				return 0;
			}

			retval = PSCI_RET_INVALID_PARAMS;
			break;
		}
		break;
	default:
		return kvm_psci_0_2_call(vcpu);
	}

	smccc_set_retval(vcpu, retval, 0, 0, 0);
	return ret;
}
374
kvm_psci_0_1_call(struct kvm_vcpu * vcpu)375 static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
376 {
377 u32 psci_fn = smccc_get_function(vcpu);
378 unsigned long val;
379
380 switch (psci_fn) {
381 case KVM_PSCI_FN_CPU_OFF:
382 kvm_arm_vcpu_power_off(vcpu);
383 val = PSCI_RET_SUCCESS;
384 break;
385 case KVM_PSCI_FN_CPU_ON:
386 val = kvm_psci_vcpu_on(vcpu);
387 break;
388 default:
389 val = PSCI_RET_NOT_SUPPORTED;
390 break;
391 }
392
393 smccc_set_retval(vcpu, val, 0, 0, 0);
394 return 1;
395 }
396
397 /**
398 * kvm_psci_call - handle PSCI call if r0 value is in range
399 * @vcpu: Pointer to the VCPU struct
400 *
401 * Handle PSCI calls from guests through traps from HVC instructions.
402 * The calling convention is similar to SMC calls to the secure world
403 * where the function number is placed in r0.
404 *
405 * This function returns: > 0 (success), 0 (success but exit to user
406 * space), and < 0 (errors)
407 *
408 * Errors:
409 * -EINVAL: Unrecognized PSCI function
410 */
kvm_psci_call(struct kvm_vcpu * vcpu)411 int kvm_psci_call(struct kvm_vcpu *vcpu)
412 {
413 u32 psci_fn = smccc_get_function(vcpu);
414 int version = kvm_psci_version(vcpu);
415 unsigned long val;
416
417 val = kvm_psci_check_allowed_function(vcpu, psci_fn);
418 if (val) {
419 smccc_set_retval(vcpu, val, 0, 0, 0);
420 return 1;
421 }
422
423 switch (version) {
424 case KVM_ARM_PSCI_1_1:
425 return kvm_psci_1_x_call(vcpu, 1);
426 case KVM_ARM_PSCI_1_0:
427 return kvm_psci_1_x_call(vcpu, 0);
428 case KVM_ARM_PSCI_0_2:
429 return kvm_psci_0_2_call(vcpu);
430 case KVM_ARM_PSCI_0_1:
431 return kvm_psci_0_1_call(vcpu);
432 default:
433 WARN_ONCE(1, "Unknown PSCI version %d", version);
434 smccc_set_retval(vcpu, SMCCC_RET_NOT_SUPPORTED, 0, 0, 0);
435 return 1;
436 }
437 }
438