// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 - Google LLC
 * Author: David Brazdil <dbrazdil@google.com>
 */

#include <asm/kvm_asm.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
#include <uapi/linux/psci.h>

#include <nvhe/memory.h>
#include <nvhe/pkvm.h>
#include <nvhe/trap_handler.h>

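/* Low-level CPU entry and resume points, implemented in hyp assembly. */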
void kvm_hyp_cpu_entry(unsigned long r0);
void kvm_hyp_cpu_resume(unsigned long r0);

void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);

/* Config options set by the host. */
struct kvm_host_psci_config __ro_after_init kvm_host_psci_config;

#define INVALID_CPU_ID	UINT_MAX

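/*
 * Arguments stashed for a CPU coming online or resuming: the host entry
 * point (pc) and its context argument (r0). The atomic lock stops
 * concurrent CPU_ON calls from clobbering a boot in progress; the
 * suspend path never takes it.
 */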
struct psci_boot_args {
	atomic_t lock;
	unsigned long pc;
	unsigned long r0;
};

#define PSCI_BOOT_ARGS_UNLOCKED		0
#define PSCI_BOOT_ARGS_LOCKED		1

#define PSCI_BOOT_ARGS_INIT					\
	((struct psci_boot_args){				\
		.lock = ATOMIC_INIT(PSCI_BOOT_ARGS_UNLOCKED),	\
	})

static DEFINE_PER_CPU(struct psci_boot_args, cpu_on_args) = PSCI_BOOT_ARGS_INIT;
static DEFINE_PER_CPU(struct psci_boot_args, suspend_args) = PSCI_BOOT_ARGS_INIT;

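/* Match func_id against the host-reported PSCI 0.1 ID for @what, if any. */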
#define	is_psci_0_1(what, func_id)					\
	(kvm_host_psci_config.psci_0_1_ ## what ## _implemented &&	\
	 (func_id) == kvm_host_psci_config.function_ids_0_1.what)

static bool is_psci_0_1_call(u64 func_id)
{
	return (is_psci_0_1(cpu_suspend, func_id) ||
		is_psci_0_1(cpu_on, func_id) ||
		is_psci_0_1(cpu_off, func_id) ||
		is_psci_0_1(migrate, func_id));
}

static bool is_psci_0_2_call(u64 func_id)
{
	/* SMCCC reserves IDs 0x00-1F with the given 32/64-bit base for PSCI. */
	return (PSCI_0_2_FN(0) <= func_id && func_id <= PSCI_0_2_FN(31)) ||
	       (PSCI_0_2_FN64(0) <= func_id && func_id <= PSCI_0_2_FN64(31));
}

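/* Issue the PSCI call to EL3 firmware over SMC and return its x0 result. */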
static unsigned long psci_call(unsigned long fn, unsigned long arg0,
			       unsigned long arg1, unsigned long arg2)
{
	struct arm_smccc_res res;

	arm_smccc_1_1_smc(fn, arg0, arg1, arg2, &res);
	return res.a0;
}

static unsigned long psci_forward(struct kvm_cpu_context *host_ctxt)
{
	return psci_call(cpu_reg(host_ctxt, 0), cpu_reg(host_ctxt, 1),
			 cpu_reg(host_ctxt, 2), cpu_reg(host_ctxt, 3));
}

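/* Translate an MPIDR to the logical CPU ID used for per-CPU hyp state. */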
static unsigned int find_cpu_id(u64 mpidr)
{
	unsigned int i;

	/* Reject invalid MPIDRs */
	if (mpidr & ~MPIDR_HWID_BITMASK)
		return INVALID_CPU_ID;

	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_logical_map(i) == mpidr)
			return i;
	}

	return INVALID_CPU_ID;
}

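/* One-shot trylock; the acquire pairs with the release in release_boot_args(). */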
static __always_inline bool try_acquire_boot_args(struct psci_boot_args *args)
{
	return atomic_cmpxchg_acquire(&args->lock,
				      PSCI_BOOT_ARGS_UNLOCKED,
				      PSCI_BOOT_ARGS_LOCKED) ==
		PSCI_BOOT_ARGS_UNLOCKED;
}

static __always_inline void release_boot_args(struct psci_boot_args *args)
{
	atomic_set_release(&args->lock, PSCI_BOOT_ARGS_UNLOCKED);
}

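/*
 * Handle CPU_ON from the host: stash the host's requested entry point and
 * boot the target CPU through hyp init before it returns to the host.
 */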
static int psci_cpu_on(u64 func_id, struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(u64, mpidr, host_ctxt, 1);
	DECLARE_REG(unsigned long, pc, host_ctxt, 2);
	DECLARE_REG(unsigned long, r0, host_ctxt, 3);

	unsigned int cpu_id;
	struct psci_boot_args *boot_args;
	struct kvm_nvhe_init_params *init_params;
	int ret;

	/*
	 * Find the logical CPU ID for the given MPIDR. The search set is
	 * the set of CPUs that were online at the point of KVM initialization.
	 * Booting other CPUs is rejected because their cpufeatures were not
	 * checked against the finalized capabilities. This could be relaxed
	 * by doing the feature checks in hyp.
	 */
	cpu_id = find_cpu_id(mpidr);
	if (cpu_id == INVALID_CPU_ID)
		return PSCI_RET_INVALID_PARAMS;

	boot_args = per_cpu_ptr(&cpu_on_args, cpu_id);
	init_params = per_cpu_ptr(&kvm_init_params, cpu_id);

	/* Check if the target CPU is already being booted. */
	if (!try_acquire_boot_args(boot_args))
		return PSCI_RET_ALREADY_ON;

	boot_args->pc = pc;
	boot_args->r0 = r0;
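	/* Order the pc/r0 stores before the SMC that powers on the target. */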
	wmb();

	ret = psci_call(func_id, mpidr,
			__hyp_pa(&kvm_hyp_cpu_entry),
			__hyp_pa(init_params));

	/* If successful, the lock will be released by the target CPU. */
	if (ret != PSCI_RET_SUCCESS)
		release_boot_args(boot_args);

	return ret;
}

static int psci_cpu_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(u64, power_state, host_ctxt, 1);
	DECLARE_REG(unsigned long, pc, host_ctxt, 2);
	DECLARE_REG(unsigned long, r0, host_ctxt, 3);

	struct psci_boot_args *boot_args;
	struct kvm_nvhe_init_params *init_params;

	boot_args = this_cpu_ptr(&suspend_args);
	init_params = this_cpu_ptr(&kvm_init_params);

	/*
	 * No need to acquire a lock before writing to boot_args because a core
	 * can only suspend itself. Racy CPU_ON calls use a separate struct.
	 */
	boot_args->pc = pc;
	boot_args->r0 = r0;

	/*
	 * Will either return if shallow sleep state, or wake up into the entry
	 * point if it is a deep sleep state.
	 */
	return psci_call(func_id, power_state,
			 __hyp_pa(&kvm_hyp_cpu_resume),
			 __hyp_pa(init_params));
}

static int psci_system_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(unsigned long, pc, host_ctxt, 1);
	DECLARE_REG(unsigned long, r0, host_ctxt, 2);

	struct psci_boot_args *boot_args;
	struct kvm_nvhe_init_params *init_params;

	boot_args = this_cpu_ptr(&suspend_args);
	init_params = this_cpu_ptr(&kvm_init_params);

	/*
	 * No need to acquire a lock before writing to boot_args because a core
	 * can only suspend itself. Racy CPU_ON calls use a separate struct.
	 */
	boot_args->pc = pc;
	boot_args->r0 = r0;

	/* Will only return on error. */
	return psci_call(func_id,
			 __hyp_pa(&kvm_hyp_cpu_resume),
			 __hyp_pa(init_params), 0);
}

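/*
 * Runs on a CPU that has just come online or resumed: restore the host's
 * pc/r0 from the saved boot args and re-enter the host there.
 */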
asmlinkage void __noreturn kvm_host_psci_cpu_entry(bool is_cpu_on)
{
	struct psci_boot_args *boot_args;
	struct kvm_cpu_context *host_ctxt;

	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;

	if (is_cpu_on)
		boot_args = this_cpu_ptr(&cpu_on_args);
	else
		boot_args = this_cpu_ptr(&suspend_args);

	cpu_reg(host_ctxt, 0) = boot_args->r0;
	write_sysreg_el2(boot_args->pc, SYS_ELR);

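	/* Only CPU_ON takes the lock (in psci_cpu_on()); suspend never does. */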
	if (is_cpu_on)
		release_boot_args(boot_args);

	__host_enter(host_ctxt);
}

static DEFINE_HYP_SPINLOCK(mem_protect_lock);

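/*
 * Reference count of MEM_PROTECT users, serialized by mem_protect_lock.
 * The firmware call is only issued on 0 <-> non-zero transitions, and only
 * when the host's PSCI version is at least 1.1.
 */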
static u64 psci_mem_protect(s64 offset)
{
	static u64 cnt;
	u64 new = cnt + offset;

	hyp_assert_lock_held(&mem_protect_lock);

	if (!offset || kvm_host_psci_config.version < PSCI_VERSION(1, 1))
		return cnt;

	if (!cnt || !new)
		psci_call(PSCI_1_1_FN_MEM_PROTECT, offset < 0 ? 0 : 1, 0, 0);

	cnt = new;
	return cnt;
}

static bool psci_mem_protect_active(void)
{
	return psci_mem_protect(0);
}

void psci_mem_protect_inc(void)
{
	hyp_spin_lock(&mem_protect_lock);
	psci_mem_protect(1);
	hyp_spin_unlock(&mem_protect_lock);
}

void psci_mem_protect_dec(void)
{
	hyp_spin_lock(&mem_protect_lock);
	psci_mem_protect(-1);
	hyp_spin_unlock(&mem_protect_lock);
}

static unsigned long psci_0_1_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)
{
	if (is_psci_0_1(cpu_off, func_id) || is_psci_0_1(migrate, func_id))
		return psci_forward(host_ctxt);
	if (is_psci_0_1(cpu_on, func_id))
		return psci_cpu_on(func_id, host_ctxt);
	if (is_psci_0_1(cpu_suspend, func_id))
		return psci_cpu_suspend(func_id, host_ctxt);

	return PSCI_RET_NOT_SUPPORTED;
}

static unsigned long psci_0_2_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)
{
	switch (func_id) {
	case PSCI_0_2_FN_PSCI_VERSION:
	case PSCI_0_2_FN_CPU_OFF:
	case PSCI_0_2_FN64_AFFINITY_INFO:
	case PSCI_0_2_FN64_MIGRATE:
	case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
	case PSCI_0_2_FN64_MIGRATE_INFO_UP_CPU:
		return psci_forward(host_ctxt);
	/*
	 * SYSTEM_OFF/RESET should not return according to the spec.
	 * Allow it so as to stay robust to broken firmware.
	 */
	case PSCI_0_2_FN_SYSTEM_OFF:
	case PSCI_0_2_FN_SYSTEM_RESET:
		pkvm_clear_pvmfw_pages();
		/* Avoid racing with a MEM_PROTECT call. */
		hyp_spin_lock(&mem_protect_lock);
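		/* Deliberately left held: a successful OFF/RESET never returns. */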
		return psci_forward(host_ctxt);
	case PSCI_0_2_FN64_CPU_SUSPEND:
		return psci_cpu_suspend(func_id, host_ctxt);
	case PSCI_0_2_FN64_CPU_ON:
		return psci_cpu_on(func_id, host_ctxt);
	default:
		return PSCI_RET_NOT_SUPPORTED;
	}
}

static unsigned long psci_1_0_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)
{
	switch (func_id) {
	case PSCI_1_1_FN64_SYSTEM_RESET2:
		pkvm_clear_pvmfw_pages();
		hyp_spin_lock(&mem_protect_lock);
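		/*
		 * With MEM_PROTECT active, downgrade to SYSTEM_RESET so the
		 * firmware's memory-scrubbing guarantee still applies.
		 */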
		if (psci_mem_protect_active())
			cpu_reg(host_ctxt, 0) = PSCI_0_2_FN_SYSTEM_RESET;
		fallthrough;
	case PSCI_1_0_FN_PSCI_FEATURES:
	case PSCI_1_0_FN_SET_SUSPEND_MODE:
		return psci_forward(host_ctxt);
	case PSCI_1_0_FN64_SYSTEM_SUSPEND:
		return psci_system_suspend(func_id, host_ctxt);
	default:
		return psci_0_2_handler(func_id, host_ctxt);
	}
}

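/*
 * Dispatch a host SMC according to the firmware's PSCI version. Returns
 * false if func_id is not a PSCI call, true otherwise, with the PSCI
 * result placed in the host's r0 and r1-r3 zeroed.
 */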
bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(u64, func_id, host_ctxt, 0);
	unsigned long ret;

	switch (kvm_host_psci_config.version) {
	case PSCI_VERSION(0, 1):
		if (!is_psci_0_1_call(func_id))
			return false;
		ret = psci_0_1_handler(func_id, host_ctxt);
		break;
	case PSCI_VERSION(0, 2):
		if (!is_psci_0_2_call(func_id))
			return false;
		ret = psci_0_2_handler(func_id, host_ctxt);
		break;
	default:
		if (!is_psci_0_2_call(func_id))
			return false;
		ret = psci_1_0_handler(func_id, host_ctxt);
		break;
	}

	cpu_reg(host_ctxt, 0) = ret;
	cpu_reg(host_ctxt, 1) = 0;
	cpu_reg(host_ctxt, 2) = 0;
	cpu_reg(host_ctxt, 3) = 0;
	return true;
}
352