// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 - Google LLC
 * Author: David Brazdil <dbrazdil@google.com>
 */

#include <asm/kvm_asm.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_hypevents.h>
#include <asm/kvm_mmu.h>
#include <linux/kvm_host.h>
#include <uapi/linux/psci.h>

#include <nvhe/arm-smccc.h>
#include <nvhe/mem_protect.h>
#include <nvhe/memory.h>
#include <nvhe/pkvm.h>
#include <nvhe/trap_handler.h>

void kvm_hyp_cpu_entry(unsigned long r0);
void kvm_hyp_cpu_resume(unsigned long r0);

void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);

/* Config options set by the host. */
struct kvm_host_psci_config __ro_after_init kvm_host_psci_config;

static void (*pkvm_psci_notifier)(enum pkvm_psci_notification, struct user_pt_regs *);
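/*
 * Invoke the module-registered PSCI notifier, if any. The load-acquire
 * pairs with the release in __pkvm_register_psci_notifier() below.
 */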
static void pkvm_psci_notify(enum pkvm_psci_notification notif, struct kvm_cpu_context *host_ctxt)
{
	if (smp_load_acquire(&pkvm_psci_notifier))
		pkvm_psci_notifier(notif, &host_ctxt->regs);
}

int __pkvm_register_psci_notifier(void (*cb)(enum pkvm_psci_notification, struct user_pt_regs *))
{
	/*
	 * Paired with smp_load_acquire(&pkvm_psci_notifier) in
	 * pkvm_psci_notify(). Ensures memory stores happening during a pKVM
	 * module init are observed before executing the callback.
	 */
	return cmpxchg_release(&pkvm_psci_notifier, NULL, cb) ? -EBUSY : 0;
}

#define INVALID_CPU_ID	UINT_MAX

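/*
 * Entry point and context argument handed over to a CPU coming online or
 * resuming. The lock guards against concurrent CPU_ON calls targeting the
 * same CPU; suspend uses a separate per-CPU instance and needs no locking.
 */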
struct psci_boot_args {
	atomic_t lock;
	unsigned long pc;
	unsigned long r0;
};

#define PSCI_BOOT_ARGS_UNLOCKED		0
#define PSCI_BOOT_ARGS_LOCKED		1

#define PSCI_BOOT_ARGS_INIT					\
	((struct psci_boot_args){				\
		.lock = ATOMIC_INIT(PSCI_BOOT_ARGS_UNLOCKED),	\
	})

static DEFINE_PER_CPU(struct psci_boot_args, cpu_on_args) = PSCI_BOOT_ARGS_INIT;
static DEFINE_PER_CPU(struct psci_boot_args, suspend_args) = PSCI_BOOT_ARGS_INIT;

#define	is_psci_0_1(what, func_id)					\
	(kvm_host_psci_config.psci_0_1_ ## what ## _implemented &&	\
	 (func_id) == kvm_host_psci_config.function_ids_0_1.what)

static bool is_psci_0_1_call(u64 func_id)
{
	return (is_psci_0_1(cpu_suspend, func_id) ||
		is_psci_0_1(cpu_on, func_id) ||
		is_psci_0_1(cpu_off, func_id) ||
		is_psci_0_1(migrate, func_id));
}

static bool is_psci_0_2_call(u64 func_id)
{
	/* SMCCC reserves IDs 0x00-1F with the given 32/64-bit base for PSCI. */
	return (PSCI_0_2_FN(0) <= func_id && func_id <= PSCI_0_2_FN(31)) ||
	       (PSCI_0_2_FN64(0) <= func_id && func_id <= PSCI_0_2_FN64(31));
}

static unsigned long psci_call(unsigned long fn, unsigned long arg0,
			       unsigned long arg1, unsigned long arg2)
{
	struct arm_smccc_res res;

	arm_smccc_1_1_smc(fn, arg0, arg1, arg2, &res);
	return res.a0;
}

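/* Forward the host's SMC to EL3 unmodified, passing through x0-x3. */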
static unsigned long psci_forward(struct kvm_cpu_context *host_ctxt)
{
	return psci_call(cpu_reg(host_ctxt, 0), cpu_reg(host_ctxt, 1),
			 cpu_reg(host_ctxt, 2), cpu_reg(host_ctxt, 3));
}

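/* Map an MPIDR onto the logical CPU ID recorded at KVM init time. */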
static unsigned int find_cpu_id(u64 mpidr)
{
	unsigned int i;

	/* Reject invalid MPIDRs */
	if (mpidr & ~MPIDR_HWID_BITMASK)
		return INVALID_CPU_ID;

	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_logical_map(i) == mpidr)
			return i;
	}

	return INVALID_CPU_ID;
}

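/*
 * The boot args "lock" is a simple acquire/release flag: the CPU issuing
 * CPU_ON takes it before publishing pc/r0, and the target CPU releases it
 * once it has consumed them in __kvm_host_psci_cpu_entry().
 */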
static __always_inline bool try_acquire_boot_args(struct psci_boot_args *args)
{
	return atomic_cmpxchg_acquire(&args->lock,
				      PSCI_BOOT_ARGS_UNLOCKED,
				      PSCI_BOOT_ARGS_LOCKED) ==
		PSCI_BOOT_ARGS_UNLOCKED;
}

static __always_inline void release_boot_args(struct psci_boot_args *args)
{
	atomic_set_release(&args->lock, PSCI_BOOT_ARGS_UNLOCKED);
}

static int psci_cpu_on(u64 func_id, struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(u64, mpidr, host_ctxt, 1);
	DECLARE_REG(unsigned long, pc, host_ctxt, 2);
	DECLARE_REG(unsigned long, r0, host_ctxt, 3);

	unsigned int cpu_id;
	struct psci_boot_args *boot_args;
	struct kvm_nvhe_init_params *init_params;
	int ret;

	/*
	 * Find the logical CPU ID for the given MPIDR. The search set is
	 * the set of CPUs that were online at the point of KVM initialization.
	 * Booting other CPUs is rejected because their cpufeatures were not
	 * checked against the finalized capabilities. This could be relaxed
	 * by doing the feature checks in hyp.
	 */
	cpu_id = find_cpu_id(mpidr);
	if (cpu_id == INVALID_CPU_ID)
		return PSCI_RET_INVALID_PARAMS;

	boot_args = per_cpu_ptr(&cpu_on_args, cpu_id);
	init_params = per_cpu_ptr(&kvm_init_params, cpu_id);

	/* Check if the target CPU is already being booted. */
	if (!try_acquire_boot_args(boot_args))
		return PSCI_RET_ALREADY_ON;

	boot_args->pc = pc;
	boot_args->r0 = r0;
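	/* Make the boot args visible before the target CPU is powered on. */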
	wmb();

	ret = psci_call(func_id, mpidr,
			__hyp_pa(&kvm_hyp_cpu_entry),
			__hyp_pa(init_params));

	/* If successful, the lock will be released by the target CPU. */
	if (ret != PSCI_RET_SUCCESS)
		release_boot_args(boot_args);

	return ret;
}

static int psci_cpu_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(u64, power_state, host_ctxt, 1);
	DECLARE_REG(unsigned long, pc, host_ctxt, 2);
	DECLARE_REG(unsigned long, r0, host_ctxt, 3);
	int ret;

	struct psci_boot_args *boot_args;
	struct kvm_nvhe_init_params *init_params;

	boot_args = this_cpu_ptr(&suspend_args);
	init_params = this_cpu_ptr(&kvm_init_params);

	/*
	 * No need to acquire a lock before writing to boot_args because a core
	 * can only suspend itself. Racy CPU_ON calls use a separate struct.
	 */
	boot_args->pc = pc;
	boot_args->r0 = r0;

	pkvm_psci_notify(PKVM_PSCI_CPU_SUSPEND, host_ctxt);

	/*
	 * Either returns directly if this is a shallow sleep state, or wakes
	 * up at the entry point if it is a deep sleep state.
	 */
	ret = psci_call(func_id, power_state,
			__hyp_pa(&kvm_hyp_cpu_resume),
			__hyp_pa(init_params));

	return ret;
}

static int psci_system_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(unsigned long, pc, host_ctxt, 1);
	DECLARE_REG(unsigned long, r0, host_ctxt, 2);

	struct psci_boot_args *boot_args;
	struct kvm_nvhe_init_params *init_params;

	boot_args = this_cpu_ptr(&suspend_args);
	init_params = this_cpu_ptr(&kvm_init_params);

	/*
	 * No need to acquire a lock before writing to boot_args because a core
	 * can only suspend itself. Racy CPU_ON calls use a separate struct.
	 */
	boot_args->pc = pc;
	boot_args->r0 = r0;

	pkvm_psci_notify(PKVM_PSCI_SYSTEM_SUSPEND, host_ctxt);

	/* Will only return on error. */
	return psci_call(func_id,
			 __hyp_pa(&kvm_hyp_cpu_resume),
			 __hyp_pa(init_params), 0);
}

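/*
 * Hyp entry point for a CPU coming out of CPU_ON or out of a deep sleep
 * state. Loads the host's requested pc/r0 from the relevant boot args and
 * returns to the host via __host_enter().
 */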
asmlinkage void __noreturn __kvm_host_psci_cpu_entry(bool is_cpu_on)
{
	struct psci_boot_args *boot_args;
	struct kvm_cpu_context *host_ctxt;

	__hyp_enter();

	host_ctxt = host_data_ptr(host_ctxt);

	if (is_cpu_on)
		boot_args = this_cpu_ptr(&cpu_on_args);
	else
		boot_args = this_cpu_ptr(&suspend_args);

	cpu_reg(host_ctxt, 0) = boot_args->r0;
	write_sysreg_el2(boot_args->pc, SYS_ELR);

	if (is_cpu_on)
		release_boot_args(boot_args);

	pkvm_psci_notify(PKVM_PSCI_CPU_ENTRY, host_ctxt);
	__hyp_exit();
	hyp_ftrace_ret_flush();
	__host_enter(host_ctxt);
}

static DEFINE_HYP_SPINLOCK(mem_protect_lock);

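/*
 * Reference-counted wrapper around PSCI MEM_PROTECT: firmware is asked to
 * enable the protection when the count rises from zero and to disable it
 * when the count drops back to zero. Returns the updated count. Callers
 * must hold mem_protect_lock.
 */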
static u64 psci_mem_protect(s64 offset)
{
	static u64 cnt;
	u64 new = cnt + offset;

	hyp_assert_lock_held(&mem_protect_lock);

	if (!offset || kvm_host_psci_config.version < PSCI_VERSION(1, 1))
		return cnt;

	if (!cnt || !new)
		psci_call(PSCI_1_1_FN_MEM_PROTECT, offset < 0 ? 0 : 1, 0, 0);

	trace_psci_mem_protect(new, cnt);

	cnt = new;
	return cnt;
}

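/* True if MEM_PROTECT is currently enabled. Must be called under the lock. */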
static bool psci_mem_protect_active(void)
{
	return psci_mem_protect(0);
}

void psci_mem_protect_inc(u64 n)
{
	hyp_spin_lock(&mem_protect_lock);
	psci_mem_protect(n);
	hyp_spin_unlock(&mem_protect_lock);
}

void psci_mem_protect_dec(u64 n)
{
	hyp_spin_lock(&mem_protect_lock);
	psci_mem_protect(-n);
	hyp_spin_unlock(&mem_protect_lock);
}

static unsigned long psci_0_1_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)
{
	if (is_psci_0_1(cpu_off, func_id) || is_psci_0_1(migrate, func_id))
		return psci_forward(host_ctxt);
	if (is_psci_0_1(cpu_on, func_id))
		return psci_cpu_on(func_id, host_ctxt);
	if (is_psci_0_1(cpu_suspend, func_id))
		return psci_cpu_suspend(func_id, host_ctxt);

	return PSCI_RET_NOT_SUPPORTED;
}

static unsigned long psci_0_2_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)
{
	switch (func_id) {
	case PSCI_0_2_FN_PSCI_VERSION:
	case PSCI_0_2_FN_CPU_OFF:
	case PSCI_0_2_FN64_AFFINITY_INFO:
	case PSCI_0_2_FN64_MIGRATE:
	case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
	case PSCI_0_2_FN64_MIGRATE_INFO_UP_CPU:
		return psci_forward(host_ctxt);
	/*
	 * SYSTEM_OFF/RESET should not return according to the spec.
	 * Allow it so as to stay robust to broken firmware.
	 */
	case PSCI_0_2_FN_SYSTEM_OFF:
	case PSCI_0_2_FN_SYSTEM_RESET:
		pkvm_poison_pvmfw_pages();
		/*
		 * Avoid racing with a MEM_PROTECT call. The lock is
		 * deliberately never released: the forwarded call is not
		 * expected to return.
		 */
		hyp_spin_lock(&mem_protect_lock);
		return psci_forward(host_ctxt);
	case PSCI_0_2_FN64_CPU_SUSPEND:
		return psci_cpu_suspend(func_id, host_ctxt);
	case PSCI_0_2_FN64_CPU_ON:
		return psci_cpu_on(func_id, host_ctxt);
	default:
		return PSCI_RET_NOT_SUPPORTED;
	}
}

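/*
 * A warm reset preserves memory contents, which would defeat MEM_PROTECT,
 * so demote SYSTEM_RESET2 warm resets to a plain SYSTEM_RESET while the
 * protection is active.
 */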
static unsigned long psci_system_reset2(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(u32, reset_type, host_ctxt, 1);

	pkvm_poison_pvmfw_pages();
	hyp_spin_lock(&mem_protect_lock);

	if (psci_mem_protect_active() &&
	    reset_type == PSCI_1_1_RESET_TYPE_SYSTEM_WARM_RESET) {
		cpu_reg(host_ctxt, 0) = PSCI_0_2_FN_SYSTEM_RESET;
	}

	return psci_forward(host_ctxt);
}

static unsigned long psci_1_0_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)
{
	switch (func_id) {
	case PSCI_1_1_FN64_SYSTEM_RESET2:
		return psci_system_reset2(host_ctxt);
	case PSCI_1_0_FN_PSCI_FEATURES:
	case PSCI_1_0_FN_SET_SUSPEND_MODE:
		return psci_forward(host_ctxt);
	case PSCI_1_0_FN64_SYSTEM_SUSPEND:
		return psci_system_suspend(func_id, host_ctxt);
	default:
		return psci_0_2_handler(func_id, host_ctxt);
	}
}

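/*
 * Handle a PSCI call trapped from the host. Returns true if the call was
 * handled and the host's return registers updated, false if the function
 * ID does not belong to the detected PSCI version.
 */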
bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt, u32 func_id)
{
	unsigned long ret;

	switch (kvm_host_psci_config.version) {
	case PSCI_VERSION(0, 1):
		if (!is_psci_0_1_call(func_id))
			return false;
		ret = psci_0_1_handler(func_id, host_ctxt);
		break;
	case PSCI_VERSION(0, 2):
		if (!is_psci_0_2_call(func_id))
			return false;
		ret = psci_0_2_handler(func_id, host_ctxt);
		break;
	default:
		if (!is_psci_0_2_call(func_id))
			return false;
		ret = psci_1_0_handler(func_id, host_ctxt);
		break;
	}

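	/* PSCI only returns a value in r0; zero r1-r3 rather than leak state. */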
	cpu_reg(host_ctxt, 0) = ret;
	cpu_reg(host_ctxt, 1) = 0;
	cpu_reg(host_ctxt, 2) = 0;
	cpu_reg(host_ctxt, 3) = 0;
	return true;
}