/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2022 Google LLC
 */
#include <asm/kvm_host.h>
#include <asm/kvm_pkvm_module.h>
#include <asm/kvm_hypevents.h>
#include <asm/module.h>

#include <nvhe/alloc.h>
#include <nvhe/iommu.h>
#include <nvhe/mem_protect.h>
#include <nvhe/modules.h>
#include <nvhe/mm.h>
#include <nvhe/serial.h>
#include <nvhe/spinlock.h>
#include <nvhe/trace.h>
#include <nvhe/trap_handler.h>

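/*
 * Thin wrappers around hypervisor helpers, exposed to EL2 modules through
 * the module_ops table below.
 */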
static void *__pkvm_module_memcpy(void *to, const void *from, size_t count)
{
	return memcpy(to, from, count);
}

static void *__pkvm_module_memset(void *dst, int c, size_t count)
{
	return memset(dst, c, count);
}

static void __kvm_flush_dcache_to_poc(void *addr, size_t size)
{
	kvm_flush_dcache_to_poc((unsigned long)addr, (unsigned long)size);
}

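/*
 * Update the per-CPU copy of HCR_EL2 (resp. HFGWTR_EL2) kept in
 * kvm_init_params, clean the params to the PoC, and write the new value to
 * the live register.
 */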
static void __update_hcr_el2(unsigned long set_mask, unsigned long clear_mask)
{
	struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);

	params->hcr_el2 |= set_mask;
	params->hcr_el2 &= ~clear_mask;
	__kvm_flush_dcache_to_poc(params, sizeof(*params));
	write_sysreg(params->hcr_el2, hcr_el2);
}

static void __update_hfgwtr_el2(unsigned long set_mask, unsigned long clear_mask)
{
	struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);

	params->hfgwtr_el2 |= set_mask;
	params->hfgwtr_el2 &= ~clear_mask;
	__kvm_flush_dcache_to_poc(params, sizeof(*params));
	write_sysreg_s(params->hfgwtr_el2, SYS_HFGWTR_EL2);
}

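/*
 * Early linear-map helpers for modules: map or unmap a page-aligned physical
 * range in pKVM's linear map while module loading is still open.
 * early_lm_pages tracks how much is still mapped so that
 * __pkvm_close_module_registration() can check everything has been torn down.
 */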
static atomic_t early_lm_pages;

static void *__pkvm_linear_map_early(phys_addr_t phys, size_t size, enum kvm_pgtable_prot prot)
{
	void *addr = NULL;
	int ret;

	if (!PAGE_ALIGNED(phys) || !PAGE_ALIGNED(size))
		return NULL;

	addr = __hyp_va(phys);
	ret = pkvm_create_mappings(addr, addr + size, prot);
	if (ret)
		addr = NULL;
	else
		atomic_add(size, &early_lm_pages);

	return addr;
}

static void __pkvm_linear_unmap_early(void *addr, size_t size)
{
	pkvm_remove_mappings(addr, addr + size);
	atomic_sub(size, &early_lm_pages);
}

void __pkvm_close_module_registration(void)
{
	/*
	 * Page ownership tracking might go out of sync if there are stale
	 * entries in pKVM's linear map range, so they must really be gone by
	 * now.
	 */
	WARN_ON_ONCE(atomic_read(&early_lm_pages));

	/*
	 * Nothing else to do, module loading HVCs are only accessible before
	 * deprivilege.
	 */
}

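/*
 * Emit a __hyp_printk trace event on behalf of a module: a format id plus
 * four raw u64 arguments, written only when hyp printk tracing is enabled.
 */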
static void tracing_mod_hyp_printk(u8 fmt_id, u64 a, u64 b, u64 c, u64 d)
{
#ifdef CONFIG_TRACING
	struct trace_hyp_format___hyp_printk *entry;
	size_t length = sizeof(*entry);

	if (!atomic_read(&__hyp_printk_enabled))
		return;

	entry = tracing_reserve_entry(length);
	if (!entry)
		return;
	entry->hdr.id = hyp_event_id___hyp_printk.id;
	entry->fmt_id = fmt_id;
	entry->a = a;
	entry->b = b;
	entry->c = c;
	entry->d = d;
	tracing_commit_entry();
#endif
}

static int host_stage2_enable_lazy_pte(u64 pfn, u64 nr_pages)
{
	/*
	 * The lazy PTE functionality is deprecated now that the host can
	 * unmap on FF-A lend.
	 */
	WARN_ON(1);

	return -EPERM;
}

static int host_stage2_disable_lazy_pte(u64 pfn, u64 nr_pages)
{
	WARN_ON(1);

	return -EPERM;
}

static int __hyp_smp_processor_id(void)
{
	return hyp_smp_processor_id();
}

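/*
 * Module-provided handlers for host permission faults and for host/guest
 * SMCs. Each handler type has a fixed number of slots, filled lock-free at
 * registration time and walked by the dispatchers below.
 */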
#define MAX_MOD_HANDLERS 16

enum mod_handler_type {
	HOST_FAULT_HANDLER = 0,
	HOST_SMC_HANDLER,
	GUEST_SMC_HANDLER,
	NUM_MOD_HANDLER_TYPES,
};

static void *mod_handlers[NUM_MOD_HANDLER_TYPES][MAX_MOD_HANDLERS];

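/*
 * Claim the first free slot for @type. The release pairs with the acquire in
 * __get_mod_handler() so a published handler is fully visible to its callers.
 */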
static int mod_handler_register(enum mod_handler_type type, void *handler)
{
	int i;

	for (i = 0; i < MAX_MOD_HANDLERS; i++) {
		if (!cmpxchg64_release(&mod_handlers[type][i], NULL, handler))
			return 0;
	}

	return -EBUSY;
}

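/*
 * Bounds-checked, speculation-safe lookup of handler slot @i for @type.
 */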
static void *__get_mod_handler(enum mod_handler_type type, int i)
{
	if (WARN_ON(type >= NUM_MOD_HANDLER_TYPES))
		return NULL;

	if (i >= MAX_MOD_HANDLERS)
		return NULL;

	i = array_index_nospec(i, MAX_MOD_HANDLERS);

	return smp_load_acquire(&mod_handlers[type][i]);
}

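/* Walk the handlers registered for @type, stopping at the first empty slot. */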
#define for_each_mod_handler(type, handler, i)					\
	for ((i) = 0, handler = (typeof(handler))__get_mod_handler(type, 0);	\
	     handler;								\
	     handler = (typeof(handler))__get_mod_handler(type, ++(i)))

static int
__register_host_perm_fault_handler(int (*cb)(struct user_pt_regs *regs, u64 esr, u64 addr))
{
	return mod_handler_register(HOST_FAULT_HANDLER, cb);
}

static int __register_host_smc_handler(bool (*cb)(struct user_pt_regs *))
{
	return mod_handler_register(HOST_SMC_HANDLER, cb);
}

static int __register_guest_smc_handler(bool (*cb)(struct arm_smccc_1_2_regs *regs,
						   struct arm_smccc_1_2_regs *res,
						   pkvm_handle_t handle))
{
	return mod_handler_register(GUEST_SMC_HANDLER, cb);
}

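/*
 * Dispatchers for the handlers registered above. Note the differing
 * conventions: a permission-fault callback returns 0 once it has handled the
 * fault, while SMC callbacks return true.
 */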
bool module_handle_host_perm_fault(struct user_pt_regs *regs, u64 esr, u64 addr)
{
	int (*cb)(struct user_pt_regs *regs, u64 esr, u64 addr);
	int i;

	for_each_mod_handler(HOST_FAULT_HANDLER, cb, i) {
		if (!cb(regs, esr, addr))
			return true;
	}

	return false;
}

bool module_handle_host_smc(struct user_pt_regs *regs)
{
	bool (*cb)(struct user_pt_regs *regs);
	int i;

	for_each_mod_handler(HOST_SMC_HANDLER, cb, i) {
		if (cb(regs))
			return true;
	}

	return false;
}

bool module_handle_guest_smc(struct arm_smccc_1_2_regs *regs, struct arm_smccc_1_2_regs *res,
			     pkvm_handle_t handle)
{
	bool (*cb)(struct arm_smccc_1_2_regs *regs, struct arm_smccc_1_2_regs *res,
		   pkvm_handle_t handle);
	int i;

	for_each_mod_handler(GUEST_SMC_HANDLER, cb, i) {
		if (cb(regs, res, handle))
			return true;
	}

	return false;
}

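/*
 * At most one module can provide the guest TRNG backend; the slot is claimed
 * once with cmpxchg and there is no unregistration path.
 */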
static const struct pkvm_module_trng_ops *module_guest_trng_ops;

static int __register_guest_trng_ops(const struct pkvm_module_trng_ops *ops)
{
	if (!ops->trng_uuid || !ops->trng_rnd64)
		return -EINVAL;

	if (cmpxchg64_relaxed(&module_guest_trng_ops, NULL, ops))
		return -EBUSY;

	return 0;
}

const uuid_t *module_get_guest_trng_uuid(void)
{
	const struct pkvm_module_trng_ops *ops;

	ops = READ_ONCE(module_guest_trng_ops);
	if (!ops)
		return NULL;

	return ops->trng_uuid;
}

u64 module_get_guest_trng_rng(u64 *entropy, int nbits)
{
	const struct pkvm_module_trng_ops *ops;

	ops = READ_ONCE(module_guest_trng_ops);
	if (!ops)
		return SMCCC_RET_NOT_SUPPORTED;

	return ops->trng_rnd64(entropy, nbits);
}

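/*
 * Hypervisor services exposed to EL2 modules. A pointer to this table is
 * passed to each module's init function by __pkvm_init_module().
 */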
const struct pkvm_module_ops module_ops = {
	.create_private_mapping = __pkvm_create_private_mapping,
	.alloc_module_va = __pkvm_alloc_module_va,
	.map_module_page = __pkvm_map_module_page,
	.register_serial_driver = __pkvm_register_serial_driver,
	.putc = hyp_putc,
	.puts = hyp_puts,
	.putx64 = hyp_putx64,
	.fixmap_map = hyp_fixmap_map,
	.fixmap_unmap = hyp_fixmap_unmap,
	.fixblock_map = hyp_fixblock_map,
	.fixblock_unmap = hyp_fixblock_unmap,
	.linear_map_early = __pkvm_linear_map_early,
	.linear_unmap_early = __pkvm_linear_unmap_early,
	.flush_dcache_to_poc = __kvm_flush_dcache_to_poc,
	.update_hcr_el2 = __update_hcr_el2,
	.update_hfgwtr_el2 = __update_hfgwtr_el2,
	.register_host_perm_fault_handler = __register_host_perm_fault_handler,
	.host_stage2_mod_prot = module_change_host_page_prot,
	.host_stage2_get_leaf = host_stage2_get_leaf,
	.host_stage2_enable_lazy_pte = host_stage2_enable_lazy_pte,
	.host_stage2_disable_lazy_pte = host_stage2_disable_lazy_pte,
	.register_host_smc_handler = __register_host_smc_handler,
	.register_guest_smc_handler = __register_guest_smc_handler,
	.register_default_trap_handler = __pkvm_register_default_trap_handler,
	.register_illegal_abt_notifier = __pkvm_register_illegal_abt_notifier,
	.register_psci_notifier = __pkvm_register_psci_notifier,
	.register_hyp_panic_notifier = __pkvm_register_hyp_panic_notifier,
	.register_unmask_serror = __pkvm_register_unmask_serror,
	.host_donate_hyp = ___pkvm_host_donate_hyp,
	.host_donate_hyp_prot = ___pkvm_host_donate_hyp_prot,
	.host_donate_sglist_hyp = __pkvm_host_donate_sglist_hyp,
	.hyp_donate_host = __pkvm_hyp_donate_host,
	.host_share_hyp = __pkvm_host_share_hyp,
	.host_unshare_hyp = __pkvm_host_unshare_hyp,
	.pin_shared_mem = hyp_pin_shared_mem,
	.unpin_shared_mem = hyp_unpin_shared_mem,
	.memcpy = __pkvm_module_memcpy,
	.memset = __pkvm_module_memset,
	.hyp_pa = hyp_virt_to_phys,
	.hyp_va = hyp_phys_to_virt,
	.kern_hyp_va = __kern_hyp_va,
	.tracing_reserve_entry = tracing_reserve_entry,
	.tracing_commit_entry = tracing_commit_entry,
	.tracing_mod_hyp_printk = tracing_mod_hyp_printk,
	.hyp_alloc = hyp_alloc,
	.hyp_alloc_errno = hyp_alloc_errno,
	.hyp_free = hyp_free,
	.hyp_alloc_missing_donations = hyp_alloc_missing_donations,
	.iommu_donate_pages = kvm_iommu_donate_pages,
	.iommu_reclaim_pages = kvm_iommu_reclaim_pages,
	.iommu_init_device = kvm_iommu_init_device,
	.udelay = pkvm_udelay,
	.iommu_iotlb_gather_add_page = kvm_iommu_iotlb_gather_add_page,
	.pkvm_unuse_dma = iommu_pkvm_unuse_dma,
#ifdef CONFIG_LIST_HARDENED
	.list_add_valid_or_report = __list_add_valid_or_report,
	.list_del_entry_valid_or_report = __list_del_entry_valid_or_report,
#endif
	.iommu_snapshot_host_stage2 = kvm_iommu_snapshot_host_stage2,
	.iommu_donate_pages_atomic = kvm_iommu_donate_pages_atomic,
	.iommu_reclaim_pages_atomic = kvm_iommu_reclaim_pages_atomic,
	.hyp_smp_processor_id = __hyp_smp_processor_id,
	.device_register_reset = pkvm_device_register_reset,
	.register_guest_trng_ops = __register_guest_trng_ops,
};

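/* Translate a kernel VA inside the module's sections into its hyp VA. */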
static void *pkvm_module_hyp_va(struct pkvm_el2_module *mod, void *kern_va)
{
	return kern_va - mod->sections.start + mod->hyp_va;
}

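/*
 * Resolve the module's event and patchable-function-entry metadata into hyp
 * VAs, register them, then run the module's init function with the ops table.
 */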
int __pkvm_init_module(void *host_mod)
{
	int (*do_module_init)(const struct pkvm_module_ops *ops);
	struct pkvm_el2_module *mod = kern_hyp_va(host_mod);
	void *event_ids, *funcs, *funcs_end, *ftrace_tramp;
	size_t hyp_kern_offset;

	event_ids = pkvm_module_hyp_va(mod, mod->event_ids.start);
	funcs = pkvm_module_hyp_va(mod, mod->patchable_function_entries.start);
	funcs_end = pkvm_module_hyp_va(mod, mod->patchable_function_entries.end);
	/* see module.lds.h */
	ftrace_tramp = pkvm_module_hyp_va(mod, mod->text.end) - 20;

	hyp_kern_offset = mod->sections.start - mod->hyp_va;

	register_hyp_mod_events(event_ids, mod->nr_hyp_events,
				funcs, funcs_end, ftrace_tramp, hyp_kern_offset);

	do_module_init = pkvm_module_hyp_va(mod, (void *)mod->init);

	return do_module_init(&module_ops);
}

#define MAX_DYNAMIC_HCALLS 128

atomic_t num_dynamic_hcalls = ATOMIC_INIT(0);
DEFINE_HYP_SPINLOCK(dyn_hcall_lock);

static dyn_hcall_t host_dynamic_hcalls[MAX_DYNAMIC_HCALLS];

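/*
 * Dispatch a host hypercall from the dynamic range: ids at or above
 * __KVM_HOST_SMCCC_FUNC___dynamic_hcalls index into host_dynamic_hcalls.
 */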
int handle_host_dynamic_hcall(struct user_pt_regs *regs, int id)
{
	dyn_hcall_t hfn;
	int dyn_id;

	/*
	 * TODO: static key to protect when no dynamic hcall is registered?
	 */

	dyn_id = id - __KVM_HOST_SMCCC_FUNC___dynamic_hcalls;
	if (dyn_id < 0)
		return HCALL_UNHANDLED;

	/*
	 * Order access to num_dynamic_hcalls and host_dynamic_hcalls. Paired
	 * with __pkvm_register_hcall().
	 */
	if (dyn_id >= atomic_read_acquire(&num_dynamic_hcalls))
		return HCALL_UNHANDLED;

	hfn = READ_ONCE(host_dynamic_hcalls[dyn_id]);
	if (!hfn)
		return HCALL_UNHANDLED;

	hfn(regs);

	return HCALL_HANDLED;
}

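/*
 * Register a module hypercall handler (given as a hyp VA) and return the
 * hcall id the host must use to reach it, or a negative error code.
 */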
int __pkvm_register_hcall(unsigned long hvn_hyp_va)
{
	dyn_hcall_t hfn = (void *)hvn_hyp_va;
	int reserved_id, ret;

	assert_in_mod_range(hvn_hyp_va);

	hyp_spin_lock(&dyn_hcall_lock);

	reserved_id = atomic_read(&num_dynamic_hcalls);

	if (reserved_id >= MAX_DYNAMIC_HCALLS) {
		ret = -ENOMEM;
		goto err_hcall_unlock;
	}

	WRITE_ONCE(host_dynamic_hcalls[reserved_id], hfn);

	/*
	 * Order access to num_dynamic_hcalls and host_dynamic_hcalls. Paired
	 * with handle_host_dynamic_hcall().
	 */
	atomic_set_release(&num_dynamic_hcalls, reserved_id + 1);

	ret = reserved_id + __KVM_HOST_SMCCC_FUNC___dynamic_hcalls;
err_hcall_unlock:
	hyp_spin_unlock(&dyn_hcall_lock);

	return ret;
}