// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2023 Google LLC
 */

#include <linux/glob.h>
#include <linux/rcupdate.h>
#include <linux/tracefs.h>

#include <asm/kvm_host.h>
#include <asm/kvm_mmu.h>
#include <asm/patching.h>
#include <asm/setup.h>

#include "hyp_trace.h"

static const char *hyp_printk_fmt_from_id(u8 fmt_id);

#include <asm/kvm_define_hypevents.h>

struct hyp_table {
	void		*start;
	unsigned long	nr_entries;
};

struct hyp_mod_tables {
	struct hyp_table	*tables;
	unsigned long		nr_tables;
};

#define nr_entries(__start, __stop) \
	(((unsigned long)__stop - (unsigned long)__start) / sizeof(*__start))

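/*
 * Append a module-provided table: allocate a copy of the current array with
 * one extra slot, publish it with rcu_replace_pointer() and free the old
 * array once all readers are done.
 */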
static int hyp_table_add(struct hyp_mod_tables *mod_tables, void *start,
			 unsigned long nr_entries)
{
	struct hyp_table *new, *old;
	int i;

	new = kmalloc_array(mod_tables->nr_tables + 1, sizeof(*new), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	for (i = 0; i < mod_tables->nr_tables; i++) {
		new[i].start = mod_tables->tables[i].start;
		new[i].nr_entries = mod_tables->tables[i].nr_entries;
	}
	new[i].start = start;
	new[i].nr_entries = nr_entries;

	old = rcu_replace_pointer(mod_tables->tables, new, true);
	synchronize_rcu();
	mod_tables->nr_tables++;
	kfree(old);

	return 0;
}

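/*
 * Resolve a global entry id against the registered tables: walk them under
 * RCU, subtracting each table's size from the id until the owning table is
 * found.
 */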
static void *hyp_table_entry(struct hyp_mod_tables *mod_tables,
			     size_t entry_size, unsigned long id)
{
	struct hyp_table *table;
	void *entry = NULL;

	rcu_read_lock();
	table = rcu_dereference(mod_tables->tables);

	for (int i = 0; i < mod_tables->nr_tables; i++) {
		if (table->nr_entries <= id) {
			id -= table->nr_entries;
			table++;
			continue;
		}

		entry = (void *)((char *)table->start + (id * entry_size));
		break;
	}
	rcu_read_unlock();

	return entry;
}

extern struct hyp_printk_fmt __hyp_printk_fmts_start[];
extern struct hyp_printk_fmt __hyp_printk_fmts_end[];

static struct hyp_mod_tables mod_printk_fmt_tables;
static unsigned long total_printk_fmts;

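/*
 * Format ids below the number of built-in formats index the
 * __hyp_printk_fmts section directly; higher ids belong to module tables.
 */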
static const char *hyp_printk_fmt_from_id(u8 fmt_id)
{
	u8 nr_fmts = nr_entries(__hyp_printk_fmts_start, __hyp_printk_fmts_end);
	struct hyp_printk_fmt *fmt = NULL;

	if (fmt_id < nr_fmts)
		return (__hyp_printk_fmts_start + fmt_id)->fmt;

	fmt_id -= nr_fmts;

	fmt = hyp_table_entry(&mod_printk_fmt_tables, sizeof(*fmt), fmt_id);

	return fmt ? fmt->fmt : "Unknown Format";
}

#ifdef CONFIG_PKVM_FTRACE
extern unsigned long __hyp_patchable_function_entries_start[];
extern unsigned long __hyp_patchable_function_entries_end[];
extern unsigned long kvm_nvhe_sym(__hyp_text_start_kern);

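/*
 * Turn the NOP at the patchable function entry into "mov x9, lr" so the
 * link register is preserved across the later branch to the ftrace
 * trampoline.
 */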
static int hyp_ftrace_init_lr_ins(unsigned long addr)
{
	u32 old, new;

	if (aarch64_insn_read((void *)addr, &old))
		return -EFAULT;

	if (old != aarch64_insn_gen_nop())
		return -EINVAL;

	new = aarch64_insn_gen_move_reg(AARCH64_INSN_REG_9,
					AARCH64_INSN_REG_LR,
					AARCH64_INSN_VARIANT_64BIT);
	if (aarch64_insn_patch_text_nosync((void *)addr, new))
		return -EPERM;

	return 0;
}

static unsigned long *hyp_ftrace_funcs_pg;
static char hyp_ftrace_filter_early[128];

static __init int setup_hyp_ftrace_filter_early(char *str)
{
	strscpy(hyp_ftrace_filter_early, str, sizeof(hyp_ftrace_filter_early));

	return 1;
}
__setup("hyp_ftrace_filter=", setup_hyp_ftrace_filter_early);

DEFINE_MUTEX(hyp_ftrace_funcs_lock);

/* Instructions are word-aligned, let's repurpose the LSB */
#define func_enable(func)	((func) | 0x1)
#define func_disable(func)	((func) & (~BIT(0)))
#define func_is_enabled(func)	((func) & BIT(0))

/* The last 8 bytes store a pointer to the next page */
#define funcs_pg_end(pg) ((typeof(pg))((void *)(pg) + PAGE_SIZE - 8))

#define for_each_funcs_pg(pg) \
	for (pg = hyp_ftrace_funcs_pg; pg; pg = (typeof(pg))*((unsigned long *)funcs_pg_end(pg)))

#define for_each_func(pg, func) \
	for (func = pg; (void *)func < funcs_pg_end(pg) && *func; func++)

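/*
 * Record a traceable function in the page-sized array starting at
 * hyp_ftrace_funcs_pg. When a page fills up, a new one is allocated and
 * chained through the pointer stored in its last 8 bytes.
 */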
static int hyp_ftrace_func_add(unsigned long entry, bool enable)
{
	static void *funcs_pg_end;
	static void *entry_addr;

	if (!entry_addr) {
		entry_addr = hyp_ftrace_funcs_pg;
		funcs_pg_end = funcs_pg_end(entry_addr);
	}

	if (entry_addr >= funcs_pg_end) {
		unsigned long new_func_pg;

		new_func_pg = __get_free_page(GFP_KERNEL);
		if (!new_func_pg)
			return -ENOMEM;

		memset((char *)new_func_pg, 0, PAGE_SIZE);

		*(unsigned long *)entry_addr = new_func_pg;
		entry_addr = (unsigned long *)new_func_pg;
		funcs_pg_end = funcs_pg_end(entry_addr);
	}

	*(unsigned long *)entry_addr = enable ? func_enable(entry) : entry;
	entry_addr += sizeof(entry);

	return 0;
}

static bool hyp_ftrace_func_match(unsigned long kern_addr, const char *regex)
{
	char sym[KSYM_SYMBOL_LEN];
	char *modname;

	if (!strlen(regex))
		return true;

	kallsyms_lookup(kern_addr, NULL, NULL, &modname, sym);

	return glob_match(regex, sym);
}

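/*
 * Walk every recorded function and flip the enable bit of those matching
 * the glob. Returns -EINVAL if nothing matched.
 */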
static int hyp_ftrace_funcs_apply_filter(const char *filter, bool enable)
{
	bool match = false;
	void *func_pg;

	for_each_funcs_pg(func_pg) {
		unsigned long *func;

		for_each_func(func_pg, func) {
			if (hyp_ftrace_func_match(func_disable(*func), filter)) {
				*func = enable ? func_enable(*func) : func_disable(*func);
				match = true;
			}
		}
	}

	return match ? 0 : -EINVAL;
}

static int hyp_ftrace_filter_show(struct seq_file *m, void *v)
{
	void *func_pg;

	for_each_funcs_pg(func_pg) {
		char sym[KSYM_SYMBOL_LEN];
		unsigned long *func;
		char *modname;

		for_each_func(func_pg, func) {
			if (!func_is_enabled(*func))
				continue;

			kallsyms_lookup(func_disable(*func), NULL, NULL,
					&modname, sym);
			seq_printf(m, "%s\n", sym);
		}
	}

	return 0;
}

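/*
 * Propagate the current enable state to the hypervisor: either disable
 * tracing entirely or push each page of the function list to EL2.
 */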
static void hyp_ftrace_sync(bool force_enable, bool force_sync)
{
	static bool enabled;
	bool enable;
	void *func_pg;

	lockdep_assert_held(&hyp_ftrace_funcs_lock);

	enable = *hyp_event_func.enabled || *hyp_event_func_ret.enabled || force_enable;
	force_sync = force_sync && enable;

	if (!force_sync && enable == enabled)
		return;

	if (!enable) {
		kvm_call_hyp_nvhe(__pkvm_disable_ftrace);
		enabled = false;
		return;
	}

	for_each_funcs_pg(func_pg)
		kvm_call_hyp_nvhe(__pkvm_sync_ftrace, func_pg);

	enabled = true;
}

static ssize_t
hyp_ftrace_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
			loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	bool enable = (bool)m->private;
	char regex[128];
	int ret;

	if (cnt >= (sizeof(regex) - 1))
		return -E2BIG;

	ret = strncpy_from_user(regex, ubuf, sizeof(regex));
	if (ret < 0)
		return ret;

	regex[cnt - 1] = '\0';

	ret = hyp_ftrace_funcs_apply_filter(regex, enable);
	if (ret)
		return ret;

	hyp_ftrace_sync(false, true);

	return cnt;
}

static int hyp_ftrace_filter_open(struct inode *inode, struct file *file)
{
	int ret = single_open(file, hyp_ftrace_filter_show, inode->i_private);

	if (!ret)
		mutex_lock(&hyp_ftrace_funcs_lock);

	return ret;
}

static int hyp_ftrace_filter_release(struct inode *inode, struct file *file)
{
	mutex_unlock(&hyp_ftrace_funcs_lock);

	return single_release(inode, file);
}

static const struct file_operations hyp_ftrace_filter_fops = {
	.open		= hyp_ftrace_filter_open,
	.read		= seq_read,
	.write		= hyp_ftrace_filter_write,
	.llseek		= seq_lseek,
	.release	= hyp_ftrace_filter_release,
};

static const struct file_operations hyp_ftrace_notrace_fops = {
	.open		= hyp_ftrace_filter_open,
	.write		= hyp_ftrace_filter_write,
	.release	= hyp_ftrace_filter_release,
};

#define HYP_FTRACE_SKIP_FUNC (-1ULL)

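/*
 * Walk the patchable function entries: convert each hyp address to its
 * kernel address, patch in the LR-saving instruction, apply the early
 * filter and record the function. Entries that can't (or shouldn't) be
 * traced are marked with HYP_FTRACE_SKIP_FUNC.
 */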
static void hyp_ftrace_funcs_init(unsigned long *funcs, unsigned long *funcs_end,
				  unsigned long hyp_kern_offset, bool clear)
{
	unsigned long *func;
	int ret;

	func = funcs;
	while (func < funcs_end) {
		unsigned long kern_addr = *func + hyp_kern_offset;
		char sym[KSYM_SYMBOL_LEN];
		bool enable;

		if (!*func)
			break;

		if (clear)
			goto skip;

		sprint_symbol_no_offset(sym, kern_addr);
		if (!strncmp(sym, "__kvm_nvhe_$", 12))
			goto skip;

		ret = hyp_ftrace_init_lr_ins(kern_addr);
		if (ret) {
			pr_warn("Failed to patch %ps (%d)\n", (void *)kern_addr, ret);
			goto skip;
		}

		enable = hyp_ftrace_func_match(kern_addr, hyp_ftrace_filter_early);
		if (hyp_ftrace_func_add(kern_addr, enable))
			goto skip;

		/*
		 * Tell the hypervisor to enable the function as early as
		 * possible
		 */
		if (enable)
			*func = func_enable(*func);

		goto next;

skip:
		*func = HYP_FTRACE_SKIP_FUNC;
next:
		func++;
	}
}

static void hyp_ftrace_init(void)
{
	unsigned long hyp_base;

	hyp_ftrace_funcs_pg = (unsigned long *)__get_free_page(GFP_KERNEL);
	if (!hyp_ftrace_funcs_pg)
		return;

	memset(hyp_ftrace_funcs_pg, 0, PAGE_SIZE);

	hyp_base = (unsigned long)kern_hyp_va(lm_alias((unsigned long)__hyp_text_start));

	hyp_ftrace_funcs_init(__hyp_patchable_function_entries_start,
			      __hyp_patchable_function_entries_end,
			      (unsigned long)__hyp_text_start - hyp_base, false);

	/* For the hypervisor to compute its hyp_kern_offset */
	kvm_nvhe_sym(__hyp_text_start_kern) = (unsigned long)__hyp_text_start;
}

extern void kvm_nvhe_sym(__hyp_ftrace_tramp)(void);

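/*
 * Fill the slot reserved at the end of the module text with a movz/movk
 * sequence loading the address of __hyp_ftrace_tramp into x16, followed by
 * "br x16".
 */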
static int hyp_ftrace_init_mod_tramp(struct pkvm_el2_module *mod)
{
	u64 tramp_dst = (u64)kern_hyp_va(lm_alias((unsigned long)kvm_nvhe_sym(__hyp_ftrace_tramp)));
	enum aarch64_insn_register reg = AARCH64_INSN_REG_16;
	void *tramp = mod->text.end - 20; /* see module.lds.h */
	static u32 insns[5];
	u32 *insn = insns;
	int shift = 0;

	/*
	 * adrp is not enough for that massive jump between the private and
	 * linear, it's not a trampoline we need, it's a space shuttle!
	 *
	 * XXX: Relocate .hyp.text into the private range
	 */

	if (*insn)
		goto write;

	while (shift < 64) {
		u64 mask = GENMASK(shift + 15, shift);

		*insn = cpu_to_le32(
			aarch64_insn_gen_movewide(
				AARCH64_INSN_REG_16,
				(tramp_dst & mask) >> shift,
				shift,
				AARCH64_INSN_VARIANT_64BIT,
				shift ? AARCH64_INSN_MOVEWIDE_KEEP : AARCH64_INSN_MOVEWIDE_ZERO));
		shift += 16;
		insn++;
	}

	*insn = cpu_to_le32(aarch64_insn_gen_branch_reg(reg, AARCH64_INSN_BRANCH_NOLINK));

write:
	return aarch64_insn_copy((void *)tramp, insns, sizeof(insns))
		? 0 : -EINVAL;
}

static void hyp_ftrace_init_mod(struct pkvm_el2_module *mod)
{
	/* Install a trampoline to reach __hyp_ftrace_tramp */
	int ret = hyp_ftrace_init_mod_tramp(mod);

	if (ret)
		pr_warn("Failed to install trampoline for hyp ftrace\n");

	mutex_lock(&hyp_ftrace_funcs_lock);

	hyp_ftrace_funcs_init(mod->patchable_function_entries.start,
			      mod->patchable_function_entries.end,
			      mod->sections.start - mod->hyp_va,
			      ret);

	mutex_unlock(&hyp_ftrace_funcs_lock);

	sync_icache_aliases((unsigned long)mod->text.start,
			    (unsigned long)mod->text.end);
}

static int enable_func_hyp_event(struct hyp_event *event, bool enable)
{
	unsigned short id = event->id;
	int ret = 1;

	if (event != &hyp_event_func && event != &hyp_event_func_ret)
		return 0;

	mutex_lock(&hyp_ftrace_funcs_lock);

	if (enable == *event->enabled)
		goto handled;

	if (enable)
		hyp_ftrace_sync(true, false);

	ret = kvm_call_hyp_nvhe(__pkvm_enable_event, id, enable);
	if (ret) {
		hyp_ftrace_sync(false, false);
		goto handled;
	}

	*event->enabled = enable;

	if (!enable)
		hyp_ftrace_sync(false, false);

handled:
	mutex_unlock(&hyp_ftrace_funcs_lock);

	return ret;
}
#else
static void hyp_ftrace_init_mod(struct pkvm_el2_module *mod) { }
static void hyp_ftrace_init(void) { }
static int enable_func_hyp_event(struct hyp_event *event, bool enable)
{
	return 0;
}
#endif

extern struct hyp_event __hyp_events_start[];
extern struct hyp_event __hyp_events_end[];

/* hyp_event section used by the hypervisor */
extern struct hyp_event_id __hyp_event_ids_start[];
extern struct hyp_event_id __hyp_event_ids_end[];

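/*
 * Enable/disable a hyp event: func and func_ret are handled by
 * enable_func_hyp_event(), anything else is a plain __pkvm_enable_event
 * hypercall.
 */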
static int enable_hyp_event(struct hyp_event *event, bool enable)
{
	unsigned short id = event->id;
	int ret;

	ret = enable_func_hyp_event(event, enable);
	if (ret)
		return ret > 0 ? 0 : ret;

	if (enable == *event->enabled)
		return 0;

	ret = kvm_call_hyp_nvhe(__pkvm_enable_event, id, enable);
	if (ret)
		return ret;

	*event->enabled = enable;

	return 0;
}

static ssize_t
hyp_event_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
{
	struct seq_file *seq_file = (struct seq_file *)filp->private_data;
	struct hyp_event *evt = (struct hyp_event *)seq_file->private;
	bool enabling;
	int ret;
	char c;

	if (!cnt || cnt > 2)
		return -EINVAL;

	if (get_user(c, ubuf))
		return -EFAULT;

	switch (c) {
	case '1':
		enabling = true;
		break;
	case '0':
		enabling = false;
		break;
	default:
		return -EINVAL;
	}

	ret = enable_hyp_event(evt, enabling);
	if (ret)
		return ret;

	return cnt;
}

static int hyp_event_show(struct seq_file *m, void *v)
{
	struct hyp_event *evt = (struct hyp_event *)m->private;

	seq_printf(m, "%d\n", *evt->enabled);

	return 0;
}

static int hyp_event_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, hyp_event_show, inode->i_private);
}

static const struct file_operations hyp_event_fops = {
	.open		= hyp_event_open,
	.write		= hyp_event_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int hyp_event_id_show(struct seq_file *m, void *v)
{
	struct hyp_event *evt = (struct hyp_event *)m->private;

	seq_printf(m, "%d\n", evt->id);

	return 0;
}

static int hyp_event_id_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, hyp_event_id_show, inode->i_private);
}

static const struct file_operations hyp_event_id_fops = {
	.open = hyp_event_id_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int hyp_event_format_show(struct seq_file *m, void *v)
{
	struct hyp_event *evt = (struct hyp_event *)m->private;
	struct trace_event_fields *field;
	unsigned int offset = sizeof(struct hyp_entry_hdr);

	seq_printf(m, "name: %s\n", evt->name);
	seq_printf(m, "ID: %d\n", evt->id);
	seq_puts(m, "format:\n\tfield:unsigned short common_type;\toffset:0;\tsize:2;\tsigned:0;\n");
	seq_puts(m, "\n");

	field = &evt->fields[0];
	while (field->name) {
		seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			  field->type, field->name, offset, field->size,
			  !!field->is_signed);
		offset += field->size;
		field++;
	}

	if (field != &evt->fields[0])
		seq_puts(m, "\n");

	seq_printf(m, "print fmt: %s\n", evt->print_fmt);

	return 0;
}

static int hyp_event_format_open(struct inode *inode, struct file *file)
{
	return single_open(file, hyp_event_format_show, inode->i_private);
}

static const struct file_operations hyp_event_format_fops = {
	.open = hyp_event_format_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static ssize_t hyp_header_page_read(struct file *filp, char __user *ubuf,
				   size_t cnt, loff_t *ppos)
{
	struct buffer_data_page field;
	struct trace_seq *s;
	ssize_t r;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);
	trace_seq_printf(s, "\tfield: u64 timestamp;\t"
			 "offset:0;\tsize:%u;\tsigned:%u;\n",
			 (unsigned int)sizeof(field.time_stamp),
			 (unsigned int)is_signed_type(u64));

	trace_seq_printf(s, "\tfield: local_t commit;\t"
			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
			 (unsigned int)offsetof(typeof(field), commit),
			 (unsigned int)sizeof(field.commit),
			 (unsigned int)is_signed_type(long));

	trace_seq_printf(s, "\tfield: int overwrite;\t"
			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
			 (unsigned int)offsetof(typeof(field), commit),
			 1,
			 (unsigned int)is_signed_type(long));

	trace_seq_printf(s, "\tfield: char data;\t"
			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
			 (unsigned int)offsetof(typeof(field), data),
			 (unsigned int)BUF_PAGE_SIZE,
			 (unsigned int)is_signed_type(char));

	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer,
				    trace_seq_used(s));
	kfree(s);

	return r;
}

static const struct file_operations hyp_header_page_fops = {
	.read = hyp_header_page_read,
	.llseek = default_llseek,
};

static struct dentry *event_tracefs;
static unsigned int last_event_id;

static struct hyp_mod_tables mod_event_tables;

struct hyp_event *__hyp_trace_find_event_name(const char *name,
					      struct hyp_event *start,
					      struct hyp_event *end)
{
	for (; start < end; start++) {
		if (!strncmp(name, start->name, HYP_EVENT_NAME_MAX))
			return start;
	}

	return NULL;
}

struct hyp_event *hyp_trace_find_event_name(const char *name)
{
	struct hyp_table *table;
	struct hyp_event *event =
		__hyp_trace_find_event_name(name, __hyp_events_start,
					    __hyp_events_end);

	if (event)
		return event;

	rcu_read_lock();
	table = rcu_dereference(mod_event_tables.tables);

	for (int i = 0; i < mod_event_tables.nr_tables; i++, table++) {
		struct hyp_event *end = (struct hyp_event *)table->start +
							    table->nr_entries;

		event = __hyp_trace_find_event_name(name, table->start, end);
		if (event)
			break;
	}

	rcu_read_unlock();

	return event;
}

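/*
 * Event ids are allocated linearly: built-in events first, then each module
 * table in registration order.
 */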
struct hyp_event *hyp_trace_find_event(int id)
{
	struct hyp_event *event = __hyp_events_start + id;

	if ((unsigned long)event >= (unsigned long)__hyp_events_end) {
		id -= nr_entries(__hyp_events_start, __hyp_events_end);

		event = hyp_table_entry(&mod_event_tables, sizeof(*event), id);
	}

	return event;
}

static char early_events[COMMAND_LINE_SIZE];

static __init int setup_hyp_event_early(char *str)
{
	strscpy(early_events, str, COMMAND_LINE_SIZE);

	return 1;
}
__setup("hyp_event=", setup_hyp_event_early);

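/*
 * Parse the comma-separated "hyp_event=" list and enable each event found.
 * Returns true if at least one event has been enabled.
 */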
bool hyp_event_early_probe(void)
{
	char *token, *buf = early_events;
	bool enabled = false;

	while (true) {
		token = strsep(&buf, ",");

		if (!token)
			break;

		if (*token) {
			struct hyp_event *event;
			int ret;

			event = hyp_trace_find_event_name(token);
			if (event) {
				ret = enable_hyp_event(event, true);
				if (ret)
					pr_warn("Couldn't enable hyp event %s:%d\n",
						token, ret);
				else
					enabled = true;
			}
		}

		if (buf)
			*(buf - 1) = ',';
	}

	return enabled;
}

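/* Create the per-event enable, id and format files under events/hypervisor/ */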
static void hyp_event_table_init_tracefs(struct hyp_event *event, int nr_events)
{
	struct dentry *event_dir;
	int i;

	if (!event_tracefs)
		return;

	for (i = 0; i < nr_events; event++, i++) {
		event_dir = tracefs_create_dir(event->name, event_tracefs);
		if (!event_dir) {
			pr_err("Failed to create events/hypervisor/%s\n", event->name);
			continue;
		}

		tracefs_create_file("enable", 0700, event_dir, (void *)event,
				    &hyp_event_fops);
		tracefs_create_file("id", 0400, event_dir, (void *)event,
				    &hyp_event_id_fops);
		tracefs_create_file("format", 0400, event_dir, (void *)event,
				    &hyp_event_format_fops);
	}
}

/*
 * Register hyp events and write their id into the hyp section _hyp_event_ids.
 */
static int hyp_event_table_init(struct hyp_event *event,
				struct hyp_event_id *event_id, int nr_events)
{
	while (nr_events--) {
		/*
		 * Both the host and the hypervisor rely on the same hyp event
		 * declarations from kvm_hypevents.h. We have then a 1:1
		 * mapping.
		 */
		event->id = event_id->id = last_event_id++;

		event++;
		event_id++;
	}

	return 0;
}

void hyp_trace_init_event_tracefs(struct dentry *parent)
{
	int nr_events = nr_entries(__hyp_events_start, __hyp_events_end);

#ifdef CONFIG_PKVM_FTRACE
	tracefs_create_file("set_ftrace_filter", 0600, parent, (void *)true,
			    &hyp_ftrace_filter_fops);
	tracefs_create_file("set_ftrace_notrace", 0200, parent, (void *)false,
			    &hyp_ftrace_notrace_fops);
#endif

	parent = tracefs_create_dir("events", parent);
	if (!parent) {
		pr_err("Failed to create tracefs folder for hyp events\n");
		return;
	}

	tracefs_create_file("header_page", 0400, parent, NULL,
			    &hyp_header_page_fops);

	event_tracefs = tracefs_create_dir("hypervisor", parent);
	if (!event_tracefs) {
		pr_err("Failed to create tracefs folder for hyp events\n");
		return;
	}

	hyp_event_table_init_tracefs(__hyp_events_start, nr_events);
}

int hyp_trace_init_events(void)
{
	int nr_events = nr_entries(__hyp_events_start, __hyp_events_end);
	int nr_event_ids = nr_entries(__hyp_event_ids_start, __hyp_event_ids_end);
	int nr_printk_fmts = nr_entries(__hyp_printk_fmts_start, __hyp_printk_fmts_end);
	int ret;

	/* __hyp_printk event only supports U8_MAX different formats */
	WARN_ON(nr_printk_fmts > U8_MAX);

	total_printk_fmts = nr_printk_fmts;

	if (WARN(nr_events != nr_event_ids, "Too many trace_hyp_printk()!"))
		return -EINVAL;

	ret = hyp_event_table_init(__hyp_events_start, __hyp_event_ids_start,
				   nr_events);
	if (ret)
		return ret;

	hyp_ftrace_init();

	return 0;
}

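/*
 * Register the events and printk formats of a newly loaded pKVM module:
 * assign ids, expose them in tracefs and append the module's tables.
 */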
int hyp_trace_init_mod_events(struct pkvm_el2_module *mod)
{
	struct hyp_event_id *event_id = mod->event_ids.start;
	struct hyp_printk_fmt *fmt = mod->hyp_printk_fmts;
	struct hyp_event *event = mod->hyp_events;
	size_t nr_events = mod->nr_hyp_events;
	size_t nr_fmts = mod->nr_hyp_printk_fmts;
	u8 *hyp_printk_fmt_offsets;
	int ret;

	hyp_ftrace_init_mod(mod);

	ret = hyp_event_table_init(event, event_id, nr_events);
	if (ret)
		return ret;

	ret = hyp_table_add(&mod_event_tables, (void *)event, nr_events);
	if (ret)
		return ret;

	hyp_event_table_init_tracefs(event, nr_events);

	if (total_printk_fmts + nr_fmts > U8_MAX) {
		pr_warn("Too many trace_hyp_printk()!");
		return 0;
	}

	if (WARN_ON(nr_fmts && !event_id))
		return 0;

	ret = hyp_table_add(&mod_printk_fmt_tables, (void *)fmt, nr_fmts);
	if (ret) {
		pr_warn("Not enough memory to register trace_hyp_printk()");
		return 0;
	}

	/* format offsets stored after event_ids (see module.lds.S) */
	hyp_printk_fmt_offsets = (u8 *)(event_id + nr_events);
	memset(hyp_printk_fmt_offsets, total_printk_fmts, nr_fmts);

	total_printk_fmts += nr_fmts;

	return 0;
}
932