1 /* SPDX-License-Identifier: GPL-2.0-only */
2 #ifndef __ARM64_KVM_HYP_NVHE_TRACE_H
3 #define __ARM64_KVM_HYP_NVHE_TRACE_H
4 #include <asm/kvm_hyptrace.h>
5 #include <asm/kvm_hypevents_defs.h>
6
7 #ifdef CONFIG_TRACING
/* Reserve @length bytes for one event in the trace buffer; NULL on failure. */
void *tracing_reserve_entry(unsigned long length);
/* Publish the most recently reserved entry to the reader. */
void tracing_commit_entry(void);
/*
 * Register the trace events carried by a pKVM module.
 * NOTE(review): the role of @funcs/@funcs_end/@tramp (presumably the module's
 * ftrace-able function range and patch trampoline) is inferred from the
 * parameter names only — confirm against the definition.
 */
int register_hyp_mod_events(void *event_ids, size_t nr_events,
			    void *funcs, void *funcs_end,
			    void *tramp,
			    unsigned long kern_hyp_offset);
14
/*
 * HYP_EVENT() - declare one hypervisor trace event.
 *
 * Expands to the event's record layout (via HYP_EVENT_FORMAT()), the extern
 * enable flag and event id (defined elsewhere), and a static inline
 * trace_<name>() emitter.  The emitter bails out cheaply when the event is
 * disabled, otherwise reserves a record in the trace buffer, fills the header
 * id and the caller-supplied __assign body, then commits the record.
 */
#define HYP_EVENT(__name, __proto, __struct, __assign, __printk)	\
	HYP_EVENT_FORMAT(__name, __struct);				\
	extern atomic_t __name##_enabled;				\
	extern struct hyp_event_id hyp_event_id_##__name;		\
	static inline void trace_##__name(__proto)			\
	{								\
		size_t length = sizeof(struct trace_hyp_format_##__name); \
		struct trace_hyp_format_##__name *__entry;		\
									\
		if (!atomic_read(&__name##_enabled))			\
			return;						\
		__entry = tracing_reserve_entry(length);		\
		if (!__entry)						\
			return;						\
		__entry->hdr.id = hyp_event_id_##__name.id;		\
		__assign						\
		tracing_commit_entry();					\
	}
33
/*
 * Hypercall handlers driving the hyp tracing machinery from the host.
 * The @mult/@shift/@epoch_* parameters mirror host clocksource state so hyp
 * timestamps stay convertible to host time — presumably updated on clock
 * changes; confirm against the host-side caller.
 */
void __pkvm_update_clock_tracing(u32 mult, u32 shift, u64 epoch_ns, u64 epoch_cyc);
int __pkvm_load_tracing(unsigned long desc_va, size_t desc_size);
void __pkvm_teardown_tracing(void);
int __pkvm_enable_tracing(bool enable);
int __pkvm_reset_tracing(unsigned int cpu);
int __pkvm_swap_reader_tracing(unsigned int cpu);
void __pkvm_panic_tracing(void);
int __pkvm_enable_event(unsigned short id, bool enable);

/* Start of the core-image hyp_printk format table (placed by the linker). */
extern struct hyp_printk_fmt __hyp_printk_fmts_start[];
44
#ifdef MODULE
/*
 * Module format strings live in the module's own .hyp.printk_fmts section.
 * fmt_id_offset rebases the module-local table index into the global one —
 * presumably patched when the module is loaded; TODO confirm against the
 * module loader.
 */
#define hyp_printk_fmt_to_id(fmt)					\
({									\
	static u8 fmt_id_offset __section(".hyp.printk_fmt_offset") __used; \
	(struct hyp_printk_fmt *)fmt - __hyp_printk_fmts_start + fmt_id_offset; \
})
#else
hyp_printk_fmt_to_id(const char * fmt)52 static inline u8 hyp_printk_fmt_to_id(const char *fmt)
53 {
54 return (struct hyp_printk_fmt *)fmt - __hyp_printk_fmts_start;
55 }
56 #endif
57
/*
 * Emit a __hyp_printk event with up to four scalar arguments.  The format
 * string itself is stored once in the .hyp.printk_fmts section; only its
 * table index (see hyp_printk_fmt_to_id()) travels with the event.
 */
#define __trace_hyp_printk(__fmt, a, b, c, d)				\
do {									\
	static struct hyp_printk_fmt __used				\
	__section(".hyp.printk_fmts")					\
	ht_fmt = {							\
		.fmt = __fmt						\
	};								\
	trace___hyp_printk(hyp_printk_fmt_to_id(ht_fmt.fmt), a, b, c, d); \
} while (0)

/* Arity adapters: pad the missing arguments with 0. */
#define __trace_hyp_printk_0(fmt, arg)					\
	__trace_hyp_printk(fmt, 0, 0, 0, 0)
#define __trace_hyp_printk_1(fmt, a)					\
	__trace_hyp_printk(fmt, a, 0, 0, 0)
#define __trace_hyp_printk_2(fmt, a, b)					\
	__trace_hyp_printk(fmt, a, b, 0, 0)
#define __trace_hyp_printk_3(fmt, a, b, c)				\
	__trace_hyp_printk(fmt, a, b, c, 0)
#define __trace_hyp_printk_4(fmt, a, b, c, d)				\
	__trace_hyp_printk(fmt, a, b, c, d)

/*
 * Dispatch on the number of trailing arguments.  trace_hyp_printk() forwards
 * __VA_ARGS__ without ##, so a call with no varargs still passes one empty
 * argument down — which is why the 0-ary variant above accepts and discards
 * a dummy "arg".
 */
#define __trace_hyp_printk_N(fmt, ...) \
	CONCATENATE(__trace_hyp_printk_, COUNT_ARGS(__VA_ARGS__))(fmt, ##__VA_ARGS__)

#define trace_hyp_printk(fmt, ...) \
	__trace_hyp_printk_N(fmt, __VA_ARGS__)
84
#ifdef CONFIG_PKVM_FTRACE
/* One-time initialisation of the hyp ftrace core. */
void hyp_ftrace_setup_core(void);
/* Look up a host function address in the [funcs, funcs_end) table. */
unsigned long *hyp_ftrace_find_host_func(unsigned long host_func,
					 unsigned long *funcs,
					 unsigned long *funcs_end,
					 unsigned long offset_idx);
void *hyp_ftrace_sync(unsigned long *func_pg, unsigned long *funcs,
		      unsigned long *funcs_end, unsigned long offset_idx,
		      void *tramp);
int hyp_ftrace_setup(unsigned long *funcs, unsigned long *funcs_end,
		     unsigned long hyp_kern_offset, void *tramp);
void hyp_ftrace_ret_flush(void);
void hyp_ftrace_disable(unsigned long *funcs, unsigned long *funcs_end);
/* Hypercall handlers. */
int __pkvm_sync_ftrace(unsigned long host_func_pg);
int __pkvm_disable_ftrace(void);
#else
static inline void hyp_ftrace_setup_core(void) { }
static inline void hyp_ftrace_ret_flush(void) { }
static inline int hyp_ftrace_setup(unsigned long *funcs, unsigned long *funcs_end,
				   unsigned long hyp_kern_offset, void *tramp) { return 0; }
/*
 * NOTE(review): this branch stubs hyp_ftrace_enable(), but the
 * CONFIG_PKVM_FTRACE branch above declares hyp_ftrace_disable() with a
 * different signature and no hyp_ftrace_enable(); nor is there a
 * hyp_ftrace_disable() stub here.  One side looks like a leftover from a
 * rename — confirm against the callers and definitions.
 */
static inline void hyp_ftrace_enable(unsigned long *funcs, unsigned long *funcs_end,
				     bool enable, void *tramp) { }
static inline int __pkvm_sync_ftrace(unsigned long host_func_pg) { return -EOPNOTSUPP; }
static inline int __pkvm_disable_ftrace(void) { return -EOPNOTSUPP; }
#endif /* CONFIG_PKVM_FTRACE */
110 #else /* CONFIG_TRACING */
/* Tracing compiled out: every entry point becomes a cheap no-op stub. */
static inline int
register_hyp_mod_events(void *event_ids, size_t nr_events, void *funcs, void *funcs_end,
			void *tramp, unsigned long kern_hyp_offset) { return 0; }
static inline void *tracing_reserve_entry(unsigned long length) { return NULL; }
static inline void tracing_commit_entry(void) { }
/*
 * NOTE(review): no register_hyp_event_ids() declaration exists in the
 * CONFIG_TRACING branch of this header — possibly a stale stub left over
 * from the register_hyp_mod_events() rename; confirm.
 */
static inline int register_hyp_event_ids(void *event_ids, size_t nr_events)
{
	return -ENODEV;
}

/* Events collapse to empty inline functions so call sites still type-check. */
#define HYP_EVENT(__name, __proto, __struct, __assign, __printk)	\
	static inline void trace_##__name(__proto) {}

static inline
void __pkvm_update_clock_tracing(u32 mult, u32 shift, u64 epoch_ns, u64 epoch_cyc) { }
static inline int __pkvm_load_tracing(unsigned long desc_va, size_t desc_size) { return -ENODEV; }
static inline void __pkvm_teardown_tracing(void) { }
static inline int __pkvm_enable_tracing(bool enable) { return -ENODEV; }
static inline int __pkvm_reset_tracing(unsigned int cpu) { return -ENODEV; }
static inline int __pkvm_swap_reader_tracing(unsigned int cpu) { return -ENODEV; }
static inline void __pkvm_panic_tracing(void) { }
static inline int __pkvm_enable_event(unsigned short id, bool enable) { return -ENODEV; }
/* hyp printk compiles away entirely. */
#define trace_hyp_printk(fmt, ...)

static inline void hyp_ftrace_setup_core(void) { }
static inline void hyp_ftrace_ret_flush(void) { }
static inline int __pkvm_sync_ftrace(unsigned long host_func_pg) { return -EOPNOTSUPP; }
static inline int __pkvm_disable_ftrace(void) { return -EOPNOTSUPP; }
139 #endif
140 #endif
141