// SPDX-License-Identifier: GPL-2.0-only

/*
 * Copyright (C) 2025 Google LLC
 * Author: Vincent Donnefort <vdonnefort@google.com>
 */

#include <nvhe/mm.h>

#include <asm/kvm_mmu.h>
#include <asm/patching.h>

#include <asm/kvm_hypevents.h>

#define HYP_FTRACE_MAX_OFFSETS	17 /* MAX_MOD_EVENTS + 1 */
#define HYP_FTRACE_MAX_DEPTH	32

extern unsigned long hyp_nr_cpus;

extern void __hyp_ftrace_tramp(void);
extern void __hyp_ftrace_ret_tramp(void);

static unsigned long hyp_kern_offsets[HYP_FTRACE_MAX_OFFSETS];

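/*
 * Translate a hyp address into the matching kernel address, based on the
 * offset registered by hyp_ftrace_setup() for that object (core hyp text or
 * a module). Presumably this is what lets the host symbolize trace records.
 */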
static unsigned long __kern_addr(unsigned long offset_idx, unsigned long addr)
{
	return addr + hyp_kern_offsets[offset_idx];
}

struct hyp_ftrace_stack_frame {
	unsigned long	func;
	unsigned long	ret;
};

struct hyp_ftrace_stack {
	int				idx;
	struct hyp_ftrace_stack_frame	frames[HYP_FTRACE_MAX_DEPTH];
};

static DEFINE_PER_CPU(struct hyp_ftrace_stack, __ftrace_saved_frames);

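/* Mark the per-CPU return stacks empty (idx == -1) on all CPUs */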
static void hyp_ftrace_func_reset(void)
{
	unsigned int cpu;

	for (cpu = 0; cpu < hyp_nr_cpus; cpu++) {
		struct hyp_ftrace_stack *stack;

		stack = per_cpu_ptr(&__ftrace_saved_frames, cpu);
		stack->idx = -1;
	}

	/*
	 * Make sure the stack init is observed by all CPUs before patching the
	 * code. Paired with smp_load_acquire() in hyp_ftrace_func_push().
	 */
	smp_mb();
}

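/*
 * Save (func, original return address) so that the return trampoline can emit
 * the ret event and then branch back to the real caller. Returns false when
 * the stack is full; the caller must then leave the return path untouched.
 */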
static __always_inline bool hyp_ftrace_func_push(unsigned long func, unsigned long ret)
{
	struct hyp_ftrace_stack *stack = this_cpu_ptr(&__ftrace_saved_frames);
	int idx = smp_load_acquire(&stack->idx);

	if (idx >= (HYP_FTRACE_MAX_DEPTH - 1))
		return false;

	idx++;
	stack->frames[idx].func = func;
	stack->frames[idx].ret = ret;
	stack->idx = idx;

	return true;
}

static __always_inline struct hyp_ftrace_stack_frame *hyp_ftrace_func_pop(void)
{
	struct hyp_ftrace_stack *stack = this_cpu_ptr(&__ftrace_saved_frames);

	/*
	 * If in _pop(), then _push() has run on this CPU. No need for more
	 * memory ordering.
	 */

	if (stack->idx < 0)
		return NULL;

	return &stack->frames[stack->idx--];
}

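/*
 * Entry tracer, reached from __hyp_ftrace_tramp (the offset_idx argument
 * presumably arrives in x10, loaded by the movz patched by
 * __get_offset_idx_ins()). Emits the func event and returns the address the
 * traced function will return to: the ret trampoline when a frame could be
 * saved, the original parent otherwise.
 */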
unsigned long __hyp_ftrace_trace(unsigned long ip, unsigned long parent,
				 unsigned long offset_idx)
{
	unsigned long func = __kern_addr(offset_idx, ip);
	unsigned long parent_offset_idx;

	/* When a module is called from core, the parent lies in the core text */
	parent_offset_idx = parent > (unsigned long)__hyp_text_start ? 0 : offset_idx;

	trace_func(func, __kern_addr(parent_offset_idx, parent));

	/* Only install the trampoline if we can revert to the original parent */
	if (hyp_ftrace_func_push(func, parent))
		return (unsigned long)__hyp_ftrace_ret_tramp;

	return parent;
}

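/*
 * Return tracer, reached from __hyp_ftrace_ret_tramp in place of the original
 * caller. Pops the frame saved at entry, emits the ret event and hands the
 * original return address back to the trampoline.
 */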
unsigned long __hyp_ftrace_ret_trace(void)
{
	struct hyp_ftrace_stack_frame *frame = hyp_ftrace_func_pop();

	BUG_ON(!frame);
	trace_func_ret(frame->func);

	return frame->ret;
}

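/*
 * Emit a ret event for every frame still stacked, presumably for when tracing
 * stops while traced functions haven't returned yet.
 */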
void hyp_ftrace_ret_flush(void)
{
	struct hyp_ftrace_stack_frame *frame = hyp_ftrace_func_pop();

	while (frame) {
		trace_func_ret(frame->func);
		frame = hyp_ftrace_func_pop();
	}
}

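/*
 * Generate "movz x10, #offset_idx": bit 31 (sf) selects the 64-bit variant,
 * imm16 occupies bits 20:5 and Rd bits 4:0.
 */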
static int __get_offset_idx_ins(unsigned long *func, unsigned long ip, u32 *insn,
				void *args)
{
	unsigned long idx = (unsigned long)args;
	u32 imm, mask = (BIT(16) - 1) << 5;

	imm = (idx << 5) & mask;

	*insn = aarch64_insn_get_movz_value();
	*insn |= BIT(31);			/* 64-bit variant */
	*insn |= 10;				/* Rd = x10 */
	*insn &= ~mask;
	*insn |= imm;
	*insn = cpu_to_le32(*insn);

	return 0;
}

static int __get_disable_ins(unsigned long *func, unsigned long ip, u32 *insn,
			     void *args)
{
	static u32 nop;

	if (!nop)
		nop = aarch64_insn_gen_nop();

	*insn = cpu_to_le32(nop);

	return 0;
}

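/*
 * Generate "bl <tramp>". BL carries a signed 26-bit instruction offset
 * (imm26, bits 25:0), i.e. a byte range of [-128M, +128M).
 */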
static int __get_enable_ins(unsigned long ip, u32 *insn, void *tramp)
{
	u32 imm, mask;
	long delta;

	delta = (long)tramp - (long)ip;

	if (delta >= SZ_128M || delta < -SZ_128M)
		return -ERANGE;

	mask = BIT(26) - 1;
	imm = (delta >> 2) & mask;

	*insn = aarch64_insn_get_bl_value() & ~(mask);
	*insn |= imm;
	*insn = cpu_to_le32(*insn);

	return 0;
}

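/*
 * funcs_pg entry encoding: bit 0 is the enabled flag, the remaining bits hold
 * the function address. A zero entry, or fewer than 8 bytes left in the page,
 * marks the end of the list.
 */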
#define funcs_pg_enabled(func)	((func) & 0x1)
#define funcs_pg_func(func)	((func) & ~BIT(0))
#define funcs_pg_is_end(func)						      \
({									      \
	(!(*(func)) ||							      \
	 ((PAGE_ALIGN((unsigned long)(func) + 1) - (unsigned long)(func)) <= 8)); \
})

/*
 * During init, the kernel can flag a function that needs to be enabled. This
 * relies on the same encoding as funcs_pg.
 */
#define get_func(func)		funcs_pg_func(func)

static int __get_enable_disable_ins_early(unsigned long *func, unsigned long ip,
					  u32 *insn, void *tramp)
{
	if (funcs_pg_enabled(*func))
		return __get_enable_ins(ip, insn, tramp);

	/* Nothing else to do */
	return 1;
}

struct __ftrace_sync_patch_args {
	void		*tramp;
	unsigned long	offset_idx;
	unsigned long	*funcs_pg;
};

static int
__get_enable_disable_ins_from_funcs_pg(unsigned long *func, unsigned long ip,
				       u32 *insn, void *__args)
{
	struct __ftrace_sync_patch_args *args = __args;
	unsigned long kern_addr;
	static u32 nop;
	u32 cur_insn;
	bool enable;
	int ret = 0;

	if (funcs_pg_is_end(args->funcs_pg))
		return -EAGAIN;

	kern_addr = __kern_addr(args->offset_idx, *func);
	if (get_func(kern_addr) != funcs_pg_func(*args->funcs_pg)) {
		ret = -EINVAL;
		goto end;
	}

	if (!nop)
		nop = aarch64_insn_gen_nop();

	enable = funcs_pg_enabled(*args->funcs_pg);
	cur_insn = *(u32 *)ip;

	/* Is the call site already in the requested state? */
	if ((cur_insn == nop) != enable) {
		ret = -EBUSY;
		goto end;
	}

	if (funcs_pg_enabled(*args->funcs_pg))
		ret = __get_enable_ins(ip, insn, args->tramp);
	else
		*insn = cpu_to_le32(nop);

end:
	args->funcs_pg++;
	return ret;
}

/* Core hyp text converts directly; module text lives in the private range */
phys_addr_t __get_phys(unsigned long addr)
{
	if (addr >= (unsigned long)__hyp_text_start)
		return __hyp_pa(addr);

	return __pkvm_private_range_pa((void *)addr);
}

#define HYP_FTRACE_SKIP_FUNC (-1ULL)

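/*
 * Patch one instruction (at func + func_offset) for each entry of the list.
 * The text is written through the hyp fixmap; as long as consecutive entries
 * land in the same page, the current mapping is simply reused.
 */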
static void hyp_ftrace_patch(unsigned long *funcs, unsigned long *funcs_end,
			     size_t func_offset,
			     int (*get_ins)(unsigned long *func, unsigned long ip,
					    u32 *insn, void *args),
			     void *args)
{
	unsigned long prev_ip = 0;
	void *map = NULL;

	while (funcs < funcs_end) {
		unsigned long ip;
		size_t delta;
		u32 insn;

		if (!*funcs)
			break;

		if (*funcs == HYP_FTRACE_SKIP_FUNC)
			goto next;

		ip = get_func(*funcs) + func_offset;
		delta = ip - prev_ip;

		if (!map) {
			map = hyp_fixmap_map(__get_phys(ip));
		} else if ((unsigned long)(map + delta) >=
			   PAGE_ALIGN((unsigned long)map + 4)) {
			/* Crossing a page boundary: remap */
			hyp_fixmap_unmap();
			map = hyp_fixmap_map(__get_phys(ip));
		} else {
			/* Same page: move within the current fixmap slot */
			map = (void *)PAGE_ALIGN_DOWN((unsigned long)map) +
					      offset_in_page(ip);
		}

		prev_ip = ip;

		if (get_ins(funcs, ip, &insn, args))
			goto next;

		WRITE_ONCE(*(u32 *)map, insn);

		caches_clean_inval_pou((unsigned long)map,
				       (unsigned long)map + AARCH64_INSN_SIZE);
next:
		funcs++;
	}

	if (map)
		hyp_fixmap_unmap();
}

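/*
 * Register the hyp<->kernel offset for this set of functions and patch the
 * patchable entry of each one. Assuming the usual -fpatchable-function-entry
 * layout, each entry ends up as (sketch, offsets relative to the recorded
 * address):
 *
 *	+0:	nop				(untouched)
 *	+4:	movz	x10, #offset_idx	(__get_offset_idx_ins)
 *	+8:	bl	tramp / nop		(__get_enable_disable_ins_early)
 *
 * Returns the allocated offset index, or -ENOMEM when the offset table is
 * full.
 */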
int hyp_ftrace_setup(unsigned long *funcs, unsigned long *funcs_end,
		     unsigned long hyp_kern_offset, void *tramp)
{
	unsigned long idx;

	for (idx = 0; idx < HYP_FTRACE_MAX_OFFSETS; idx++) {
		if (!hyp_kern_offsets[idx])
			break;
	}

	if (idx >= HYP_FTRACE_MAX_OFFSETS)
		return -ENOMEM;

	hyp_kern_offsets[idx] = hyp_kern_offset;

	hyp_ftrace_patch(funcs, funcs_end, AARCH64_INSN_SIZE,
			 __get_offset_idx_ins, (void *)idx);

	hyp_ftrace_patch(funcs, funcs_end, 2 * AARCH64_INSN_SIZE,
			 __get_enable_disable_ins_early, tramp);

	return idx;
}

extern unsigned long __hyp_patchable_function_entries_start[];
extern unsigned long __hyp_patchable_function_entries_end[];

unsigned long __hyp_text_start_kern;

void hyp_ftrace_setup_core(void)
{
	hyp_ftrace_func_reset();

	hyp_ftrace_setup(__hyp_patchable_function_entries_start,
			 __hyp_patchable_function_entries_end,
			 __hyp_text_start_kern - (unsigned long)__hyp_text_start,
			 __hyp_ftrace_tramp);
}

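/*
 * Find the patchable entry matching host_func (kernel address, funcs_pg
 * encoding). A NULL funcs defaults the search to the core hyp section.
 */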
unsigned long *hyp_ftrace_find_host_func(unsigned long host_func,
					 unsigned long *funcs,
					 unsigned long *funcs_end,
					 unsigned long offset_idx)
{
	if (!funcs) {
		funcs = __hyp_patchable_function_entries_start;
		funcs_end = __hyp_patchable_function_entries_end;
		offset_idx = 0;
	}

	while (funcs < funcs_end) {
		unsigned long kern_addr = __kern_addr(offset_idx, *funcs);

		if (get_func(kern_addr) == funcs_pg_func(host_func))
			return funcs;

		funcs++;
	}

	return NULL;
}

/*
 * funcs_pg is the host-donated page containing the list of functions to
 * enable/disable.
 *
 * funcs and funcs_end are the hypervisor-owned ELF sections. For security
 * purposes, funcs_pg is validated against funcs/funcs_end and, for efficiency
 * purposes, funcs_pg is expected to follow the same order as funcs/funcs_end.
 *
 * Returns NULL once the entire funcs_pg has been consumed, or the next
 * funcs_pg entry to process when funcs_end is reached first.
 */
void *hyp_ftrace_sync(unsigned long *funcs_pg, unsigned long *funcs,
		      unsigned long *funcs_end, unsigned long offset_idx,
		      void *tramp)
{
	struct __ftrace_sync_patch_args args = {
		.tramp = tramp ? tramp : (void *)__hyp_ftrace_tramp,
		.offset_idx = funcs ? offset_idx : 0,
		.funcs_pg = funcs_pg,
	};

	if (!funcs)
		funcs = __hyp_patchable_function_entries_start;

	if (!funcs_end)
		funcs_end = __hyp_patchable_function_entries_end;

	hyp_ftrace_patch(funcs, funcs_end, 2 * AARCH64_INSN_SIZE,
			 __get_enable_disable_ins_from_funcs_pg, (void *)&args);

	return funcs_pg_is_end(args.funcs_pg) ? NULL : args.funcs_pg;
}

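/* Patch a nop over the bl/nop slot of every function in the range */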
void hyp_ftrace_disable(unsigned long *funcs, unsigned long *funcs_end)
{
	if (!funcs || !funcs_end) {
		funcs = __hyp_patchable_function_entries_start;
		funcs_end = __hyp_patchable_function_entries_end;
	}

	hyp_ftrace_patch(funcs, funcs_end, 2 * AARCH64_INSN_SIZE,
			 __get_disable_ins, NULL);
}