// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm64/kernel/ftrace.c
 *
 * Copyright (C) 2013 Linaro Limited
 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
 */

#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/swab.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/ftrace.h>
#include <asm/insn.h>

#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Replace a single instruction, which may be a branch or NOP.
 * If @validate == true, a replaced instruction is checked against 'old'.
 */
static int ftrace_modify_code(unsigned long pc, u32 old, u32 new,
			      bool validate)
{
	u32 replaced;

	/*
	 * Note:
	 * We are paranoid about modifying text, as if a bug were to happen, it
	 * could cause us to read or write to someplace that could cause harm.
	 * Carefully read and modify the code with aarch64_insn_*() which uses
	 * probe_kernel_*(), and make sure what we read is what we expected it
	 * to be before modifying it.
	 */
	if (validate) {
		if (aarch64_insn_read((void *)pc, &replaced))
			return -EFAULT;

		if (replaced != old)
			return -EINVAL;
	}
	if (aarch64_insn_patch_text_nosync((void *)pc, new))
		return -EPERM;

	return 0;
}

/*
 * Replace tracer function in ftrace_caller()
 */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long pc;
	u32 new;

	pc = (unsigned long)&ftrace_call;
	new = aarch64_insn_gen_branch_imm(pc, (unsigned long)func,
					  AARCH64_INSN_BRANCH_LINK);

	return ftrace_modify_code(pc, 0, new, false);
}

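/*
 * Look up the module's ftrace trampoline PLT entry for @addr. Returns NULL
 * when @addr has no dedicated PLT entry, or when CONFIG_ARM64_MODULE_PLTS
 * is not enabled.
 */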
static struct plt_entry *get_ftrace_plt(struct module *mod, unsigned long addr)
{
#ifdef CONFIG_ARM64_MODULE_PLTS
	struct plt_entry *plt = mod->arch.ftrace_trampolines;

	if (addr == FTRACE_ADDR)
		return &plt[FTRACE_PLT_IDX];
	if (addr == FTRACE_REGS_ADDR &&
	    IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
		return &plt[FTRACE_REGS_PLT_IDX];
#endif
	return NULL;
}

/*
 * Find the address the callsite must branch to in order to reach '*addr'.
 *
 * Due to the limited range of 'BL' instructions, modules may be placed too far
 * away to branch directly and must use a PLT.
 *
 * Returns true when '*addr' contains a reachable target address, or has been
 * modified to contain a PLT address. Returns false otherwise.
 */
static bool ftrace_find_callable_addr(struct dyn_ftrace *rec,
				      struct module *mod,
				      unsigned long *addr)
{
	unsigned long pc = rec->ip;
	long offset = (long)*addr - (long)pc;
	struct plt_entry *plt;

	/*
	 * When the target is within range of the 'BL' instruction, use 'addr'
	 * as-is and branch to that directly.
	 */
	if (offset >= -SZ_128M && offset < SZ_128M)
		return true;

	/*
	 * When the target is outside of the range of a 'BL' instruction, we
	 * must use a PLT to reach it. We can only place PLTs for modules, and
	 * only when module PLT support is built-in.
	 */
	if (!IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
		return false;

	/*
	 * 'mod' is only set at module load time, but if we end up
	 * dealing with an out-of-range condition, we can assume it
	 * is due to a module being loaded far away from the kernel.
	 *
	 * NOTE: __module_text_address() must be called with preemption
	 * disabled, but we can rely on ftrace_lock to ensure that 'mod'
	 * retains its validity throughout the remainder of this code.
	 */
	if (!mod) {
		preempt_disable();
		mod = __module_text_address(pc);
		preempt_enable();
	}

	if (WARN_ON(!mod))
		return false;

	plt = get_ftrace_plt(mod, *addr);
	if (!plt) {
		pr_err("ftrace: no module PLT for %ps\n", (void *)*addr);
		return false;
	}

	*addr = (unsigned long)plt;
	return true;
}

/*
 * Turn on the call to ftrace_caller() in an instrumented function
 */
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long pc = rec->ip;
	u32 old, new;

	if (!ftrace_find_callable_addr(rec, NULL, &addr))
		return -EINVAL;

	old = aarch64_insn_gen_nop();
	new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);

	return ftrace_modify_code(pc, old, new, true);
}

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
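/*
 * Retarget the call at an instrumented callsite, replacing the branch to
 * 'old_addr' with a branch to 'addr'.
 */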
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
			unsigned long addr)
{
	unsigned long pc = rec->ip;
	u32 old, new;

	if (!ftrace_find_callable_addr(rec, NULL, &old_addr))
		return -EINVAL;
	if (!ftrace_find_callable_addr(rec, NULL, &addr))
		return -EINVAL;

	old = aarch64_insn_gen_branch_imm(pc, old_addr,
					  AARCH64_INSN_BRANCH_LINK);
	new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);

	return ftrace_modify_code(pc, old, new, true);
}

/*
 * The compiler has inserted two NOPs before the regular function prologue.
 * All instrumented functions follow the AAPCS, so x0-x8 and x19-x30 are live,
 * and x9-x18 are free for our use.
 *
 * At runtime we want to be able to swing a single NOP <-> BL to enable or
 * disable the ftrace call. The BL requires us to save the original LR value,
 * so here we insert a <MOV X9, LR> over the first NOP so the instructions
 * before the regular prologue are:
 *
 * | Compiled | Disabled   | Enabled    |
 * +----------+------------+------------+
 * | NOP      | MOV X9, LR | MOV X9, LR |
 * | NOP      | NOP        | BL <entry> |
 *
 * The LR value will be recovered by ftrace_regs_entry, and restored into LR
 * before returning to the regular function prologue. When a function is not
 * being traced, the MOV is not harmful given x9 is not live per the AAPCS.
 *
 * Note: ftrace_process_locs() has pre-adjusted rec->ip to be the address of
 * the BL.
 */
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
	unsigned long pc = rec->ip - AARCH64_INSN_SIZE;
	u32 old, new;

	old = aarch64_insn_gen_nop();
	new = aarch64_insn_gen_move_reg(AARCH64_INSN_REG_9,
					AARCH64_INSN_REG_LR,
					AARCH64_INSN_VARIANT_64BIT);
	return ftrace_modify_code(pc, old, new, true);
}
#endif

/*
 * Turn off the call to ftrace_caller() in an instrumented function
 */
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	unsigned long pc = rec->ip;
	u32 old = 0, new;

	new = aarch64_insn_gen_nop();

	/*
	 * When using mcount, callsites in modules may have been initialized to
	 * call an arbitrary module PLT (which redirects to the _mcount stub)
	 * rather than the ftrace PLT we'll use at runtime (which redirects to
	 * the ftrace trampoline). We can ignore the old PLT when initializing
	 * the callsite.
	 *
	 * Note: 'mod' is only set at module load time.
	 */
	if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) &&
	    IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) && mod) {
		return aarch64_insn_patch_text_nosync((void *)pc, new);
	}

	if (!ftrace_find_callable_addr(rec, mod, &addr))
		return -EINVAL;

	old = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);

	return ftrace_modify_code(pc, old, new, true);
}

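/*
 * Batch updates go through the common ftrace_modify_all_code() path; pass
 * FTRACE_MAY_SLEEP so the core code knows text patching here may sleep.
 */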
void arch_ftrace_update_code(int command)
{
	command |= FTRACE_MAY_SLEEP;
	ftrace_modify_all_code(command);
}

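/* No arch-specific state needs to be set up for dynamic ftrace. */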
int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * The function_graph tracer expects ftrace_return_to_handler() to be called
 * on the way back to the parent. For this purpose, this function is called
 * from _mcount() or ftrace_caller() to replace the return address (*parent)
 * on the call stack with return_to_handler.
 *
 * Note that @frame_pointer is used only for a sanity check later.
 */
void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;
	unsigned long old;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * Note:
	 * No protection against faulting at *parent, which may be seen
	 * on other archs. It's unlikely on AArch64.
	 */
	old = *parent;

	if (!function_graph_enter(old, self_addr, frame_pointer, NULL))
		*parent = return_hooker;
}

#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Turn on/off the call to ftrace_graph_caller() in ftrace_caller()
 * depending on @enable.
 */
static int ftrace_modify_graph_caller(bool enable)
{
	unsigned long pc = (unsigned long)&ftrace_graph_call;
	u32 branch, nop;

	branch = aarch64_insn_gen_branch_imm(pc,
					     (unsigned long)ftrace_graph_caller,
					     AARCH64_INSN_BRANCH_NOLINK);
	nop = aarch64_insn_gen_nop();

	if (enable)
		return ftrace_modify_code(pc, nop, branch, true);
	else
		return ftrace_modify_code(pc, branch, nop, true);
}

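/*
 * Entry points used by the core function_graph code to toggle the call to
 * ftrace_graph_caller().
 */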
int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(false);
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */