/*
 * Dynamic function tracing support.
 *
 * Copyright (C) 2008 Abhishek Sagar <sagar.abhishek@gmail.com>
 * Copyright (C) 2010 Rabin Vincent <rabin@rab.in>
 *
 * For licensing details, see COPYING.
 *
 * Defines low-level handling of mcount calls when the kernel
 * is compiled with the -pg flag. When using dynamic ftrace, the
 * mcount call-sites are patched with NOPs until they are enabled.
 * All code mutation routines here are called under stop_machine().
 */

#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/opcodes.h>
#include <asm/ftrace.h>

#include "insn.h"

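/*
 * The "NOP" used to disable an mcount call site. With the EABI
 * __gnu_mcount_nc calling convention the compiler pushes lr immediately
 * before the call, so a disabled site must pop it again to keep the
 * stack balanced; a plain architectural NOP would not be enough.
 */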
#ifdef CONFIG_THUMB2_KERNEL
#define	NOP		0xf85deb04	/* pop.w {lr} */
#else
#define	NOP		0xe8bd4000	/* pop {lr} */
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
#ifdef CONFIG_OLD_MCOUNT
#define	OLD_MCOUNT_ADDR	((unsigned long) mcount)
#define	OLD_FTRACE_ADDR	((unsigned long) ftrace_caller_old)

#define	OLD_NOP		0xe1a00000	/* mov r0, r0 */

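/*
 * Call sites built against the old mcount ABI use a different profiler
 * entry point and a plain "mov r0, r0" NOP. These helpers select the
 * old-ABI replacement instruction and redirect MCOUNT_ADDR/FTRACE_ADDR
 * to their old-ABI counterparts for such records.
 */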
static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
{
	return rec->arch.old_mcount ? OLD_NOP : NOP;
}

static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
{
	if (!rec->arch.old_mcount)
		return addr;

	if (addr == MCOUNT_ADDR)
		addr = OLD_MCOUNT_ADDR;
	else if (addr == FTRACE_ADDR)
		addr = OLD_FTRACE_ADDR;

	return addr;
}
#else
static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
{
	return NOP;
}

static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
{
	return addr;
}
#endif

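/*
 * Kernel and module text may be mapped read-only; make it writable for
 * the duration of a patching run and restore the protection afterwards.
 */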
int ftrace_arch_code_modify_prepare(void)
{
	set_kernel_text_rw();
	set_all_modules_text_rw();
	return 0;
}

int ftrace_arch_code_modify_post_process(void)
{
	set_all_modules_text_ro();
	set_kernel_text_ro();
	return 0;
}

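/* Encode a branch-and-link from @pc to @addr (the enabled call site). */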
static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
{
	return arm_gen_branch_link(pc, addr);
}

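/*
 * Patch a single instruction at @pc. The opcodes are converted to their
 * in-memory representation first; if @validate is set, the instruction
 * currently in memory must match @old before it is replaced by @new.
 * The icache is flushed so the new instruction is seen on execution.
 */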
static int ftrace_modify_code(unsigned long pc, unsigned long old,
			      unsigned long new, bool validate)
{
	unsigned long replaced;

	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		old = __opcode_to_mem_thumb32(old);
		new = __opcode_to_mem_thumb32(new);
	} else {
		old = __opcode_to_mem_arm(old);
		new = __opcode_to_mem_arm(new);
	}

	if (validate) {
		if (probe_kernel_read(&replaced, (void *)pc, MCOUNT_INSN_SIZE))
			return -EFAULT;

		if (replaced != old)
			return -EINVAL;
	}

	if (probe_kernel_write((void *)pc, &new, MCOUNT_INSN_SIZE))
		return -EPERM;

	flush_icache_range(pc, pc + MCOUNT_INSN_SIZE);

	return 0;
}

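/*
 * Point the call inside ftrace_caller (and, with CONFIG_OLD_MCOUNT, the
 * one inside ftrace_caller_old) at the newly registered tracer @func.
 */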
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long pc;
	unsigned long new;
	int ret;

	pc = (unsigned long)&ftrace_call;
	new = ftrace_call_replace(pc, (unsigned long)func);

	ret = ftrace_modify_code(pc, 0, new, false);

#ifdef CONFIG_OLD_MCOUNT
	if (!ret) {
		pc = (unsigned long)&ftrace_call_old;
		new = ftrace_call_replace(pc, (unsigned long)func);

		ret = ftrace_modify_code(pc, 0, new, false);
	}
#endif

	return ret;
}

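/* Enable a call site: replace the NOP at rec->ip with a branch to @addr. */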
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long new, old;
	unsigned long ip = rec->ip;

	old = ftrace_nop_replace(rec);
	new = ftrace_call_replace(ip, adjust_address(rec, addr));

	return ftrace_modify_code(rec->ip, old, new, true);
}

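/*
 * Disable a call site: replace the branch at rec->ip with a NOP. If the
 * expected new-ABI branch is not found, retry assuming the site was
 * built against the old mcount ABI and remember that in rec->arch.
 */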
int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned long old;
	unsigned long new;
	int ret;

	old = ftrace_call_replace(ip, adjust_address(rec, addr));
	new = ftrace_nop_replace(rec);
	ret = ftrace_modify_code(ip, old, new, true);

#ifdef CONFIG_OLD_MCOUNT
	if (ret == -EINVAL && addr == MCOUNT_ADDR) {
		rec->arch.old_mcount = true;

		old = ftrace_call_replace(ip, adjust_address(rec, addr));
		new = ftrace_nop_replace(rec);
		ret = ftrace_modify_code(ip, old, new, true);
	}
#endif

	return ret;
}

int __init ftrace_dyn_arch_init(void *data)
{
	*(unsigned long *)data = 0;

	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
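/*
 * Hook the return path of the instrumented function: save the original
 * return address, substitute return_to_handler, and push a return-trace
 * entry. If the graph entry is rejected or the push fails (-EBUSY), the
 * original return address is restored and nothing is traced.
 */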
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long) &return_to_handler;
	struct ftrace_graph_ent trace;
	unsigned long old;
	int err;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	old = *parent;
	*parent = return_hooker;

	trace.func = self_addr;
	trace.depth = current->curr_ret_stack + 1;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		*parent = old;
		return;
	}

	err = ftrace_push_return_trace(old, self_addr, &trace.depth,
				       frame_pointer);
	if (err == -EBUSY) {
		*parent = old;
		return;
	}
}

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_graph_call;
extern unsigned long ftrace_graph_call_old;
extern void ftrace_graph_caller_old(void);

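/*
 * Flip the instruction at @callsite between a NOP ("mov r0, r0") and a
 * plain branch to @func, validating the expected current instruction.
 */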
static int __ftrace_modify_caller(unsigned long *callsite,
				  void (*func) (void), bool enable)
{
	unsigned long caller_fn = (unsigned long) func;
	unsigned long pc = (unsigned long) callsite;
	unsigned long branch = arm_gen_branch(pc, caller_fn);
	unsigned long nop = 0xe1a00000;	/* mov r0, r0 */
	unsigned long old = enable ? nop : branch;
	unsigned long new = enable ? branch : nop;

	return ftrace_modify_code(pc, old, new, true);
}

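/*
 * Enable or disable the branch to ftrace_graph_caller (and its old-ABI
 * counterpart when CONFIG_OLD_MCOUNT is set) inside the ftrace trampolines.
 */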
static int ftrace_modify_graph_caller(bool enable)
{
	int ret;

	ret = __ftrace_modify_caller(&ftrace_graph_call,
				     ftrace_graph_caller,
				     enable);

#ifdef CONFIG_OLD_MCOUNT
	if (!ret)
		ret = __ftrace_modify_caller(&ftrace_graph_call_old,
					     ftrace_graph_caller_old,
					     enable);
#endif

	return ret;
}

int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(false);
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */