/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2009, 2010 DSLab, Lanzhou University, China
 * Author: Wu Zhangjin <wuzhangjin@gmail.com>
 *
 * Thanks goes to Steven Rostedt for writing the original x86 version.
 */

#include <linux/uaccess.h>
#include <linux/init.h>
#include <linux/ftrace.h>
#include <linux/syscalls.h>

#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include <asm/syscall.h>
#include <asm/uasm.h>
#include <asm/unistd.h>

#include <asm-generic/sections.h>

#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
#define MCOUNT_OFFSET_INSNS 5
#else
#define MCOUNT_OFFSET_INSNS 4
#endif
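
/*
 * MCOUNT_OFFSET_INSNS is the branch offset, in instructions, used to
 * skip a module's _mcount call site; it matches the "offset = 4/5
 * instructions" labels in the call-site layouts documented below.
 */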

/*
 * Check whether the address is in kernel space.
 *
 * Cloned from core_kernel_text() in kernel/extable.c, but without the
 * init_kernel_text() check, since ftrace does not trace functions in
 * init sections.
 */
static inline int in_kernel_space(unsigned long ip)
{
	if (ip >= (unsigned long)_stext &&
	    ip <= (unsigned long)_etext)
		return 1;
	return 0;
}

#ifdef CONFIG_DYNAMIC_FTRACE

#define JAL 0x0c000000		/* jump & link: ip --> ra, jump to target */
#define ADDR_MASK 0x03ffffff	/* opcode|addr : 31...26 | 25...0 */
#define JUMP_RANGE_MASK ((1UL << 28) - 1)

#define INSN_NOP 0x00000000	/* nop */
#define INSN_JAL(addr)	\
	((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))
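
/*
 * Worked example with a hypothetical target address:
 *   INSN_JAL(0x80123458) == 0x0c000000 | ((0x80123458 >> 2) & 0x03ffffff)
 *                        == 0x0c048d16
 * jal keeps the upper 4 bits of the delay-slot PC, hence the 256 MB
 * JUMP_RANGE_MASK applied to jump targets below.
 */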

static unsigned int insn_jal_ftrace_caller __read_mostly;
static unsigned int insn_lui_v1_hi16_mcount __read_mostly;
static unsigned int insn_j_ftrace_graph_caller __maybe_unused __read_mostly;

static inline void ftrace_dyn_arch_init_insns(void)
{
	u32 *buf;
	unsigned int v1;

	/* lui v1, hi16_mcount */
	v1 = 3;
	buf = (u32 *)&insn_lui_v1_hi16_mcount;
	UASM_i_LA_mostly(&buf, v1, MCOUNT_ADDR);

	/* jal (ftrace_caller + 8), jump over the first two instructions */
	buf = (u32 *)&insn_jal_ftrace_caller;
	uasm_i_jal(&buf, (FTRACE_ADDR + 8) & JUMP_RANGE_MASK);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* j ftrace_graph_caller */
	buf = (u32 *)&insn_j_ftrace_graph_caller;
	uasm_i_j(&buf, (unsigned long)ftrace_graph_caller & JUMP_RANGE_MASK);
#endif
}

static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
{
	int faulted;

	/* *(unsigned int *)ip = new_code; */
	safe_store_code(new_code, ip, faulted);

	if (unlikely(faulted))
		return -EFAULT;

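	/*
	 * Only 4 bytes were patched, but flushing 8 also covers the
	 * following delay-slot instruction.
	 */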
	flush_icache_range(ip, ip + 8);

	return 0;
}

/*
 * The details about the calling site of mcount on MIPS
 *
 * 1. For kernel:
 *
 * move at, ra
 * jal _mcount		--> nop
 *
 * 2. For modules:
 *
 * 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT
 *
 * lui v1, hi_16bit_of_mcount        --> b 1f (0x10000005)
 * addiu v1, v1, low_16bit_of_mcount
 * move at, ra
 * move $12, ra_address
 * jalr v1
 *  sub sp, sp, 8
 *                                  1: offset = 5 instructions
 * 2.2 For the other situations
 *
 * lui v1, hi_16bit_of_mcount        --> b 1f (0x10000004)
 * addiu v1, v1, low_16bit_of_mcount
 * move at, ra
 * jalr v1
 *  nop | move $12, ra_address | sub sp, sp, 8
 *                                  1: offset = 4 instructions
 */

#define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS)
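
/*
 * 0x10000000 is "beq zero, zero", i.e. an unconditional "b"; the low
 * 16 bits hold the signed word offset from the delay slot, so this
 * branches MCOUNT_OFFSET_INSNS instructions past it, to the "1:"
 * label shown in the layouts above.
 */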

int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int new;
	unsigned long ip = rec->ip;

	/*
	 * If ip is in kernel space, a plain nop suffices; otherwise we
	 * must branch over the module's long-call sequence.
	 */
	new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F;

	return ftrace_modify_code(ip, new);
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int new;
	unsigned long ip = rec->ip;

	new = in_kernel_space(ip) ? insn_jal_ftrace_caller :
		insn_lui_v1_hi16_mcount;

	return ftrace_modify_code(ip, new);
}

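/*
 * ftrace_call marks the patch site inside ftrace_caller (a placeholder
 * defined in the arch mcount.S); patching a jal there points every
 * traced function at the new handler.
 */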
#define FTRACE_CALL_IP ((unsigned long)(&ftrace_call))

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned int new;

	new = INSN_JAL((unsigned long)func);

	return ftrace_modify_code(FTRACE_CALL_IP, new);
}

int __init ftrace_dyn_arch_init(void *data)
{
	/* Encode the instructions when booting */
	ftrace_dyn_arch_init_insns();

	/* Remove "b ftrace_stub" to ensure ftrace_caller() is executed */
	ftrace_modify_code(MCOUNT_ADDR, INSN_NOP);

	/* The return code is returned via data */
	*(unsigned long *)data = 0;

	return 0;
}
#endif	/* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

extern void ftrace_graph_call(void);
#define FTRACE_GRAPH_CALL_IP	((unsigned long)(&ftrace_graph_call))

int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_code(FTRACE_GRAPH_CALL_IP,
			insn_j_ftrace_graph_caller);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_code(FTRACE_GRAPH_CALL_IP, INSN_NOP);
}

#endif	/* CONFIG_DYNAMIC_FTRACE */

#ifndef KBUILD_MCOUNT_RA_ADDRESS

#define S_RA_SP	(0xafbf << 16)	/* s{d,w} ra, offset(sp) */
#define S_R_SP	(0xafb0 << 16)	/* s{d,w} R, offset(sp) */
#define OFFSET_MASK	0xffff	/* stack offset range: 0 ~ PT_SIZE */
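
/*
 * Illustration (hypothetical frame) of the prologue pattern scanned
 * backwards below:
 *   27bdffe0  addiu sp, sp, -32    <- not a store: stop, leaf function
 *   afbf001c  sw    ra, 28(sp)     <- matches S_RA_SP: ra spill found
 *   afb00018  sw    s0, 24(sp)     <- matches S_R_SP only: keep going
 * The S_RA_SP/S_R_SP bits are a subset of the 64-bit sd encodings
 * (0xffbf/0xffb0), so the masks match both the sw and sd forms.
 */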

unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
		old_parent_ra, unsigned long parent_ra_addr, unsigned long fp)
{
	unsigned long sp, ip, tmp;
	unsigned int code;
	int faulted;

	/*
	 * Move ip back past the whole mcount call site, to the instruction
	 * just before it: for a module the call site starts with
	 * "lui v1, hi_16bit_of_mcount" (offset 24), for the kernel with
	 * "move at, ra" (offset 16).
	 */
	ip = self_ra - (in_kernel_space(self_ra) ? 16 : 24);

	/*
	 * Search the text backwards until we find either a non-store
	 * instruction or the "s{d,w} ra, offset(sp)" instruction.
	 */
	do {
		/* get the code at "ip": code = *(unsigned int *)ip; */
		safe_load_code(code, ip, faulted);

		if (unlikely(faulted))
			return 0;
		/*
		 * If we hit a non-store instruction before finding where
		 * ra is stored, then this is a leaf function and it does
		 * not store ra on the stack.
		 */
		if ((code & S_R_SP) != S_R_SP)
			return parent_ra_addr;

		/* Move to the previous instruction */
		ip -= 4;
	} while ((code & S_RA_SP) != S_RA_SP);

	sp = fp + (code & OFFSET_MASK);

	/* tmp = *(unsigned long *)sp; */
	safe_load_stack(tmp, sp, faulted);
	if (unlikely(faulted))
		return 0;

	if (tmp == old_parent_ra)
		return sp;
	return 0;
}

#endif	/* !KBUILD_MCOUNT_RA_ADDRESS */

/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info.
 */
void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
			   unsigned long fp)
{
	unsigned long old_parent_ra;
	struct ftrace_graph_ent trace;
	unsigned long return_hooker = (unsigned long)
	    &return_to_handler;
	int faulted, insns;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * "parent_ra_addr" is the stack address where the return address
	 * of the caller of _mcount is saved.
	 *
	 * If gcc < 4.5, a leaf function does not save the return address
	 * on the stack, so we "emulate" one in _mcount's stack space and
	 * hijack it directly. A non-leaf function saves the return address
	 * in its own stack space, so we cannot hijack it directly but must
	 * find the real stack address; ftrace_get_parent_ra_addr() does
	 * that.
	 *
	 * If gcc >= 4.5, with the new -mmcount-ra-address option, the
	 * location of the return address of a non-leaf function is saved
	 * to $12 for us, and for a leaf function a zero is put into $12
	 * instead. This is done in ftrace_graph_caller of mcount.S.
	 */

	/* old_parent_ra = *parent_ra_addr; */
	safe_load_stack(old_parent_ra, parent_ra_addr, faulted);
	if (unlikely(faulted))
		goto out;
#ifndef KBUILD_MCOUNT_RA_ADDRESS
	parent_ra_addr = (unsigned long *)ftrace_get_parent_ra_addr(self_ra,
			old_parent_ra, (unsigned long)parent_ra_addr, fp);
	/*
	 * If getting the stack address of the non-leaf function's ra
	 * fails, stop the function graph tracer and return.
	 */
	if (parent_ra_addr == 0)
		goto out;
#endif
	/* *parent_ra_addr = return_hooker; */
	safe_store_stack(return_hooker, parent_ra_addr, faulted);
	if (unlikely(faulted))
		goto out;

	if (ftrace_push_return_trace(old_parent_ra, self_ra, &trace.depth, fp)
	    == -EBUSY) {
		*parent_ra_addr = old_parent_ra;
		return;
	}

	/*
	 * Get the recorded ip of the current mcount calling site in the
	 * __mcount_loc section, which will be used to filter the function
	 * entries configured through the tracing/set_graph_function interface.
	 */

	insns = in_kernel_space(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
	trace.func = self_ra - (MCOUNT_INSN_SIZE * insns);
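	/*
	 * In kernel space, for example, this is self_ra - 8: the ra left
	 * by "jal _mcount" is the jal address + 8 (delay slot included),
	 * so trace.func points at the jal itself -- the instruction that
	 * references _mcount and is recorded in __mcount_loc.
	 */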

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		current->curr_ret_stack--;
		*parent_ra_addr = old_parent_ra;
	}
	return;
out:
	ftrace_graph_stop();
	WARN_ON(1);
}
#endif	/* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_FTRACE_SYSCALLS

#ifdef CONFIG_32BIT
unsigned long __init arch_syscall_addr(int nr)
{
	return (unsigned long)sys_call_table[nr - __NR_O32_Linux];
}
#endif

#ifdef CONFIG_64BIT

unsigned long __init arch_syscall_addr(int nr)
{
#ifdef CONFIG_MIPS32_N32
	if (nr >= __NR_N32_Linux && nr <= __NR_N32_Linux + __NR_N32_Linux_syscalls)
		return (unsigned long)sysn32_call_table[(nr - __NR_N32_Linux) * 2];
#endif
	if (nr >= __NR_64_Linux && nr <= __NR_64_Linux + __NR_64_Linux_syscalls)
		return (unsigned long)sys_call_table[nr - __NR_64_Linux];
#ifdef CONFIG_MIPS32_O32
	if (nr >= __NR_O32_Linux && nr <= __NR_O32_Linux + __NR_O32_Linux_syscalls)
		return (unsigned long)sys32_call_table[nr - __NR_O32_Linux];
#endif

	return (unsigned long) &sys_ni_syscall;
}
#endif

#endif /* CONFIG_FTRACE_SYSCALLS */