/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks goes out to P.A. Semi, Inc for supplying me with a PPC64 box.
 *
 * Added function graph tracer code, taken from x86 that was written
 * by Frederic Weisbecker, and ported to PPC by Steven Rostedt.
 *
 */

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/list.h>

#include <asm/cacheflush.h>
#include <asm/code-patching.h>
#include <asm/ftrace.h>
#include <asm/syscall.h>


#ifdef CONFIG_DYNAMIC_FTRACE
static unsigned int
ftrace_call_replace(unsigned long ip, unsigned long addr, int link)
{
        unsigned int op;

        addr = ppc_function_entry((void *)addr);

        /* if (link) set op to 'bl' else 'b' */
        op = create_branch((unsigned int *)ip, addr, link ? 1 : 0);

        return op;
}

static int
ftrace_modify_code(unsigned long ip, unsigned int old, unsigned int new)
{
        unsigned int replaced;

        /*
         * Note: Due to modules and __init, code can disappear and
         * change; we need to protect against faulting as well as code
         * changing.  We do this by using the probe_kernel_* functions.
         *
         * No real locking needed, this code is run through
         * kstop_machine, or before SMP starts.
         */

        /* read the text we want to modify */
        if (probe_kernel_read(&replaced, (void *)ip, MCOUNT_INSN_SIZE))
                return -EFAULT;

        /* Make sure it is what we expect it to be */
        if (replaced != old)
                return -EINVAL;

        /* replace the text with the new text */
        if (probe_kernel_write((void *)ip, &new, MCOUNT_INSN_SIZE))
                return -EPERM;

        flush_icache_range(ip, ip + 8);

        return 0;
}

/*
 * Helper functions that are the same for both PPC64 and PPC32.
 */
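/*
 * create_branch() returns 0 when the target cannot be encoded as a
 * 24-bit relative branch, so a non-zero result here means the address
 * is reachable from the call site without going through a trampoline.
 */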
static int test_24bit_addr(unsigned long ip, unsigned long addr)
{

        /* use the create_branch to verify that this offset can be branched */
        return create_branch((unsigned int *)ip, addr, 0);
}

#ifdef CONFIG_MODULES

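/*
 * "bl" is primary opcode 18 (0x48000000) with the LK bit set and the
 * AA bit clear; the mask keeps only the opcode and the AA/LK bits.
 */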
static int is_bl_op(unsigned int op)
{
        return (op & 0xfc000003) == 0x48000001;
}

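/*
 * Recover the target of a b/bl instruction: the displacement field
 * (mask 0x03fffffc) is a signed, word-aligned offset relative to the
 * instruction address.
 */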
static unsigned long find_bl_target(unsigned long ip, unsigned int op)
{
        static int offset;

        offset = (op & 0x03fffffc);
        /* make it signed */
        if (offset & 0x02000000)
                offset |= 0xfe000000;

        return ip + (long)offset;
}

#ifdef CONFIG_PPC64
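/*
 * Disable the mcount call site in a module.  On PPC64 the call is made
 * through a module trampoline, so first verify that the trampoline
 * really leads to 'addr' before patching the site out.
 */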
static int
__ftrace_make_nop(struct module *mod,
                  struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned int op;
        unsigned int jmp[5];
        unsigned long ptr;
        unsigned long ip = rec->ip;
        unsigned long tramp;
        int offset;

        /* read where this goes */
        if (probe_kernel_read(&op, (void *)ip, sizeof(int)))
                return -EFAULT;

        /* Make sure that this is still a 24-bit jump */
        if (!is_bl_op(op)) {
                printk(KERN_ERR "Not expected bl: opcode is %x\n", op);
                return -EINVAL;
        }

        /* let's find where the pointer goes */
        tramp = find_bl_target(ip, op);

        /*
         * On PPC64 the trampoline looks like:
         *  0x3d, 0x82, 0x00, 0x00,   addis   r12,r2, <high>
         *  0x39, 0x8c, 0x00, 0x00,   addi    r12,r12, <low>
         *   Where the bytes 2, 3, 6 and 7 make up the 32-bit offset
         *   into the TOC that holds the address to jump to.
         *  0xf8, 0x41, 0x00, 0x28,   std     r2,40(r1)
         *  0xe9, 0x6c, 0x00, 0x20,   ld      r11,32(r12)
         *   The actual address is 32 bytes from that offset
         *   into the TOC.
         *  0xe8, 0x4c, 0x00, 0x28,   ld      r2,40(r12)
         */

        pr_devel("ip:%lx jumps to %lx r2: %lx", ip, tramp, mod->arch.toc);

        /* Find where the trampoline jumps to */
        if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) {
                printk(KERN_ERR "Failed to read %lx\n", tramp);
                return -EFAULT;
        }

        pr_devel(" %08x %08x", jmp[0], jmp[1]);

        /* verify that this is what we expect it to be */
        if (((jmp[0] & 0xffff0000) != 0x3d820000) ||
            ((jmp[1] & 0xffff0000) != 0x398c0000) ||
            (jmp[2] != 0xf8410028) ||
            (jmp[3] != 0xe96c0020) ||
            (jmp[4] != 0xe84c0028)) {
                printk(KERN_ERR "Not a trampoline\n");
                return -EINVAL;
        }

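        /*
         * The high half of the offset comes from the addis immediate
         * and the low half from the addi immediate; combined they give
         * the displacement into the module's TOC.
         */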
        /* The bottom half is sign extended */
        offset = ((unsigned)((unsigned short)jmp[0]) << 16) +
                (int)((short)jmp[1]);

        pr_devel(" %x ", offset);

        /* get the address this jumps to */
        tramp = mod->arch.toc + offset + 32;
        pr_devel("toc: %lx", tramp);

        if (probe_kernel_read(jmp, (void *)tramp, 8)) {
                printk(KERN_ERR "Failed to read %lx\n", tramp);
                return -EFAULT;
        }

        pr_devel(" %08x %08x\n", jmp[0], jmp[1]);

        ptr = ((unsigned long)jmp[0] << 32) + jmp[1];

        /* This should match what was called */
        if (ptr != ppc_function_entry((void *)addr)) {
                printk(KERN_ERR "addr does not match %lx\n", ptr);
                return -EINVAL;
        }

        /*
         * We want to nop the line, but the next line is
         *  0xe8, 0x41, 0x00, 0x28   ld r2,40(r1)
         * This needs to be turned into a nop too.
         */
        if (probe_kernel_read(&op, (void *)(ip+4), MCOUNT_INSN_SIZE))
                return -EFAULT;

        if (op != 0xe8410028) {
                printk(KERN_ERR "Next line is not ld! (%08x)\n", op);
                return -EINVAL;
        }

        /*
         * Milton Miller pointed out that we cannot simply nop both
         * instructions: if a task was preempted while calling the trace
         * function, the nops would remove the instruction that restores
         * the TOC in r2, leaving r2 corrupted.
         */

        /*
         * Replace:
         *      bl <tramp>  <==== will be replaced with "b 1f"
         *      ld r2,40(r1)
         *      1:
         */
        op = 0x48000008;        /* b +8 */

        if (probe_kernel_write((void *)ip, &op, MCOUNT_INSN_SIZE))
                return -EPERM;


        flush_icache_range(ip, ip + 8);

        return 0;
}

#else /* !PPC64 */
static int
__ftrace_make_nop(struct module *mod,
                  struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned int op;
        unsigned int jmp[4];
        unsigned long ip = rec->ip;
        unsigned long tramp;

        if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE))
                return -EFAULT;

        /* Make sure that this is still a 24-bit jump */
        if (!is_bl_op(op)) {
                printk(KERN_ERR "Not expected bl: opcode is %x\n", op);
                return -EINVAL;
        }

        /* let's find where the pointer goes */
        tramp = find_bl_target(ip, op);

        /*
         * On PPC32 the trampoline looks like:
         *  0x3d, 0x80, 0x00, 0x00  lis   r12,sym@ha
         *  0x39, 0x8c, 0x00, 0x00  addi  r12,r12,sym@l
         *  0x7d, 0x89, 0x03, 0xa6  mtctr r12
         *  0x4e, 0x80, 0x04, 0x20  bctr
         */

        pr_devel("ip:%lx jumps to %lx", ip, tramp);

        /* Find where the trampoline jumps to */
        if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) {
                printk(KERN_ERR "Failed to read %lx\n", tramp);
                return -EFAULT;
        }

        pr_devel(" %08x %08x ", jmp[0], jmp[1]);

        /* verify that this is what we expect it to be */
        if (((jmp[0] & 0xffff0000) != 0x3d800000) ||
            ((jmp[1] & 0xffff0000) != 0x398c0000) ||
            (jmp[2] != 0x7d8903a6) ||
            (jmp[3] != 0x4e800420)) {
                printk(KERN_ERR "Not a trampoline\n");
                return -EINVAL;
        }

        tramp = (jmp[1] & 0xffff) |
                ((jmp[0] & 0xffff) << 16);
        if (tramp & 0x8000)
                tramp -= 0x10000;

        pr_devel(" %lx ", tramp);

        if (tramp != addr) {
                printk(KERN_ERR
                       "Trampoline location %08lx does not match addr\n",
                       tramp);
                return -EINVAL;
        }

        op = PPC_INST_NOP;

        if (probe_kernel_write((void *)ip, &op, MCOUNT_INSN_SIZE))
                return -EPERM;

        flush_icache_range(ip, ip + 8);

        return 0;
}
#endif /* PPC64 */
#endif /* CONFIG_MODULES */

int ftrace_make_nop(struct module *mod,
                    struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned long ip = rec->ip;
        unsigned int old, new;

        /*
         * If the calling address is more than 24 bits away,
         * then we had to use a trampoline to make the call.
         * Otherwise just update the call site.
         */
        if (test_24bit_addr(ip, addr)) {
                /* within range */
                old = ftrace_call_replace(ip, addr, 1);
                new = PPC_INST_NOP;
                return ftrace_modify_code(ip, old, new);
        }

#ifdef CONFIG_MODULES
        /*
         * Out of range jumps are called from modules.
         * We should either already have a pointer to the module
         * or it has been passed in.
         */
        if (!rec->arch.mod) {
                if (!mod) {
                        printk(KERN_ERR "No module loaded addr=%lx\n",
                               addr);
                        return -EFAULT;
                }
                rec->arch.mod = mod;
        } else if (mod) {
                if (mod != rec->arch.mod) {
                        printk(KERN_ERR
                               "Record mod %p not equal to passed in mod %p\n",
                               rec->arch.mod, mod);
                        return -EINVAL;
                }
                /* nothing to do if mod == rec->arch.mod */
        } else
                mod = rec->arch.mod;

        return __ftrace_make_nop(mod, rec, addr);
#else
        /* We should not get here without modules */
        return -EINVAL;
#endif /* CONFIG_MODULES */
}

#ifdef CONFIG_MODULES
#ifdef CONFIG_PPC64
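/*
 * Re-enable the mcount call site in a module: write a "bl" to the
 * module's ftrace trampoline, followed by the "ld r2,40(r1)" TOC
 * restore that the "b +8" in the nop'd form was skipping over.
 */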
static int
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned int op[2];
        unsigned long ip = rec->ip;

        /* read where this goes */
        if (probe_kernel_read(op, (void *)ip, MCOUNT_INSN_SIZE * 2))
                return -EFAULT;

        /*
         * It should be pointing to two nops or
         *  b +8; ld r2,40(r1)
         */
        if (((op[0] != 0x48000008) || (op[1] != 0xe8410028)) &&
            ((op[0] != PPC_INST_NOP) || (op[1] != PPC_INST_NOP))) {
                printk(KERN_ERR "Expected NOPs but have %x %x\n", op[0], op[1]);
                return -EINVAL;
        }

        /* If we never set up a trampoline to ftrace_caller, then bail */
        if (!rec->arch.mod->arch.tramp) {
                printk(KERN_ERR "No ftrace trampoline\n");
                return -EINVAL;
        }

        /* create the branch to the trampoline */
        op[0] = create_branch((unsigned int *)ip,
                              rec->arch.mod->arch.tramp, BRANCH_SET_LINK);
        if (!op[0]) {
                printk(KERN_ERR "REL24 out of range!\n");
                return -EINVAL;
        }

        /* ld r2,40(r1) */
        op[1] = 0xe8410028;

        pr_devel("write to %lx\n", rec->ip);

        if (probe_kernel_write((void *)ip, op, MCOUNT_INSN_SIZE * 2))
                return -EPERM;

        flush_icache_range(ip, ip + 8);

        return 0;
}
#else
static int
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned int op;
        unsigned long ip = rec->ip;

        /* read where this goes */
        if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE))
                return -EFAULT;

        /* It should be pointing to a nop */
        if (op != PPC_INST_NOP) {
                printk(KERN_ERR "Expected NOP but have %x\n", op);
                return -EINVAL;
        }

        /* If we never set up a trampoline to ftrace_caller, then bail */
        if (!rec->arch.mod->arch.tramp) {
                printk(KERN_ERR "No ftrace trampoline\n");
                return -EINVAL;
        }

        /* create the branch to the trampoline */
        op = create_branch((unsigned int *)ip,
                           rec->arch.mod->arch.tramp, BRANCH_SET_LINK);
        if (!op) {
                printk(KERN_ERR "REL24 out of range!\n");
                return -EINVAL;
        }

        pr_devel("write to %lx\n", rec->ip);

        if (probe_kernel_write((void *)ip, &op, MCOUNT_INSN_SIZE))
                return -EPERM;

        flush_icache_range(ip, ip + 8);

        return 0;
}
#endif /* CONFIG_PPC64 */
#endif /* CONFIG_MODULES */

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned long ip = rec->ip;
        unsigned int old, new;

        /*
         * If the calling address is more than 24 bits away,
         * then we had to use a trampoline to make the call.
         * Otherwise just update the call site.
         */
        if (test_24bit_addr(ip, addr)) {
                /* within range */
                old = PPC_INST_NOP;
                new = ftrace_call_replace(ip, addr, 1);
                return ftrace_modify_code(ip, old, new);
        }

#ifdef CONFIG_MODULES
        /*
         * Out of range jumps are called from modules.
         * Being that we are converting from nop, it had better
         * already have a module defined.
         */
        if (!rec->arch.mod) {
                printk(KERN_ERR "No module loaded\n");
                return -EINVAL;
        }

        return __ftrace_make_call(rec, addr);
#else
        /* We should not get here without modules */
        return -EINVAL;
#endif /* CONFIG_MODULES */
}

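/*
 * ftrace_call is the patch site inside the ftrace_caller assembly;
 * redirect its branch-and-link to the currently selected tracer.
 */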
int ftrace_update_ftrace_func(ftrace_func_t func)
{
        unsigned long ip = (unsigned long)(&ftrace_call);
        unsigned int old, new;
        int ret;

        old = *(unsigned int *)&ftrace_call;
        new = ftrace_call_replace(ip, (unsigned long)func, 1);
        ret = ftrace_modify_code(ip, old, new);

        return ret;
}

int __init ftrace_dyn_arch_init(void *data)
{
        /* caller expects data to be zero */
        unsigned long *p = data;

        *p = 0;

        return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);
extern void ftrace_graph_stub(void);

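/*
 * ftrace_graph_call is the patch site in the ftrace_caller assembly:
 * enabling rewrites its branch to reach ftrace_graph_caller, disabling
 * points it back at the ftrace_graph_stub label.
 */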
int ftrace_enable_ftrace_graph_caller(void)
{
        unsigned long ip = (unsigned long)(&ftrace_graph_call);
        unsigned long addr = (unsigned long)(&ftrace_graph_caller);
        unsigned long stub = (unsigned long)(&ftrace_graph_stub);
        unsigned int old, new;

        old = ftrace_call_replace(ip, stub, 0);
        new = ftrace_call_replace(ip, addr, 0);

        return ftrace_modify_code(ip, old, new);
}

int ftrace_disable_ftrace_graph_caller(void)
{
        unsigned long ip = (unsigned long)(&ftrace_graph_call);
        unsigned long addr = (unsigned long)(&ftrace_graph_caller);
        unsigned long stub = (unsigned long)(&ftrace_graph_stub);
        unsigned int old, new;

        old = ftrace_call_replace(ip, addr, 0);
        new = ftrace_call_replace(ip, stub, 0);

        return ftrace_modify_code(ip, old, new);
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_PPC64
extern void mod_return_to_handler(void);
#endif

/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
{
        unsigned long old;
        int faulted;
        struct ftrace_graph_ent trace;
        unsigned long return_hooker = (unsigned long)&return_to_handler;

        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                return;

#ifdef CONFIG_PPC64
        /* non core kernel code needs to save and restore the TOC */
        if (REGION_ID(self_addr) != KERNEL_REGION_ID)
                return_hooker = (unsigned long)&mod_return_to_handler;
#endif

        return_hooker = ppc_function_entry((void *)return_hooker);

        /*
         * Protect against a fault, even though it shouldn't happen.
         * This tracer is intrusive enough that we cannot afford to
         * skip such protection.
         */
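        /*
         * Labels 1: and 2: are the load and store that may fault; the
         * fixup at 4: sets 'faulted' and the __ex_table entries route a
         * fault at either instruction to that fixup.
         */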
        asm volatile(
                "1: " PPC_LL "%[old], 0(%[parent])\n"
                "2: " PPC_STL "%[return_hooker], 0(%[parent])\n"
                "   li %[faulted], 0\n"
                "3:\n"

                ".section .fixup, \"ax\"\n"
                "4: li %[faulted], 1\n"
                "   b 3b\n"
                ".previous\n"

                ".section __ex_table,\"a\"\n"
                        PPC_LONG_ALIGN "\n"
                        PPC_LONG "1b,4b\n"
                        PPC_LONG "2b,4b\n"
                ".previous"

                : [old] "=&r" (old), [faulted] "=r" (faulted)
                : [parent] "r" (parent), [return_hooker] "r" (return_hooker)
                : "memory"
        );

        if (unlikely(faulted)) {
                ftrace_graph_stop();
                WARN_ON(1);
                return;
        }

        if (ftrace_push_return_trace(old, self_addr, &trace.depth, 0) == -EBUSY) {
                *parent = old;
                return;
        }

        trace.func = self_addr;

        /* Only trace if the calling function expects to */
        if (!ftrace_graph_entry(&trace)) {
                current->curr_ret_stack--;
                *parent = old;
        }
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_PPC64)
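/*
 * On PPC64 each syscall has two sys_call_table entries (the native
 * 64-bit entry followed by the 32-bit/compat entry), hence the nr * 2
 * index.
 */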
unsigned long __init arch_syscall_addr(int nr)
{
        return sys_call_table[nr*2];
}
#endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_PPC64 */