// SPDX-License-Identifier: GPL-2.0
/*
 * Dynamic function tracing support.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks go to Ingo Molnar, for suggesting the idea.
 * Mathieu Desnoyers, for suggesting postponing the modifications.
 * Arjan van de Ven, for keeping me straight, and explaining to me
 * the dangers of modifying code on the run.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>

#include <trace/syscall.h>

#include <asm/set_memory.h>
#include <asm/kprobes.h>
#include <asm/ftrace.h>
#include <asm/nops.h>
#include <asm/text-patching.h>

#ifdef CONFIG_DYNAMIC_FTRACE

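/*
 * Set while ftrace_arch_code_modify_prepare() holds text_mutex: it selects
 * batched text_poke_queue() patching instead of text_poke_early().
 */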
static int ftrace_poke_late = 0;

int ftrace_arch_code_modify_prepare(void)
    __acquires(&text_mutex)
{
	/*
	 * Need to grab text_mutex to prevent a race from module loading
	 * and live kernel patching from changing the text permissions while
	 * ftrace has it set to "read/write".
	 */
	mutex_lock(&text_mutex);
	ftrace_poke_late = 1;
	return 0;
}

int ftrace_arch_code_modify_post_process(void)
    __releases(&text_mutex)
{
	/*
	 * ftrace_make_{call,nop}() may be called during
	 * module load, and any text_poke_queue() batches
	 * they queued must be finished here.
	 */
	text_poke_finish();
	ftrace_poke_late = 0;
	mutex_unlock(&text_mutex);
	return 0;
}

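/*
 * An mcount/fentry call site is MCOUNT_INSN_SIZE (5) bytes: either a
 * 5-byte NOP when tracing is off, or "e8 <rel32>" (a near call) when it
 * is on. The two helpers below return the byte pattern for each state.
 */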
static const char *ftrace_nop_replace(void)
{
	return x86_nops[5];
}

static const char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	return text_gen_insn(CALL_INSN_OPCODE, (void *)ip, (void *)addr);
}

static int ftrace_verify_code(unsigned long ip, const char *old_code)
{
	char cur_code[MCOUNT_INSN_SIZE];

	/*
	 * Note:
	 * We are paranoid about modifying text, as if a bug were to happen, it
	 * could cause us to read or write to someplace that could cause harm.
	 * Carefully read the code with copy_from_kernel_nofault(), and make
	 * sure what we read is what we expected it to be before modifying it.
	 */
	/* read the text we want to modify */
	if (copy_from_kernel_nofault(cur_code, (void *)ip, MCOUNT_INSN_SIZE)) {
		WARN_ON(1);
		return -EFAULT;
	}

	/* Make sure it is what we expect it to be */
	if (memcmp(cur_code, old_code, MCOUNT_INSN_SIZE) != 0) {
		ftrace_expected = old_code;
		WARN_ON(1);
		return -EINVAL;
	}

	return 0;
}

/*
 * Marked __ref because it calls text_poke_early() which is .init.text. That is
 * ok because that call will happen early, during boot, when .init sections are
 * still present.
 */
static int __ref
ftrace_modify_code_direct(unsigned long ip, const char *old_code,
			  const char *new_code)
{
	int ret = ftrace_verify_code(ip, old_code);
	if (ret)
		return ret;

	/* replace the text with the new text */
	if (ftrace_poke_late)
		text_poke_queue((void *)ip, new_code, MCOUNT_INSN_SIZE, NULL);
	else
		text_poke_early((void *)ip, new_code, MCOUNT_INSN_SIZE);
	return 0;
}

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	const char *new, *old;

	old = ftrace_call_replace(ip, addr);
	new = ftrace_nop_replace();

	/*
	 * On boot up, and when modules are loaded, the MCOUNT_ADDR
	 * is converted to a nop, and will never become MCOUNT_ADDR
	 * again. This code is either running before SMP (on boot up)
	 * or before the code will ever be executed (module load).
	 * We do not want to use the breakpoint version in this case,
	 * just modify the code directly.
	 */
	if (addr == MCOUNT_ADDR)
		return ftrace_modify_code_direct(ip, old, new);

	/*
	 * x86 overrides ftrace_replace_code -- this function will never be used
	 * in this case.
	 */
	WARN_ONCE(1, "invalid use of ftrace_make_nop");
	return -EINVAL;
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	const char *new, *old;

	old = ftrace_nop_replace();
	new = ftrace_call_replace(ip, addr);

	/* Should only be called when module is loaded */
	return ftrace_modify_code_direct(rec->ip, old, new);
}

/*
 * Should never be called:
 *  It is only called by __ftrace_replace_code(), which is called by
 *  ftrace_replace_code() (which x86 overrides) and by ftrace_update_code(),
 *  which turns mcount calls into nops or nops into function calls, but
 *  never converts a function from not using regs to one that uses regs,
 *  which is what ftrace_modify_call() is for.
 */
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	WARN_ON(1);
	return -EINVAL;
}

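/*
 * Redirect the two patchable call sites inside the default trampolines
 * (ftrace_call and ftrace_regs_call) to @func. text_poke_bp() patches live
 * text safely via an INT3 breakpoint, so this can run while other CPUs may
 * be executing the trampolines.
 */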
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip;
	const char *new;

	ip = (unsigned long)(&ftrace_call);
	new = ftrace_call_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);

	ip = (unsigned long)(&ftrace_regs_call);
	new = ftrace_call_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);

	return 0;
}

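/*
 * Two passes over the ftrace records: first verify that every site still
 * contains the bytes we expect, then queue all the pokes in a batch and
 * flush them with text_poke_finish(). Verifying everything up front means
 * a mismatch aborts the update before any text has been touched.
 */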
void ftrace_replace_code(int enable)
{
	struct ftrace_rec_iter *iter;
	struct dyn_ftrace *rec;
	const char *new, *old;
	int ret;

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		switch (ftrace_test_record(rec, enable)) {
		case FTRACE_UPDATE_IGNORE:
		default:
			continue;

		case FTRACE_UPDATE_MAKE_CALL:
			old = ftrace_nop_replace();
			break;

		case FTRACE_UPDATE_MODIFY_CALL:
		case FTRACE_UPDATE_MAKE_NOP:
			old = ftrace_call_replace(rec->ip, ftrace_get_addr_curr(rec));
			break;
		}

		ret = ftrace_verify_code(rec->ip, old);
		if (ret) {
			ftrace_expected = old;
			ftrace_bug(ret, rec);
			ftrace_expected = NULL;
			return;
		}
	}

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		switch (ftrace_test_record(rec, enable)) {
		case FTRACE_UPDATE_IGNORE:
		default:
			continue;

		case FTRACE_UPDATE_MAKE_CALL:
		case FTRACE_UPDATE_MODIFY_CALL:
			new = ftrace_call_replace(rec->ip, ftrace_get_addr_new(rec));
			break;

		case FTRACE_UPDATE_MAKE_NOP:
			new = ftrace_nop_replace();
			break;
		}

		text_poke_queue((void *)rec->ip, new, MCOUNT_INSN_SIZE, NULL);
		ftrace_update_record(rec, enable);
	}
	text_poke_finish();
}

void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}

/* Currently only x86_64 supports dynamic trampolines */
#ifdef CONFIG_X86_64

#ifdef CONFIG_MODULES
#include <linux/moduleloader.h>
/* Module allocation simplifies allocating memory for code */
static inline void *alloc_tramp(unsigned long size)
{
	return module_alloc(size);
}
static inline void tramp_free(void *tramp)
{
	module_memfree(tramp);
}
#else
/* Trampolines can only be created if modules are supported */
static inline void *alloc_tramp(unsigned long size)
{
	return NULL;
}
static inline void tramp_free(void *tramp) { }
#endif

/* Defined as markers to the end of the ftrace default trampolines */
extern void ftrace_regs_caller_end(void);
extern void ftrace_regs_caller_ret(void);
extern void ftrace_caller_end(void);
extern void ftrace_caller_op_ptr(void);
extern void ftrace_regs_caller_op_ptr(void);
extern void ftrace_regs_caller_jmp(void);

/* movq function_trace_op(%rip), %rdx */
/* 0x48 0x8b 0x15 <offset-to-function_trace_op (4 bytes)> */
#define OP_REF_SIZE	7

/*
 * The ftrace_ops is passed to the function callback. Since the
 * trampoline only services a single ftrace_ops, we can pass in
 * that ops directly.
 *
 * The ftrace_op_code_union is used to create a pointer to the
 * ftrace_ops that will be passed to the callback function.
 */
union ftrace_op_code_union {
	char code[OP_REF_SIZE];
	struct {
		char op[3];
		int offset;
	} __attribute__((packed));
};

#define RET_SIZE	(IS_ENABLED(CONFIG_RETPOLINE) ? 5 : 1 + IS_ENABLED(CONFIG_SLS))

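/*
 * Rough layout of an allocated trampoline (sizes taken from the code below):
 *
 *	trampoline:	copy of the ftrace_caller/ftrace_regs_caller body
 *			(end_offset - start_offset bytes)
 *	+ size:		ret, or jmp __x86_return_thunk (RET_SIZE bytes)
 *	+ RET_SIZE:	pointer to this trampoline's ftrace_ops
 *			(sizeof(void *) bytes)
 */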
static unsigned long
create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
{
	unsigned long start_offset;
	unsigned long end_offset;
	unsigned long op_offset;
	unsigned long call_offset;
	unsigned long jmp_offset;
	unsigned long offset;
	unsigned long npages;
	unsigned long size;
	unsigned long *ptr;
	void *trampoline;
	void *ip;
	/* 48 8b 15 <offset> is movq <offset>(%rip), %rdx */
	unsigned const char op_ref[] = { 0x48, 0x8b, 0x15 };
	unsigned const char retq[] = { RET_INSN_OPCODE, INT3_INSN_OPCODE };
	union ftrace_op_code_union op_ptr;
	int ret;

	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
		start_offset = (unsigned long)ftrace_regs_caller;
		end_offset = (unsigned long)ftrace_regs_caller_end;
		op_offset = (unsigned long)ftrace_regs_caller_op_ptr;
		call_offset = (unsigned long)ftrace_regs_call;
		jmp_offset = (unsigned long)ftrace_regs_caller_jmp;
	} else {
		start_offset = (unsigned long)ftrace_caller;
		end_offset = (unsigned long)ftrace_caller_end;
		op_offset = (unsigned long)ftrace_caller_op_ptr;
		call_offset = (unsigned long)ftrace_call;
		jmp_offset = 0;
	}

	size = end_offset - start_offset;

	/*
	 * Allocate enough size to store the ftrace_caller code,
	 * the return instruction, as well as the address of the
	 * ftrace_ops this trampoline is used for.
	 */
	trampoline = alloc_tramp(size + RET_SIZE + sizeof(void *));
	if (!trampoline)
		return 0;

	*tramp_size = size + RET_SIZE + sizeof(void *);
	npages = DIV_ROUND_UP(*tramp_size, PAGE_SIZE);

	/* Copy ftrace_caller onto the trampoline memory */
	ret = copy_from_kernel_nofault(trampoline, (void *)start_offset, size);
	if (WARN_ON(ret < 0))
		goto fail;

	ip = trampoline + size;

	/* The trampoline ends with ret(q) */
	if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
		memcpy(ip, text_gen_insn(JMP32_INSN_OPCODE, ip, &__x86_return_thunk), JMP32_INSN_SIZE);
	else
		memcpy(ip, retq, sizeof(retq));

	/* No need to test direct calls on created trampolines */
	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
		/* NOP the jnz 1f; but make sure it's a 2 byte jnz */
		ip = trampoline + (jmp_offset - start_offset);
		if (WARN_ON(*(char *)ip != 0x75))
			goto fail;
		ret = copy_from_kernel_nofault(ip, x86_nops[2], 2);
		if (ret < 0)
			goto fail;
	}

	/*
	 * The address of the ftrace_ops that is used for this trampoline
	 * is stored at the end of the trampoline. This will be used to
	 * load the third parameter for the callback. Basically, that
	 * location at the end of the trampoline takes the place of
	 * the global function_trace_op variable.
	 */

	ptr = (unsigned long *)(trampoline + size + RET_SIZE);
	*ptr = (unsigned long)ops;

	op_offset -= start_offset;
	memcpy(&op_ptr, trampoline + op_offset, OP_REF_SIZE);

	/* Are we pointing to the reference? */
	if (WARN_ON(memcmp(op_ptr.op, op_ref, 3) != 0))
		goto fail;

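	/*
	 * Note: a RIP-relative disp32 is encoded relative to the end of
	 * the instruction, hence the "+ OP_REF_SIZE" below; roughly,
	 * offset = address of the ops pointer minus the address just
	 * past the movq instruction.
	 */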
	/* Load the contents of ptr into the callback parameter */
	offset = (unsigned long)ptr;
	offset -= (unsigned long)trampoline + op_offset + OP_REF_SIZE;

	op_ptr.offset = offset;

	/* put in the new offset to the ftrace_ops */
	memcpy(trampoline + op_offset, &op_ptr, OP_REF_SIZE);

	/* put in the call to the function */
	mutex_lock(&text_mutex);
	call_offset -= start_offset;
	memcpy(trampoline + call_offset,
	       text_gen_insn(CALL_INSN_OPCODE,
			     trampoline + call_offset,
			     ftrace_ops_get_func(ops)), CALL_INSN_SIZE);
	mutex_unlock(&text_mutex);

	/* The ALLOC_TRAMP flag lets us know we created it */
	ops->flags |= FTRACE_OPS_FL_ALLOC_TRAMP;

	set_vm_flush_reset_perms(trampoline);

	if (likely(system_state != SYSTEM_BOOTING))
		set_memory_ro((unsigned long)trampoline, npages);
	set_memory_x((unsigned long)trampoline, npages);
	return (unsigned long)trampoline;
fail:
	tramp_free(trampoline);
	return 0;
}

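/*
 * create_trampoline() leaves trampolines writable while still booting
 * (system_state == SYSTEM_BOOTING above); this pass makes every
 * ftrace-allocated trampoline read-only once boot-time patching is done.
 */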
void set_ftrace_ops_ro(void)
{
	struct ftrace_ops *ops;
	unsigned long start_offset;
	unsigned long end_offset;
	unsigned long npages;
	unsigned long size;

	do_for_each_ftrace_op(ops, ftrace_ops_list) {
		if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
			continue;

		if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
			start_offset = (unsigned long)ftrace_regs_caller;
			end_offset = (unsigned long)ftrace_regs_caller_end;
		} else {
			start_offset = (unsigned long)ftrace_caller;
			end_offset = (unsigned long)ftrace_caller_end;
		}
		size = end_offset - start_offset;
		size = size + RET_SIZE + sizeof(void *);
		npages = DIV_ROUND_UP(size, PAGE_SIZE);
		set_memory_ro((unsigned long)ops->trampoline, npages);
	} while_for_each_ftrace_op(ops);
}

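/*
 * The call to the ops callback sits at the same offset in every copy of a
 * default trampoline as in the original, so the offset computed here is
 * valid for the originals and the copies alike.
 */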
static unsigned long calc_trampoline_call_offset(bool save_regs)
{
	unsigned long start_offset;
	unsigned long call_offset;

	if (save_regs) {
		start_offset = (unsigned long)ftrace_regs_caller;
		call_offset = (unsigned long)ftrace_regs_call;
	} else {
		start_offset = (unsigned long)ftrace_caller;
		call_offset = (unsigned long)ftrace_call;
	}

	return call_offset - start_offset;
}

void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
{
	ftrace_func_t func;
	unsigned long offset;
	unsigned long ip;
	unsigned int size;
	const char *new;

	if (!ops->trampoline) {
		ops->trampoline = create_trampoline(ops, &size);
		if (!ops->trampoline)
			return;
		ops->trampoline_size = size;
		return;
	}

	/*
	 * The ftrace_ops caller may set up its own trampoline.
	 * In such a case, this code must not modify it.
	 */
	if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return;

	offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
	ip = ops->trampoline + offset;
	func = ftrace_ops_get_func(ops);

	mutex_lock(&text_mutex);
	/* Do a safe modify in case the trampoline is executing */
	new = ftrace_call_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
	mutex_unlock(&text_mutex);
}

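/*
 * A near call is "e8 <rel32>", with rel32 relative to the next instruction,
 * so the decode below is: target = ptr + CALL_INSN_SIZE + disp.
 */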
/* Return the address of the function the trampoline calls */
static void *addr_from_call(void *ptr)
{
	union text_poke_insn call;
	int ret;

	ret = copy_from_kernel_nofault(&call, ptr, CALL_INSN_SIZE);
	if (WARN_ON_ONCE(ret < 0))
		return NULL;

	/* Make sure this is a call */
	if (WARN_ON_ONCE(call.opcode != CALL_INSN_OPCODE)) {
		pr_warn("Expected E8, got %x\n", call.opcode);
		return NULL;
	}

	return ptr + CALL_INSN_SIZE + call.disp;
}

void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
			   unsigned long frame_pointer);

/*
 * If the ops->trampoline was not allocated, then it probably
 * has a static trampoline func, or is the ftrace caller itself.
 */
static void *static_tramp_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
	unsigned long offset;
	bool save_regs = rec->flags & FTRACE_FL_REGS_EN;
	void *ptr;

	if (ops && ops->trampoline) {
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
		/*
		 * The function graph tracer is the only one we know of
		 * that sets a static trampoline.
		 */
		if (ops->trampoline == FTRACE_GRAPH_ADDR)
			return (void *)prepare_ftrace_return;
#endif
		return NULL;
	}

	offset = calc_trampoline_call_offset(save_regs);

	if (save_regs)
		ptr = (void *)FTRACE_REGS_ADDR + offset;
	else
		ptr = (void *)FTRACE_ADDR + offset;

	return addr_from_call(ptr);
}

void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
	unsigned long offset;

	/* If we didn't allocate this trampoline, consider it static */
	if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return static_tramp_func(ops, rec);

	offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
	return addr_from_call((void *)ops->trampoline + offset);
}

void arch_ftrace_trampoline_free(struct ftrace_ops *ops)
{
	if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return;

	tramp_free((void *)ops->trampoline);
	ops->trampoline = 0;
}

#endif /* CONFIG_X86_64 */
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);

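/*
 * The ftrace_graph_call site is toggled between "jmp ftrace_graph_caller"
 * and "jmp ftrace_stub" by the enable/disable helpers further below;
 * ftrace_jmp_replace() builds the 5-byte jmp used for that.
 */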
static const char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
{
	return text_gen_insn(JMP32_INSN_OPCODE, (void *)ip, (void *)addr);
}

static int ftrace_mod_jmp(unsigned long ip, void *func)
{
	const char *new;

	new = ftrace_jmp_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
	return 0;
}

int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);

	return ftrace_mod_jmp(ip, &ftrace_graph_caller);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);

	return ftrace_mod_jmp(ip, &ftrace_stub);
}

#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Hook the return address and push it onto the stack of return addresses
 * in the current thread info.
 */
void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;
	unsigned long old;
	int faulted;

	/*
	 * When resuming from suspend-to-ram, this function can be indirectly
	 * called from early CPU startup code while the CPU is in real mode,
	 * which would fail miserably. Make sure the stack pointer is a
	 * virtual address.
	 *
	 * This check isn't as accurate as virt_addr_valid(), but it should be
	 * good enough for this purpose, and it's fast.
	 */
	if (unlikely((long)__builtin_frame_address(0) >= 0))
		return;

	if (unlikely(ftrace_graph_is_dead()))
		return;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * Protect against a fault, even if it shouldn't
	 * happen. This tracer is too intrusive to
	 * ignore such a protection.
	 */
	asm volatile(
		"1: " _ASM_MOV " (%[parent]), %[old]\n"
		"2: " _ASM_MOV " %[return_hooker], (%[parent])\n"
		"   movl $0, %[faulted]\n"
		"3:\n"

		".section .fixup, \"ax\"\n"
		"4: movl $1, %[faulted]\n"
		"   jmp 3b\n"
		".previous\n"

		_ASM_EXTABLE(1b, 4b)
		_ASM_EXTABLE(2b, 4b)

		: [old] "=&r" (old), [faulted] "=r" (faulted)
		: [parent] "r" (parent), [return_hooker] "r" (return_hooker)
		: "memory"
	);

	if (unlikely(faulted)) {
		ftrace_graph_stop();
		WARN_ON(1);
		return;
	}

	if (function_graph_enter(old, self_addr, frame_pointer, parent))
		*parent = old;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */