1 /*
2 * Kprobes-based tracing events
3 *
4 * Created by Masami Hiramatsu <mhiramat@redhat.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20 #include <linux/module.h>
21 #include <linux/uaccess.h>
22
23 #include "trace_probe.h"
24
25 #define KPROBE_EVENT_SYSTEM "kprobes"
26
/*
28 * Kprobe event core functions
29 */
30 struct trace_kprobe {
31 struct list_head list;
32 struct kretprobe rp; /* Use rp.kp for kprobe use */
33 unsigned long nhit;
34 const char *symbol; /* symbol name */
35 struct trace_probe tp;
36 };
37
38 #define SIZEOF_TRACE_KPROBE(n) \
39 (offsetof(struct trace_kprobe, tp.args) + \
40 (sizeof(struct probe_arg) * (n)))
41
42
static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
44 {
45 return tk->rp.handler != NULL;
46 }
47
static nokprobe_inline const char *trace_kprobe_symbol(struct trace_kprobe *tk)
49 {
50 return tk->symbol ? tk->symbol : "unknown";
51 }
52
static nokprobe_inline unsigned long trace_kprobe_offset(struct trace_kprobe *tk)
54 {
55 return tk->rp.kp.offset;
56 }
57
static nokprobe_inline bool trace_kprobe_has_gone(struct trace_kprobe *tk)
59 {
60 return !!(kprobe_gone(&tk->rp.kp));
61 }
62
static nokprobe_inline bool trace_kprobe_within_module(struct trace_kprobe *tk,
64 struct module *mod)
65 {
66 int len = strlen(mod->name);
67 const char *name = trace_kprobe_symbol(tk);
68 return strncmp(mod->name, name, len) == 0 && name[len] == ':';
69 }
70
static nokprobe_inline bool trace_kprobe_is_on_module(struct trace_kprobe *tk)
72 {
73 return !!strchr(trace_kprobe_symbol(tk), ':');
74 }
75
76 static int register_kprobe_event(struct trace_kprobe *tk);
77 static int unregister_kprobe_event(struct trace_kprobe *tk);
78
79 static DEFINE_MUTEX(probe_lock);
80 static LIST_HEAD(probe_list);
81
82 static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
83 static int kretprobe_dispatcher(struct kretprobe_instance *ri,
84 struct pt_regs *regs);
85
86 /* Memory fetching by symbol */
87 struct symbol_cache {
88 char *symbol;
89 long offset;
90 unsigned long addr;
91 };
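
/*
 * A symbol_cache records the "@SYM+offs" specification of a fetch argument
 * together with its kallsyms-resolved address, so the symbol lookup does not
 * have to be repeated on every probe hit.
 */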
92
unsigned long update_symbol_cache(struct symbol_cache *sc)
94 {
95 sc->addr = (unsigned long)kallsyms_lookup_name(sc->symbol);
96
97 if (sc->addr)
98 sc->addr += sc->offset;
99
100 return sc->addr;
101 }
102
void free_symbol_cache(struct symbol_cache *sc)
104 {
105 kfree(sc->symbol);
106 kfree(sc);
107 }
108
struct symbol_cache *alloc_symbol_cache(const char *sym, long offset)
110 {
111 struct symbol_cache *sc;
112
113 if (!sym || strlen(sym) == 0)
114 return NULL;
115
116 sc = kzalloc(sizeof(struct symbol_cache), GFP_KERNEL);
117 if (!sc)
118 return NULL;
119
120 sc->symbol = kstrdup(sym, GFP_KERNEL);
121 if (!sc->symbol) {
122 kfree(sc);
123 return NULL;
124 }
125 sc->offset = offset;
126 update_symbol_cache(sc);
127
128 return sc;
129 }
130
131 /*
132 * Kprobes-specific fetch functions
133 */
134 #define DEFINE_FETCH_stack(type) \
135 static void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs, \
136 void *offset, void *dest) \
137 { \
138 *(type *)dest = (type)regs_get_kernel_stack_nth(regs, \
139 (unsigned int)((unsigned long)offset)); \
140 } \
141 NOKPROBE_SYMBOL(FETCH_FUNC_NAME(stack, type));
142
143 DEFINE_BASIC_FETCH_FUNCS(stack)
144 /* No string on the stack entry */
145 #define fetch_stack_string NULL
146 #define fetch_stack_string_size NULL
147
148 #define DEFINE_FETCH_memory(type) \
149 static void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs, \
150 void *addr, void *dest) \
151 { \
152 type retval; \
153 if (probe_kernel_address(addr, retval)) \
154 *(type *)dest = 0; \
155 else \
156 *(type *)dest = retval; \
157 } \
158 NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, type));
159
DEFINE_BASIC_FETCH_FUNCS(memory)
161 /*
162 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
163 * length and relative data location.
164 */
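/*
 * Note: the data_rloc helpers used below come from trace_probe.h; they pack
 * the string length into the upper 16 bits and the relative data offset into
 * the lower 16 bits of the u32 at *dest.
 */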
165 static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
166 void *addr, void *dest)
167 {
168 long ret;
169 int maxlen = get_rloc_len(*(u32 *)dest);
170 u8 *dst = get_rloc_data(dest);
171 u8 *src = addr;
172 mm_segment_t old_fs = get_fs();
173
174 if (!maxlen)
175 return;
176
177 /*
178 * Try to get string again, since the string can be changed while
179 * probing.
180 */
181 set_fs(KERNEL_DS);
182 pagefault_disable();
183
184 do
185 ret = __copy_from_user_inatomic(dst++, src++, 1);
186 while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen);
187
188 dst[-1] = '\0';
189 pagefault_enable();
190 set_fs(old_fs);
191
192 if (ret < 0) { /* Failed to fetch string */
193 ((u8 *)get_rloc_data(dest))[0] = '\0';
194 *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
195 } else {
196 *(u32 *)dest = make_data_rloc(src - (u8 *)addr,
197 get_rloc_offs(*(u32 *)dest));
198 }
199 }
200 NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, string));
201
/* Return the length of the string, including the terminating null byte */
static void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
204 void *addr, void *dest)
205 {
206 mm_segment_t old_fs;
207 int ret, len = 0;
208 u8 c;
209
210 old_fs = get_fs();
211 set_fs(KERNEL_DS);
212 pagefault_disable();
213
214 do {
215 ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
216 len++;
217 } while (c && ret == 0 && len < MAX_STRING_SIZE);
218
219 pagefault_enable();
220 set_fs(old_fs);
221
222 if (ret < 0) /* Failed to check the length */
223 *(u32 *)dest = 0;
224 else
225 *(u32 *)dest = len;
226 }
227 NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, string_size));
228
229 #define DEFINE_FETCH_symbol(type) \
230 void FETCH_FUNC_NAME(symbol, type)(struct pt_regs *regs, void *data, void *dest)\
231 { \
232 struct symbol_cache *sc = data; \
233 if (sc->addr) \
234 fetch_memory_##type(regs, (void *)sc->addr, dest); \
235 else \
236 *(type *)dest = 0; \
237 } \
238 NOKPROBE_SYMBOL(FETCH_FUNC_NAME(symbol, type));
239
240 DEFINE_BASIC_FETCH_FUNCS(symbol)
241 DEFINE_FETCH_symbol(string)
242 DEFINE_FETCH_symbol(string_size)
243
244 /* kprobes don't support file_offset fetch methods */
245 #define fetch_file_offset_u8 NULL
246 #define fetch_file_offset_u16 NULL
247 #define fetch_file_offset_u32 NULL
248 #define fetch_file_offset_u64 NULL
249 #define fetch_file_offset_string NULL
250 #define fetch_file_offset_string_size NULL
251
252 /* Fetch type information table */
253 const struct fetch_type kprobes_fetch_type_table[] = {
254 /* Special types */
255 [FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string,
256 sizeof(u32), 1, "__data_loc char[]"),
257 [FETCH_TYPE_STRSIZE] = __ASSIGN_FETCH_TYPE("string_size", u32,
258 string_size, sizeof(u32), 0, "u32"),
259 /* Basic types */
260 ASSIGN_FETCH_TYPE(u8, u8, 0),
261 ASSIGN_FETCH_TYPE(u16, u16, 0),
262 ASSIGN_FETCH_TYPE(u32, u32, 0),
263 ASSIGN_FETCH_TYPE(u64, u64, 0),
264 ASSIGN_FETCH_TYPE(s8, u8, 1),
265 ASSIGN_FETCH_TYPE(s16, u16, 1),
266 ASSIGN_FETCH_TYPE(s32, u32, 1),
267 ASSIGN_FETCH_TYPE(s64, u64, 1),
268
269 ASSIGN_FETCH_TYPE_END
270 };
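
/*
 * These entries back the optional ":TYPE" suffix on a fetch argument, e.g.
 * "%ax:u32" or "+0($stack):string" (register names are architecture
 * dependent and shown only for illustration).
 */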
271
272 /*
273 * Allocate new trace_probe and initialize it (including kprobes).
274 */
static struct trace_kprobe *alloc_trace_kprobe(const char *group,
276 const char *event,
277 void *addr,
278 const char *symbol,
279 unsigned long offs,
280 int nargs, bool is_return)
281 {
282 struct trace_kprobe *tk;
283 int ret = -ENOMEM;
284
285 tk = kzalloc(SIZEOF_TRACE_KPROBE(nargs), GFP_KERNEL);
286 if (!tk)
287 return ERR_PTR(ret);
288
289 if (symbol) {
290 tk->symbol = kstrdup(symbol, GFP_KERNEL);
291 if (!tk->symbol)
292 goto error;
293 tk->rp.kp.symbol_name = tk->symbol;
294 tk->rp.kp.offset = offs;
295 } else
296 tk->rp.kp.addr = addr;
297
298 if (is_return)
299 tk->rp.handler = kretprobe_dispatcher;
300 else
301 tk->rp.kp.pre_handler = kprobe_dispatcher;
302
303 if (!event || !is_good_name(event)) {
304 ret = -EINVAL;
305 goto error;
306 }
307
308 tk->tp.call.class = &tk->tp.class;
309 tk->tp.call.name = kstrdup(event, GFP_KERNEL);
310 if (!tk->tp.call.name)
311 goto error;
312
313 if (!group || !is_good_name(group)) {
314 ret = -EINVAL;
315 goto error;
316 }
317
318 tk->tp.class.system = kstrdup(group, GFP_KERNEL);
319 if (!tk->tp.class.system)
320 goto error;
321
322 INIT_LIST_HEAD(&tk->list);
323 INIT_LIST_HEAD(&tk->tp.files);
324 return tk;
325 error:
326 kfree(tk->tp.call.name);
327 kfree(tk->symbol);
328 kfree(tk);
329 return ERR_PTR(ret);
330 }
331
static void free_trace_kprobe(struct trace_kprobe *tk)
333 {
334 int i;
335
336 for (i = 0; i < tk->tp.nr_args; i++)
337 traceprobe_free_probe_arg(&tk->tp.args[i]);
338
339 kfree(tk->tp.call.class->system);
340 kfree(tk->tp.call.name);
341 kfree(tk->symbol);
342 kfree(tk);
343 }
344
static struct trace_kprobe *find_trace_kprobe(const char *event,
346 const char *group)
347 {
348 struct trace_kprobe *tk;
349
350 list_for_each_entry(tk, &probe_list, list)
351 if (strcmp(ftrace_event_name(&tk->tp.call), event) == 0 &&
352 strcmp(tk->tp.call.class->system, group) == 0)
353 return tk;
354 return NULL;
355 }
356
357 /*
358 * Enable trace_probe
 * If file is NULL, enable the "perf" handler; otherwise enable the "trace" handler.
360 */
361 static int
enable_trace_kprobe(struct trace_kprobe *tk, struct ftrace_event_file *file)
363 {
364 int ret = 0;
365
366 if (file) {
367 struct event_file_link *link;
368
369 link = kmalloc(sizeof(*link), GFP_KERNEL);
370 if (!link) {
371 ret = -ENOMEM;
372 goto out;
373 }
374
375 link->file = file;
376 list_add_tail_rcu(&link->list, &tk->tp.files);
377
378 tk->tp.flags |= TP_FLAG_TRACE;
379 } else
380 tk->tp.flags |= TP_FLAG_PROFILE;
381
382 if (trace_probe_is_registered(&tk->tp) && !trace_kprobe_has_gone(tk)) {
383 if (trace_kprobe_is_return(tk))
384 ret = enable_kretprobe(&tk->rp);
385 else
386 ret = enable_kprobe(&tk->rp.kp);
387 }
388 out:
389 return ret;
390 }
391
392 /*
393 * Disable trace_probe
 * If file is NULL, disable the "perf" handler; otherwise disable the "trace" handler.
395 */
396 static int
disable_trace_kprobe(struct trace_kprobe *tk, struct ftrace_event_file *file)
398 {
399 struct event_file_link *link = NULL;
400 int wait = 0;
401 int ret = 0;
402
403 if (file) {
404 link = find_event_file_link(&tk->tp, file);
405 if (!link) {
406 ret = -EINVAL;
407 goto out;
408 }
409
410 list_del_rcu(&link->list);
411 wait = 1;
412 if (!list_empty(&tk->tp.files))
413 goto out;
414
415 tk->tp.flags &= ~TP_FLAG_TRACE;
416 } else
417 tk->tp.flags &= ~TP_FLAG_PROFILE;
418
419 if (!trace_probe_is_enabled(&tk->tp) && trace_probe_is_registered(&tk->tp)) {
420 if (trace_kprobe_is_return(tk))
421 disable_kretprobe(&tk->rp);
422 else
423 disable_kprobe(&tk->rp.kp);
424 wait = 1;
425 }
426 out:
427 if (wait) {
428 /*
429 * Synchronize with kprobe_trace_func/kretprobe_trace_func
 * to ensure the probe is disabled (all running handlers have finished).
 * This is not only for kfree(); the caller, trace_remove_event_call(),
 * also relies on it before releasing event_call related objects, which
 * are accessed in kprobe_trace_func/kretprobe_trace_func.
435 */
436 synchronize_sched();
437 kfree(link); /* Ignored if link == NULL */
438 }
439
440 return ret;
441 }
442
443 /* Internal register function - just handle k*probes and flags */
static int __register_trace_kprobe(struct trace_kprobe *tk)
445 {
446 int i, ret;
447
448 if (trace_probe_is_registered(&tk->tp))
449 return -EINVAL;
450
451 for (i = 0; i < tk->tp.nr_args; i++)
452 traceprobe_update_arg(&tk->tp.args[i]);
453
454 /* Set/clear disabled flag according to tp->flag */
455 if (trace_probe_is_enabled(&tk->tp))
456 tk->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
457 else
458 tk->rp.kp.flags |= KPROBE_FLAG_DISABLED;
459
460 if (trace_kprobe_is_return(tk))
461 ret = register_kretprobe(&tk->rp);
462 else
463 ret = register_kprobe(&tk->rp.kp);
464
465 if (ret == 0)
466 tk->tp.flags |= TP_FLAG_REGISTERED;
467 else {
468 pr_warning("Could not insert probe at %s+%lu: %d\n",
469 trace_kprobe_symbol(tk), trace_kprobe_offset(tk), ret);
470 if (ret == -ENOENT && trace_kprobe_is_on_module(tk)) {
pr_warning("This probe might be able to register after "
"the target module is loaded. Continue.\n");
473 ret = 0;
474 } else if (ret == -EILSEQ) {
475 pr_warning("Probing address(0x%p) is not an "
476 "instruction boundary.\n",
477 tk->rp.kp.addr);
478 ret = -EINVAL;
479 }
480 }
481
482 return ret;
483 }
484
485 /* Internal unregister function - just handle k*probes and flags */
static void __unregister_trace_kprobe(struct trace_kprobe *tk)
487 {
488 if (trace_probe_is_registered(&tk->tp)) {
489 if (trace_kprobe_is_return(tk))
490 unregister_kretprobe(&tk->rp);
491 else
492 unregister_kprobe(&tk->rp.kp);
493 tk->tp.flags &= ~TP_FLAG_REGISTERED;
494 /* Cleanup kprobe for reuse */
495 if (tk->rp.kp.symbol_name)
496 tk->rp.kp.addr = NULL;
497 }
498 }
499
/* Unregister a trace_probe and probe_event: call with probe_lock held */
static int unregister_trace_kprobe(struct trace_kprobe *tk)
502 {
503 /* Enabled event can not be unregistered */
504 if (trace_probe_is_enabled(&tk->tp))
505 return -EBUSY;
506
507 /* Will fail if probe is being used by ftrace or perf */
508 if (unregister_kprobe_event(tk))
509 return -EBUSY;
510
511 __unregister_trace_kprobe(tk);
512 list_del(&tk->list);
513
514 return 0;
515 }
516
517 /* Register a trace_probe and probe_event */
static int register_trace_kprobe(struct trace_kprobe *tk)
519 {
520 struct trace_kprobe *old_tk;
521 int ret;
522
523 mutex_lock(&probe_lock);
524
/* Delete old (same name) event if it exists */
526 old_tk = find_trace_kprobe(ftrace_event_name(&tk->tp.call),
527 tk->tp.call.class->system);
528 if (old_tk) {
529 ret = unregister_trace_kprobe(old_tk);
530 if (ret < 0)
531 goto end;
532 free_trace_kprobe(old_tk);
533 }
534
535 /* Register new event */
536 ret = register_kprobe_event(tk);
537 if (ret) {
538 pr_warning("Failed to register probe event(%d)\n", ret);
539 goto end;
540 }
541
542 /* Register k*probe */
543 ret = __register_trace_kprobe(tk);
544 if (ret < 0)
545 unregister_kprobe_event(tk);
546 else
547 list_add_tail(&tk->list, &probe_list);
548
549 end:
550 mutex_unlock(&probe_lock);
551 return ret;
552 }
553
/* Module notifier callback, checking events on the module */
static int trace_kprobe_module_callback(struct notifier_block *nb,
556 unsigned long val, void *data)
557 {
558 struct module *mod = data;
559 struct trace_kprobe *tk;
560 int ret;
561
562 if (val != MODULE_STATE_COMING)
563 return NOTIFY_DONE;
564
565 /* Update probes on coming module */
566 mutex_lock(&probe_lock);
567 list_for_each_entry(tk, &probe_list, list) {
568 if (trace_kprobe_within_module(tk, mod)) {
569 /* Don't need to check busy - this should have gone. */
570 __unregister_trace_kprobe(tk);
571 ret = __register_trace_kprobe(tk);
572 if (ret)
pr_warning("Failed to re-register probe %s on "
574 "%s: %d\n",
575 ftrace_event_name(&tk->tp.call),
576 mod->name, ret);
577 }
578 }
579 mutex_unlock(&probe_lock);
580
581 return NOTIFY_DONE;
582 }
583
584 static struct notifier_block trace_kprobe_module_nb = {
585 .notifier_call = trace_kprobe_module_callback,
586 .priority = 1 /* Invoked after kprobe module callback */
587 };
588
static int create_trace_kprobe(int argc, char **argv)
590 {
591 /*
592 * Argument syntax:
593 * - Add kprobe: p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
594 * - Add kretprobe: r[:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
595 * Fetch args:
596 * $retval : fetch return value
597 * $stack : fetch stack address
598 * $stackN : fetch Nth of stack (N:0-)
599 * @ADDR : fetch memory at ADDR (ADDR should be in kernel)
600 * @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
601 * %REG : fetch register REG
602 * Dereferencing memory fetch:
603 * +|-offs(ARG) : fetch memory at ARG +|- offs address.
604 * Alias name of args:
605 * NAME=FETCHARG : set NAME as alias of FETCHARG.
606 * Type of args:
607 * FETCHARG:TYPE : use TYPE instead of unsigned long.
608 */
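/*
 * Illustrative examples written to the kprobe_events file (register names
 * are x86-specific and shown only for illustration):
 *   p:myprobe do_sys_open dfd=%ax filename=%dx flags=%cx mode=+4($stack)
 *   r:myretprobe do_sys_open $retval
 *   -:myprobe                  (remove the event again)
 */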
609 struct trace_kprobe *tk;
610 int i, ret = 0;
611 bool is_return = false, is_delete = false;
612 char *symbol = NULL, *event = NULL, *group = NULL;
613 char *arg;
614 unsigned long offset = 0;
615 void *addr = NULL;
616 char buf[MAX_EVENT_NAME_LEN];
617
618 /* argc must be >= 1 */
619 if (argv[0][0] == 'p')
620 is_return = false;
621 else if (argv[0][0] == 'r')
622 is_return = true;
623 else if (argv[0][0] == '-')
624 is_delete = true;
625 else {
pr_info("Probe definition must start with 'p', 'r' or"
627 " '-'.\n");
628 return -EINVAL;
629 }
630
631 if (argv[0][1] == ':') {
632 event = &argv[0][2];
633 if (strchr(event, '/')) {
634 group = event;
635 event = strchr(group, '/') + 1;
636 event[-1] = '\0';
637 if (strlen(group) == 0) {
638 pr_info("Group name is not specified\n");
639 return -EINVAL;
640 }
641 }
642 if (strlen(event) == 0) {
643 pr_info("Event name is not specified\n");
644 return -EINVAL;
645 }
646 }
647 if (!group)
648 group = KPROBE_EVENT_SYSTEM;
649
650 if (is_delete) {
651 if (!event) {
652 pr_info("Delete command needs an event name.\n");
653 return -EINVAL;
654 }
655 mutex_lock(&probe_lock);
656 tk = find_trace_kprobe(event, group);
657 if (!tk) {
658 mutex_unlock(&probe_lock);
659 pr_info("Event %s/%s doesn't exist.\n", group, event);
660 return -ENOENT;
661 }
662 /* delete an event */
663 ret = unregister_trace_kprobe(tk);
664 if (ret == 0)
665 free_trace_kprobe(tk);
666 mutex_unlock(&probe_lock);
667 return ret;
668 }
669
670 if (argc < 2) {
671 pr_info("Probe point is not specified.\n");
672 return -EINVAL;
673 }
674
/* Try to parse an address. If that fails, try to read the
 * input as a symbol. */
677 if (kstrtoul(argv[1], 0, (unsigned long *)&addr)) {
678 /* a symbol specified */
679 symbol = argv[1];
680 /* TODO: support .init module functions */
681 ret = traceprobe_split_symbol_offset(symbol, &offset);
682 if (ret) {
683 pr_info("Failed to parse either an address or a symbol.\n");
684 return ret;
685 }
686 if (offset && is_return) {
687 pr_info("Return probe must be used without offset.\n");
688 return -EINVAL;
689 }
690 } else if (is_return) {
691 pr_info("Return probe point must be a symbol.\n");
692 return -EINVAL;
693 }
694 argc -= 2; argv += 2;
695
696 /* setup a probe */
697 if (!event) {
698 /* Make a new event name */
699 if (symbol)
700 snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
701 is_return ? 'r' : 'p', symbol, offset);
702 else
703 snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
704 is_return ? 'r' : 'p', addr);
705 event = buf;
706 }
707 tk = alloc_trace_kprobe(group, event, addr, symbol, offset, argc,
708 is_return);
709 if (IS_ERR(tk)) {
710 pr_info("Failed to allocate trace_probe.(%d)\n",
711 (int)PTR_ERR(tk));
712 return PTR_ERR(tk);
713 }
714
715 /* parse arguments */
716 ret = 0;
717 for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
718 struct probe_arg *parg = &tk->tp.args[i];
719
720 /* Increment count for freeing args in error case */
721 tk->tp.nr_args++;
722
723 /* Parse argument name */
724 arg = strchr(argv[i], '=');
725 if (arg) {
726 *arg++ = '\0';
727 parg->name = kstrdup(argv[i], GFP_KERNEL);
728 } else {
729 arg = argv[i];
730 /* If argument name is omitted, set "argN" */
731 snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
732 parg->name = kstrdup(buf, GFP_KERNEL);
733 }
734
735 if (!parg->name) {
736 pr_info("Failed to allocate argument[%d] name.\n", i);
737 ret = -ENOMEM;
738 goto error;
739 }
740
741 if (!is_good_name(parg->name)) {
742 pr_info("Invalid argument[%d] name: %s\n",
743 i, parg->name);
744 ret = -EINVAL;
745 goto error;
746 }
747
748 if (traceprobe_conflict_field_name(parg->name,
749 tk->tp.args, i)) {
750 pr_info("Argument[%d] name '%s' conflicts with "
751 "another field.\n", i, argv[i]);
752 ret = -EINVAL;
753 goto error;
754 }
755
756 /* Parse fetch argument */
757 ret = traceprobe_parse_probe_arg(arg, &tk->tp.size, parg,
758 is_return, true);
759 if (ret) {
760 pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
761 goto error;
762 }
763 }
764
765 ret = register_trace_kprobe(tk);
766 if (ret)
767 goto error;
768 return 0;
769
770 error:
771 free_trace_kprobe(tk);
772 return ret;
773 }
774
static int release_all_trace_kprobes(void)
776 {
777 struct trace_kprobe *tk;
778 int ret = 0;
779
780 mutex_lock(&probe_lock);
781 /* Ensure no probe is in use. */
782 list_for_each_entry(tk, &probe_list, list)
783 if (trace_probe_is_enabled(&tk->tp)) {
784 ret = -EBUSY;
785 goto end;
786 }
787 /* TODO: Use batch unregistration */
788 while (!list_empty(&probe_list)) {
789 tk = list_entry(probe_list.next, struct trace_kprobe, list);
790 ret = unregister_trace_kprobe(tk);
791 if (ret)
792 goto end;
793 free_trace_kprobe(tk);
794 }
795
796 end:
797 mutex_unlock(&probe_lock);
798
799 return ret;
800 }
801
802 /* Probes listing interfaces */
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
804 {
805 mutex_lock(&probe_lock);
806 return seq_list_start(&probe_list, *pos);
807 }
808
static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
810 {
811 return seq_list_next(v, &probe_list, pos);
812 }
813
static void probes_seq_stop(struct seq_file *m, void *v)
815 {
816 mutex_unlock(&probe_lock);
817 }
818
static int probes_seq_show(struct seq_file *m, void *v)
820 {
821 struct trace_kprobe *tk = v;
822 int i;
823
824 seq_printf(m, "%c", trace_kprobe_is_return(tk) ? 'r' : 'p');
825 seq_printf(m, ":%s/%s", tk->tp.call.class->system,
826 ftrace_event_name(&tk->tp.call));
827
828 if (!tk->symbol)
829 seq_printf(m, " 0x%p", tk->rp.kp.addr);
830 else if (tk->rp.kp.offset)
831 seq_printf(m, " %s+%u", trace_kprobe_symbol(tk),
832 tk->rp.kp.offset);
833 else
834 seq_printf(m, " %s", trace_kprobe_symbol(tk));
835
836 for (i = 0; i < tk->tp.nr_args; i++)
837 seq_printf(m, " %s=%s", tk->tp.args[i].name, tk->tp.args[i].comm);
838 seq_printf(m, "\n");
839
840 return 0;
841 }
842
843 static const struct seq_operations probes_seq_op = {
844 .start = probes_seq_start,
845 .next = probes_seq_next,
846 .stop = probes_seq_stop,
847 .show = probes_seq_show
848 };
849
static int probes_open(struct inode *inode, struct file *file)
851 {
852 int ret;
853
854 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
855 ret = release_all_trace_kprobes();
856 if (ret < 0)
857 return ret;
858 }
859
860 return seq_open(file, &probes_seq_op);
861 }
862
static ssize_t probes_write(struct file *file, const char __user *buffer,
864 size_t count, loff_t *ppos)
865 {
866 return traceprobe_probes_write(file, buffer, count, ppos,
867 create_trace_kprobe);
868 }
869
870 static const struct file_operations kprobe_events_ops = {
871 .owner = THIS_MODULE,
872 .open = probes_open,
873 .read = seq_read,
874 .llseek = seq_lseek,
875 .release = seq_release,
876 .write = probes_write,
877 };
878
879 /* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
881 {
882 struct trace_kprobe *tk = v;
883
884 seq_printf(m, " %-44s %15lu %15lu\n",
885 ftrace_event_name(&tk->tp.call), tk->nhit,
886 tk->rp.kp.nmissed);
887
888 return 0;
889 }
890
891 static const struct seq_operations profile_seq_op = {
892 .start = probes_seq_start,
893 .next = probes_seq_next,
894 .stop = probes_seq_stop,
895 .show = probes_profile_seq_show
896 };
897
static int profile_open(struct inode *inode, struct file *file)
899 {
900 return seq_open(file, &profile_seq_op);
901 }
902
903 static const struct file_operations kprobe_profile_ops = {
904 .owner = THIS_MODULE,
905 .open = profile_open,
906 .read = seq_read,
907 .llseek = seq_lseek,
908 .release = seq_release,
909 };
910
911 /* Kprobe handler */
912 static nokprobe_inline void
__kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
914 struct ftrace_event_file *ftrace_file)
915 {
916 struct kprobe_trace_entry_head *entry;
917 struct ring_buffer_event *event;
918 struct ring_buffer *buffer;
919 int size, dsize, pc;
920 unsigned long irq_flags;
921 struct ftrace_event_call *call = &tk->tp.call;
922
923 WARN_ON(call != ftrace_file->event_call);
924
925 if (ftrace_trigger_soft_disabled(ftrace_file))
926 return;
927
928 local_save_flags(irq_flags);
929 pc = preempt_count();
930
931 dsize = __get_data_size(&tk->tp, regs);
932 size = sizeof(*entry) + tk->tp.size + dsize;
933
934 event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
935 call->event.type,
936 size, irq_flags, pc);
937 if (!event)
938 return;
939
940 entry = ring_buffer_event_data(event);
941 entry->ip = (unsigned long)tk->rp.kp.addr;
942 store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
943
944 event_trigger_unlock_commit_regs(ftrace_file, buffer, event,
945 entry, irq_flags, pc, regs);
946 }
947
948 static void
kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs)
950 {
951 struct event_file_link *link;
952
953 list_for_each_entry_rcu(link, &tk->tp.files, list)
954 __kprobe_trace_func(tk, regs, link->file);
955 }
956 NOKPROBE_SYMBOL(kprobe_trace_func);
957
958 /* Kretprobe handler */
959 static nokprobe_inline void
__kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
961 struct pt_regs *regs,
962 struct ftrace_event_file *ftrace_file)
963 {
964 struct kretprobe_trace_entry_head *entry;
965 struct ring_buffer_event *event;
966 struct ring_buffer *buffer;
967 int size, pc, dsize;
968 unsigned long irq_flags;
969 struct ftrace_event_call *call = &tk->tp.call;
970
971 WARN_ON(call != ftrace_file->event_call);
972
973 if (ftrace_trigger_soft_disabled(ftrace_file))
974 return;
975
976 local_save_flags(irq_flags);
977 pc = preempt_count();
978
979 dsize = __get_data_size(&tk->tp, regs);
980 size = sizeof(*entry) + tk->tp.size + dsize;
981
982 event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
983 call->event.type,
984 size, irq_flags, pc);
985 if (!event)
986 return;
987
988 entry = ring_buffer_event_data(event);
989 entry->func = (unsigned long)tk->rp.kp.addr;
990 entry->ret_ip = (unsigned long)ri->ret_addr;
991 store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
992
993 event_trigger_unlock_commit_regs(ftrace_file, buffer, event,
994 entry, irq_flags, pc, regs);
995 }
996
997 static void
kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
999 struct pt_regs *regs)
1000 {
1001 struct event_file_link *link;
1002
1003 list_for_each_entry_rcu(link, &tk->tp.files, list)
1004 __kretprobe_trace_func(tk, ri, regs, link->file);
1005 }
1006 NOKPROBE_SYMBOL(kretprobe_trace_func);
1007
1008 /* Event entry printers */
1009 static enum print_line_t
print_kprobe_event(struct trace_iterator *iter, int flags,
1011 struct trace_event *event)
1012 {
1013 struct kprobe_trace_entry_head *field;
1014 struct trace_seq *s = &iter->seq;
1015 struct trace_probe *tp;
1016 u8 *data;
1017 int i;
1018
1019 field = (struct kprobe_trace_entry_head *)iter->ent;
1020 tp = container_of(event, struct trace_probe, call.event);
1021
1022 if (!trace_seq_printf(s, "%s: (", ftrace_event_name(&tp->call)))
1023 goto partial;
1024
1025 if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
1026 goto partial;
1027
1028 if (!trace_seq_puts(s, ")"))
1029 goto partial;
1030
1031 data = (u8 *)&field[1];
1032 for (i = 0; i < tp->nr_args; i++)
1033 if (!tp->args[i].type->print(s, tp->args[i].name,
1034 data + tp->args[i].offset, field))
1035 goto partial;
1036
1037 if (!trace_seq_puts(s, "\n"))
1038 goto partial;
1039
1040 return TRACE_TYPE_HANDLED;
1041 partial:
1042 return TRACE_TYPE_PARTIAL_LINE;
1043 }
1044
1045 static enum print_line_t
print_kretprobe_event(struct trace_iterator *iter, int flags,
1047 struct trace_event *event)
1048 {
1049 struct kretprobe_trace_entry_head *field;
1050 struct trace_seq *s = &iter->seq;
1051 struct trace_probe *tp;
1052 u8 *data;
1053 int i;
1054
1055 field = (struct kretprobe_trace_entry_head *)iter->ent;
1056 tp = container_of(event, struct trace_probe, call.event);
1057
1058 if (!trace_seq_printf(s, "%s: (", ftrace_event_name(&tp->call)))
1059 goto partial;
1060
1061 if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
1062 goto partial;
1063
1064 if (!trace_seq_puts(s, " <- "))
1065 goto partial;
1066
1067 if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
1068 goto partial;
1069
1070 if (!trace_seq_puts(s, ")"))
1071 goto partial;
1072
1073 data = (u8 *)&field[1];
1074 for (i = 0; i < tp->nr_args; i++)
1075 if (!tp->args[i].type->print(s, tp->args[i].name,
1076 data + tp->args[i].offset, field))
1077 goto partial;
1078
1079 if (!trace_seq_puts(s, "\n"))
1080 goto partial;
1081
1082 return TRACE_TYPE_HANDLED;
1083 partial:
1084 return TRACE_TYPE_PARTIAL_LINE;
1085 }
1086
1087
static int kprobe_event_define_fields(struct ftrace_event_call *event_call)
1089 {
1090 int ret, i;
1091 struct kprobe_trace_entry_head field;
1092 struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;
1093
1094 DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
1095 /* Set argument names as fields */
1096 for (i = 0; i < tk->tp.nr_args; i++) {
1097 struct probe_arg *parg = &tk->tp.args[i];
1098
1099 ret = trace_define_field(event_call, parg->type->fmttype,
1100 parg->name,
1101 sizeof(field) + parg->offset,
1102 parg->type->size,
1103 parg->type->is_signed,
1104 FILTER_OTHER);
1105 if (ret)
1106 return ret;
1107 }
1108 return 0;
1109 }
1110
static int kretprobe_event_define_fields(struct ftrace_event_call *event_call)
1112 {
1113 int ret, i;
1114 struct kretprobe_trace_entry_head field;
1115 struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;
1116
1117 DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
1118 DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
1119 /* Set argument names as fields */
1120 for (i = 0; i < tk->tp.nr_args; i++) {
1121 struct probe_arg *parg = &tk->tp.args[i];
1122
1123 ret = trace_define_field(event_call, parg->type->fmttype,
1124 parg->name,
1125 sizeof(field) + parg->offset,
1126 parg->type->size,
1127 parg->type->is_signed,
1128 FILTER_OTHER);
1129 if (ret)
1130 return ret;
1131 }
1132 return 0;
1133 }
1134
1135 #ifdef CONFIG_PERF_EVENTS
1136
1137 /* Kprobe profile handler */
1138 static void
kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
1140 {
1141 struct ftrace_event_call *call = &tk->tp.call;
1142 struct kprobe_trace_entry_head *entry;
1143 struct hlist_head *head;
1144 int size, __size, dsize;
1145 int rctx;
1146
1147 head = this_cpu_ptr(call->perf_events);
1148 if (hlist_empty(head))
1149 return;
1150
1151 dsize = __get_data_size(&tk->tp, regs);
1152 __size = sizeof(*entry) + tk->tp.size + dsize;
1153 size = ALIGN(__size + sizeof(u32), sizeof(u64));
1154 size -= sizeof(u32);
1155
1156 entry = perf_trace_buf_prepare(size, call->event.type, NULL, &rctx);
1157 if (!entry)
1158 return;
1159
1160 entry->ip = (unsigned long)tk->rp.kp.addr;
1161 memset(&entry[1], 0, dsize);
1162 store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
1163 perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL);
1164 }
1165 NOKPROBE_SYMBOL(kprobe_perf_func);
1166
1167 /* Kretprobe profile handler */
1168 static void
kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1170 struct pt_regs *regs)
1171 {
1172 struct ftrace_event_call *call = &tk->tp.call;
1173 struct kretprobe_trace_entry_head *entry;
1174 struct hlist_head *head;
1175 int size, __size, dsize;
1176 int rctx;
1177
1178 head = this_cpu_ptr(call->perf_events);
1179 if (hlist_empty(head))
1180 return;
1181
1182 dsize = __get_data_size(&tk->tp, regs);
1183 __size = sizeof(*entry) + tk->tp.size + dsize;
1184 size = ALIGN(__size + sizeof(u32), sizeof(u64));
1185 size -= sizeof(u32);
1186
1187 entry = perf_trace_buf_prepare(size, call->event.type, NULL, &rctx);
1188 if (!entry)
1189 return;
1190
1191 entry->func = (unsigned long)tk->rp.kp.addr;
1192 entry->ret_ip = (unsigned long)ri->ret_addr;
1193 store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
1194 perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL);
1195 }
1196 NOKPROBE_SYMBOL(kretprobe_perf_func);
1197 #endif /* CONFIG_PERF_EVENTS */
1198
1199 /*
1200 * called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
1201 *
 * kprobe_trace_self_tests_init() does enable_trace_kprobe/disable_trace_kprobe
1203 * lockless, but we can't race with this __init function.
1204 */
static int kprobe_register(struct ftrace_event_call *event,
1206 enum trace_reg type, void *data)
1207 {
1208 struct trace_kprobe *tk = (struct trace_kprobe *)event->data;
1209 struct ftrace_event_file *file = data;
1210
1211 switch (type) {
1212 case TRACE_REG_REGISTER:
1213 return enable_trace_kprobe(tk, file);
1214 case TRACE_REG_UNREGISTER:
1215 return disable_trace_kprobe(tk, file);
1216
1217 #ifdef CONFIG_PERF_EVENTS
1218 case TRACE_REG_PERF_REGISTER:
1219 return enable_trace_kprobe(tk, NULL);
1220 case TRACE_REG_PERF_UNREGISTER:
1221 return disable_trace_kprobe(tk, NULL);
1222 case TRACE_REG_PERF_OPEN:
1223 case TRACE_REG_PERF_CLOSE:
1224 case TRACE_REG_PERF_ADD:
1225 case TRACE_REG_PERF_DEL:
1226 return 0;
1227 #endif
1228 }
1229 return 0;
1230 }
1231
static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
1233 {
1234 struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);
1235
1236 tk->nhit++;
1237
1238 if (tk->tp.flags & TP_FLAG_TRACE)
1239 kprobe_trace_func(tk, regs);
1240 #ifdef CONFIG_PERF_EVENTS
1241 if (tk->tp.flags & TP_FLAG_PROFILE)
1242 kprobe_perf_func(tk, regs);
1243 #endif
return 0; /* We don't tweak the kernel, so just return 0 */
1245 }
1246 NOKPROBE_SYMBOL(kprobe_dispatcher);
1247
1248 static int
kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
1250 {
1251 struct trace_kprobe *tk = container_of(ri->rp, struct trace_kprobe, rp);
1252
1253 tk->nhit++;
1254
1255 if (tk->tp.flags & TP_FLAG_TRACE)
1256 kretprobe_trace_func(tk, ri, regs);
1257 #ifdef CONFIG_PERF_EVENTS
1258 if (tk->tp.flags & TP_FLAG_PROFILE)
1259 kretprobe_perf_func(tk, ri, regs);
1260 #endif
return 0; /* We don't tweak the kernel, so just return 0 */
1262 }
1263 NOKPROBE_SYMBOL(kretprobe_dispatcher);
1264
1265 static struct trace_event_functions kretprobe_funcs = {
1266 .trace = print_kretprobe_event
1267 };
1268
1269 static struct trace_event_functions kprobe_funcs = {
1270 .trace = print_kprobe_event
1271 };
1272
static int register_kprobe_event(struct trace_kprobe *tk)
1274 {
1275 struct ftrace_event_call *call = &tk->tp.call;
1276 int ret;
1277
1278 /* Initialize ftrace_event_call */
1279 INIT_LIST_HEAD(&call->class->fields);
1280 if (trace_kprobe_is_return(tk)) {
1281 call->event.funcs = &kretprobe_funcs;
1282 call->class->define_fields = kretprobe_event_define_fields;
1283 } else {
1284 call->event.funcs = &kprobe_funcs;
1285 call->class->define_fields = kprobe_event_define_fields;
1286 }
1287 if (set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0)
1288 return -ENOMEM;
1289 ret = register_ftrace_event(&call->event);
1290 if (!ret) {
1291 kfree(call->print_fmt);
1292 return -ENODEV;
1293 }
1294 call->flags = 0;
1295 call->class->reg = kprobe_register;
1296 call->data = tk;
1297 ret = trace_add_event_call(call);
1298 if (ret) {
1299 pr_info("Failed to register kprobe event: %s\n",
1300 ftrace_event_name(call));
1301 kfree(call->print_fmt);
1302 unregister_ftrace_event(&call->event);
1303 }
1304 return ret;
1305 }
1306
static int unregister_kprobe_event(struct trace_kprobe *tk)
1308 {
1309 int ret;
1310
1311 /* tp->event is unregistered in trace_remove_event_call() */
1312 ret = trace_remove_event_call(&tk->tp.call);
1313 if (!ret)
1314 kfree(tk->tp.call.print_fmt);
1315 return ret;
1316 }
1317
1318 /* Make a tracefs interface for controlling probe points */
static __init int init_kprobe_trace(void)
1320 {
1321 struct dentry *d_tracer;
1322 struct dentry *entry;
1323
1324 if (register_module_notifier(&trace_kprobe_module_nb))
1325 return -EINVAL;
1326
1327 d_tracer = tracing_init_dentry();
1328 if (IS_ERR(d_tracer))
1329 return 0;
1330
1331 entry = tracefs_create_file("kprobe_events", 0644, d_tracer,
1332 NULL, &kprobe_events_ops);
1333
1334 /* Event list interface */
1335 if (!entry)
1336 pr_warning("Could not create tracefs "
1337 "'kprobe_events' entry\n");
1338
1339 /* Profile interface */
1340 entry = tracefs_create_file("kprobe_profile", 0444, d_tracer,
1341 NULL, &kprobe_profile_ops);
1342
1343 if (!entry)
1344 pr_warning("Could not create tracefs "
1345 "'kprobe_profile' entry\n");
1346 return 0;
1347 }
1348 fs_initcall(init_kprobe_trace);
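
/*
 * Once this initcall has run, probes can be managed from user space through
 * the "kprobe_events" file created above (tracefs is typically mounted at
 * /sys/kernel/tracing or /sys/kernel/debug/tracing), and per-probe hit/miss
 * counts can be read from "kprobe_profile".
 */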
1349
1350
1351 #ifdef CONFIG_FTRACE_STARTUP_TEST
1352
1353 /*
1354 * The "__used" keeps gcc from removing the function symbol
1355 * from the kallsyms table.
1356 */
static __used int kprobe_trace_selftest_target(int a1, int a2, int a3,
1358 int a4, int a5, int a6)
1359 {
1360 return a1 + a2 + a3 + a4 + a5 + a6;
1361 }
1362
1363 static struct ftrace_event_file *
find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
1365 {
1366 struct ftrace_event_file *file;
1367
1368 list_for_each_entry(file, &tr->events, list)
1369 if (file->event_call == &tk->tp.call)
1370 return file;
1371
1372 return NULL;
1373 }
1374
1375 /*
1376 * Nobody but us can call enable_trace_kprobe/disable_trace_kprobe at this
 * stage, so we can do this locklessly.
1378 */
static __init int kprobe_trace_self_tests_init(void)
1380 {
1381 int ret, warn = 0;
1382 int (*target)(int, int, int, int, int, int);
1383 struct trace_kprobe *tk;
1384 struct ftrace_event_file *file;
1385
1386 if (tracing_is_disabled())
1387 return -ENODEV;
1388
1389 target = kprobe_trace_selftest_target;
1390
1391 pr_info("Testing kprobe tracing: ");
1392
1393 ret = traceprobe_command("p:testprobe kprobe_trace_selftest_target "
1394 "$stack $stack0 +0($stack)",
1395 create_trace_kprobe);
1396 if (WARN_ON_ONCE(ret)) {
1397 pr_warn("error on probing function entry.\n");
1398 warn++;
1399 } else {
1400 /* Enable trace point */
1401 tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
1402 if (WARN_ON_ONCE(tk == NULL)) {
1403 pr_warn("error on getting new probe.\n");
1404 warn++;
1405 } else {
1406 file = find_trace_probe_file(tk, top_trace_array());
1407 if (WARN_ON_ONCE(file == NULL)) {
1408 pr_warn("error on getting probe file.\n");
1409 warn++;
1410 } else
1411 enable_trace_kprobe(tk, file);
1412 }
1413 }
1414
1415 ret = traceprobe_command("r:testprobe2 kprobe_trace_selftest_target "
1416 "$retval", create_trace_kprobe);
1417 if (WARN_ON_ONCE(ret)) {
1418 pr_warn("error on probing function return.\n");
1419 warn++;
1420 } else {
1421 /* Enable trace point */
1422 tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
1423 if (WARN_ON_ONCE(tk == NULL)) {
1424 pr_warn("error on getting 2nd new probe.\n");
1425 warn++;
1426 } else {
1427 file = find_trace_probe_file(tk, top_trace_array());
1428 if (WARN_ON_ONCE(file == NULL)) {
1429 pr_warn("error on getting probe file.\n");
1430 warn++;
1431 } else
1432 enable_trace_kprobe(tk, file);
1433 }
1434 }
1435
1436 if (warn)
1437 goto end;
1438
1439 ret = target(1, 2, 3, 4, 5, 6);
1440
1441 /* Disable trace points before removing it */
1442 tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
1443 if (WARN_ON_ONCE(tk == NULL)) {
1444 pr_warn("error on getting test probe.\n");
1445 warn++;
1446 } else {
1447 file = find_trace_probe_file(tk, top_trace_array());
1448 if (WARN_ON_ONCE(file == NULL)) {
1449 pr_warn("error on getting probe file.\n");
1450 warn++;
1451 } else
1452 disable_trace_kprobe(tk, file);
1453 }
1454
1455 tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
1456 if (WARN_ON_ONCE(tk == NULL)) {
1457 pr_warn("error on getting 2nd test probe.\n");
1458 warn++;
1459 } else {
1460 file = find_trace_probe_file(tk, top_trace_array());
1461 if (WARN_ON_ONCE(file == NULL)) {
1462 pr_warn("error on getting probe file.\n");
1463 warn++;
1464 } else
1465 disable_trace_kprobe(tk, file);
1466 }
1467
1468 ret = traceprobe_command("-:testprobe", create_trace_kprobe);
1469 if (WARN_ON_ONCE(ret)) {
1470 pr_warn("error on deleting a probe.\n");
1471 warn++;
1472 }
1473
1474 ret = traceprobe_command("-:testprobe2", create_trace_kprobe);
1475 if (WARN_ON_ONCE(ret)) {
1476 pr_warn("error on deleting a probe.\n");
1477 warn++;
1478 }
1479
1480 end:
1481 release_all_trace_kprobes();
1482 /*
1483 * Wait for the optimizer work to finish. Otherwise it might fiddle
1484 * with probes in already freed __init text.
1485 */
1486 wait_for_kprobe_optimizer();
1487 if (warn)
pr_cont("NG: Some tests failed. Please check them.\n");
1489 else
1490 pr_cont("OK\n");
1491 return 0;
1492 }
1493
1494 late_initcall(kprobe_trace_self_tests_init);
1495
1496 #endif
1497