/*
 * Kprobes-based tracing events
 *
 * Created by Masami Hiramatsu <mhiramat@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#define pr_fmt(fmt)	"trace_kprobe: " fmt

#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/rculist.h>

#include "trace_probe.h"

#define KPROBE_EVENT_SYSTEM "kprobes"
#define KRETPROBE_MAXACTIVE_MAX 4096
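
/*
 * KRETPROBE_MAXACTIVE_MAX caps a user-supplied kretprobe maxactive (the
 * "r<N>:..." syntax parsed in create_trace_kprobe() below): kretprobe
 * instances are iterated over via a list, so the maximum must stay
 * reasonable.
 */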

/*
 * Kprobe event core functions
 */
struct trace_kprobe {
	struct list_head	list;
	struct kretprobe	rp;	/* Use rp.kp for kprobe use */
	unsigned long __percpu *nhit;
	const char		*symbol;	/* symbol name */
	struct trace_probe	tp;
};

#define SIZEOF_TRACE_KPROBE(n)				\
	(offsetof(struct trace_kprobe, tp.args) +	\
	(sizeof(struct probe_arg) * (n)))
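
/*
 * For illustration: struct trace_probe ends in a flexible array of
 * struct probe_arg, so SIZEOF_TRACE_KPROBE(2) covers everything up to
 * tp.args plus two probe_arg slots, letting alloc_trace_kprobe() grab
 * the probe and its argument array with a single kzalloc().
 */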


static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
{
	return tk->rp.handler != NULL;
}

static nokprobe_inline const char *trace_kprobe_symbol(struct trace_kprobe *tk)
{
	return tk->symbol ? tk->symbol : "unknown";
}

static nokprobe_inline unsigned long trace_kprobe_offset(struct trace_kprobe *tk)
{
	return tk->rp.kp.offset;
}

static nokprobe_inline bool trace_kprobe_has_gone(struct trace_kprobe *tk)
{
	return !!(kprobe_gone(&tk->rp.kp));
}

static nokprobe_inline bool trace_kprobe_within_module(struct trace_kprobe *tk,
							struct module *mod)
{
	int len = strlen(mod->name);
	const char *name = trace_kprobe_symbol(tk);
	return strncmp(mod->name, name, len) == 0 && name[len] == ':';
}

static nokprobe_inline bool trace_kprobe_is_on_module(struct trace_kprobe *tk)
{
	return !!strchr(trace_kprobe_symbol(tk), ':');
}

static nokprobe_inline unsigned long trace_kprobe_nhit(struct trace_kprobe *tk)
{
	unsigned long nhit = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		nhit += *per_cpu_ptr(tk->nhit, cpu);

	return nhit;
}
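
/*
 * Note: the per-cpu hit counters are summed without synchronization; the
 * total is only consumed by the kprobe_profile display and the startup
 * selftest, so a momentarily stale sum is acceptable.
 */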

static int register_kprobe_event(struct trace_kprobe *tk);
static int unregister_kprobe_event(struct trace_kprobe *tk);

static DEFINE_MUTEX(probe_lock);
static LIST_HEAD(probe_list);

static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
static int kretprobe_dispatcher(struct kretprobe_instance *ri,
				struct pt_regs *regs);

/* Memory fetching by symbol */
struct symbol_cache {
	char		*symbol;
	long		offset;
	unsigned long	addr;
};

unsigned long update_symbol_cache(struct symbol_cache *sc)
{
	sc->addr = (unsigned long)kallsyms_lookup_name(sc->symbol);

	if (sc->addr)
		sc->addr += sc->offset;

	return sc->addr;
}

void free_symbol_cache(struct symbol_cache *sc)
{
	kfree(sc->symbol);
	kfree(sc);
}

struct symbol_cache *alloc_symbol_cache(const char *sym, long offset)
{
	struct symbol_cache *sc;

	if (!sym || strlen(sym) == 0)
		return NULL;

	sc = kzalloc(sizeof(struct symbol_cache), GFP_KERNEL);
	if (!sc)
		return NULL;

	sc->symbol = kstrdup(sym, GFP_KERNEL);
	if (!sc->symbol) {
		kfree(sc);
		return NULL;
	}
	sc->offset = offset;
	update_symbol_cache(sc);

	return sc;
}

/*
 * Kprobes-specific fetch functions
 */
#define DEFINE_FETCH_stack(type)					\
static void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,		\
					 void *offset, void *dest)	\
{									\
	*(type *)dest = (type)regs_get_kernel_stack_nth(regs,		\
				(unsigned int)((unsigned long)offset));	\
}									\
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(stack, type));

DEFINE_BASIC_FETCH_FUNCS(stack)
/* No string on the stack entry */
#define fetch_stack_string	NULL
#define fetch_stack_string_size	NULL

#define DEFINE_FETCH_memory(type)					\
static void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,	\
					  void *addr, void *dest)	\
{									\
	type retval;							\
	if (probe_kernel_address(addr, retval))				\
		*(type *)dest = 0;					\
	else								\
		*(type *)dest = retval;					\
}									\
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, type));

DEFINE_BASIC_FETCH_FUNCS(memory)
/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 * length and relative data location.
 */
static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
					    void *addr, void *dest)
{
	int maxlen = get_rloc_len(*(u32 *)dest);
	u8 *dst = get_rloc_data(dest);
	long ret;

	if (!maxlen)
		return;

	/*
	 * Try to get string again, since the string can be changed while
	 * probing.
	 */
	ret = strncpy_from_unsafe(dst, addr, maxlen);

	if (ret < 0) {	/* Failed to fetch string */
		dst[0] = '\0';
		*(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
	} else {
		*(u32 *)dest = make_data_rloc(ret, get_rloc_offs(*(u32 *)dest));
	}
}
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, string));
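
/*
 * A sketch of the "rloc" encoding assumed above (see make_data_rloc() in
 * trace_probe.h): the u32 at *dest packs the fetched string length into
 * the upper 16 bits and the data offset, relative to the entry, into the
 * lower 16 bits, e.g. make_data_rloc(5, 0x20) == (5 << 16) | 0x20.
 */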

/* Return the length of a string -- including the terminating null byte */
static void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
						 void *addr, void *dest)
{
	mm_segment_t old_fs;
	int ret, len = 0;
	u8 c;

	old_fs = get_fs();
	set_fs(KERNEL_DS);
	pagefault_disable();

	do {
		ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
		len++;
	} while (c && ret == 0 && len < MAX_STRING_SIZE);

	pagefault_enable();
	set_fs(old_fs);

	if (ret < 0)	/* Failed to check the length */
		*(u32 *)dest = 0;
	else
		*(u32 *)dest = len;
}
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, string_size));

#define DEFINE_FETCH_symbol(type)					\
void FETCH_FUNC_NAME(symbol, type)(struct pt_regs *regs, void *data, void *dest)\
{									\
	struct symbol_cache *sc = data;					\
	if (sc->addr)							\
		fetch_memory_##type(regs, (void *)sc->addr, dest);	\
	else								\
		*(type *)dest = 0;					\
}									\
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(symbol, type));

DEFINE_BASIC_FETCH_FUNCS(symbol)
DEFINE_FETCH_symbol(string)
DEFINE_FETCH_symbol(string_size)

/* kprobes don't support file_offset fetch methods */
#define fetch_file_offset_u8		NULL
#define fetch_file_offset_u16		NULL
#define fetch_file_offset_u32		NULL
#define fetch_file_offset_u64		NULL
#define fetch_file_offset_string	NULL
#define fetch_file_offset_string_size	NULL

/* Fetch type information table */
static const struct fetch_type kprobes_fetch_type_table[] = {
	/* Special types */
	[FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string,
					sizeof(u32), 1, "__data_loc char[]"),
	[FETCH_TYPE_STRSIZE] = __ASSIGN_FETCH_TYPE("string_size", u32,
					string_size, sizeof(u32), 0, "u32"),
	/* Basic types */
	ASSIGN_FETCH_TYPE(u8,  u8,  0),
	ASSIGN_FETCH_TYPE(u16, u16, 0),
	ASSIGN_FETCH_TYPE(u32, u32, 0),
	ASSIGN_FETCH_TYPE(u64, u64, 0),
	ASSIGN_FETCH_TYPE(s8,  u8,  1),
	ASSIGN_FETCH_TYPE(s16, u16, 1),
	ASSIGN_FETCH_TYPE(s32, u32, 1),
	ASSIGN_FETCH_TYPE(s64, u64, 1),
	ASSIGN_FETCH_TYPE_ALIAS(x8,  u8,  u8,  0),
	ASSIGN_FETCH_TYPE_ALIAS(x16, u16, u16, 0),
	ASSIGN_FETCH_TYPE_ALIAS(x32, u32, u32, 0),
	ASSIGN_FETCH_TYPE_ALIAS(x64, u64, u64, 0),

	ASSIGN_FETCH_TYPE_END
};
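
/*
 * For example, a probe argument written as "+0(%di):x32" (x86 register
 * syntax) selects the x32 entry above: fetch a u32 from memory and print
 * it as hexadecimal.
 */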

/*
 * Allocate new trace_probe and initialize it (including kprobes).
 */
static struct trace_kprobe *alloc_trace_kprobe(const char *group,
					     const char *event,
					     void *addr,
					     const char *symbol,
					     unsigned long offs,
					     int maxactive,
					     int nargs, bool is_return)
{
	struct trace_kprobe *tk;
	int ret = -ENOMEM;

	tk = kzalloc(SIZEOF_TRACE_KPROBE(nargs), GFP_KERNEL);
	if (!tk)
		return ERR_PTR(ret);

	tk->nhit = alloc_percpu(unsigned long);
	if (!tk->nhit)
		goto error;

	if (symbol) {
		tk->symbol = kstrdup(symbol, GFP_KERNEL);
		if (!tk->symbol)
			goto error;
		tk->rp.kp.symbol_name = tk->symbol;
		tk->rp.kp.offset = offs;
	} else
		tk->rp.kp.addr = addr;

	if (is_return)
		tk->rp.handler = kretprobe_dispatcher;
	else
		tk->rp.kp.pre_handler = kprobe_dispatcher;

	tk->rp.maxactive = maxactive;

	if (!event || !is_good_name(event)) {
		ret = -EINVAL;
		goto error;
	}

	tk->tp.call.class = &tk->tp.class;
	tk->tp.call.name = kstrdup(event, GFP_KERNEL);
	if (!tk->tp.call.name)
		goto error;

	if (!group || !is_good_name(group)) {
		ret = -EINVAL;
		goto error;
	}

	tk->tp.class.system = kstrdup(group, GFP_KERNEL);
	if (!tk->tp.class.system)
		goto error;

	INIT_LIST_HEAD(&tk->list);
	INIT_LIST_HEAD(&tk->tp.files);
	return tk;
error:
	kfree(tk->tp.call.name);
	kfree(tk->symbol);
	free_percpu(tk->nhit);
	kfree(tk);
	return ERR_PTR(ret);
}

static void free_trace_kprobe(struct trace_kprobe *tk)
{
	int i;

	for (i = 0; i < tk->tp.nr_args; i++)
		traceprobe_free_probe_arg(&tk->tp.args[i]);

	kfree(tk->tp.call.class->system);
	kfree(tk->tp.call.name);
	kfree(tk->symbol);
	free_percpu(tk->nhit);
	kfree(tk);
}

static struct trace_kprobe *find_trace_kprobe(const char *event,
					      const char *group)
{
	struct trace_kprobe *tk;

	list_for_each_entry(tk, &probe_list, list)
		if (strcmp(trace_event_name(&tk->tp.call), event) == 0 &&
		    strcmp(tk->tp.call.class->system, group) == 0)
			return tk;
	return NULL;
}
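
/*
 * Note: find_trace_kprobe() above walks probe_list without taking any
 * lock itself; callers are expected to hold probe_lock (the selftest
 * near the end of this file is the one deliberate, commented exception).
 */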

/*
 * Enable trace_probe.
 * If the file is NULL, enable the "perf" handler; otherwise enable the
 * "trace" handler.
 */
static int
enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
{
	struct event_file_link *link = NULL;
	int ret = 0;

	if (file) {
		link = kmalloc(sizeof(*link), GFP_KERNEL);
		if (!link) {
			ret = -ENOMEM;
			goto out;
		}

		link->file = file;
		list_add_tail_rcu(&link->list, &tk->tp.files);

		tk->tp.flags |= TP_FLAG_TRACE;
	} else
		tk->tp.flags |= TP_FLAG_PROFILE;

	if (trace_probe_is_registered(&tk->tp) && !trace_kprobe_has_gone(tk)) {
		if (trace_kprobe_is_return(tk))
			ret = enable_kretprobe(&tk->rp);
		else
			ret = enable_kprobe(&tk->rp.kp);
	}

	if (ret) {
		if (file) {
			/*
			 * Note: the if condition holds only when
			 * WARN_ON_ONCE() does not fire, i.e. when link
			 * is known to be valid here.
			 */
			if (!WARN_ON_ONCE(!link))
				list_del_rcu(&link->list);
			kfree(link);
			tk->tp.flags &= ~TP_FLAG_TRACE;
		} else {
			tk->tp.flags &= ~TP_FLAG_PROFILE;
		}
	}
 out:
	return ret;
}

/*
 * Disable trace_probe.
 * If the file is NULL, disable the "perf" handler; otherwise disable the
 * "trace" handler.
 */
static int
disable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
{
	struct event_file_link *link = NULL;
	int wait = 0;
	int ret = 0;

	if (file) {
		link = find_event_file_link(&tk->tp, file);
		if (!link) {
			ret = -EINVAL;
			goto out;
		}

		list_del_rcu(&link->list);
		wait = 1;
		if (!list_empty(&tk->tp.files))
			goto out;

		tk->tp.flags &= ~TP_FLAG_TRACE;
	} else
		tk->tp.flags &= ~TP_FLAG_PROFILE;

	if (!trace_probe_is_enabled(&tk->tp) && trace_probe_is_registered(&tk->tp)) {
		if (trace_kprobe_is_return(tk))
			disable_kretprobe(&tk->rp);
		else
			disable_kprobe(&tk->rp.kp);
		wait = 1;
	}
 out:
	if (wait) {
		/*
		 * Synchronize with kprobe_trace_func/kretprobe_trace_func
		 * to ensure the probe is really disabled (all running
		 * handlers have finished). This matters not only for the
		 * kfree() below but also for the caller:
		 * trace_remove_event_call() relies on it before releasing
		 * event_call related objects, which are accessed in
		 * kprobe_trace_func/kretprobe_trace_func.
		 */
		synchronize_sched();
		kfree(link);	/* Ignored if link == NULL */
	}

	return ret;
}

/* Internal register function - just handle k*probes and flags */
static int __register_trace_kprobe(struct trace_kprobe *tk)
{
	int i, ret;

	if (trace_probe_is_registered(&tk->tp))
		return -EINVAL;

	for (i = 0; i < tk->tp.nr_args; i++)
		traceprobe_update_arg(&tk->tp.args[i]);

	/* Set/clear disabled flag according to tp->flag */
	if (trace_probe_is_enabled(&tk->tp))
		tk->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
	else
		tk->rp.kp.flags |= KPROBE_FLAG_DISABLED;

	if (trace_kprobe_is_return(tk))
		ret = register_kretprobe(&tk->rp);
	else
		ret = register_kprobe(&tk->rp.kp);

	if (ret == 0)
		tk->tp.flags |= TP_FLAG_REGISTERED;
	else {
		pr_warn("Could not insert probe at %s+%lu: %d\n",
			trace_kprobe_symbol(tk), trace_kprobe_offset(tk), ret);
		if (ret == -ENOENT && trace_kprobe_is_on_module(tk)) {
			pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
			ret = 0;
		} else if (ret == -EILSEQ) {
			pr_warn("Probing address(0x%p) is not an instruction boundary.\n",
				tk->rp.kp.addr);
			ret = -EINVAL;
		}
	}

	return ret;
}

/* Internal unregister function - just handle k*probes and flags */
static void __unregister_trace_kprobe(struct trace_kprobe *tk)
{
	if (trace_probe_is_registered(&tk->tp)) {
		if (trace_kprobe_is_return(tk))
			unregister_kretprobe(&tk->rp);
		else
			unregister_kprobe(&tk->rp.kp);
		tk->tp.flags &= ~TP_FLAG_REGISTERED;
		/* Cleanup kprobe for reuse */
		if (tk->rp.kp.symbol_name)
			tk->rp.kp.addr = NULL;
	}
}
/* Unregister a trace_probe and probe_event: must be called with probe_lock held */
static int unregister_trace_kprobe(struct trace_kprobe *tk)
{
	/* An enabled event can not be unregistered */
	if (trace_probe_is_enabled(&tk->tp))
		return -EBUSY;

	/* Will fail if probe is being used by ftrace or perf */
	if (unregister_kprobe_event(tk))
		return -EBUSY;

	__unregister_trace_kprobe(tk);
	list_del(&tk->list);

	return 0;
}

/* Register a trace_probe and probe_event */
static int register_trace_kprobe(struct trace_kprobe *tk)
{
	struct trace_kprobe *old_tk;
	int ret;

	mutex_lock(&probe_lock);

	/* Delete the old event if one with the same name exists */
	old_tk = find_trace_kprobe(trace_event_name(&tk->tp.call),
				   tk->tp.call.class->system);
	if (old_tk) {
		ret = unregister_trace_kprobe(old_tk);
		if (ret < 0)
			goto end;
		free_trace_kprobe(old_tk);
	}

	/* Register new event */
	ret = register_kprobe_event(tk);
	if (ret) {
		pr_warn("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	/* Register k*probe */
	ret = __register_trace_kprobe(tk);
	if (ret < 0)
		unregister_kprobe_event(tk);
	else
		list_add_tail(&tk->list, &probe_list);

end:
	mutex_unlock(&probe_lock);
	return ret;
}

/* Module notifier callback, checking events on the coming module */
static int trace_kprobe_module_callback(struct notifier_block *nb,
				       unsigned long val, void *data)
{
	struct module *mod = data;
	struct trace_kprobe *tk;
	int ret;

	if (val != MODULE_STATE_COMING)
		return NOTIFY_DONE;

	/* Update probes on coming module */
	mutex_lock(&probe_lock);
	list_for_each_entry(tk, &probe_list, list) {
		if (trace_kprobe_within_module(tk, mod)) {
			/* No need to check busy - this probe should already be gone. */
			__unregister_trace_kprobe(tk);
			ret = __register_trace_kprobe(tk);
			if (ret)
				pr_warn("Failed to re-register probe %s on %s: %d\n",
					trace_event_name(&tk->tp.call),
					mod->name, ret);
		}
	}
	mutex_unlock(&probe_lock);

	return NOTIFY_DONE;
}

static struct notifier_block trace_kprobe_module_nb = {
	.notifier_call = trace_kprobe_module_callback,
	.priority = 1	/* Invoked after kprobe module callback */
};

/* Convert certain expected symbols into '_' when generating event names */
static inline void sanitize_event_name(char *name)
{
	while (*name++ != '\0')
		if (*name == ':' || *name == '.')
			*name = '_';
}
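
/*
 * For instance, an event name auto-generated from the symbol
 * "vfs_read.isra.0" (offset 0) would be "p_vfs_read.isra.0_0" and come
 * out of sanitize_event_name() as "p_vfs_read_isra_0_0".
 */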

static int create_trace_kprobe(int argc, char **argv)
{
	/*
	 * Argument syntax:
	 *  - Add kprobe:
	 *      p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
	 *  - Add kretprobe:
	 *      r[MAXACTIVE][:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
	 * Fetch args:
	 *  $retval	: fetch return value
	 *  $stack	: fetch stack address
	 *  $stackN	: fetch Nth of stack (N:0-)
	 *  $comm	: fetch current task comm
	 *  @ADDR	: fetch memory at ADDR (ADDR should be in kernel)
	 *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
	 *  %REG	: fetch register REG
	 * Dereferencing memory fetch:
	 *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
	 * Alias name of args:
	 *  NAME=FETCHARG : set NAME as alias of FETCHARG.
	 * Type of args:
	 *  FETCHARG:TYPE : use TYPE instead of unsigned long.
	 */
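	/*
	 * For example (a sketch; register names are x86-specific), writing
	 * the following lines to <tracefs>/kprobe_events would create a
	 * probe and a return probe on do_sys_open():
	 *
	 *   p:myprobe do_sys_open dfd=%ax filename=%dx flags=%cx mode=+4($stack)
	 *   r:myretprobe do_sys_open $retval
	 *
	 * See Documentation/trace/kprobetrace.txt for the full syntax.
	 */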
	struct trace_kprobe *tk;
	int i, ret = 0;
	bool is_return = false, is_delete = false;
	char *symbol = NULL, *event = NULL, *group = NULL;
	int maxactive = 0;
	char *arg;
	long offset = 0;
	void *addr = NULL;
	char buf[MAX_EVENT_NAME_LEN];

	/* argc must be >= 1 */
	if (argv[0][0] == 'p')
		is_return = false;
	else if (argv[0][0] == 'r')
		is_return = true;
	else if (argv[0][0] == '-')
		is_delete = true;
	else {
		pr_info("Probe definition must start with 'p', 'r' or '-'.\n");
		return -EINVAL;
	}

	event = strchr(&argv[0][1], ':');
	if (event) {
		event[0] = '\0';
		event++;
	}
	if (is_return && isdigit(argv[0][1])) {
		ret = kstrtouint(&argv[0][1], 0, &maxactive);
		if (ret) {
			pr_info("Failed to parse maxactive.\n");
			return ret;
		}
		/* kretprobe instances are iterated over via a list. The
		 * maximum should stay reasonable.
		 */
		if (maxactive > KRETPROBE_MAXACTIVE_MAX) {
			pr_info("Maxactive is too big (%d > %d).\n",
				maxactive, KRETPROBE_MAXACTIVE_MAX);
			return -E2BIG;
		}
	}

	if (event) {
		if (strchr(event, '/')) {
			group = event;
			event = strchr(group, '/') + 1;
			event[-1] = '\0';
			if (strlen(group) == 0) {
				pr_info("Group name is not specified\n");
				return -EINVAL;
			}
		}
		if (strlen(event) == 0) {
			pr_info("Event name is not specified\n");
			return -EINVAL;
		}
	}
	if (!group)
		group = KPROBE_EVENT_SYSTEM;

	if (is_delete) {
		if (!event) {
			pr_info("Delete command needs an event name.\n");
			return -EINVAL;
		}
		mutex_lock(&probe_lock);
		tk = find_trace_kprobe(event, group);
		if (!tk) {
			mutex_unlock(&probe_lock);
			pr_info("Event %s/%s doesn't exist.\n", group, event);
			return -ENOENT;
		}
		/* delete an event */
		ret = unregister_trace_kprobe(tk);
		if (ret == 0)
			free_trace_kprobe(tk);
		mutex_unlock(&probe_lock);
		return ret;
	}

	if (argc < 2) {
		pr_info("Probe point is not specified.\n");
		return -EINVAL;
	}

	/* Try to parse an address. If that fails, try to read the
	 * input as a symbol. */
	if (kstrtoul(argv[1], 0, (unsigned long *)&addr)) {
		/* a symbol specified */
		symbol = argv[1];
		/* TODO: support .init module functions */
		ret = traceprobe_split_symbol_offset(symbol, &offset);
		if (ret || offset < 0 || offset > UINT_MAX) {
			pr_info("Failed to parse either an address or a symbol.\n");
			return ret;
		}
		if (offset && is_return &&
		    !kprobe_on_func_entry(NULL, symbol, offset)) {
			pr_info("Given offset is not valid for return probe.\n");
			return -EINVAL;
		}
	}
	argc -= 2; argv += 2;

	/* setup a probe */
	if (!event) {
		/* Make a new event name */
		if (symbol)
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
				 is_return ? 'r' : 'p', symbol, offset);
		else
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
				 is_return ? 'r' : 'p', addr);
		sanitize_event_name(buf);
		event = buf;
	}
	tk = alloc_trace_kprobe(group, event, addr, symbol, offset, maxactive,
				argc, is_return);
	if (IS_ERR(tk)) {
		pr_info("Failed to allocate trace_probe.(%d)\n",
			(int)PTR_ERR(tk));
		return PTR_ERR(tk);
	}

	/* parse arguments */
	ret = 0;
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		struct probe_arg *parg = &tk->tp.args[i];

		/* Increment count for freeing args in error case */
		tk->tp.nr_args++;

		/* Parse argument name */
		arg = strchr(argv[i], '=');
		if (arg) {
			*arg++ = '\0';
			parg->name = kstrdup(argv[i], GFP_KERNEL);
		} else {
			arg = argv[i];
			/* If argument name is omitted, set "argN" */
			snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
			parg->name = kstrdup(buf, GFP_KERNEL);
		}

		if (!parg->name) {
			pr_info("Failed to allocate argument[%d] name.\n", i);
			ret = -ENOMEM;
			goto error;
		}

		if (!is_good_name(parg->name)) {
			pr_info("Invalid argument[%d] name: %s\n",
				i, parg->name);
			ret = -EINVAL;
			goto error;
		}

		if (traceprobe_conflict_field_name(parg->name,
						   tk->tp.args, i)) {
			pr_info("Argument[%d] name '%s' conflicts with "
				"another field.\n", i, argv[i]);
			ret = -EINVAL;
			goto error;
		}

		/* Parse fetch argument */
		ret = traceprobe_parse_probe_arg(arg, &tk->tp.size, parg,
						 is_return, true,
						 kprobes_fetch_type_table);
		if (ret) {
			pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
			goto error;
		}
	}

	ret = register_trace_kprobe(tk);
	if (ret)
		goto error;
	return 0;

error:
	free_trace_kprobe(tk);
	return ret;
}

static int release_all_trace_kprobes(void)
{
	struct trace_kprobe *tk;
	int ret = 0;

	mutex_lock(&probe_lock);
	/* Ensure no probe is in use. */
	list_for_each_entry(tk, &probe_list, list)
		if (trace_probe_is_enabled(&tk->tp)) {
			ret = -EBUSY;
			goto end;
		}
	/* TODO: Use batch unregistration */
	while (!list_empty(&probe_list)) {
		tk = list_entry(probe_list.next, struct trace_kprobe, list);
		ret = unregister_trace_kprobe(tk);
		if (ret)
			goto end;
		free_trace_kprobe(tk);
	}

end:
	mutex_unlock(&probe_lock);

	return ret;
}

/* Probes listing interfaces */
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&probe_lock);
	return seq_list_start(&probe_list, *pos);
}

static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &probe_list, pos);
}

static void probes_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&probe_lock);
}

static int probes_seq_show(struct seq_file *m, void *v)
{
	struct trace_kprobe *tk = v;
	int i;

	seq_putc(m, trace_kprobe_is_return(tk) ? 'r' : 'p');
	seq_printf(m, ":%s/%s", tk->tp.call.class->system,
		   trace_event_name(&tk->tp.call));

	if (!tk->symbol)
		seq_printf(m, " 0x%p", tk->rp.kp.addr);
	else if (tk->rp.kp.offset)
		seq_printf(m, " %s+%u", trace_kprobe_symbol(tk),
			   tk->rp.kp.offset);
	else
		seq_printf(m, " %s", trace_kprobe_symbol(tk));

	for (i = 0; i < tk->tp.nr_args; i++)
		seq_printf(m, " %s=%s", tk->tp.args[i].name, tk->tp.args[i].comm);
	seq_putc(m, '\n');

	return 0;
}

static const struct seq_operations probes_seq_op = {
	.start  = probes_seq_start,
	.next   = probes_seq_next,
	.stop   = probes_seq_stop,
	.show   = probes_seq_show
};

static int probes_open(struct inode *inode, struct file *file)
{
	int ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = release_all_trace_kprobes();
		if (ret < 0)
			return ret;
	}

	return seq_open(file, &probes_seq_op);
}

static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return traceprobe_probes_write(file, buffer, count, ppos,
				       create_trace_kprobe);
}

static const struct file_operations kprobe_events_ops = {
	.owner		= THIS_MODULE,
	.open		= probes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
	.write		= probes_write,
};

/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct trace_kprobe *tk = v;

	seq_printf(m, "  %-44s %15lu %15lu\n",
		   trace_event_name(&tk->tp.call),
		   trace_kprobe_nhit(tk),
		   tk->rp.kp.nmissed);

	return 0;
}

static const struct seq_operations profile_seq_op = {
	.start  = probes_seq_start,
	.next   = probes_seq_next,
	.stop   = probes_seq_stop,
	.show   = probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &profile_seq_op);
}

static const struct file_operations kprobe_profile_ops = {
	.owner		= THIS_MODULE,
	.open		= profile_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

/* Kprobe handler */
static nokprobe_inline void
__kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
		    struct trace_event_file *trace_file)
{
	struct kprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, dsize, pc;
	unsigned long irq_flags;
	struct trace_event_call *call = &tk->tp.call;

	WARN_ON(call != trace_file->event_call);

	if (trace_trigger_soft_disabled(trace_file))
		return;

	local_save_flags(irq_flags);
	pc = preempt_count();

	dsize = __get_data_size(&tk->tp, regs);
	size = sizeof(*entry) + tk->tp.size + dsize;

	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
						call->event.type,
						size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->ip = (unsigned long)tk->rp.kp.addr;
	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);

	event_trigger_unlock_commit_regs(trace_file, buffer, event,
					 entry, irq_flags, pc, regs);
}
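
/*
 * Note on the record layout built above: the entry header is followed by
 * tk->tp.size bytes of fixed-size argument values and then dsize bytes of
 * dynamic data (strings), which the "__data_loc"-style rlocs point into.
 */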

static void
kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs)
{
	struct event_file_link *link;

	list_for_each_entry_rcu(link, &tk->tp.files, list)
		__kprobe_trace_func(tk, regs, link->file);
}
NOKPROBE_SYMBOL(kprobe_trace_func);

/* Kretprobe handler */
static nokprobe_inline void
__kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
		       struct pt_regs *regs,
		       struct trace_event_file *trace_file)
{
	struct kretprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, pc, dsize;
	unsigned long irq_flags;
	struct trace_event_call *call = &tk->tp.call;

	WARN_ON(call != trace_file->event_call);

	if (trace_trigger_soft_disabled(trace_file))
		return;

	local_save_flags(irq_flags);
	pc = preempt_count();

	dsize = __get_data_size(&tk->tp, regs);
	size = sizeof(*entry) + tk->tp.size + dsize;

	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
						call->event.type,
						size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->func = (unsigned long)tk->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);

	event_trigger_unlock_commit_regs(trace_file, buffer, event,
					 entry, irq_flags, pc, regs);
}

static void
kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
		     struct pt_regs *regs)
{
	struct event_file_link *link;

	list_for_each_entry_rcu(link, &tk->tp.files, list)
		__kretprobe_trace_func(tk, ri, regs, link->file);
}
NOKPROBE_SYMBOL(kretprobe_trace_func);

/* Event entry printers */
static enum print_line_t
print_kprobe_event(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct kprobe_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;
	u8 *data;
	int i;

	field = (struct kprobe_trace_entry_head *)iter->ent;
	tp = container_of(event, struct trace_probe, call.event);

	trace_seq_printf(s, "%s: (", trace_event_name(&tp->call));

	if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
		goto out;

	trace_seq_putc(s, ')');

	data = (u8 *)&field[1];
	for (i = 0; i < tp->nr_args; i++)
		if (!tp->args[i].type->print(s, tp->args[i].name,
					     data + tp->args[i].offset, field))
			goto out;

	trace_seq_putc(s, '\n');
 out:
	return trace_handle_return(s);
}

static enum print_line_t
print_kretprobe_event(struct trace_iterator *iter, int flags,
		      struct trace_event *event)
{
	struct kretprobe_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;
	u8 *data;
	int i;

	field = (struct kretprobe_trace_entry_head *)iter->ent;
	tp = container_of(event, struct trace_probe, call.event);

	trace_seq_printf(s, "%s: (", trace_event_name(&tp->call));

	if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
		goto out;

	trace_seq_puts(s, " <- ");

	if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
		goto out;

	trace_seq_putc(s, ')');

	data = (u8 *)&field[1];
	for (i = 0; i < tp->nr_args; i++)
		if (!tp->args[i].type->print(s, tp->args[i].name,
					     data + tp->args[i].offset, field))
			goto out;

	trace_seq_putc(s, '\n');

 out:
	return trace_handle_return(s);
}


static int kprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret, i;
	struct kprobe_trace_entry_head field;
	struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;

	DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
	/* Set argument names as fields */
	for (i = 0; i < tk->tp.nr_args; i++) {
		struct probe_arg *parg = &tk->tp.args[i];

		ret = trace_define_field(event_call, parg->type->fmttype,
					 parg->name,
					 sizeof(field) + parg->offset,
					 parg->type->size,
					 parg->type->is_signed,
					 FILTER_OTHER);
		if (ret)
			return ret;
	}
	return 0;
}

static int kretprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret, i;
	struct kretprobe_trace_entry_head field;
	struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;

	DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
	DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
	/* Set argument names as fields */
	for (i = 0; i < tk->tp.nr_args; i++) {
		struct probe_arg *parg = &tk->tp.args[i];

		ret = trace_define_field(event_call, parg->type->fmttype,
					 parg->name,
					 sizeof(field) + parg->offset,
					 parg->type->size,
					 parg->type->is_signed,
					 FILTER_OTHER);
		if (ret)
			return ret;
	}
	return 0;
}

#ifdef CONFIG_PERF_EVENTS

/* Kprobe profile handler */
static void
kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
{
	struct trace_event_call *call = &tk->tp.call;
	struct bpf_prog *prog = call->prog;
	struct kprobe_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	int rctx;

	if (prog && !trace_call_bpf(prog, regs))
		return;

	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		return;

	dsize = __get_data_size(&tk->tp, regs);
	__size = sizeof(*entry) + tk->tp.size + dsize;
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);
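	/*
	 * A note on the sizing above: perf prepends a u32 size field to each
	 * raw record and wants the total to be u64-aligned, hence the ALIGN
	 * over __size + sizeof(u32) followed by subtracting the u32 again.
	 */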

	entry = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!entry)
		return;

	entry->ip = (unsigned long)tk->rp.kp.addr;
	memset(&entry[1], 0, dsize);
	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL, NULL);
}
NOKPROBE_SYMBOL(kprobe_perf_func);

/* Kretprobe profile handler */
static void
kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
		    struct pt_regs *regs)
{
	struct trace_event_call *call = &tk->tp.call;
	struct bpf_prog *prog = call->prog;
	struct kretprobe_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	int rctx;

	if (prog && !trace_call_bpf(prog, regs))
		return;

	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		return;

	dsize = __get_data_size(&tk->tp, regs);
	__size = sizeof(*entry) + tk->tp.size + dsize;
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	entry = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!entry)
		return;

	entry->func = (unsigned long)tk->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL, NULL);
}
NOKPROBE_SYMBOL(kretprobe_perf_func);
#endif	/* CONFIG_PERF_EVENTS */

/*
 * Called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
 *
 * kprobe_trace_self_tests_init() does enable_trace_kprobe/disable_trace_kprobe
 * lockless, but we can't race with this __init function.
 */
static int kprobe_register(struct trace_event_call *event,
			   enum trace_reg type, void *data)
{
	struct trace_kprobe *tk = (struct trace_kprobe *)event->data;
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return enable_trace_kprobe(tk, file);
	case TRACE_REG_UNREGISTER:
		return disable_trace_kprobe(tk, file);

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return enable_trace_kprobe(tk, NULL);
	case TRACE_REG_PERF_UNREGISTER:
		return disable_trace_kprobe(tk, NULL);
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}

static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
{
	struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);

	raw_cpu_inc(*tk->nhit);

	if (tk->tp.flags & TP_FLAG_TRACE)
		kprobe_trace_func(tk, regs);
#ifdef CONFIG_PERF_EVENTS
	if (tk->tp.flags & TP_FLAG_PROFILE)
		kprobe_perf_func(tk, regs);
#endif
	return 0;	/* We don't tweak the kernel, so just return 0 */
}
NOKPROBE_SYMBOL(kprobe_dispatcher);

static int
kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	struct trace_kprobe *tk = container_of(ri->rp, struct trace_kprobe, rp);

	raw_cpu_inc(*tk->nhit);

	if (tk->tp.flags & TP_FLAG_TRACE)
		kretprobe_trace_func(tk, ri, regs);
#ifdef CONFIG_PERF_EVENTS
	if (tk->tp.flags & TP_FLAG_PROFILE)
		kretprobe_perf_func(tk, ri, regs);
#endif
	return 0;	/* We don't tweak the kernel, so just return 0 */
}
NOKPROBE_SYMBOL(kretprobe_dispatcher);

static struct trace_event_functions kretprobe_funcs = {
	.trace		= print_kretprobe_event
};

static struct trace_event_functions kprobe_funcs = {
	.trace		= print_kprobe_event
};

static int register_kprobe_event(struct trace_kprobe *tk)
{
	struct trace_event_call *call = &tk->tp.call;
	int ret;

	/* Initialize trace_event_call */
	INIT_LIST_HEAD(&call->class->fields);
	if (trace_kprobe_is_return(tk)) {
		call->event.funcs = &kretprobe_funcs;
		call->class->define_fields = kretprobe_event_define_fields;
	} else {
		call->event.funcs = &kprobe_funcs;
		call->class->define_fields = kprobe_event_define_fields;
	}
	if (set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0)
		return -ENOMEM;
	ret = register_trace_event(&call->event);
	if (!ret) {
		kfree(call->print_fmt);
		return -ENODEV;
	}
	call->flags = TRACE_EVENT_FL_KPROBE;
	call->class->reg = kprobe_register;
	call->data = tk;
	ret = trace_add_event_call(call);
	if (ret) {
		pr_info("Failed to register kprobe event: %s\n",
			trace_event_name(call));
		kfree(call->print_fmt);
		unregister_trace_event(&call->event);
	}
	return ret;
}

static int unregister_kprobe_event(struct trace_kprobe *tk)
{
	int ret;

	/* tp->event is unregistered in trace_remove_event_call() */
	ret = trace_remove_event_call(&tk->tp.call);
	if (!ret)
		kfree(tk->tp.call.print_fmt);
	return ret;
}

/* Make a tracefs interface for controlling probe points */
static __init int init_kprobe_trace(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	if (register_module_notifier(&trace_kprobe_module_nb))
		return -EINVAL;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	entry = tracefs_create_file("kprobe_events", 0644, d_tracer,
				    NULL, &kprobe_events_ops);

	/* Event list interface */
	if (!entry)
		pr_warn("Could not create tracefs 'kprobe_events' entry\n");

	/* Profile interface */
	entry = tracefs_create_file("kprobe_profile", 0444, d_tracer,
				    NULL, &kprobe_profile_ops);

	if (!entry)
		pr_warn("Could not create tracefs 'kprobe_profile' entry\n");
	return 0;
}
fs_initcall(init_kprobe_trace);


#ifdef CONFIG_FTRACE_STARTUP_TEST
/*
 * The "__used" keeps gcc from removing the function symbol
 * from the kallsyms table. 'noinline' makes sure that there
 * isn't an inlined version used by the test method below.
 */
static __used __init noinline int
kprobe_trace_selftest_target(int a1, int a2, int a3, int a4, int a5, int a6)
{
	return a1 + a2 + a3 + a4 + a5 + a6;
}

static __init struct trace_event_file *
find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
{
	struct trace_event_file *file;

	list_for_each_entry(file, &tr->events, list)
		if (file->event_call == &tk->tp.call)
			return file;

	return NULL;
}

/*
 * Nobody but us can call enable_trace_kprobe/disable_trace_kprobe at this
 * stage, so we can do this lockless.
 */
static __init int kprobe_trace_self_tests_init(void)
{
	int ret, warn = 0;
	int (*target)(int, int, int, int, int, int);
	struct trace_kprobe *tk;
	struct trace_event_file *file;

	if (tracing_is_disabled())
		return -ENODEV;

	target = kprobe_trace_selftest_target;

	pr_info("Testing kprobe tracing: ");

	ret = traceprobe_command("p:testprobe kprobe_trace_selftest_target "
				 "$stack $stack0 +0($stack)",
				 create_trace_kprobe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on probing function entry.\n");
		warn++;
	} else {
		/* Enable trace point */
		tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
		if (WARN_ON_ONCE(tk == NULL)) {
			pr_warn("error on getting new probe.\n");
			warn++;
		} else {
			file = find_trace_probe_file(tk, top_trace_array());
			if (WARN_ON_ONCE(file == NULL)) {
				pr_warn("error on getting probe file.\n");
				warn++;
			} else
				enable_trace_kprobe(tk, file);
		}
	}

	ret = traceprobe_command("r:testprobe2 kprobe_trace_selftest_target "
				 "$retval", create_trace_kprobe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on probing function return.\n");
		warn++;
	} else {
		/* Enable trace point */
		tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
		if (WARN_ON_ONCE(tk == NULL)) {
			pr_warn("error on getting 2nd new probe.\n");
			warn++;
		} else {
			file = find_trace_probe_file(tk, top_trace_array());
			if (WARN_ON_ONCE(file == NULL)) {
				pr_warn("error on getting probe file.\n");
				warn++;
			} else
				enable_trace_kprobe(tk, file);
		}
	}

	if (warn)
		goto end;

	ret = target(1, 2, 3, 4, 5, 6);

	/*
	 * Not expecting an error here, the check is only to prevent the
	 * optimizer from removing the call to target() as otherwise there
	 * are no side-effects and the call is never performed.
	 */
	if (ret != 21)
		warn++;

	/* Disable trace points before removing them */
	tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
	if (WARN_ON_ONCE(tk == NULL)) {
		pr_warn("error on getting test probe.\n");
		warn++;
	} else {
		if (trace_kprobe_nhit(tk) != 1) {
			pr_warn("incorrect number of testprobe hits\n");
			warn++;
		}

		file = find_trace_probe_file(tk, top_trace_array());
		if (WARN_ON_ONCE(file == NULL)) {
			pr_warn("error on getting probe file.\n");
			warn++;
		} else
			disable_trace_kprobe(tk, file);
	}

	tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
	if (WARN_ON_ONCE(tk == NULL)) {
		pr_warn("error on getting 2nd test probe.\n");
		warn++;
	} else {
		if (trace_kprobe_nhit(tk) != 1) {
			pr_warn("incorrect number of testprobe2 hits\n");
			warn++;
		}

		file = find_trace_probe_file(tk, top_trace_array());
		if (WARN_ON_ONCE(file == NULL)) {
			pr_warn("error on getting probe file.\n");
			warn++;
		} else
			disable_trace_kprobe(tk, file);
	}

	ret = traceprobe_command("-:testprobe", create_trace_kprobe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on deleting a probe.\n");
		warn++;
	}

	ret = traceprobe_command("-:testprobe2", create_trace_kprobe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on deleting a probe.\n");
		warn++;
	}

end:
	release_all_trace_kprobes();
	/*
	 * Wait for the optimizer work to finish. Otherwise it might fiddle
	 * with probes in already freed __init text.
	 */
	wait_for_kprobe_optimizer();
	if (warn)
		pr_cont("NG: Some tests failed. Please check them.\n");
	else
		pr_cont("OK\n");
	return 0;
}

late_initcall(kprobe_trace_self_tests_init);

#endif