// SPDX-License-Identifier: GPL-2.0
/*
 * uprobes-based tracing events
 *
 * Copyright (C) IBM Corporation, 2010-2012
 * Author:	Srikar Dronamraju <srikar@linux.vnet.ibm.com>
 */
#define pr_fmt(fmt)	"trace_uprobe: " fmt

#include <linux/security.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/uprobes.h>
#include <linux/namei.h>
#include <linux/string.h>
#include <linux/rculist.h>

#include "trace_dynevent.h"
#include "trace_probe.h"
#include "trace_probe_tmpl.h"

#define UPROBE_EVENT_SYSTEM	"uprobes"

struct uprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		vaddr[];
};

#define SIZEOF_TRACE_ENTRY(is_return)			\
	(sizeof(struct uprobe_trace_entry_head) +	\
	 sizeof(unsigned long) * (is_return ? 2 : 1))

#define DATAOF_TRACE_ENTRY(entry, is_return)		\
	((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))
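
/*
 * Layout note (added; illustrative): a return probe records two vaddrs
 * (function entry and return address) while a regular probe records one
 * (the instruction pointer), so with 8-byte longs SIZEOF_TRACE_ENTRY()
 * is the header size plus 16 or 8 bytes respectively, and
 * DATAOF_TRACE_ENTRY() points just past the vaddr array.
 */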

struct trace_uprobe_filter {
	rwlock_t		rwlock;
	int			nr_systemwide;
	struct list_head	perf_events;
};

static int trace_uprobe_create(int argc, const char **argv);
static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev);
static int trace_uprobe_release(struct dyn_event *ev);
static bool trace_uprobe_is_busy(struct dyn_event *ev);
static bool trace_uprobe_match(const char *system, const char *event,
			int argc, const char **argv, struct dyn_event *ev);

static struct dyn_event_operations trace_uprobe_ops = {
	.create = trace_uprobe_create,
	.show = trace_uprobe_show,
	.is_busy = trace_uprobe_is_busy,
	.free = trace_uprobe_release,
	.match = trace_uprobe_match,
};

/*
 * uprobe event core functions
 */
struct trace_uprobe {
	struct dyn_event		devent;
	struct uprobe_consumer		consumer;
	struct path			path;
	struct inode			*inode;
	char				*filename;
	unsigned long			offset;
	unsigned long			ref_ctr_offset;
	unsigned long			nhit;
	struct trace_probe		tp;
};

static bool is_trace_uprobe(struct dyn_event *ev)
{
	return ev->ops == &trace_uprobe_ops;
}

static struct trace_uprobe *to_trace_uprobe(struct dyn_event *ev)
{
	return container_of(ev, struct trace_uprobe, devent);
}

/**
 * for_each_trace_uprobe - iterate over the trace_uprobe list
 * @pos:	the struct trace_uprobe * for each entry
 * @dpos:	the struct dyn_event * to use as a loop cursor
 */
#define for_each_trace_uprobe(pos, dpos)	\
	for_each_dyn_event(dpos)		\
		if (is_trace_uprobe(dpos) && (pos = to_trace_uprobe(dpos)))

#define SIZEOF_TRACE_UPROBE(n)				\
	(offsetof(struct trace_uprobe, tp.args) +	\
	(sizeof(struct probe_arg) * (n)))

static int register_uprobe_event(struct trace_uprobe *tu);
static int unregister_uprobe_event(struct trace_uprobe *tu);

struct uprobe_dispatch_data {
	struct trace_uprobe	*tu;
	unsigned long		bp_addr;
};

static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs);

#ifdef CONFIG_STACK_GROWSUP
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr - (n * sizeof(long));
}
#else
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr + (n * sizeof(long));
}
#endif
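
/*
 * Illustration (added; hypothetical numbers): with the usual
 * downward-growing stack and 8-byte longs, adjust_stack_addr(sp, 2)
 * yields sp + 16, the address of the third word on the user stack;
 * under CONFIG_STACK_GROWSUP the offset is subtracted instead.
 */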

static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long ret;
	unsigned long addr = user_stack_pointer(regs);

	addr = adjust_stack_addr(addr, n);

	if (copy_from_user(&ret, (void __force __user *) addr, sizeof(ret)))
		return 0;

	return ret;
}

/*
 * Uprobes-specific fetch functions
 */
static nokprobe_inline int
probe_mem_read(void *dest, void *src, size_t size)
{
	void __user *vaddr = (void __force __user *)src;

	return copy_from_user(dest, vaddr, size) ? -EFAULT : 0;
}

static nokprobe_inline int
probe_mem_read_user(void *dest, void *src, size_t size)
{
	return probe_mem_read(dest, src, size);
}

/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 * length and relative data location.
 */
static nokprobe_inline int
fetch_store_string(unsigned long addr, void *dest, void *base)
{
	long ret;
	u32 loc = *(u32 *)dest;
	int maxlen = get_loc_len(loc);
	u8 *dst = get_loc_data(dest, base);
	void __user *src = (void __force __user *) addr;

	if (unlikely(!maxlen))
		return -ENOMEM;

	if (addr == FETCH_TOKEN_COMM)
		ret = strlcpy(dst, current->comm, maxlen);
	else
		ret = strncpy_from_user(dst, src, maxlen);
	if (ret >= 0) {
		if (ret == maxlen)
			dst[ret - 1] = '\0';
		else
			/*
			 * Include the terminating null byte. In this case it
			 * was copied by strncpy_from_user but not accounted
			 * for in ret.
			 */
			ret++;
		*(u32 *)dest = make_data_loc(ret, (void *)dst - base);
	}

	return ret;
}
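
/*
 * Worked example (added; hypothetical values): with maxlen = 64 and the
 * user string "abc" at addr, strncpy_from_user() returns 3; ret is bumped
 * to 4 to account for the copied NUL, and make_data_loc() packs that
 * length plus the offset of dst relative to base back into *dest.
 */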

static nokprobe_inline int
fetch_store_string_user(unsigned long addr, void *dest, void *base)
{
	return fetch_store_string(addr, dest, base);
}

/* Return the length of the string, including the terminating null byte */
static nokprobe_inline int
fetch_store_strlen(unsigned long addr)
{
	int len;
	void __user *vaddr = (void __force __user *) addr;

	if (addr == FETCH_TOKEN_COMM)
		len = strlen(current->comm) + 1;
	else
		len = strnlen_user(vaddr, MAX_STRING_SIZE);

	return (len > MAX_STRING_SIZE) ? 0 : len;
}
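
/*
 * E.g. (added note): for the user string "abc", strnlen_user() returns 4
 * since the NUL is counted; a fault yields 0, and anything longer than
 * MAX_STRING_SIZE is likewise reported as 0.
 */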

static nokprobe_inline int
fetch_store_strlen_user(unsigned long addr)
{
	return fetch_store_strlen(addr);
}

static unsigned long translate_user_vaddr(unsigned long file_offset)
{
	unsigned long base_addr;
	struct uprobe_dispatch_data *udd;

	udd = (void *) current->utask->vaddr;

	base_addr = udd->bp_addr - udd->tu->offset;
	return base_addr + file_offset;
}
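
/*
 * Illustration (added; hypothetical numbers): if the probe sits at file
 * offset 0x1234 and fired at vaddr 0x7f0000001234, base_addr works out
 * to 0x7f0000000000, so a file offset of 0x2000 resolves to vaddr
 * 0x7f0000002000.
 */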

/* Note that we don't verify it, since the code does not come from user space */
static int
process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest,
		   void *base)
{
	unsigned long val;

	/* 1st stage: get value from context */
	switch (code->op) {
	case FETCH_OP_REG:
		val = regs_get_register(regs, code->param);
		break;
	case FETCH_OP_STACK:
		val = get_user_stack_nth(regs, code->param);
		break;
	case FETCH_OP_STACKP:
		val = user_stack_pointer(regs);
		break;
	case FETCH_OP_RETVAL:
		val = regs_return_value(regs);
		break;
	case FETCH_OP_IMM:
		val = code->immediate;
		break;
	case FETCH_OP_COMM:
		val = FETCH_TOKEN_COMM;
		break;
	case FETCH_OP_DATA:
		val = (unsigned long)code->data;
		break;
	case FETCH_OP_FOFFS:
		val = translate_user_vaddr(code->immediate);
		break;
	default:
		return -EILSEQ;
	}
	code++;

	return process_fetch_insn_bottom(code, val, dest, base);
}
NOKPROBE_SYMBOL(process_fetch_insn)

static struct trace_uprobe_filter *
trace_uprobe_get_filter(struct trace_uprobe *tu)
{
	struct trace_probe_event *event = tu->tp.event;

	return (struct trace_uprobe_filter *)&event->data[0];
}

static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
{
	rwlock_init(&filter->rwlock);
	filter->nr_systemwide = 0;
	INIT_LIST_HEAD(&filter->perf_events);
}

static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter)
{
	return !filter->nr_systemwide && list_empty(&filter->perf_events);
}

static inline bool is_ret_probe(struct trace_uprobe *tu)
{
	return tu->consumer.ret_handler != NULL;
}

static bool trace_uprobe_is_busy(struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);

	return trace_probe_is_enabled(&tu->tp);
}

static bool trace_uprobe_match_command_head(struct trace_uprobe *tu,
					    int argc, const char **argv)
{
	char buf[MAX_ARGSTR_LEN + 1];
	int len;

	if (!argc)
		return true;

	len = strlen(tu->filename);
	if (strncmp(tu->filename, argv[0], len) || argv[0][len] != ':')
		return false;

	if (tu->ref_ctr_offset == 0)
		snprintf(buf, sizeof(buf), "0x%0*lx",
				(int)(sizeof(void *) * 2), tu->offset);
	else
		snprintf(buf, sizeof(buf), "0x%0*lx(0x%lx)",
				(int)(sizeof(void *) * 2), tu->offset,
				tu->ref_ctr_offset);
	if (strcmp(buf, &argv[0][len + 1]))
		return false;

	argc--; argv++;

	return trace_probe_match_command_args(&tu->tp, argc, argv);
}
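
/*
 * E.g. (added; hypothetical values): for filename "/bin/true", offset
 * 0x1234 and a reference counter at 0x5678 on a 64-bit kernel, the first
 * argument must read "/bin/true:0x0000000000001234(0x5678)" to match.
 */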

static bool trace_uprobe_match(const char *system, const char *event,
			       int argc, const char **argv, struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);

	return strcmp(trace_probe_name(&tu->tp), event) == 0 &&
	    (!system || strcmp(trace_probe_group_name(&tu->tp), system) == 0) &&
	    trace_uprobe_match_command_head(tu, argc, argv);
}

static nokprobe_inline struct trace_uprobe *
trace_uprobe_primary_from_call(struct trace_event_call *call)
{
	struct trace_probe *tp;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return NULL;

	return container_of(tp, struct trace_uprobe, tp);
}

/*
 * Allocate new trace_uprobe and initialize it (including uprobes).
 */
static struct trace_uprobe *
alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
{
	struct trace_uprobe *tu;
	int ret;

	tu = kzalloc(SIZEOF_TRACE_UPROBE(nargs), GFP_KERNEL);
	if (!tu)
		return ERR_PTR(-ENOMEM);

	ret = trace_probe_init(&tu->tp, event, group,
				sizeof(struct trace_uprobe_filter));
	if (ret < 0)
		goto error;

	dyn_event_init(&tu->devent, &trace_uprobe_ops);
	tu->consumer.handler = uprobe_dispatcher;
	if (is_ret)
		tu->consumer.ret_handler = uretprobe_dispatcher;
	init_trace_uprobe_filter(trace_uprobe_get_filter(tu));
	return tu;

error:
	kfree(tu);

	return ERR_PTR(ret);
}

static void free_trace_uprobe(struct trace_uprobe *tu)
{
	if (!tu)
		return;

	path_put(&tu->path);
	trace_probe_cleanup(&tu->tp);
	kfree(tu->filename);
	kfree(tu);
}

static struct trace_uprobe *find_probe_event(const char *event, const char *group)
{
	struct dyn_event *pos;
	struct trace_uprobe *tu;

	for_each_trace_uprobe(tu, pos)
		if (strcmp(trace_probe_name(&tu->tp), event) == 0 &&
		    strcmp(trace_probe_group_name(&tu->tp), group) == 0)
			return tu;

	return NULL;
}

/* Unregister a trace_uprobe and probe_event */
static int unregister_trace_uprobe(struct trace_uprobe *tu)
{
	int ret;

	if (trace_probe_has_sibling(&tu->tp))
		goto unreg;

	ret = unregister_uprobe_event(tu);
	if (ret)
		return ret;

unreg:
	dyn_event_remove(&tu->devent);
	trace_probe_unlink(&tu->tp);
	free_trace_uprobe(tu);
	return 0;
}

static bool trace_uprobe_has_same_uprobe(struct trace_uprobe *orig,
					 struct trace_uprobe *comp)
{
	struct trace_probe_event *tpe = orig->tp.event;
	struct trace_probe *pos;
	struct inode *comp_inode = d_real_inode(comp->path.dentry);
	int i;

	list_for_each_entry(pos, &tpe->probes, list) {
		orig = container_of(pos, struct trace_uprobe, tp);
		if (comp_inode != d_real_inode(orig->path.dentry) ||
		    comp->offset != orig->offset)
			continue;

		/*
		 * trace_probe_compare_arg_type() ensured that nr_args and
		 * each argument name and type are the same. Let's compare comm.
		 */
		for (i = 0; i < orig->tp.nr_args; i++) {
			if (strcmp(orig->tp.args[i].comm,
				   comp->tp.args[i].comm))
				break;
		}

		if (i == orig->tp.nr_args)
			return true;
	}

	return false;
}

static int append_trace_uprobe(struct trace_uprobe *tu, struct trace_uprobe *to)
{
	int ret;

	ret = trace_probe_compare_arg_type(&tu->tp, &to->tp);
	if (ret) {
		/* Note that arguments start at index 2 */
		trace_probe_log_set_index(ret + 1);
		trace_probe_log_err(0, DIFF_ARG_TYPE);
		return -EEXIST;
	}
	if (trace_uprobe_has_same_uprobe(to, tu)) {
		trace_probe_log_set_index(0);
		trace_probe_log_err(0, SAME_PROBE);
		return -EEXIST;
	}

	/* Append to existing event */
	ret = trace_probe_append(&tu->tp, &to->tp);
	if (!ret)
		dyn_event_add(&tu->devent);

	return ret;
}

/*
 * Uprobes with multiple reference counters are not allowed, i.e. if the
 * inode and offset match, the reference counter offset *must* match as
 * well. There is one exception, though: if the user is replacing an old
 * trace_uprobe with a new one (same group/event), we allow the same
 * uprobe with a new reference counter as long as the new one does not
 * conflict with any other existing ones.
 */
static int validate_ref_ctr_offset(struct trace_uprobe *new)
{
	struct dyn_event *pos;
	struct trace_uprobe *tmp;
	struct inode *new_inode = d_real_inode(new->path.dentry);

	for_each_trace_uprobe(tmp, pos) {
		if (new_inode == d_real_inode(tmp->path.dentry) &&
		    new->offset == tmp->offset &&
		    new->ref_ctr_offset != tmp->ref_ctr_offset) {
			pr_warn("Reference counter offset mismatch.");
			return -EINVAL;
		}
	}
	return 0;
}

/* Register a trace_uprobe and probe_event */
static int register_trace_uprobe(struct trace_uprobe *tu)
{
	struct trace_uprobe *old_tu;
	int ret;

	mutex_lock(&event_mutex);

	ret = validate_ref_ctr_offset(tu);
	if (ret)
		goto end;

	/* register as an event */
	old_tu = find_probe_event(trace_probe_name(&tu->tp),
				  trace_probe_group_name(&tu->tp));
	if (old_tu) {
		if (is_ret_probe(tu) != is_ret_probe(old_tu)) {
			trace_probe_log_set_index(0);
			trace_probe_log_err(0, DIFF_PROBE_TYPE);
			ret = -EEXIST;
		} else {
			ret = append_trace_uprobe(tu, old_tu);
		}
		goto end;
	}

	ret = register_uprobe_event(tu);
	if (ret) {
		pr_warn("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	dyn_event_add(&tu->devent);

end:
	mutex_unlock(&event_mutex);

	return ret;
}

/*
 * Argument syntax:
 *  - Add uprobe: p|r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS]
 */
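/*
 * Example commands (added for illustration; path, offsets and fetch args
 * are hypothetical):
 *   p:myprobe /bin/bash:0x4245c0 %ax $stack0
 *   r:myret /bin/bash:0x4245c0(0x123456) $retval
 * written to the uprobe_events file created in init_uprobe_trace() below.
 */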
static int trace_uprobe_create(int argc, const char **argv)
{
	struct trace_uprobe *tu;
	const char *event = NULL, *group = UPROBE_EVENT_SYSTEM;
	char *arg, *filename, *rctr, *rctr_end, *tmp;
	char buf[MAX_EVENT_NAME_LEN];
	struct path path;
	unsigned long offset, ref_ctr_offset;
	bool is_return = false;
	int i, ret;

	ret = 0;
	ref_ctr_offset = 0;

	switch (argv[0][0]) {
	case 'r':
		is_return = true;
		break;
	case 'p':
		break;
	default:
		return -ECANCELED;
	}

	if (argc < 2)
		return -ECANCELED;

	if (argv[0][1] == ':')
		event = &argv[0][2];

	if (!strchr(argv[1], '/'))
		return -ECANCELED;

	filename = kstrdup(argv[1], GFP_KERNEL);
	if (!filename)
		return -ENOMEM;

	/* Find the last occurrence, in case the path contains ':' too. */
	arg = strrchr(filename, ':');
	if (!arg || !isdigit(arg[1])) {
		kfree(filename);
		return -ECANCELED;
	}

	trace_probe_log_init("trace_uprobe", argc, argv);
	trace_probe_log_set_index(1);	/* filename is the 2nd argument */

	*arg++ = '\0';
	ret = kern_path(filename, LOOKUP_FOLLOW, &path);
	if (ret) {
		trace_probe_log_err(0, FILE_NOT_FOUND);
		kfree(filename);
		trace_probe_log_clear();
		return ret;
	}
	if (!d_is_reg(path.dentry)) {
		trace_probe_log_err(0, NO_REGULAR_FILE);
		ret = -EINVAL;
		goto fail_address_parse;
	}

	/* Parse reference counter offset if specified. */
	rctr = strchr(arg, '(');
	if (rctr) {
		rctr_end = strchr(rctr, ')');
		if (!rctr_end) {
			ret = -EINVAL;
			rctr_end = rctr + strlen(rctr);
			trace_probe_log_err(rctr_end - filename,
					    REFCNT_OPEN_BRACE);
			goto fail_address_parse;
		} else if (rctr_end[1] != '\0') {
			ret = -EINVAL;
			trace_probe_log_err(rctr_end + 1 - filename,
					    BAD_REFCNT_SUFFIX);
			goto fail_address_parse;
		}

		*rctr++ = '\0';
		*rctr_end = '\0';
		ret = kstrtoul(rctr, 0, &ref_ctr_offset);
		if (ret) {
			trace_probe_log_err(rctr - filename, BAD_REFCNT);
			goto fail_address_parse;
		}
	}

	/* Parse uprobe offset. */
	ret = kstrtoul(arg, 0, &offset);
	if (ret) {
		trace_probe_log_err(arg - filename, BAD_UPROBE_OFFS);
		goto fail_address_parse;
	}

	/* setup a probe */
	trace_probe_log_set_index(0);
	if (event) {
		ret = traceprobe_parse_event_name(&event, &group, buf,
						  event - argv[0]);
		if (ret)
			goto fail_address_parse;
	} else {
		char *tail;
		char *ptr;

		tail = kstrdup(kbasename(filename), GFP_KERNEL);
		if (!tail) {
			ret = -ENOMEM;
			goto fail_address_parse;
		}

		ptr = strpbrk(tail, ".-_");
		if (ptr)
			*ptr = '\0';

		snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);
		event = buf;
		kfree(tail);
	}

	argc -= 2;
	argv += 2;

	tu = alloc_trace_uprobe(group, event, argc, is_return);
	if (IS_ERR(tu)) {
		ret = PTR_ERR(tu);
		/* This must return -ENOMEM otherwise there is a bug */
		WARN_ON_ONCE(ret != -ENOMEM);
		goto fail_address_parse;
	}
	tu->offset = offset;
	tu->ref_ctr_offset = ref_ctr_offset;
	tu->path = path;
	tu->filename = filename;

	/* parse arguments */
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		tmp = kstrdup(argv[i], GFP_KERNEL);
		if (!tmp) {
			ret = -ENOMEM;
			goto error;
		}

		trace_probe_log_set_index(i + 2);
		ret = traceprobe_parse_probe_arg(&tu->tp, i, tmp,
					is_return ? TPARG_FL_RETURN : 0);
		kfree(tmp);
		if (ret)
			goto error;
	}

	ret = traceprobe_set_print_fmt(&tu->tp, is_ret_probe(tu));
	if (ret < 0)
		goto error;

	ret = register_trace_uprobe(tu);
	if (!ret)
		goto out;

error:
	free_trace_uprobe(tu);
out:
	trace_probe_log_clear();
	return ret;

fail_address_parse:
	trace_probe_log_clear();
	path_put(&path);
	kfree(filename);

	return ret;
}

static int create_or_delete_trace_uprobe(int argc, char **argv)
{
	int ret;

	if (argv[0][0] == '-')
		return dyn_event_release(argc, argv, &trace_uprobe_ops);

	ret = trace_uprobe_create(argc, (const char **)argv);
	return ret == -ECANCELED ? -EINVAL : ret;
}
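
/*
 * E.g. (added note; hypothetical event name): writing "-:uprobes/myprobe"
 * deletes an existing event via dyn_event_release(); any other line is
 * parsed as a new probe definition.
 */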

static int trace_uprobe_release(struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);

	return unregister_trace_uprobe(tu);
}

/* Probes listing interfaces */
static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);
	char c = is_ret_probe(tu) ? 'r' : 'p';
	int i;

	seq_printf(m, "%c:%s/%s %s:0x%0*lx", c, trace_probe_group_name(&tu->tp),
			trace_probe_name(&tu->tp), tu->filename,
			(int)(sizeof(void *) * 2), tu->offset);

	if (tu->ref_ctr_offset)
		seq_printf(m, "(0x%lx)", tu->ref_ctr_offset);

	for (i = 0; i < tu->tp.nr_args; i++)
		seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);

	seq_putc(m, '\n');
	return 0;
}

static int probes_seq_show(struct seq_file *m, void *v)
{
	struct dyn_event *ev = v;

	if (!is_trace_uprobe(ev))
		return 0;

	return trace_uprobe_show(m, ev);
}

static const struct seq_operations probes_seq_op = {
	.start	= dyn_event_seq_start,
	.next	= dyn_event_seq_next,
	.stop	= dyn_event_seq_stop,
	.show	= probes_seq_show
};

static int probes_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = dyn_events_release_all(&trace_uprobe_ops);
		if (ret)
			return ret;
	}

	return seq_open(file, &probes_seq_op);
}

static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return trace_parse_run_command(file, buffer, count, ppos,
					create_or_delete_trace_uprobe);
}

static const struct file_operations uprobe_events_ops = {
	.owner		= THIS_MODULE,
	.open		= probes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
	.write		= probes_write,
};

/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct dyn_event *ev = v;
	struct trace_uprobe *tu;

	if (!is_trace_uprobe(ev))
		return 0;

	tu = to_trace_uprobe(ev);
	seq_printf(m, "  %s %-44s %15lu\n", tu->filename,
			trace_probe_name(&tu->tp), tu->nhit);
	return 0;
}

static const struct seq_operations profile_seq_op = {
	.start	= dyn_event_seq_start,
	.next	= dyn_event_seq_next,
	.stop	= dyn_event_seq_stop,
	.show	= probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	return seq_open(file, &profile_seq_op);
}

static const struct file_operations uprobe_profile_ops = {
	.owner		= THIS_MODULE,
	.open		= profile_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

struct uprobe_cpu_buffer {
	struct mutex mutex;
	void *buf;
};
static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer;
static int uprobe_buffer_refcnt;

static int uprobe_buffer_init(void)
{
	int cpu, err_cpu;

	uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer);
	if (uprobe_cpu_buffer == NULL)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct page *p = alloc_pages_node(cpu_to_node(cpu),
						  GFP_KERNEL, 0);
		if (p == NULL) {
			err_cpu = cpu;
			goto err;
		}
		per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);
		mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex);
	}

	return 0;

err:
	for_each_possible_cpu(cpu) {
		if (cpu == err_cpu)
			break;
		free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf);
	}

	free_percpu(uprobe_cpu_buffer);
	return -ENOMEM;
}

static int uprobe_buffer_enable(void)
{
	int ret = 0;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (uprobe_buffer_refcnt++ == 0) {
		ret = uprobe_buffer_init();
		if (ret < 0)
			uprobe_buffer_refcnt--;
	}

	return ret;
}

static void uprobe_buffer_disable(void)
{
	int cpu;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (--uprobe_buffer_refcnt == 0) {
		for_each_possible_cpu(cpu)
			free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
							     cpu)->buf);

		free_percpu(uprobe_cpu_buffer);
		uprobe_cpu_buffer = NULL;
	}
}

static struct uprobe_cpu_buffer *uprobe_buffer_get(void)
{
	struct uprobe_cpu_buffer *ucb;
	int cpu;

	cpu = raw_smp_processor_id();
	ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu);

	/*
	 * Use per-cpu buffers for fastest access, but we might migrate,
	 * so the mutex makes sure we have sole access to it.
	 */
	mutex_lock(&ucb->mutex);

	return ucb;
}

static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
{
	mutex_unlock(&ucb->mutex);
}
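
/*
 * Usage pattern (added note; see the dispatchers below): uprobe_buffer_get()
 * pins one page of per-cpu scratch space, store_trace_args() fills it, the
 * trace/perf writers copy it out, and uprobe_buffer_put() drops the mutex.
 */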

static void __uprobe_trace_func(struct trace_uprobe *tu,
				unsigned long func, struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb, int dsize,
				struct trace_event_file *trace_file)
{
	struct uprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	void *data;
	int size, esize;
	struct trace_event_call *call = trace_probe_event_call(&tu->tp);

	WARN_ON(call != trace_file->event_call);

	if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE))
		return;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
	size = esize + tu->tp.size + dsize;
	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
						call->event.type, size, 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, tu->tp.size + dsize);

	event_trigger_unlock_commit(trace_file, buffer, event, entry, 0, 0);
}

/* uprobe handler */
static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
			     struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct event_file_link *link;

	if (is_ret_probe(tu))
		return 0;

	rcu_read_lock();
	trace_probe_for_each_link_rcu(link, &tu->tp)
		__uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file);
	rcu_read_unlock();

	return 0;
}

static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
				 struct pt_regs *regs,
				 struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct event_file_link *link;

	rcu_read_lock();
	trace_probe_for_each_link_rcu(link, &tu->tp)
		__uprobe_trace_func(tu, func, regs, ucb, dsize, link->file);
	rcu_read_unlock();
}

/* Event entry printers */
static enum print_line_t
print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
{
	struct uprobe_trace_entry_head *entry;
	struct trace_seq *s = &iter->seq;
	struct trace_uprobe *tu;
	u8 *data;

	entry = (struct uprobe_trace_entry_head *)iter->ent;
	tu = trace_uprobe_primary_from_call(
		container_of(event, struct trace_event_call, event));
	if (unlikely(!tu))
		goto out;

	if (is_ret_probe(tu)) {
		trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
				 trace_probe_name(&tu->tp),
				 entry->vaddr[1], entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		trace_seq_printf(s, "%s: (0x%lx)",
				 trace_probe_name(&tu->tp),
				 entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	if (print_probe_args(s, tu->tp.args, tu->tp.nr_args, data, entry) < 0)
		goto out;

	trace_seq_putc(s, '\n');

 out:
	return trace_handle_return(s);
}
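
/*
 * Output format (added note; hypothetical addresses): a regular probe
 * prints "myprobe: (0x401234)", a return probe prints
 * "myret: (0x401234 <- 0x400abc)", each followed by the fetched args.
 */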

typedef bool (*filter_func_t)(struct uprobe_consumer *self,
				enum uprobe_filter_ctx ctx,
				struct mm_struct *mm);

static int trace_uprobe_enable(struct trace_uprobe *tu, filter_func_t filter)
{
	int ret;

	tu->consumer.filter = filter;
	tu->inode = d_real_inode(tu->path.dentry);

	if (tu->ref_ctr_offset)
		ret = uprobe_register_refctr(tu->inode, tu->offset,
				tu->ref_ctr_offset, &tu->consumer);
	else
		ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);

	if (ret)
		tu->inode = NULL;

	return ret;
}

static void __probe_event_disable(struct trace_probe *tp)
{
	struct trace_probe *pos;
	struct trace_uprobe *tu;

	tu = container_of(tp, struct trace_uprobe, tp);
	WARN_ON(!uprobe_filter_is_empty(trace_uprobe_get_filter(tu)));

	list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
		tu = container_of(pos, struct trace_uprobe, tp);
		if (!tu->inode)
			continue;

		uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
		tu->inode = NULL;
	}
}

static int probe_event_enable(struct trace_event_call *call,
			struct trace_event_file *file, filter_func_t filter)
{
	struct trace_probe *pos, *tp;
	struct trace_uprobe *tu;
	bool enabled;
	int ret;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;
	enabled = trace_probe_is_enabled(tp);

	/* This may also change "enabled" state */
	if (file) {
		if (trace_probe_test_flag(tp, TP_FLAG_PROFILE))
			return -EINTR;

		ret = trace_probe_add_file(tp, file);
		if (ret < 0)
			return ret;
	} else {
		if (trace_probe_test_flag(tp, TP_FLAG_TRACE))
			return -EINTR;

		trace_probe_set_flag(tp, TP_FLAG_PROFILE);
	}

	tu = container_of(tp, struct trace_uprobe, tp);
	WARN_ON(!uprobe_filter_is_empty(trace_uprobe_get_filter(tu)));

	if (enabled)
		return 0;

	ret = uprobe_buffer_enable();
	if (ret)
		goto err_flags;

	list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
		tu = container_of(pos, struct trace_uprobe, tp);
		ret = trace_uprobe_enable(tu, filter);
		if (ret) {
			__probe_event_disable(tp);
			goto err_buffer;
		}
	}

	return 0;

 err_buffer:
	uprobe_buffer_disable();

 err_flags:
	if (file)
		trace_probe_remove_file(tp, file);
	else
		trace_probe_clear_flag(tp, TP_FLAG_PROFILE);

	return ret;
}

static void probe_event_disable(struct trace_event_call *call,
				struct trace_event_file *file)
{
	struct trace_probe *tp;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return;

	if (!trace_probe_is_enabled(tp))
		return;

	if (file) {
		if (trace_probe_remove_file(tp, file) < 0)
			return;

		if (trace_probe_is_enabled(tp))
			return;
	} else
		trace_probe_clear_flag(tp, TP_FLAG_PROFILE);

	__probe_event_disable(tp);
	uprobe_buffer_disable();
}

static int uprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret, size;
	struct uprobe_trace_entry_head field;
	struct trace_uprobe *tu;

	tu = trace_uprobe_primary_from_call(event_call);
	if (unlikely(!tu))
		return -ENODEV;

	if (is_ret_probe(tu)) {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
		DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0);
		size = SIZEOF_TRACE_ENTRY(true);
	} else {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
		size = SIZEOF_TRACE_ENTRY(false);
	}

	return traceprobe_define_arg_fields(event_call, size, &tu->tp);
}

#ifdef CONFIG_PERF_EVENTS
static bool
__uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
{
	struct perf_event *event;

	if (filter->nr_systemwide)
		return true;

	list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
		if (event->hw.target->mm == mm)
			return true;
	}

	return false;
}

static inline bool
trace_uprobe_filter_event(struct trace_uprobe_filter *filter,
			  struct perf_event *event)
{
	return __uprobe_perf_filter(filter, event->hw.target->mm);
}

static bool trace_uprobe_filter_remove(struct trace_uprobe_filter *filter,
				       struct perf_event *event)
{
	bool done;

	write_lock(&filter->rwlock);
	if (event->hw.target) {
		list_del(&event->hw.tp_list);
		done = filter->nr_systemwide ||
			(event->hw.target->flags & PF_EXITING) ||
			trace_uprobe_filter_event(filter, event);
	} else {
		filter->nr_systemwide--;
		done = filter->nr_systemwide;
	}
	write_unlock(&filter->rwlock);

	return done;
}

/* This returns true if the filter always covers target mm */
static bool trace_uprobe_filter_add(struct trace_uprobe_filter *filter,
				    struct perf_event *event)
{
	bool done;

	write_lock(&filter->rwlock);
	if (event->hw.target) {
		/*
		 * event->parent != NULL means copy_process(), we can avoid
		 * uprobe_apply(). current->mm must be probed and we can rely
		 * on dup_mmap() which preserves the already installed bp's.
		 *
		 * attr.enable_on_exec means that exec/mmap will install the
		 * breakpoints we need.
		 */
		done = filter->nr_systemwide ||
			event->parent || event->attr.enable_on_exec ||
			trace_uprobe_filter_event(filter, event);
		list_add(&event->hw.tp_list, &filter->perf_events);
	} else {
		done = filter->nr_systemwide;
		filter->nr_systemwide++;
	}
	write_unlock(&filter->rwlock);

	return done;
}

static int uprobe_perf_close(struct trace_event_call *call,
			     struct perf_event *event)
{
	struct trace_probe *pos, *tp;
	struct trace_uprobe *tu;
	int ret = 0;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;

	tu = container_of(tp, struct trace_uprobe, tp);
	if (trace_uprobe_filter_remove(trace_uprobe_get_filter(tu), event))
		return 0;

	list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
		tu = container_of(pos, struct trace_uprobe, tp);
		ret = uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
		if (ret)
			break;
	}

	return ret;
}

static int uprobe_perf_open(struct trace_event_call *call,
			    struct perf_event *event)
{
	struct trace_probe *pos, *tp;
	struct trace_uprobe *tu;
	int err = 0;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;

	tu = container_of(tp, struct trace_uprobe, tp);
	if (trace_uprobe_filter_add(trace_uprobe_get_filter(tu), event))
		return 0;

	list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
		/* Apply to each sibling probe, not just the primary one. */
		tu = container_of(pos, struct trace_uprobe, tp);
		err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
		if (err) {
			uprobe_perf_close(call, event);
			break;
		}
	}

	return err;
}

static bool uprobe_perf_filter(struct uprobe_consumer *uc,
				enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
	struct trace_uprobe_filter *filter;
	struct trace_uprobe *tu;
	int ret;

	tu = container_of(uc, struct trace_uprobe, consumer);
	filter = trace_uprobe_get_filter(tu);

	read_lock(&filter->rwlock);
	ret = __uprobe_perf_filter(filter, mm);
	read_unlock(&filter->rwlock);

	return ret;
}

static void __uprobe_perf_func(struct trace_uprobe *tu,
			       unsigned long func, struct pt_regs *regs,
			       struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct trace_event_call *call = trace_probe_event_call(&tu->tp);
	struct uprobe_trace_entry_head *entry;
	struct hlist_head *head;
	void *data;
	int size, esize;
	int rctx;

	if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
		return;

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	size = esize + tu->tp.size + dsize;
	size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
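	/*
	 * The ALIGN above rounds the record so that, together with the u32
	 * size header perf prepends, it stays u64-aligned; e.g. (added note,
	 * hypothetical numbers) size 50 becomes ALIGN(54, 8) - 4 = 52.
	 */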
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
		return;

	preempt_disable();
	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		goto out;

	entry = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!entry)
		goto out;

	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, tu->tp.size + dsize);

	if (size - esize > tu->tp.size + dsize) {
		int len = tu->tp.size + dsize;

		memset(data + len, 0, size - esize - len);
	}

	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL);
 out:
	preempt_enable();
}

/* uprobe profile handler */
static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs,
			    struct uprobe_cpu_buffer *ucb, int dsize)
{
	if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
		return UPROBE_HANDLER_REMOVE;

	if (!is_ret_probe(tu))
		__uprobe_perf_func(tu, 0, regs, ucb, dsize);
	return 0;
}

static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
				struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb, int dsize)
{
	__uprobe_perf_func(tu, func, regs, ucb, dsize);
}

int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type,
			const char **filename, u64 *probe_offset,
			bool perf_type_tracepoint)
{
	const char *pevent = trace_event_name(event->tp_event);
	const char *group = event->tp_event->class->system;
	struct trace_uprobe *tu;

	if (perf_type_tracepoint)
		tu = find_probe_event(pevent, group);
	else
		tu = event->tp_event->data;
	if (!tu)
		return -EINVAL;

	*fd_type = is_ret_probe(tu) ? BPF_FD_TYPE_URETPROBE
				    : BPF_FD_TYPE_UPROBE;
	*filename = tu->filename;
	*probe_offset = tu->offset;
	return 0;
}
#endif	/* CONFIG_PERF_EVENTS */

static int
trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
		      void *data)
{
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return probe_event_enable(event, file, NULL);

	case TRACE_REG_UNREGISTER:
		probe_event_disable(event, file);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return probe_event_enable(event, NULL, uprobe_perf_filter);

	case TRACE_REG_PERF_UNREGISTER:
		probe_event_disable(event, NULL);
		return 0;

	case TRACE_REG_PERF_OPEN:
		return uprobe_perf_open(event, data);

	case TRACE_REG_PERF_CLOSE:
		return uprobe_perf_close(event, data);

#endif
	default:
		return 0;
	}
	return 0;
}

static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb;
	int dsize, esize;
	int ret = 0;

	tu = container_of(con, struct trace_uprobe, consumer);
	tu->nhit++;

	udd.tu = tu;
	udd.bp_addr = instruction_pointer(regs);

	current->utask->vaddr = (unsigned long) &udd;

	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	dsize = __get_data_size(&tu->tp, regs);
	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = uprobe_buffer_get();
	store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);

	if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
		ret |= uprobe_trace_func(tu, regs, ucb, dsize);

#ifdef CONFIG_PERF_EVENTS
	if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
		ret |= uprobe_perf_func(tu, regs, ucb, dsize);
#endif
	uprobe_buffer_put(ucb);
	return ret;
}

static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb;
	int dsize, esize;

	tu = container_of(con, struct trace_uprobe, consumer);

	udd.tu = tu;
	udd.bp_addr = func;

	current->utask->vaddr = (unsigned long) &udd;

	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	dsize = __get_data_size(&tu->tp, regs);
	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = uprobe_buffer_get();
	store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);

	if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
		uretprobe_trace_func(tu, func, regs, ucb, dsize);

#ifdef CONFIG_PERF_EVENTS
	if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
		uretprobe_perf_func(tu, func, regs, ucb, dsize);
#endif
	uprobe_buffer_put(ucb);
	return 0;
}

static struct trace_event_functions uprobe_funcs = {
	.trace		= print_uprobe_event
};

static inline void init_trace_event_call(struct trace_uprobe *tu)
{
	struct trace_event_call *call = trace_probe_event_call(&tu->tp);

	call->event.funcs = &uprobe_funcs;
	call->class->define_fields = uprobe_event_define_fields;

	call->flags = TRACE_EVENT_FL_UPROBE | TRACE_EVENT_FL_CAP_ANY;
	call->class->reg = trace_uprobe_register;
}

static int register_uprobe_event(struct trace_uprobe *tu)
{
	init_trace_event_call(tu);

	return trace_probe_register_event_call(&tu->tp);
}

static int unregister_uprobe_event(struct trace_uprobe *tu)
{
	return trace_probe_unregister_event_call(&tu->tp);
}

#ifdef CONFIG_PERF_EVENTS
struct trace_event_call *
create_local_trace_uprobe(char *name, unsigned long offs,
			  unsigned long ref_ctr_offset, bool is_return)
{
	struct trace_uprobe *tu;
	struct path path;
	int ret;

	ret = kern_path(name, LOOKUP_FOLLOW, &path);
	if (ret)
		return ERR_PTR(ret);

	if (!d_is_reg(path.dentry)) {
		path_put(&path);
		return ERR_PTR(-EINVAL);
	}

	/*
	 * local trace_uprobes are not added to dyn_event, so they are never
	 * searched in find_probe_event(). Therefore, there is no concern of
	 * a duplicated name "DUMMY_EVENT" here.
	 */
	tu = alloc_trace_uprobe(UPROBE_EVENT_SYSTEM, "DUMMY_EVENT", 0,
				is_return);

	if (IS_ERR(tu)) {
		pr_info("Failed to allocate trace_uprobe.(%d)\n",
			(int)PTR_ERR(tu));
		path_put(&path);
		return ERR_CAST(tu);
	}

	tu->offset = offs;
	tu->path = path;
	tu->ref_ctr_offset = ref_ctr_offset;
	tu->filename = kstrdup(name, GFP_KERNEL);
	init_trace_event_call(tu);

	if (traceprobe_set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0) {
		ret = -ENOMEM;
		goto error;
	}

	return trace_probe_event_call(&tu->tp);
error:
	free_trace_uprobe(tu);
	return ERR_PTR(ret);
}

void destroy_local_trace_uprobe(struct trace_event_call *event_call)
{
	struct trace_uprobe *tu;

	tu = trace_uprobe_primary_from_call(event_call);

	free_trace_uprobe(tu);
}
#endif	/* CONFIG_PERF_EVENTS */

/* Make a trace interface for controlling probe points */
static __init int init_uprobe_trace(void)
{
	struct dentry *d_tracer;
	int ret;

	ret = dyn_event_register(&trace_uprobe_ops);
	if (ret)
		return ret;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("uprobe_events", 0644, d_tracer,
				    NULL, &uprobe_events_ops);
	/* Profile interface */
	trace_create_file("uprobe_profile", 0444, d_tracer,
				    NULL, &uprobe_profile_ops);
	return 0;
}

fs_initcall(init_uprobe_trace);