1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * uprobes-based tracing events
4 *
5 * Copyright (C) IBM Corporation, 2010-2012
6 * Author: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
7 */
8 #define pr_fmt(fmt) "trace_uprobe: " fmt
9
10 #include <linux/security.h>
11 #include <linux/ctype.h>
12 #include <linux/module.h>
13 #include <linux/uaccess.h>
14 #include <linux/uprobes.h>
15 #include <linux/namei.h>
16 #include <linux/string.h>
17 #include <linux/rculist.h>
18
19 #include "trace_dynevent.h"
20 #include "trace_probe.h"
21 #include "trace_probe_tmpl.h"
22
23 #define UPROBE_EVENT_SYSTEM "uprobes"
24
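/*
 * Layout of a recorded uprobe event: for a return probe vaddr[] holds the
 * probed function address and the return site, for an entry probe it holds
 * only the instruction pointer; fetched argument data follows the header.
 */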
25 struct uprobe_trace_entry_head {
26 struct trace_entry ent;
27 unsigned long vaddr[];
28 };
29
30 #define SIZEOF_TRACE_ENTRY(is_return) \
31 (sizeof(struct uprobe_trace_entry_head) + \
32 sizeof(unsigned long) * (is_return ? 2 : 1))
33
34 #define DATAOF_TRACE_ENTRY(entry, is_return) \
35 ((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))
36
37 static int trace_uprobe_create(int argc, const char **argv);
38 static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev);
39 static int trace_uprobe_release(struct dyn_event *ev);
40 static bool trace_uprobe_is_busy(struct dyn_event *ev);
41 static bool trace_uprobe_match(const char *system, const char *event,
42 int argc, const char **argv, struct dyn_event *ev);
43
44 static struct dyn_event_operations trace_uprobe_ops = {
45 .create = trace_uprobe_create,
46 .show = trace_uprobe_show,
47 .is_busy = trace_uprobe_is_busy,
48 .free = trace_uprobe_release,
49 .match = trace_uprobe_match,
50 };
51
52 /*
53 * uprobe event core functions
54 */
55 struct trace_uprobe {
56 struct dyn_event devent;
57 struct uprobe_consumer consumer;
58 struct path path;
59 struct inode *inode;
60 char *filename;
61 unsigned long offset;
62 unsigned long ref_ctr_offset;
63 unsigned long nhit;
64 struct trace_probe tp;
65 };
66
67 static bool is_trace_uprobe(struct dyn_event *ev)
68 {
69 return ev->ops == &trace_uprobe_ops;
70 }
71
72 static struct trace_uprobe *to_trace_uprobe(struct dyn_event *ev)
73 {
74 return container_of(ev, struct trace_uprobe, devent);
75 }
76
77 /**
78 * for_each_trace_uprobe - iterate over the trace_uprobe list
79 * @pos: the struct trace_uprobe * for each entry
80 * @dpos: the struct dyn_event * to use as a loop cursor
81 */
82 #define for_each_trace_uprobe(pos, dpos) \
83 for_each_dyn_event(dpos) \
84 if (is_trace_uprobe(dpos) && (pos = to_trace_uprobe(dpos)))
85
86 #define SIZEOF_TRACE_UPROBE(n) \
87 (offsetof(struct trace_uprobe, tp.args) + \
88 (sizeof(struct probe_arg) * (n)))
89
90 static int register_uprobe_event(struct trace_uprobe *tu);
91 static int unregister_uprobe_event(struct trace_uprobe *tu);
92
93 struct uprobe_dispatch_data {
94 struct trace_uprobe *tu;
95 unsigned long bp_addr;
96 };
97
98 static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
99 static int uretprobe_dispatcher(struct uprobe_consumer *con,
100 unsigned long func, struct pt_regs *regs);
101
102 #ifdef CONFIG_STACK_GROWSUP
103 static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
104 {
105 return addr - (n * sizeof(long));
106 }
107 #else
108 static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
109 {
110 return addr + (n * sizeof(long));
111 }
112 #endif
113
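/*
 * Read the n-th word of the probed task's user stack; returns 0 if the
 * word cannot be copied from user space.
 */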
114 static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
115 {
116 unsigned long ret;
117 unsigned long addr = user_stack_pointer(regs);
118
119 addr = adjust_stack_addr(addr, n);
120
121 if (copy_from_user(&ret, (void __force __user *) addr, sizeof(ret)))
122 return 0;
123
124 return ret;
125 }
126
127 /*
128 * Uprobes-specific fetch functions
129 */
130 static nokprobe_inline int
131 probe_mem_read(void *dest, void *src, size_t size)
132 {
133 void __user *vaddr = (void __force __user *)src;
134
135 return copy_from_user(dest, vaddr, size) ? -EFAULT : 0;
136 }
137
138 static nokprobe_inline int
139 probe_mem_read_user(void *dest, void *src, size_t size)
140 {
141 return probe_mem_read(dest, src, size);
142 }
143
144 /*
145 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
146 * length and relative data location.
147 */
148 static nokprobe_inline int
149 fetch_store_string(unsigned long addr, void *dest, void *base)
150 {
151 long ret;
152 u32 loc = *(u32 *)dest;
153 int maxlen = get_loc_len(loc);
154 u8 *dst = get_loc_data(dest, base);
155 void __user *src = (void __force __user *) addr;
156
157 if (unlikely(!maxlen))
158 return -ENOMEM;
159
160 if (addr == FETCH_TOKEN_COMM)
161 ret = strlcpy(dst, current->comm, maxlen);
162 else
163 ret = strncpy_from_user(dst, src, maxlen);
164 if (ret >= 0) {
165 if (ret == maxlen)
166 dst[ret - 1] = '\0';
167 else
168 /*
169 * Include the terminating null byte. In this case it
170 * was copied by strncpy_from_user but not accounted
171 * for in ret.
172 */
173 ret++;
174 *(u32 *)dest = make_data_loc(ret, (void *)dst - base);
175 }
176
177 return ret;
178 }
179
180 static nokprobe_inline int
181 fetch_store_string_user(unsigned long addr, void *dest, void *base)
182 {
183 return fetch_store_string(addr, dest, base);
184 }
185
186 /* Return the length of the string, including the terminating null byte */
187 static nokprobe_inline int
188 fetch_store_strlen(unsigned long addr)
189 {
190 int len;
191 void __user *vaddr = (void __force __user *) addr;
192
193 if (addr == FETCH_TOKEN_COMM)
194 len = strlen(current->comm) + 1;
195 else
196 len = strnlen_user(vaddr, MAX_STRING_SIZE);
197
198 return (len > MAX_STRING_SIZE) ? 0 : len;
199 }
200
201 static nokprobe_inline int
202 fetch_store_strlen_user(unsigned long addr)
203 {
204 return fetch_store_strlen(addr);
205 }
206
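/*
 * Convert a file offset into a virtual address in the probed task, using
 * the breakpoint address stashed by the dispatcher in current->utask->vaddr
 * to recover the load base of the file.
 */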
207 static unsigned long translate_user_vaddr(unsigned long file_offset)
208 {
209 unsigned long base_addr;
210 struct uprobe_dispatch_data *udd;
211
212 udd = (void *) current->utask->vaddr;
213
214 base_addr = udd->bp_addr - udd->tu->offset;
215 return base_addr + file_offset;
216 }
217
218 /* Note that we don't verify the fetch code, since it does not come from user space */
219 static int
220 process_fetch_insn(struct fetch_insn *code, void *rec, void *dest,
221 void *base)
222 {
223 struct pt_regs *regs = rec;
224 unsigned long val;
225
226 /* 1st stage: get value from context */
227 switch (code->op) {
228 case FETCH_OP_REG:
229 val = regs_get_register(regs, code->param);
230 break;
231 case FETCH_OP_STACK:
232 val = get_user_stack_nth(regs, code->param);
233 break;
234 case FETCH_OP_STACKP:
235 val = user_stack_pointer(regs);
236 break;
237 case FETCH_OP_RETVAL:
238 val = regs_return_value(regs);
239 break;
240 case FETCH_OP_IMM:
241 val = code->immediate;
242 break;
243 case FETCH_OP_COMM:
244 val = FETCH_TOKEN_COMM;
245 break;
246 case FETCH_OP_DATA:
247 val = (unsigned long)code->data;
248 break;
249 case FETCH_OP_FOFFS:
250 val = translate_user_vaddr(code->immediate);
251 break;
252 default:
253 return -EILSEQ;
254 }
255 code++;
256
257 return process_fetch_insn_bottom(code, val, dest, base);
258 }
259 NOKPROBE_SYMBOL(process_fetch_insn)
260
261 static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
262 {
263 rwlock_init(&filter->rwlock);
264 filter->nr_systemwide = 0;
265 INIT_LIST_HEAD(&filter->perf_events);
266 }
267
268 static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter)
269 {
270 return !filter->nr_systemwide && list_empty(&filter->perf_events);
271 }
272
273 static inline bool is_ret_probe(struct trace_uprobe *tu)
274 {
275 return tu->consumer.ret_handler != NULL;
276 }
277
278 static bool trace_uprobe_is_busy(struct dyn_event *ev)
279 {
280 struct trace_uprobe *tu = to_trace_uprobe(ev);
281
282 return trace_probe_is_enabled(&tu->tp);
283 }
284
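/*
 * Return true when @argv matches this probe's PATH:OFFSET[(REF)] spec and
 * its remaining arguments; an empty @argv matches unconditionally.
 */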
285 static bool trace_uprobe_match_command_head(struct trace_uprobe *tu,
286 int argc, const char **argv)
287 {
288 char buf[MAX_ARGSTR_LEN + 1];
289 int len;
290
291 if (!argc)
292 return true;
293
294 len = strlen(tu->filename);
295 if (strncmp(tu->filename, argv[0], len) || argv[0][len] != ':')
296 return false;
297
298 if (tu->ref_ctr_offset == 0)
299 snprintf(buf, sizeof(buf), "0x%0*lx",
300 (int)(sizeof(void *) * 2), tu->offset);
301 else
302 snprintf(buf, sizeof(buf), "0x%0*lx(0x%lx)",
303 (int)(sizeof(void *) * 2), tu->offset,
304 tu->ref_ctr_offset);
305 if (strcmp(buf, &argv[0][len + 1]))
306 return false;
307
308 argc--; argv++;
309
310 return trace_probe_match_command_args(&tu->tp, argc, argv);
311 }
312
313 static bool trace_uprobe_match(const char *system, const char *event,
314 int argc, const char **argv, struct dyn_event *ev)
315 {
316 struct trace_uprobe *tu = to_trace_uprobe(ev);
317
318 return strcmp(trace_probe_name(&tu->tp), event) == 0 &&
319 (!system || strcmp(trace_probe_group_name(&tu->tp), system) == 0) &&
320 trace_uprobe_match_command_head(tu, argc, argv);
321 }
322
323 static nokprobe_inline struct trace_uprobe *
324 trace_uprobe_primary_from_call(struct trace_event_call *call)
325 {
326 struct trace_probe *tp;
327
328 tp = trace_probe_primary_from_call(call);
329 if (WARN_ON_ONCE(!tp))
330 return NULL;
331
332 return container_of(tp, struct trace_uprobe, tp);
333 }
334
335 /*
336 * Allocate new trace_uprobe and initialize it (including uprobes).
337 */
338 static struct trace_uprobe *
339 alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
340 {
341 struct trace_uprobe *tu;
342 int ret;
343
344 tu = kzalloc(SIZEOF_TRACE_UPROBE(nargs), GFP_KERNEL);
345 if (!tu)
346 return ERR_PTR(-ENOMEM);
347
348 ret = trace_probe_init(&tu->tp, event, group, true);
349 if (ret < 0)
350 goto error;
351
352 dyn_event_init(&tu->devent, &trace_uprobe_ops);
353 tu->consumer.handler = uprobe_dispatcher;
354 if (is_ret)
355 tu->consumer.ret_handler = uretprobe_dispatcher;
356 init_trace_uprobe_filter(tu->tp.event->filter);
357 return tu;
358
359 error:
360 kfree(tu);
361
362 return ERR_PTR(ret);
363 }
364
365 static void free_trace_uprobe(struct trace_uprobe *tu)
366 {
367 if (!tu)
368 return;
369
370 path_put(&tu->path);
371 trace_probe_cleanup(&tu->tp);
372 kfree(tu->filename);
373 kfree(tu);
374 }
375
376 static struct trace_uprobe *find_probe_event(const char *event, const char *group)
377 {
378 struct dyn_event *pos;
379 struct trace_uprobe *tu;
380
381 for_each_trace_uprobe(tu, pos)
382 if (strcmp(trace_probe_name(&tu->tp), event) == 0 &&
383 strcmp(trace_probe_group_name(&tu->tp), group) == 0)
384 return tu;
385
386 return NULL;
387 }
388
389 /* Unregister a trace_uprobe and probe_event */
390 static int unregister_trace_uprobe(struct trace_uprobe *tu)
391 {
392 int ret;
393
394 if (trace_probe_has_sibling(&tu->tp))
395 goto unreg;
396
397 ret = unregister_uprobe_event(tu);
398 if (ret)
399 return ret;
400
401 unreg:
402 dyn_event_remove(&tu->devent);
403 trace_probe_unlink(&tu->tp);
404 free_trace_uprobe(tu);
405 return 0;
406 }
407
408 static bool trace_uprobe_has_same_uprobe(struct trace_uprobe *orig,
409 struct trace_uprobe *comp)
410 {
411 struct trace_probe_event *tpe = orig->tp.event;
412 struct trace_probe *pos;
413 struct inode *comp_inode = d_real_inode(comp->path.dentry);
414 int i;
415
416 list_for_each_entry(pos, &tpe->probes, list) {
417 orig = container_of(pos, struct trace_uprobe, tp);
418 if (comp_inode != d_real_inode(orig->path.dentry) ||
419 comp->offset != orig->offset)
420 continue;
421
422 /*
423 * trace_probe_compare_arg_type() ensured that nr_args and
424 * each argument name and type are same. Let's compare comm.
425 */
426 for (i = 0; i < orig->tp.nr_args; i++) {
427 if (strcmp(orig->tp.args[i].comm,
428 comp->tp.args[i].comm))
429 break;
430 }
431
432 if (i == orig->tp.nr_args)
433 return true;
434 }
435
436 return false;
437 }
438
439 static int append_trace_uprobe(struct trace_uprobe *tu, struct trace_uprobe *to)
440 {
441 int ret;
442
443 ret = trace_probe_compare_arg_type(&tu->tp, &to->tp);
444 if (ret) {
445 /* Note that argument indices start at 2 */
446 trace_probe_log_set_index(ret + 1);
447 trace_probe_log_err(0, DIFF_ARG_TYPE);
448 return -EEXIST;
449 }
450 if (trace_uprobe_has_same_uprobe(to, tu)) {
451 trace_probe_log_set_index(0);
452 trace_probe_log_err(0, SAME_PROBE);
453 return -EEXIST;
454 }
455
456 /* Append to existing event */
457 ret = trace_probe_append(&tu->tp, &to->tp);
458 if (!ret)
459 dyn_event_add(&tu->devent);
460
461 return ret;
462 }
463
464 /*
465 * A uprobe with multiple reference counters is not allowed, i.e.
466 * if the inode and offset match, the reference counter offset *must*
467 * match as well. There is one exception, though: if the user is
468 * replacing an old trace_uprobe with a new one (same group/event),
469 * we allow the same uprobe with a new reference counter as long
470 * as the new one does not conflict with any other existing
471 * ones.
472 */
473 static int validate_ref_ctr_offset(struct trace_uprobe *new)
474 {
475 struct dyn_event *pos;
476 struct trace_uprobe *tmp;
477 struct inode *new_inode = d_real_inode(new->path.dentry);
478
479 for_each_trace_uprobe(tmp, pos) {
480 if (new_inode == d_real_inode(tmp->path.dentry) &&
481 new->offset == tmp->offset &&
482 new->ref_ctr_offset != tmp->ref_ctr_offset) {
483 pr_warn("Reference counter offset mismatch.");
484 return -EINVAL;
485 }
486 }
487 return 0;
488 }
489
490 /* Register a trace_uprobe and probe_event */
491 static int register_trace_uprobe(struct trace_uprobe *tu)
492 {
493 struct trace_uprobe *old_tu;
494 int ret;
495
496 mutex_lock(&event_mutex);
497
498 ret = validate_ref_ctr_offset(tu);
499 if (ret)
500 goto end;
501
502 /* register as an event */
503 old_tu = find_probe_event(trace_probe_name(&tu->tp),
504 trace_probe_group_name(&tu->tp));
505 if (old_tu) {
506 if (is_ret_probe(tu) != is_ret_probe(old_tu)) {
507 trace_probe_log_set_index(0);
508 trace_probe_log_err(0, DIFF_PROBE_TYPE);
509 ret = -EEXIST;
510 } else {
511 ret = append_trace_uprobe(tu, old_tu);
512 }
513 goto end;
514 }
515
516 ret = register_uprobe_event(tu);
517 if (ret) {
518 if (ret == -EEXIST) {
519 trace_probe_log_set_index(0);
520 trace_probe_log_err(0, EVENT_EXIST);
521 } else
522 pr_warn("Failed to register probe event(%d)\n", ret);
523 goto end;
524 }
525
526 dyn_event_add(&tu->devent);
527
528 end:
529 mutex_unlock(&event_mutex);
530
531 return ret;
532 }
533
534 /*
535 * Argument syntax:
536 * - Add uprobe: p|r[:[GRP/]EVENT] PATH:OFFSET[%return][(REF)] [FETCHARGS]
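 *
 *   e.g. p:my_event /bin/bash:0x4245c0 %ax
 *   (the event name, path and offset above are only illustrative)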
537 */
538 static int trace_uprobe_create(int argc, const char **argv)
539 {
540 struct trace_uprobe *tu;
541 const char *event = NULL, *group = UPROBE_EVENT_SYSTEM;
542 char *arg, *filename, *rctr, *rctr_end, *tmp;
543 char buf[MAX_EVENT_NAME_LEN];
544 struct path path;
545 unsigned long offset, ref_ctr_offset;
546 bool is_return = false;
547 int i, ret;
548
549 ret = 0;
550 ref_ctr_offset = 0;
551
552 switch (argv[0][0]) {
553 case 'r':
554 is_return = true;
555 break;
556 case 'p':
557 break;
558 default:
559 return -ECANCELED;
560 }
561
562 if (argc < 2)
563 return -ECANCELED;
564
565 if (argv[0][1] == ':')
566 event = &argv[0][2];
567
568 if (!strchr(argv[1], '/'))
569 return -ECANCELED;
570
571 filename = kstrdup(argv[1], GFP_KERNEL);
572 if (!filename)
573 return -ENOMEM;
574
575 /* Find the last occurrence, in case the path contains ':' too. */
576 arg = strrchr(filename, ':');
577 if (!arg || !isdigit(arg[1])) {
578 kfree(filename);
579 return -ECANCELED;
580 }
581
582 trace_probe_log_init("trace_uprobe", argc, argv);
583 trace_probe_log_set_index(1); /* filename is the 2nd argument */
584
585 *arg++ = '\0';
586 ret = kern_path(filename, LOOKUP_FOLLOW, &path);
587 if (ret) {
588 trace_probe_log_err(0, FILE_NOT_FOUND);
589 kfree(filename);
590 trace_probe_log_clear();
591 return ret;
592 }
593 if (!d_is_reg(path.dentry)) {
594 trace_probe_log_err(0, NO_REGULAR_FILE);
595 ret = -EINVAL;
596 goto fail_address_parse;
597 }
598
599 /* Parse reference counter offset if specified. */
600 rctr = strchr(arg, '(');
601 if (rctr) {
602 rctr_end = strchr(rctr, ')');
603 if (!rctr_end) {
604 ret = -EINVAL;
605 rctr_end = rctr + strlen(rctr);
606 trace_probe_log_err(rctr_end - filename,
607 REFCNT_OPEN_BRACE);
608 goto fail_address_parse;
609 } else if (rctr_end[1] != '\0') {
610 ret = -EINVAL;
611 trace_probe_log_err(rctr_end + 1 - filename,
612 BAD_REFCNT_SUFFIX);
613 goto fail_address_parse;
614 }
615
616 *rctr++ = '\0';
617 *rctr_end = '\0';
618 ret = kstrtoul(rctr, 0, &ref_ctr_offset);
619 if (ret) {
620 trace_probe_log_err(rctr - filename, BAD_REFCNT);
621 goto fail_address_parse;
622 }
623 }
624
625 /* Check if there is %return suffix */
626 tmp = strchr(arg, '%');
627 if (tmp) {
628 if (!strcmp(tmp, "%return")) {
629 *tmp = '\0';
630 is_return = true;
631 } else {
632 trace_probe_log_err(tmp - filename, BAD_ADDR_SUFFIX);
633 ret = -EINVAL;
634 goto fail_address_parse;
635 }
636 }
637
638 /* Parse uprobe offset. */
639 ret = kstrtoul(arg, 0, &offset);
640 if (ret) {
641 trace_probe_log_err(arg - filename, BAD_UPROBE_OFFS);
642 goto fail_address_parse;
643 }
644
645 /* setup a probe */
646 trace_probe_log_set_index(0);
647 if (event) {
648 ret = traceprobe_parse_event_name(&event, &group, buf,
649 event - argv[0]);
650 if (ret)
651 goto fail_address_parse;
652 } else {
653 char *tail;
654 char *ptr;
655
656 tail = kstrdup(kbasename(filename), GFP_KERNEL);
657 if (!tail) {
658 ret = -ENOMEM;
659 goto fail_address_parse;
660 }
661
662 ptr = strpbrk(tail, ".-_");
663 if (ptr)
664 *ptr = '\0';
665
666 snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);
667 event = buf;
668 kfree(tail);
669 }
670
671 argc -= 2;
672 argv += 2;
673
674 tu = alloc_trace_uprobe(group, event, argc, is_return);
675 if (IS_ERR(tu)) {
676 ret = PTR_ERR(tu);
677 /* This must return -ENOMEM otherwise there is a bug */
678 WARN_ON_ONCE(ret != -ENOMEM);
679 goto fail_address_parse;
680 }
681 tu->offset = offset;
682 tu->ref_ctr_offset = ref_ctr_offset;
683 tu->path = path;
684 tu->filename = filename;
685
686 /* parse arguments */
687 for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
688 tmp = kstrdup(argv[i], GFP_KERNEL);
689 if (!tmp) {
690 ret = -ENOMEM;
691 goto error;
692 }
693
694 trace_probe_log_set_index(i + 2);
695 ret = traceprobe_parse_probe_arg(&tu->tp, i, tmp,
696 is_return ? TPARG_FL_RETURN : 0);
697 kfree(tmp);
698 if (ret)
699 goto error;
700 }
701
702 ret = traceprobe_set_print_fmt(&tu->tp, is_ret_probe(tu));
703 if (ret < 0)
704 goto error;
705
706 ret = register_trace_uprobe(tu);
707 if (!ret)
708 goto out;
709
710 error:
711 free_trace_uprobe(tu);
712 out:
713 trace_probe_log_clear();
714 return ret;
715
716 fail_address_parse:
717 trace_probe_log_clear();
718 path_put(&path);
719 kfree(filename);
720
721 return ret;
722 }
723
724 static int create_or_delete_trace_uprobe(int argc, char **argv)
725 {
726 int ret;
727
728 if (argv[0][0] == '-')
729 return dyn_event_release(argc, argv, &trace_uprobe_ops);
730
731 ret = trace_uprobe_create(argc, (const char **)argv);
732 return ret == -ECANCELED ? -EINVAL : ret;
733 }
734
735 static int trace_uprobe_release(struct dyn_event *ev)
736 {
737 struct trace_uprobe *tu = to_trace_uprobe(ev);
738
739 return unregister_trace_uprobe(tu);
740 }
741
742 /* Probes listing interfaces */
743 static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev)
744 {
745 struct trace_uprobe *tu = to_trace_uprobe(ev);
746 char c = is_ret_probe(tu) ? 'r' : 'p';
747 int i;
748
749 seq_printf(m, "%c:%s/%s %s:0x%0*lx", c, trace_probe_group_name(&tu->tp),
750 trace_probe_name(&tu->tp), tu->filename,
751 (int)(sizeof(void *) * 2), tu->offset);
752
753 if (tu->ref_ctr_offset)
754 seq_printf(m, "(0x%lx)", tu->ref_ctr_offset);
755
756 for (i = 0; i < tu->tp.nr_args; i++)
757 seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);
758
759 seq_putc(m, '\n');
760 return 0;
761 }
762
763 static int probes_seq_show(struct seq_file *m, void *v)
764 {
765 struct dyn_event *ev = v;
766
767 if (!is_trace_uprobe(ev))
768 return 0;
769
770 return trace_uprobe_show(m, ev);
771 }
772
773 static const struct seq_operations probes_seq_op = {
774 .start = dyn_event_seq_start,
775 .next = dyn_event_seq_next,
776 .stop = dyn_event_seq_stop,
777 .show = probes_seq_show
778 };
779
780 static int probes_open(struct inode *inode, struct file *file)
781 {
782 int ret;
783
784 ret = security_locked_down(LOCKDOWN_TRACEFS);
785 if (ret)
786 return ret;
787
788 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
789 ret = dyn_events_release_all(&trace_uprobe_ops);
790 if (ret)
791 return ret;
792 }
793
794 return seq_open(file, &probes_seq_op);
795 }
796
797 static ssize_t probes_write(struct file *file, const char __user *buffer,
798 size_t count, loff_t *ppos)
799 {
800 return trace_parse_run_command(file, buffer, count, ppos,
801 create_or_delete_trace_uprobe);
802 }
803
804 static const struct file_operations uprobe_events_ops = {
805 .owner = THIS_MODULE,
806 .open = probes_open,
807 .read = seq_read,
808 .llseek = seq_lseek,
809 .release = seq_release,
810 .write = probes_write,
811 };
812
813 /* Probes profiling interfaces */
814 static int probes_profile_seq_show(struct seq_file *m, void *v)
815 {
816 struct dyn_event *ev = v;
817 struct trace_uprobe *tu;
818
819 if (!is_trace_uprobe(ev))
820 return 0;
821
822 tu = to_trace_uprobe(ev);
823 seq_printf(m, " %s %-44s %15lu\n", tu->filename,
824 trace_probe_name(&tu->tp), tu->nhit);
825 return 0;
826 }
827
828 static const struct seq_operations profile_seq_op = {
829 .start = dyn_event_seq_start,
830 .next = dyn_event_seq_next,
831 .stop = dyn_event_seq_stop,
832 .show = probes_profile_seq_show
833 };
834
835 static int profile_open(struct inode *inode, struct file *file)
836 {
837 int ret;
838
839 ret = security_locked_down(LOCKDOWN_TRACEFS);
840 if (ret)
841 return ret;
842
843 return seq_open(file, &profile_seq_op);
844 }
845
846 static const struct file_operations uprobe_profile_ops = {
847 .owner = THIS_MODULE,
848 .open = profile_open,
849 .read = seq_read,
850 .llseek = seq_lseek,
851 .release = seq_release,
852 };
853
854 struct uprobe_cpu_buffer {
855 struct mutex mutex;
856 void *buf;
857 };
858 static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer;
859 static int uprobe_buffer_refcnt;
860
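/* Allocate one page per possible CPU to hold fetched argument data. */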
861 static int uprobe_buffer_init(void)
862 {
863 int cpu, err_cpu;
864
865 uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer);
866 if (uprobe_cpu_buffer == NULL)
867 return -ENOMEM;
868
869 for_each_possible_cpu(cpu) {
870 struct page *p = alloc_pages_node(cpu_to_node(cpu),
871 GFP_KERNEL, 0);
872 if (p == NULL) {
873 err_cpu = cpu;
874 goto err;
875 }
876 per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);
877 mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex);
878 }
879
880 return 0;
881
882 err:
883 for_each_possible_cpu(cpu) {
884 if (cpu == err_cpu)
885 break;
886 free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf);
887 }
888
889 free_percpu(uprobe_cpu_buffer);
890 return -ENOMEM;
891 }
892
893 static int uprobe_buffer_enable(void)
894 {
895 int ret = 0;
896
897 BUG_ON(!mutex_is_locked(&event_mutex));
898
899 if (uprobe_buffer_refcnt++ == 0) {
900 ret = uprobe_buffer_init();
901 if (ret < 0)
902 uprobe_buffer_refcnt--;
903 }
904
905 return ret;
906 }
907
908 static void uprobe_buffer_disable(void)
909 {
910 int cpu;
911
912 BUG_ON(!mutex_is_locked(&event_mutex));
913
914 if (--uprobe_buffer_refcnt == 0) {
915 for_each_possible_cpu(cpu)
916 free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
917 cpu)->buf);
918
919 free_percpu(uprobe_cpu_buffer);
920 uprobe_cpu_buffer = NULL;
921 }
922 }
923
924 static struct uprobe_cpu_buffer *uprobe_buffer_get(void)
925 {
926 struct uprobe_cpu_buffer *ucb;
927 int cpu;
928
929 cpu = raw_smp_processor_id();
930 ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu);
931
932 /*
933 * Use per-cpu buffers for fastest access, but we might migrate,
934 * so the mutex makes sure we have sole access to it.
935 */
936 mutex_lock(&ucb->mutex);
937
938 return ucb;
939 }
940
941 static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
942 {
943 mutex_unlock(&ucb->mutex);
944 }
945
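/*
 * Emit one entry (the probe address(es) plus the argument data already
 * fetched into @ucb) into the ring buffer of @trace_file.
 */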
946 static void __uprobe_trace_func(struct trace_uprobe *tu,
947 unsigned long func, struct pt_regs *regs,
948 struct uprobe_cpu_buffer *ucb, int dsize,
949 struct trace_event_file *trace_file)
950 {
951 struct uprobe_trace_entry_head *entry;
952 struct trace_buffer *buffer;
953 struct ring_buffer_event *event;
954 void *data;
955 int size, esize;
956 struct trace_event_call *call = trace_probe_event_call(&tu->tp);
957
958 WARN_ON(call != trace_file->event_call);
959
960 if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE))
961 return;
962
963 if (trace_trigger_soft_disabled(trace_file))
964 return;
965
966 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
967 size = esize + tu->tp.size + dsize;
968 event = trace_event_buffer_lock_reserve(&buffer, trace_file,
969 call->event.type, size, 0, 0);
970 if (!event)
971 return;
972
973 entry = ring_buffer_event_data(event);
974 if (is_ret_probe(tu)) {
975 entry->vaddr[0] = func;
976 entry->vaddr[1] = instruction_pointer(regs);
977 data = DATAOF_TRACE_ENTRY(entry, true);
978 } else {
979 entry->vaddr[0] = instruction_pointer(regs);
980 data = DATAOF_TRACE_ENTRY(entry, false);
981 }
982
983 memcpy(data, ucb->buf, tu->tp.size + dsize);
984
985 event_trigger_unlock_commit(trace_file, buffer, event, entry, 0, 0);
986 }
987
988 /* uprobe handler */
989 static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
990 struct uprobe_cpu_buffer *ucb, int dsize)
991 {
992 struct event_file_link *link;
993
994 if (is_ret_probe(tu))
995 return 0;
996
997 rcu_read_lock();
998 trace_probe_for_each_link_rcu(link, &tu->tp)
999 __uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file);
1000 rcu_read_unlock();
1001
1002 return 0;
1003 }
1004
1005 static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
1006 struct pt_regs *regs,
1007 struct uprobe_cpu_buffer *ucb, int dsize)
1008 {
1009 struct event_file_link *link;
1010
1011 rcu_read_lock();
1012 trace_probe_for_each_link_rcu(link, &tu->tp)
1013 __uprobe_trace_func(tu, func, regs, ucb, dsize, link->file);
1014 rcu_read_unlock();
1015 }
1016
1017 /* Event entry printers */
1018 static enum print_line_t
1019 print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
1020 {
1021 struct uprobe_trace_entry_head *entry;
1022 struct trace_seq *s = &iter->seq;
1023 struct trace_uprobe *tu;
1024 u8 *data;
1025
1026 entry = (struct uprobe_trace_entry_head *)iter->ent;
1027 tu = trace_uprobe_primary_from_call(
1028 container_of(event, struct trace_event_call, event));
1029 if (unlikely(!tu))
1030 goto out;
1031
1032 if (is_ret_probe(tu)) {
1033 trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
1034 trace_probe_name(&tu->tp),
1035 entry->vaddr[1], entry->vaddr[0]);
1036 data = DATAOF_TRACE_ENTRY(entry, true);
1037 } else {
1038 trace_seq_printf(s, "%s: (0x%lx)",
1039 trace_probe_name(&tu->tp),
1040 entry->vaddr[0]);
1041 data = DATAOF_TRACE_ENTRY(entry, false);
1042 }
1043
1044 if (print_probe_args(s, tu->tp.args, tu->tp.nr_args, data, entry) < 0)
1045 goto out;
1046
1047 trace_seq_putc(s, '\n');
1048
1049 out:
1050 return trace_handle_return(s);
1051 }
1052
1053 typedef bool (*filter_func_t)(struct uprobe_consumer *self,
1054 enum uprobe_filter_ctx ctx,
1055 struct mm_struct *mm);
1056
1057 static int trace_uprobe_enable(struct trace_uprobe *tu, filter_func_t filter)
1058 {
1059 int ret;
1060
1061 tu->consumer.filter = filter;
1062 tu->inode = d_real_inode(tu->path.dentry);
1063
1064 if (tu->ref_ctr_offset)
1065 ret = uprobe_register_refctr(tu->inode, tu->offset,
1066 tu->ref_ctr_offset, &tu->consumer);
1067 else
1068 ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
1069
1070 if (ret)
1071 tu->inode = NULL;
1072
1073 return ret;
1074 }
1075
1076 static void __probe_event_disable(struct trace_probe *tp)
1077 {
1078 struct trace_probe *pos;
1079 struct trace_uprobe *tu;
1080
1081 tu = container_of(tp, struct trace_uprobe, tp);
1082 WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));
1083
1084 list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
1085 tu = container_of(pos, struct trace_uprobe, tp);
1086 if (!tu->inode)
1087 continue;
1088
1089 uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
1090 tu->inode = NULL;
1091 }
1092 }
1093
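/*
 * Attach the uprobe(s) behind @call for either the ftrace path (@file set)
 * or the perf path (@file == NULL); the two modes are mutually exclusive.
 */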
1094 static int probe_event_enable(struct trace_event_call *call,
1095 struct trace_event_file *file, filter_func_t filter)
1096 {
1097 struct trace_probe *pos, *tp;
1098 struct trace_uprobe *tu;
1099 bool enabled;
1100 int ret;
1101
1102 tp = trace_probe_primary_from_call(call);
1103 if (WARN_ON_ONCE(!tp))
1104 return -ENODEV;
1105 enabled = trace_probe_is_enabled(tp);
1106
1107 /* This may also change "enabled" state */
1108 if (file) {
1109 if (trace_probe_test_flag(tp, TP_FLAG_PROFILE))
1110 return -EINTR;
1111
1112 ret = trace_probe_add_file(tp, file);
1113 if (ret < 0)
1114 return ret;
1115 } else {
1116 if (trace_probe_test_flag(tp, TP_FLAG_TRACE))
1117 return -EINTR;
1118
1119 trace_probe_set_flag(tp, TP_FLAG_PROFILE);
1120 }
1121
1122 tu = container_of(tp, struct trace_uprobe, tp);
1123 WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));
1124
1125 if (enabled)
1126 return 0;
1127
1128 ret = uprobe_buffer_enable();
1129 if (ret)
1130 goto err_flags;
1131
1132 list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
1133 tu = container_of(pos, struct trace_uprobe, tp);
1134 ret = trace_uprobe_enable(tu, filter);
1135 if (ret) {
1136 __probe_event_disable(tp);
1137 goto err_buffer;
1138 }
1139 }
1140
1141 return 0;
1142
1143 err_buffer:
1144 uprobe_buffer_disable();
1145
1146 err_flags:
1147 if (file)
1148 trace_probe_remove_file(tp, file);
1149 else
1150 trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
1151
1152 return ret;
1153 }
1154
1155 static void probe_event_disable(struct trace_event_call *call,
1156 struct trace_event_file *file)
1157 {
1158 struct trace_probe *tp;
1159
1160 tp = trace_probe_primary_from_call(call);
1161 if (WARN_ON_ONCE(!tp))
1162 return;
1163
1164 if (!trace_probe_is_enabled(tp))
1165 return;
1166
1167 if (file) {
1168 if (trace_probe_remove_file(tp, file) < 0)
1169 return;
1170
1171 if (trace_probe_is_enabled(tp))
1172 return;
1173 } else
1174 trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
1175
1176 __probe_event_disable(tp);
1177 uprobe_buffer_disable();
1178 }
1179
1180 static int uprobe_event_define_fields(struct trace_event_call *event_call)
1181 {
1182 int ret, size;
1183 struct uprobe_trace_entry_head field;
1184 struct trace_uprobe *tu;
1185
1186 tu = trace_uprobe_primary_from_call(event_call);
1187 if (unlikely(!tu))
1188 return -ENODEV;
1189
1190 if (is_ret_probe(tu)) {
1191 DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
1192 DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0);
1193 size = SIZEOF_TRACE_ENTRY(true);
1194 } else {
1195 DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
1196 size = SIZEOF_TRACE_ENTRY(false);
1197 }
1198
1199 return traceprobe_define_arg_fields(event_call, size, &tu->tp);
1200 }
1201
1202 #ifdef CONFIG_PERF_EVENTS
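/*
 * Return true if @mm is covered by the filter, i.e. a system-wide perf
 * event exists or some perf event in the list targets this mm.
 */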
1203 static bool
1204 __uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
1205 {
1206 struct perf_event *event;
1207
1208 if (filter->nr_systemwide)
1209 return true;
1210
1211 list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
1212 if (event->hw.target->mm == mm)
1213 return true;
1214 }
1215
1216 return false;
1217 }
1218
1219 static inline bool
1220 trace_uprobe_filter_event(struct trace_uprobe_filter *filter,
1221 struct perf_event *event)
1222 {
1223 return __uprobe_perf_filter(filter, event->hw.target->mm);
1224 }
1225
1226 static bool trace_uprobe_filter_remove(struct trace_uprobe_filter *filter,
1227 struct perf_event *event)
1228 {
1229 bool done;
1230
1231 write_lock(&filter->rwlock);
1232 if (event->hw.target) {
1233 list_del(&event->hw.tp_list);
1234 done = filter->nr_systemwide ||
1235 (event->hw.target->flags & PF_EXITING) ||
1236 trace_uprobe_filter_event(filter, event);
1237 } else {
1238 filter->nr_systemwide--;
1239 done = filter->nr_systemwide;
1240 }
1241 write_unlock(&filter->rwlock);
1242
1243 return done;
1244 }
1245
1246 /* This returns true if the filter always covers target mm */
1247 static bool trace_uprobe_filter_add(struct trace_uprobe_filter *filter,
1248 struct perf_event *event)
1249 {
1250 bool done;
1251
1252 write_lock(&filter->rwlock);
1253 if (event->hw.target) {
1254 /*
1255 * event->parent != NULL means copy_process(), we can avoid
1256 * uprobe_apply(). current->mm must be probed and we can rely
1257 * on dup_mmap() which preserves the already installed bp's.
1258 *
1259 * attr.enable_on_exec means that exec/mmap will install the
1260 * breakpoints we need.
1261 */
1262 done = filter->nr_systemwide ||
1263 event->parent || event->attr.enable_on_exec ||
1264 trace_uprobe_filter_event(filter, event);
1265 list_add(&event->hw.tp_list, &filter->perf_events);
1266 } else {
1267 done = filter->nr_systemwide;
1268 filter->nr_systemwide++;
1269 }
1270 write_unlock(&filter->rwlock);
1271
1272 return done;
1273 }
1274
1275 static int uprobe_perf_close(struct trace_event_call *call,
1276 struct perf_event *event)
1277 {
1278 struct trace_probe *pos, *tp;
1279 struct trace_uprobe *tu;
1280 int ret = 0;
1281
1282 tp = trace_probe_primary_from_call(call);
1283 if (WARN_ON_ONCE(!tp))
1284 return -ENODEV;
1285
1286 tu = container_of(tp, struct trace_uprobe, tp);
1287 if (trace_uprobe_filter_remove(tu->tp.event->filter, event))
1288 return 0;
1289
1290 list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
1291 tu = container_of(pos, struct trace_uprobe, tp);
1292 ret = uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
1293 if (ret)
1294 break;
1295 }
1296
1297 return ret;
1298 }
1299
1300 static int uprobe_perf_open(struct trace_event_call *call,
1301 struct perf_event *event)
1302 {
1303 struct trace_probe *pos, *tp;
1304 struct trace_uprobe *tu;
1305 int err = 0;
1306
1307 tp = trace_probe_primary_from_call(call);
1308 if (WARN_ON_ONCE(!tp))
1309 return -ENODEV;
1310
1311 tu = container_of(tp, struct trace_uprobe, tp);
1312 if (trace_uprobe_filter_add(tu->tp.event->filter, event))
1313 return 0;
1314
1315 list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
1316 tu = container_of(pos, struct trace_uprobe, tp);
1317 err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
1318 if (err) {
1319 uprobe_perf_close(call, event);
1320 break;
1321 }
1322 }
1323
1324 return err;
1325 }
1326
1327 static bool uprobe_perf_filter(struct uprobe_consumer *uc,
1328 enum uprobe_filter_ctx ctx, struct mm_struct *mm)
1329 {
1330 struct trace_uprobe_filter *filter;
1331 struct trace_uprobe *tu;
1332 int ret;
1333
1334 tu = container_of(uc, struct trace_uprobe, consumer);
1335 filter = tu->tp.event->filter;
1336
1337 read_lock(&filter->rwlock);
1338 ret = __uprobe_perf_filter(filter, mm);
1339 read_unlock(&filter->rwlock);
1340
1341 return ret;
1342 }
1343
1344 static void __uprobe_perf_func(struct trace_uprobe *tu,
1345 unsigned long func, struct pt_regs *regs,
1346 struct uprobe_cpu_buffer *ucb, int dsize)
1347 {
1348 struct trace_event_call *call = trace_probe_event_call(&tu->tp);
1349 struct uprobe_trace_entry_head *entry;
1350 struct hlist_head *head;
1351 void *data;
1352 int size, esize;
1353 int rctx;
1354
1355 if (bpf_prog_array_valid(call)) {
1356 u32 ret;
1357
1358 preempt_disable();
1359 ret = trace_call_bpf(call, regs);
1360 preempt_enable();
1361 if (!ret)
1362 return;
1363 }
1364
1365 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1366
1367 size = esize + tu->tp.size + dsize;
1368 size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
1369 if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
1370 return;
1371
1372 preempt_disable();
1373 head = this_cpu_ptr(call->perf_events);
1374 if (hlist_empty(head))
1375 goto out;
1376
1377 entry = perf_trace_buf_alloc(size, NULL, &rctx);
1378 if (!entry)
1379 goto out;
1380
1381 if (is_ret_probe(tu)) {
1382 entry->vaddr[0] = func;
1383 entry->vaddr[1] = instruction_pointer(regs);
1384 data = DATAOF_TRACE_ENTRY(entry, true);
1385 } else {
1386 entry->vaddr[0] = instruction_pointer(regs);
1387 data = DATAOF_TRACE_ENTRY(entry, false);
1388 }
1389
1390 memcpy(data, ucb->buf, tu->tp.size + dsize);
1391
1392 if (size - esize > tu->tp.size + dsize) {
1393 int len = tu->tp.size + dsize;
1394
1395 memset(data + len, 0, size - esize - len);
1396 }
1397
1398 perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
1399 head, NULL);
1400 out:
1401 preempt_enable();
1402 }
1403
1404 /* uprobe profile handler */
1405 static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs,
1406 struct uprobe_cpu_buffer *ucb, int dsize)
1407 {
1408 if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
1409 return UPROBE_HANDLER_REMOVE;
1410
1411 if (!is_ret_probe(tu))
1412 __uprobe_perf_func(tu, 0, regs, ucb, dsize);
1413 return 0;
1414 }
1415
1416 static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
1417 struct pt_regs *regs,
1418 struct uprobe_cpu_buffer *ucb, int dsize)
1419 {
1420 __uprobe_perf_func(tu, func, regs, ucb, dsize);
1421 }
1422
1423 int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type,
1424 const char **filename, u64 *probe_offset,
1425 u64 *probe_addr, bool perf_type_tracepoint)
1426 {
1427 const char *pevent = trace_event_name(event->tp_event);
1428 const char *group = event->tp_event->class->system;
1429 struct trace_uprobe *tu;
1430
1431 if (perf_type_tracepoint)
1432 tu = find_probe_event(pevent, group);
1433 else
1434 tu = trace_uprobe_primary_from_call(event->tp_event);
1435 if (!tu)
1436 return -EINVAL;
1437
1438 *fd_type = is_ret_probe(tu) ? BPF_FD_TYPE_URETPROBE
1439 : BPF_FD_TYPE_UPROBE;
1440 *filename = tu->filename;
1441 *probe_offset = tu->offset;
1442 *probe_addr = 0;
1443 return 0;
1444 }
1445 #endif /* CONFIG_PERF_EVENTS */
1446
1447 static int
1448 trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
1449 void *data)
1450 {
1451 struct trace_event_file *file = data;
1452
1453 switch (type) {
1454 case TRACE_REG_REGISTER:
1455 return probe_event_enable(event, file, NULL);
1456
1457 case TRACE_REG_UNREGISTER:
1458 probe_event_disable(event, file);
1459 return 0;
1460
1461 #ifdef CONFIG_PERF_EVENTS
1462 case TRACE_REG_PERF_REGISTER:
1463 return probe_event_enable(event, NULL, uprobe_perf_filter);
1464
1465 case TRACE_REG_PERF_UNREGISTER:
1466 probe_event_disable(event, NULL);
1467 return 0;
1468
1469 case TRACE_REG_PERF_OPEN:
1470 return uprobe_perf_open(event, data);
1471
1472 case TRACE_REG_PERF_CLOSE:
1473 return uprobe_perf_close(event, data);
1474
1475 #endif
1476 default:
1477 return 0;
1478 }
1479 }
1480
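/*
 * Breakpoint-hit handler: record the hit, fetch arguments into a per-cpu
 * buffer and dispatch to the trace and/or perf handlers.
 */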
1481 static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
1482 {
1483 struct trace_uprobe *tu;
1484 struct uprobe_dispatch_data udd;
1485 struct uprobe_cpu_buffer *ucb;
1486 int dsize, esize;
1487 int ret = 0;
1488
1489
1490 tu = container_of(con, struct trace_uprobe, consumer);
1491 tu->nhit++;
1492
1493 udd.tu = tu;
1494 udd.bp_addr = instruction_pointer(regs);
1495
1496 current->utask->vaddr = (unsigned long) &udd;
1497
1498 if (WARN_ON_ONCE(!uprobe_cpu_buffer))
1499 return 0;
1500
1501 dsize = __get_data_size(&tu->tp, regs);
1502 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1503
1504 ucb = uprobe_buffer_get();
1505 store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);
1506
1507 if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
1508 ret |= uprobe_trace_func(tu, regs, ucb, dsize);
1509
1510 #ifdef CONFIG_PERF_EVENTS
1511 if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
1512 ret |= uprobe_perf_func(tu, regs, ucb, dsize);
1513 #endif
1514 uprobe_buffer_put(ucb);
1515 return ret;
1516 }
1517
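/*
 * Function-return handler: like uprobe_dispatcher() but also carries the
 * address of the probed function in @func.
 */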
1518 static int uretprobe_dispatcher(struct uprobe_consumer *con,
1519 unsigned long func, struct pt_regs *regs)
1520 {
1521 struct trace_uprobe *tu;
1522 struct uprobe_dispatch_data udd;
1523 struct uprobe_cpu_buffer *ucb;
1524 int dsize, esize;
1525
1526 tu = container_of(con, struct trace_uprobe, consumer);
1527
1528 udd.tu = tu;
1529 udd.bp_addr = func;
1530
1531 current->utask->vaddr = (unsigned long) &udd;
1532
1533 if (WARN_ON_ONCE(!uprobe_cpu_buffer))
1534 return 0;
1535
1536 dsize = __get_data_size(&tu->tp, regs);
1537 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1538
1539 ucb = uprobe_buffer_get();
1540 store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);
1541
1542 if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
1543 uretprobe_trace_func(tu, func, regs, ucb, dsize);
1544
1545 #ifdef CONFIG_PERF_EVENTS
1546 if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
1547 uretprobe_perf_func(tu, func, regs, ucb, dsize);
1548 #endif
1549 uprobe_buffer_put(ucb);
1550 return 0;
1551 }
1552
1553 static struct trace_event_functions uprobe_funcs = {
1554 .trace = print_uprobe_event
1555 };
1556
1557 static struct trace_event_fields uprobe_fields_array[] = {
1558 { .type = TRACE_FUNCTION_TYPE,
1559 .define_fields = uprobe_event_define_fields },
1560 {}
1561 };
1562
1563 static inline void init_trace_event_call(struct trace_uprobe *tu)
1564 {
1565 struct trace_event_call *call = trace_probe_event_call(&tu->tp);
1566 call->event.funcs = &uprobe_funcs;
1567 call->class->fields_array = uprobe_fields_array;
1568
1569 call->flags = TRACE_EVENT_FL_UPROBE | TRACE_EVENT_FL_CAP_ANY;
1570 call->class->reg = trace_uprobe_register;
1571 }
1572
1573 static int register_uprobe_event(struct trace_uprobe *tu)
1574 {
1575 init_trace_event_call(tu);
1576
1577 return trace_probe_register_event_call(&tu->tp);
1578 }
1579
1580 static int unregister_uprobe_event(struct trace_uprobe *tu)
1581 {
1582 return trace_probe_unregister_event_call(&tu->tp);
1583 }
1584
1585 #ifdef CONFIG_PERF_EVENTS
1586 struct trace_event_call *
1587 create_local_trace_uprobe(char *name, unsigned long offs,
1588 unsigned long ref_ctr_offset, bool is_return)
1589 {
1590 struct trace_uprobe *tu;
1591 struct path path;
1592 int ret;
1593
1594 ret = kern_path(name, LOOKUP_FOLLOW, &path);
1595 if (ret)
1596 return ERR_PTR(ret);
1597
1598 if (!d_is_reg(path.dentry)) {
1599 path_put(&path);
1600 return ERR_PTR(-EINVAL);
1601 }
1602
1603 /*
1604 * local trace_uprobes are not added to dyn_event, so they are never
1605 * searched in find_probe_event(). Therefore, there is no concern about
1606 * a duplicated name "DUMMY_EVENT" here.
1607 */
1608 tu = alloc_trace_uprobe(UPROBE_EVENT_SYSTEM, "DUMMY_EVENT", 0,
1609 is_return);
1610
1611 if (IS_ERR(tu)) {
1612 pr_info("Failed to allocate trace_uprobe.(%d)\n",
1613 (int)PTR_ERR(tu));
1614 path_put(&path);
1615 return ERR_CAST(tu);
1616 }
1617
1618 tu->offset = offs;
1619 tu->path = path;
1620 tu->ref_ctr_offset = ref_ctr_offset;
1621 tu->filename = kstrdup(name, GFP_KERNEL);
1622 init_trace_event_call(tu);
1623
1624 if (traceprobe_set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0) {
1625 ret = -ENOMEM;
1626 goto error;
1627 }
1628
1629 return trace_probe_event_call(&tu->tp);
1630 error:
1631 free_trace_uprobe(tu);
1632 return ERR_PTR(ret);
1633 }
1634
1635 void destroy_local_trace_uprobe(struct trace_event_call *event_call)
1636 {
1637 struct trace_uprobe *tu;
1638
1639 tu = trace_uprobe_primary_from_call(event_call);
1640
1641 free_trace_uprobe(tu);
1642 }
1643 #endif /* CONFIG_PERF_EVENTS */
1644
1645 /* Make a trace interface for controlling probe points */
1646 static __init int init_uprobe_trace(void)
1647 {
1648 int ret;
1649
1650 ret = dyn_event_register(&trace_uprobe_ops);
1651 if (ret)
1652 return ret;
1653
1654 ret = tracing_init_dentry();
1655 if (ret)
1656 return 0;
1657
1658 trace_create_file("uprobe_events", 0644, NULL,
1659 NULL, &uprobe_events_ops);
1660 /* Profile interface */
1661 trace_create_file("uprobe_profile", 0444, NULL,
1662 NULL, &uprobe_profile_ops);
1663 return 0;
1664 }
1665
1666 fs_initcall(init_uprobe_trace);
1667