1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * uprobes-based tracing events
4 *
5 * Copyright (C) IBM Corporation, 2010-2012
6 * Author: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
7 */
8 #define pr_fmt(fmt) "trace_uprobe: " fmt
9
10 #include <linux/bpf-cgroup.h>
11 #include <linux/security.h>
12 #include <linux/ctype.h>
13 #include <linux/module.h>
14 #include <linux/uaccess.h>
15 #include <linux/uprobes.h>
16 #include <linux/namei.h>
17 #include <linux/string.h>
18 #include <linux/rculist.h>
19 #include <linux/filter.h>
20
21 #include "trace_dynevent.h"
22 #include "trace_probe.h"
23 #include "trace_probe_tmpl.h"
24
25 #define UPROBE_EVENT_SYSTEM "uprobes"
26
27 struct uprobe_trace_entry_head {
28 struct trace_entry ent;
29 unsigned long vaddr[];
30 };
31
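/*
 * An entry probe stores only the probed instruction pointer in vaddr[];
 * a return probe stores the called function address and the return
 * address, hence the one or two slots accounted for below.
 */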
32 #define SIZEOF_TRACE_ENTRY(is_return) \
33 (sizeof(struct uprobe_trace_entry_head) + \
34 sizeof(unsigned long) * (is_return ? 2 : 1))
35
36 #define DATAOF_TRACE_ENTRY(entry, is_return) \
37 ((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))
38
39 static int trace_uprobe_create(const char *raw_command);
40 static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev);
41 static int trace_uprobe_release(struct dyn_event *ev);
42 static bool trace_uprobe_is_busy(struct dyn_event *ev);
43 static bool trace_uprobe_match(const char *system, const char *event,
44 int argc, const char **argv, struct dyn_event *ev);
45
46 static struct dyn_event_operations trace_uprobe_ops = {
47 .create = trace_uprobe_create,
48 .show = trace_uprobe_show,
49 .is_busy = trace_uprobe_is_busy,
50 .free = trace_uprobe_release,
51 .match = trace_uprobe_match,
52 };
53
54 /*
55 * uprobe event core functions
56 */
57 struct trace_uprobe {
58 struct dyn_event devent;
59 struct uprobe_consumer consumer;
60 struct path path;
61 struct inode *inode;
62 char *filename;
63 unsigned long offset;
64 unsigned long ref_ctr_offset;
65 unsigned long nhit;
66 struct trace_probe tp;
67 };
68
static bool is_trace_uprobe(struct dyn_event *ev)
70 {
71 return ev->ops == &trace_uprobe_ops;
72 }
73
static struct trace_uprobe *to_trace_uprobe(struct dyn_event *ev)
75 {
76 return container_of(ev, struct trace_uprobe, devent);
77 }
78
79 /**
80 * for_each_trace_uprobe - iterate over the trace_uprobe list
81 * @pos: the struct trace_uprobe * for each entry
82 * @dpos: the struct dyn_event * to use as a loop cursor
83 */
84 #define for_each_trace_uprobe(pos, dpos) \
85 for_each_dyn_event(dpos) \
86 if (is_trace_uprobe(dpos) && (pos = to_trace_uprobe(dpos)))
87
88 static int register_uprobe_event(struct trace_uprobe *tu);
89 static int unregister_uprobe_event(struct trace_uprobe *tu);
90
91 struct uprobe_dispatch_data {
92 struct trace_uprobe *tu;
93 unsigned long bp_addr;
94 };
95
96 static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
97 static int uretprobe_dispatcher(struct uprobe_consumer *con,
98 unsigned long func, struct pt_regs *regs);
99
100 #ifdef CONFIG_STACK_GROWSUP
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
102 {
103 return addr - (n * sizeof(long));
104 }
105 #else
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
107 {
108 return addr + (n * sizeof(long));
109 }
110 #endif
111
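/* Read the n-th word of the user stack, or 0 if it cannot be read. */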
static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
113 {
114 unsigned long ret;
115 unsigned long addr = user_stack_pointer(regs);
116
117 addr = adjust_stack_addr(addr, n);
118
119 if (copy_from_user(&ret, (void __force __user *) addr, sizeof(ret)))
120 return 0;
121
122 return ret;
123 }
124
125 /*
126 * Uprobes-specific fetch functions
127 */
128 static nokprobe_inline int
probe_mem_read(void *dest, void *src, size_t size)
130 {
131 void __user *vaddr = (void __force __user *)src;
132
133 return copy_from_user(dest, vaddr, size) ? -EFAULT : 0;
134 }
135
136 static nokprobe_inline int
probe_mem_read_user(void *dest, void *src, size_t size)
138 {
139 return probe_mem_read(dest, src, size);
140 }
141
142 /*
143 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
144 * length and relative data location.
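 * (make_data_loc() packs the maximum length into the upper 16 bits of that
 * u32 and the offset relative to @base into the lower 16 bits.)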
145 */
146 static nokprobe_inline int
fetch_store_string(unsigned long addr, void *dest, void *base)
148 {
149 long ret;
150 u32 loc = *(u32 *)dest;
151 int maxlen = get_loc_len(loc);
152 u8 *dst = get_loc_data(dest, base);
153 void __user *src = (void __force __user *) addr;
154
155 if (unlikely(!maxlen))
156 return -ENOMEM;
157
158 if (addr == FETCH_TOKEN_COMM)
159 ret = strlcpy(dst, current->comm, maxlen);
160 else
161 ret = strncpy_from_user(dst, src, maxlen);
162 if (ret >= 0) {
163 if (ret == maxlen)
164 dst[ret - 1] = '\0';
165 else
166 /*
167 * Include the terminating null byte. In this case it
168 * was copied by strncpy_from_user but not accounted
169 * for in ret.
170 */
171 ret++;
172 *(u32 *)dest = make_data_loc(ret, (void *)dst - base);
173 } else
174 *(u32 *)dest = make_data_loc(0, (void *)dst - base);
175
176 return ret;
177 }
178
179 static nokprobe_inline int
fetch_store_string_user(unsigned long addr, void *dest, void *base)
181 {
182 return fetch_store_string(addr, dest, base);
183 }
184
/* Return the length of the string, including the terminating null byte */
186 static nokprobe_inline int
fetch_store_strlen(unsigned long addr)
188 {
189 int len;
190 void __user *vaddr = (void __force __user *) addr;
191
192 if (addr == FETCH_TOKEN_COMM)
193 len = strlen(current->comm) + 1;
194 else
195 len = strnlen_user(vaddr, MAX_STRING_SIZE);
196
197 return (len > MAX_STRING_SIZE) ? 0 : len;
198 }
199
200 static nokprobe_inline int
fetch_store_strlen_user(unsigned long addr)
202 {
203 return fetch_store_strlen(addr);
204 }
205
static unsigned long translate_user_vaddr(unsigned long file_offset)
207 {
208 unsigned long base_addr;
209 struct uprobe_dispatch_data *udd;
210
211 udd = (void *) current->utask->vaddr;
212
213 base_addr = udd->bp_addr - udd->tu->offset;
214 return base_addr + file_offset;
215 }
216
217 /* Note that we don't verify it, since the code does not come from user space */
218 static int
process_fetch_insn(struct fetch_insn *code, void *rec, void *dest,
220 void *base)
221 {
222 struct pt_regs *regs = rec;
223 unsigned long val;
224
225 /* 1st stage: get value from context */
226 switch (code->op) {
227 case FETCH_OP_REG:
228 val = regs_get_register(regs, code->param);
229 break;
230 case FETCH_OP_STACK:
231 val = get_user_stack_nth(regs, code->param);
232 break;
233 case FETCH_OP_STACKP:
234 val = user_stack_pointer(regs);
235 break;
236 case FETCH_OP_RETVAL:
237 val = regs_return_value(regs);
238 break;
239 case FETCH_OP_IMM:
240 val = code->immediate;
241 break;
242 case FETCH_OP_COMM:
243 val = FETCH_TOKEN_COMM;
244 break;
245 case FETCH_OP_DATA:
246 val = (unsigned long)code->data;
247 break;
248 case FETCH_OP_FOFFS:
249 val = translate_user_vaddr(code->immediate);
250 break;
251 default:
252 return -EILSEQ;
253 }
254 code++;
255
256 return process_fetch_insn_bottom(code, val, dest, base);
257 }
NOKPROBE_SYMBOL(process_fetch_insn)
259
260 static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
261 {
262 rwlock_init(&filter->rwlock);
263 filter->nr_systemwide = 0;
264 INIT_LIST_HEAD(&filter->perf_events);
265 }
266
static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter)
268 {
269 return !filter->nr_systemwide && list_empty(&filter->perf_events);
270 }
271
static inline bool is_ret_probe(struct trace_uprobe *tu)
273 {
274 return tu->consumer.ret_handler != NULL;
275 }
276
static bool trace_uprobe_is_busy(struct dyn_event *ev)
278 {
279 struct trace_uprobe *tu = to_trace_uprobe(ev);
280
281 return trace_probe_is_enabled(&tu->tp);
282 }
283
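/* Match "PATH:OFFSET[(REF)]" in argv[0] and any remaining argument strings. */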
static bool trace_uprobe_match_command_head(struct trace_uprobe *tu,
285 int argc, const char **argv)
286 {
287 char buf[MAX_ARGSTR_LEN + 1];
288 int len;
289
290 if (!argc)
291 return true;
292
293 len = strlen(tu->filename);
294 if (strncmp(tu->filename, argv[0], len) || argv[0][len] != ':')
295 return false;
296
297 if (tu->ref_ctr_offset == 0)
298 snprintf(buf, sizeof(buf), "0x%0*lx",
299 (int)(sizeof(void *) * 2), tu->offset);
300 else
301 snprintf(buf, sizeof(buf), "0x%0*lx(0x%lx)",
302 (int)(sizeof(void *) * 2), tu->offset,
303 tu->ref_ctr_offset);
304 if (strcmp(buf, &argv[0][len + 1]))
305 return false;
306
307 argc--; argv++;
308
309 return trace_probe_match_command_args(&tu->tp, argc, argv);
310 }
311
static bool trace_uprobe_match(const char *system, const char *event,
313 int argc, const char **argv, struct dyn_event *ev)
314 {
315 struct trace_uprobe *tu = to_trace_uprobe(ev);
316
317 return (event[0] == '\0' ||
318 strcmp(trace_probe_name(&tu->tp), event) == 0) &&
319 (!system || strcmp(trace_probe_group_name(&tu->tp), system) == 0) &&
320 trace_uprobe_match_command_head(tu, argc, argv);
321 }
322
323 static nokprobe_inline struct trace_uprobe *
trace_uprobe_primary_from_call(struct trace_event_call *call)
325 {
326 struct trace_probe *tp;
327
328 tp = trace_probe_primary_from_call(call);
329 if (WARN_ON_ONCE(!tp))
330 return NULL;
331
332 return container_of(tp, struct trace_uprobe, tp);
333 }
334
335 /*
336 * Allocate new trace_uprobe and initialize it (including uprobes).
337 */
338 static struct trace_uprobe *
alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
340 {
341 struct trace_uprobe *tu;
342 int ret;
343
344 tu = kzalloc(struct_size(tu, tp.args, nargs), GFP_KERNEL);
345 if (!tu)
346 return ERR_PTR(-ENOMEM);
347
348 ret = trace_probe_init(&tu->tp, event, group, true);
349 if (ret < 0)
350 goto error;
351
352 dyn_event_init(&tu->devent, &trace_uprobe_ops);
353 tu->consumer.handler = uprobe_dispatcher;
354 if (is_ret)
355 tu->consumer.ret_handler = uretprobe_dispatcher;
356 init_trace_uprobe_filter(tu->tp.event->filter);
357 return tu;
358
359 error:
360 kfree(tu);
361
362 return ERR_PTR(ret);
363 }
364
static void free_trace_uprobe(struct trace_uprobe *tu)
366 {
367 if (!tu)
368 return;
369
370 path_put(&tu->path);
371 trace_probe_cleanup(&tu->tp);
372 kfree(tu->filename);
373 kfree(tu);
374 }
375
static struct trace_uprobe *find_probe_event(const char *event, const char *group)
377 {
378 struct dyn_event *pos;
379 struct trace_uprobe *tu;
380
381 for_each_trace_uprobe(tu, pos)
382 if (strcmp(trace_probe_name(&tu->tp), event) == 0 &&
383 strcmp(trace_probe_group_name(&tu->tp), group) == 0)
384 return tu;
385
386 return NULL;
387 }
388
389 /* Unregister a trace_uprobe and probe_event */
static int unregister_trace_uprobe(struct trace_uprobe *tu)
391 {
392 int ret;
393
394 if (trace_probe_has_sibling(&tu->tp))
395 goto unreg;
396
397 /* If there's a reference to the dynamic event */
398 if (trace_event_dyn_busy(trace_probe_event_call(&tu->tp)))
399 return -EBUSY;
400
401 ret = unregister_uprobe_event(tu);
402 if (ret)
403 return ret;
404
405 unreg:
406 dyn_event_remove(&tu->devent);
407 trace_probe_unlink(&tu->tp);
408 free_trace_uprobe(tu);
409 return 0;
410 }
411
static bool trace_uprobe_has_same_uprobe(struct trace_uprobe *orig,
413 struct trace_uprobe *comp)
414 {
415 struct trace_probe_event *tpe = orig->tp.event;
416 struct inode *comp_inode = d_real_inode(comp->path.dentry);
417 int i;
418
419 list_for_each_entry(orig, &tpe->probes, tp.list) {
420 if (comp_inode != d_real_inode(orig->path.dentry) ||
421 comp->offset != orig->offset)
422 continue;
423
424 /*
425 * trace_probe_compare_arg_type() ensured that nr_args and
426 * each argument name and type are same. Let's compare comm.
427 */
428 for (i = 0; i < orig->tp.nr_args; i++) {
429 if (strcmp(orig->tp.args[i].comm,
430 comp->tp.args[i].comm))
431 break;
432 }
433
434 if (i == orig->tp.nr_args)
435 return true;
436 }
437
438 return false;
439 }
440
static int append_trace_uprobe(struct trace_uprobe *tu, struct trace_uprobe *to)
442 {
443 int ret;
444
445 ret = trace_probe_compare_arg_type(&tu->tp, &to->tp);
446 if (ret) {
/* Note that the arguments start at index 2 in the command */
448 trace_probe_log_set_index(ret + 1);
449 trace_probe_log_err(0, DIFF_ARG_TYPE);
450 return -EEXIST;
451 }
452 if (trace_uprobe_has_same_uprobe(to, tu)) {
453 trace_probe_log_set_index(0);
454 trace_probe_log_err(0, SAME_PROBE);
455 return -EEXIST;
456 }
457
458 /* Append to existing event */
459 ret = trace_probe_append(&tu->tp, &to->tp);
460 if (!ret)
461 dyn_event_add(&tu->devent, trace_probe_event_call(&tu->tp));
462
463 return ret;
464 }
465
/*
 * A uprobe with multiple reference counters is not allowed, i.e. if the
 * inode and offset match, the reference counter offset *must* match as
 * well. There is one exception: if the user is replacing an old
 * trace_uprobe with a new one (same group/event), we allow the same
 * uprobe with a new reference counter, as long as the new one does not
 * conflict with any other existing ones.
 */
static int validate_ref_ctr_offset(struct trace_uprobe *new)
476 {
477 struct dyn_event *pos;
478 struct trace_uprobe *tmp;
479 struct inode *new_inode = d_real_inode(new->path.dentry);
480
481 for_each_trace_uprobe(tmp, pos) {
482 if (new_inode == d_real_inode(tmp->path.dentry) &&
483 new->offset == tmp->offset &&
484 new->ref_ctr_offset != tmp->ref_ctr_offset) {
485 pr_warn("Reference counter offset mismatch.");
486 return -EINVAL;
487 }
488 }
489 return 0;
490 }
491
492 /* Register a trace_uprobe and probe_event */
static int register_trace_uprobe(struct trace_uprobe *tu)
494 {
495 struct trace_uprobe *old_tu;
496 int ret;
497
498 mutex_lock(&event_mutex);
499
500 ret = validate_ref_ctr_offset(tu);
501 if (ret)
502 goto end;
503
504 /* register as an event */
505 old_tu = find_probe_event(trace_probe_name(&tu->tp),
506 trace_probe_group_name(&tu->tp));
507 if (old_tu) {
508 if (is_ret_probe(tu) != is_ret_probe(old_tu)) {
509 trace_probe_log_set_index(0);
510 trace_probe_log_err(0, DIFF_PROBE_TYPE);
511 ret = -EEXIST;
512 } else {
513 ret = append_trace_uprobe(tu, old_tu);
514 }
515 goto end;
516 }
517
518 ret = register_uprobe_event(tu);
519 if (ret) {
520 if (ret == -EEXIST) {
521 trace_probe_log_set_index(0);
522 trace_probe_log_err(0, EVENT_EXIST);
523 } else
524 pr_warn("Failed to register probe event(%d)\n", ret);
525 goto end;
526 }
527
528 dyn_event_add(&tu->devent, trace_probe_event_call(&tu->tp));
529
530 end:
531 mutex_unlock(&event_mutex);
532
533 return ret;
534 }
535
536 /*
537 * Argument syntax:
538 * - Add uprobe: p|r[:[GRP/][EVENT]] PATH:OFFSET[%return][(REF)] [FETCHARGS]
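 * - Delete uprobe: -:[GRP/][EVENT]
 *
 * e.g. "p:my_probe /bin/bash:0x4245c0 %ip %ax" - the path, offset and the
 *   x86 register fetchargs here are purely illustrative.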
539 */
static int __trace_uprobe_create(int argc, const char **argv)
541 {
542 struct trace_uprobe *tu;
543 const char *event = NULL, *group = UPROBE_EVENT_SYSTEM;
544 char *arg, *filename, *rctr, *rctr_end, *tmp;
545 char buf[MAX_EVENT_NAME_LEN];
546 char gbuf[MAX_EVENT_NAME_LEN];
547 enum probe_print_type ptype;
548 struct path path;
549 unsigned long offset, ref_ctr_offset;
550 bool is_return = false;
551 int i, ret;
552
553 ref_ctr_offset = 0;
554
555 switch (argv[0][0]) {
556 case 'r':
557 is_return = true;
558 break;
559 case 'p':
560 break;
561 default:
562 return -ECANCELED;
563 }
564
565 if (argc < 2)
566 return -ECANCELED;
567
568 if (argv[0][1] == ':')
569 event = &argv[0][2];
570
571 if (!strchr(argv[1], '/'))
572 return -ECANCELED;
573
574 filename = kstrdup(argv[1], GFP_KERNEL);
575 if (!filename)
576 return -ENOMEM;
577
578 /* Find the last occurrence, in case the path contains ':' too. */
579 arg = strrchr(filename, ':');
580 if (!arg || !isdigit(arg[1])) {
581 kfree(filename);
582 return -ECANCELED;
583 }
584
585 trace_probe_log_init("trace_uprobe", argc, argv);
586 trace_probe_log_set_index(1); /* filename is the 2nd argument */
587
588 *arg++ = '\0';
589 ret = kern_path(filename, LOOKUP_FOLLOW, &path);
590 if (ret) {
591 trace_probe_log_err(0, FILE_NOT_FOUND);
592 kfree(filename);
593 trace_probe_log_clear();
594 return ret;
595 }
596 if (!d_is_reg(path.dentry)) {
597 trace_probe_log_err(0, NO_REGULAR_FILE);
598 ret = -EINVAL;
599 goto fail_address_parse;
600 }
601
602 /* Parse reference counter offset if specified. */
603 rctr = strchr(arg, '(');
604 if (rctr) {
605 rctr_end = strchr(rctr, ')');
606 if (!rctr_end) {
607 ret = -EINVAL;
608 rctr_end = rctr + strlen(rctr);
609 trace_probe_log_err(rctr_end - filename,
610 REFCNT_OPEN_BRACE);
611 goto fail_address_parse;
612 } else if (rctr_end[1] != '\0') {
613 ret = -EINVAL;
614 trace_probe_log_err(rctr_end + 1 - filename,
615 BAD_REFCNT_SUFFIX);
616 goto fail_address_parse;
617 }
618
619 *rctr++ = '\0';
620 *rctr_end = '\0';
621 ret = kstrtoul(rctr, 0, &ref_ctr_offset);
622 if (ret) {
623 trace_probe_log_err(rctr - filename, BAD_REFCNT);
624 goto fail_address_parse;
625 }
626 }
627
628 /* Check if there is %return suffix */
629 tmp = strchr(arg, '%');
630 if (tmp) {
631 if (!strcmp(tmp, "%return")) {
632 *tmp = '\0';
633 is_return = true;
634 } else {
635 trace_probe_log_err(tmp - filename, BAD_ADDR_SUFFIX);
636 ret = -EINVAL;
637 goto fail_address_parse;
638 }
639 }
640
641 /* Parse uprobe offset. */
642 ret = kstrtoul(arg, 0, &offset);
643 if (ret) {
644 trace_probe_log_err(arg - filename, BAD_UPROBE_OFFS);
645 goto fail_address_parse;
646 }
647
648 /* setup a probe */
649 trace_probe_log_set_index(0);
650 if (event) {
651 ret = traceprobe_parse_event_name(&event, &group, gbuf,
652 event - argv[0]);
653 if (ret)
654 goto fail_address_parse;
655 }
656
657 if (!event) {
658 char *tail;
659 char *ptr;
660
661 tail = kstrdup(kbasename(filename), GFP_KERNEL);
662 if (!tail) {
663 ret = -ENOMEM;
664 goto fail_address_parse;
665 }
666
667 ptr = strpbrk(tail, ".-_");
668 if (ptr)
669 *ptr = '\0';
670
671 snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);
672 event = buf;
673 kfree(tail);
674 }
675
676 argc -= 2;
677 argv += 2;
678
679 tu = alloc_trace_uprobe(group, event, argc, is_return);
680 if (IS_ERR(tu)) {
681 ret = PTR_ERR(tu);
682 /* This must return -ENOMEM otherwise there is a bug */
683 WARN_ON_ONCE(ret != -ENOMEM);
684 goto fail_address_parse;
685 }
686 tu->offset = offset;
687 tu->ref_ctr_offset = ref_ctr_offset;
688 tu->path = path;
689 tu->filename = filename;
690
691 /* parse arguments */
692 for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
693 trace_probe_log_set_index(i + 2);
694 ret = traceprobe_parse_probe_arg(&tu->tp, i, argv[i],
695 is_return ? TPARG_FL_RETURN : 0);
696 if (ret)
697 goto error;
698 }
699
700 ptype = is_ret_probe(tu) ? PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL;
701 ret = traceprobe_set_print_fmt(&tu->tp, ptype);
702 if (ret < 0)
703 goto error;
704
705 ret = register_trace_uprobe(tu);
706 if (!ret)
707 goto out;
708
709 error:
710 free_trace_uprobe(tu);
711 out:
712 trace_probe_log_clear();
713 return ret;
714
715 fail_address_parse:
716 trace_probe_log_clear();
717 path_put(&path);
718 kfree(filename);
719
720 return ret;
721 }
722
int trace_uprobe_create(const char *raw_command)
724 {
725 return trace_probe_create(raw_command, __trace_uprobe_create);
726 }
727
static int create_or_delete_trace_uprobe(const char *raw_command)
729 {
730 int ret;
731
732 if (raw_command[0] == '-')
733 return dyn_event_release(raw_command, &trace_uprobe_ops);
734
735 ret = trace_uprobe_create(raw_command);
736 return ret == -ECANCELED ? -EINVAL : ret;
737 }
738
static int trace_uprobe_release(struct dyn_event *ev)
740 {
741 struct trace_uprobe *tu = to_trace_uprobe(ev);
742
743 return unregister_trace_uprobe(tu);
744 }
745
746 /* Probes listing interfaces */
static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev)
748 {
749 struct trace_uprobe *tu = to_trace_uprobe(ev);
750 char c = is_ret_probe(tu) ? 'r' : 'p';
751 int i;
752
753 seq_printf(m, "%c:%s/%s %s:0x%0*lx", c, trace_probe_group_name(&tu->tp),
754 trace_probe_name(&tu->tp), tu->filename,
755 (int)(sizeof(void *) * 2), tu->offset);
756
757 if (tu->ref_ctr_offset)
758 seq_printf(m, "(0x%lx)", tu->ref_ctr_offset);
759
760 for (i = 0; i < tu->tp.nr_args; i++)
761 seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);
762
763 seq_putc(m, '\n');
764 return 0;
765 }
766
static int probes_seq_show(struct seq_file *m, void *v)
768 {
769 struct dyn_event *ev = v;
770
771 if (!is_trace_uprobe(ev))
772 return 0;
773
774 return trace_uprobe_show(m, ev);
775 }
776
777 static const struct seq_operations probes_seq_op = {
778 .start = dyn_event_seq_start,
779 .next = dyn_event_seq_next,
780 .stop = dyn_event_seq_stop,
781 .show = probes_seq_show
782 };
783
static int probes_open(struct inode *inode, struct file *file)
785 {
786 int ret;
787
788 ret = security_locked_down(LOCKDOWN_TRACEFS);
789 if (ret)
790 return ret;
791
792 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
793 ret = dyn_events_release_all(&trace_uprobe_ops);
794 if (ret)
795 return ret;
796 }
797
798 return seq_open(file, &probes_seq_op);
799 }
800
static ssize_t probes_write(struct file *file, const char __user *buffer,
802 size_t count, loff_t *ppos)
803 {
804 return trace_parse_run_command(file, buffer, count, ppos,
805 create_or_delete_trace_uprobe);
806 }
807
808 static const struct file_operations uprobe_events_ops = {
809 .owner = THIS_MODULE,
810 .open = probes_open,
811 .read = seq_read,
812 .llseek = seq_lseek,
813 .release = seq_release,
814 .write = probes_write,
815 };
816
817 /* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
819 {
820 struct dyn_event *ev = v;
821 struct trace_uprobe *tu;
822
823 if (!is_trace_uprobe(ev))
824 return 0;
825
826 tu = to_trace_uprobe(ev);
827 seq_printf(m, " %s %-44s %15lu\n", tu->filename,
828 trace_probe_name(&tu->tp), tu->nhit);
829 return 0;
830 }
831
832 static const struct seq_operations profile_seq_op = {
833 .start = dyn_event_seq_start,
834 .next = dyn_event_seq_next,
835 .stop = dyn_event_seq_stop,
836 .show = probes_profile_seq_show
837 };
838
static int profile_open(struct inode *inode, struct file *file)
840 {
841 int ret;
842
843 ret = security_locked_down(LOCKDOWN_TRACEFS);
844 if (ret)
845 return ret;
846
847 return seq_open(file, &profile_seq_op);
848 }
849
850 static const struct file_operations uprobe_profile_ops = {
851 .owner = THIS_MODULE,
852 .open = profile_open,
853 .read = seq_read,
854 .llseek = seq_lseek,
855 .release = seq_release,
856 };
857
858 struct uprobe_cpu_buffer {
859 struct mutex mutex;
860 void *buf;
861 };
862 static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer;
863 static int uprobe_buffer_refcnt;
864
static int uprobe_buffer_init(void)
866 {
867 int cpu, err_cpu;
868
869 uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer);
870 if (uprobe_cpu_buffer == NULL)
871 return -ENOMEM;
872
873 for_each_possible_cpu(cpu) {
874 struct page *p = alloc_pages_node(cpu_to_node(cpu),
875 GFP_KERNEL, 0);
876 if (p == NULL) {
877 err_cpu = cpu;
878 goto err;
879 }
880 per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);
881 mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex);
882 }
883
884 return 0;
885
886 err:
887 for_each_possible_cpu(cpu) {
888 if (cpu == err_cpu)
889 break;
890 free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf);
891 }
892
893 free_percpu(uprobe_cpu_buffer);
894 return -ENOMEM;
895 }
896
static int uprobe_buffer_enable(void)
898 {
899 int ret = 0;
900
901 BUG_ON(!mutex_is_locked(&event_mutex));
902
903 if (uprobe_buffer_refcnt++ == 0) {
904 ret = uprobe_buffer_init();
905 if (ret < 0)
906 uprobe_buffer_refcnt--;
907 }
908
909 return ret;
910 }
911
static void uprobe_buffer_disable(void)
913 {
914 int cpu;
915
916 BUG_ON(!mutex_is_locked(&event_mutex));
917
918 if (--uprobe_buffer_refcnt == 0) {
919 for_each_possible_cpu(cpu)
920 free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
921 cpu)->buf);
922
923 free_percpu(uprobe_cpu_buffer);
924 uprobe_cpu_buffer = NULL;
925 }
926 }
927
static struct uprobe_cpu_buffer *uprobe_buffer_get(void)
929 {
930 struct uprobe_cpu_buffer *ucb;
931 int cpu;
932
933 cpu = raw_smp_processor_id();
934 ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu);
935
936 /*
937 * Use per-cpu buffers for fastest access, but we might migrate
938 * so the mutex makes sure we have sole access to it.
939 */
940 mutex_lock(&ucb->mutex);
941
942 return ucb;
943 }
944
static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
946 {
947 mutex_unlock(&ucb->mutex);
948 }
949
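/* Emit a single trace entry into the ring buffer behind @trace_file. */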
static void __uprobe_trace_func(struct trace_uprobe *tu,
951 unsigned long func, struct pt_regs *regs,
952 struct uprobe_cpu_buffer *ucb, int dsize,
953 struct trace_event_file *trace_file)
954 {
955 struct uprobe_trace_entry_head *entry;
956 struct trace_event_buffer fbuffer;
957 void *data;
958 int size, esize;
959 struct trace_event_call *call = trace_probe_event_call(&tu->tp);
960
961 WARN_ON(call != trace_file->event_call);
962
963 if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE))
964 return;
965
966 if (trace_trigger_soft_disabled(trace_file))
967 return;
968
969 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
970 size = esize + tu->tp.size + dsize;
971 entry = trace_event_buffer_reserve(&fbuffer, trace_file, size);
972 if (!entry)
973 return;
974
975 if (is_ret_probe(tu)) {
976 entry->vaddr[0] = func;
977 entry->vaddr[1] = instruction_pointer(regs);
978 data = DATAOF_TRACE_ENTRY(entry, true);
979 } else {
980 entry->vaddr[0] = instruction_pointer(regs);
981 data = DATAOF_TRACE_ENTRY(entry, false);
982 }
983
984 memcpy(data, ucb->buf, tu->tp.size + dsize);
985
986 trace_event_buffer_commit(&fbuffer);
987 }
988
989 /* uprobe handler */
static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
991 struct uprobe_cpu_buffer *ucb, int dsize)
992 {
993 struct event_file_link *link;
994
995 if (is_ret_probe(tu))
996 return 0;
997
998 rcu_read_lock();
999 trace_probe_for_each_link_rcu(link, &tu->tp)
1000 __uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file);
1001 rcu_read_unlock();
1002
1003 return 0;
1004 }
1005
static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
1007 struct pt_regs *regs,
1008 struct uprobe_cpu_buffer *ucb, int dsize)
1009 {
1010 struct event_file_link *link;
1011
1012 rcu_read_lock();
1013 trace_probe_for_each_link_rcu(link, &tu->tp)
1014 __uprobe_trace_func(tu, func, regs, ucb, dsize, link->file);
1015 rcu_read_unlock();
1016 }
1017
1018 /* Event entry printers */
1019 static enum print_line_t
print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
1021 {
1022 struct uprobe_trace_entry_head *entry;
1023 struct trace_seq *s = &iter->seq;
1024 struct trace_uprobe *tu;
1025 u8 *data;
1026
1027 entry = (struct uprobe_trace_entry_head *)iter->ent;
1028 tu = trace_uprobe_primary_from_call(
1029 container_of(event, struct trace_event_call, event));
1030 if (unlikely(!tu))
1031 goto out;
1032
1033 if (is_ret_probe(tu)) {
1034 trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
1035 trace_probe_name(&tu->tp),
1036 entry->vaddr[1], entry->vaddr[0]);
1037 data = DATAOF_TRACE_ENTRY(entry, true);
1038 } else {
1039 trace_seq_printf(s, "%s: (0x%lx)",
1040 trace_probe_name(&tu->tp),
1041 entry->vaddr[0]);
1042 data = DATAOF_TRACE_ENTRY(entry, false);
1043 }
1044
1045 if (print_probe_args(s, tu->tp.args, tu->tp.nr_args, data, entry) < 0)
1046 goto out;
1047
1048 trace_seq_putc(s, '\n');
1049
1050 out:
1051 return trace_handle_return(s);
1052 }
1053
1054 typedef bool (*filter_func_t)(struct uprobe_consumer *self,
1055 enum uprobe_filter_ctx ctx,
1056 struct mm_struct *mm);
1057
static int trace_uprobe_enable(struct trace_uprobe *tu, filter_func_t filter)
1059 {
1060 int ret;
1061
1062 tu->consumer.filter = filter;
1063 tu->inode = d_real_inode(tu->path.dentry);
1064
1065 if (tu->ref_ctr_offset)
1066 ret = uprobe_register_refctr(tu->inode, tu->offset,
1067 tu->ref_ctr_offset, &tu->consumer);
1068 else
1069 ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
1070
1071 if (ret)
1072 tu->inode = NULL;
1073
1074 return ret;
1075 }
1076
static void __probe_event_disable(struct trace_probe *tp)
1078 {
1079 struct trace_uprobe *tu;
1080
1081 tu = container_of(tp, struct trace_uprobe, tp);
1082 WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));
1083
1084 list_for_each_entry(tu, trace_probe_probe_list(tp), tp.list) {
1085 if (!tu->inode)
1086 continue;
1087
1088 uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
1089 tu->inode = NULL;
1090 }
1091 }
1092
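/*
 * Attach the uprobe(s) backing this event. ftrace (TP_FLAG_TRACE) and
 * perf (TP_FLAG_PROFILE) use of a uprobe event are mutually exclusive.
 */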
static int probe_event_enable(struct trace_event_call *call,
1094 struct trace_event_file *file, filter_func_t filter)
1095 {
1096 struct trace_probe *tp;
1097 struct trace_uprobe *tu;
1098 bool enabled;
1099 int ret;
1100
1101 tp = trace_probe_primary_from_call(call);
1102 if (WARN_ON_ONCE(!tp))
1103 return -ENODEV;
1104 enabled = trace_probe_is_enabled(tp);
1105
1106 /* This may also change "enabled" state */
1107 if (file) {
1108 if (trace_probe_test_flag(tp, TP_FLAG_PROFILE))
1109 return -EINTR;
1110
1111 ret = trace_probe_add_file(tp, file);
1112 if (ret < 0)
1113 return ret;
1114 } else {
1115 if (trace_probe_test_flag(tp, TP_FLAG_TRACE))
1116 return -EINTR;
1117
1118 trace_probe_set_flag(tp, TP_FLAG_PROFILE);
1119 }
1120
1121 tu = container_of(tp, struct trace_uprobe, tp);
1122 WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));
1123
1124 if (enabled)
1125 return 0;
1126
1127 ret = uprobe_buffer_enable();
1128 if (ret)
1129 goto err_flags;
1130
1131 list_for_each_entry(tu, trace_probe_probe_list(tp), tp.list) {
1132 ret = trace_uprobe_enable(tu, filter);
1133 if (ret) {
1134 __probe_event_disable(tp);
1135 goto err_buffer;
1136 }
1137 }
1138
1139 return 0;
1140
1141 err_buffer:
1142 uprobe_buffer_disable();
1143
1144 err_flags:
1145 if (file)
1146 trace_probe_remove_file(tp, file);
1147 else
1148 trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
1149
1150 return ret;
1151 }
1152
static void probe_event_disable(struct trace_event_call *call,
1154 struct trace_event_file *file)
1155 {
1156 struct trace_probe *tp;
1157
1158 tp = trace_probe_primary_from_call(call);
1159 if (WARN_ON_ONCE(!tp))
1160 return;
1161
1162 if (!trace_probe_is_enabled(tp))
1163 return;
1164
1165 if (file) {
1166 if (trace_probe_remove_file(tp, file) < 0)
1167 return;
1168
1169 if (trace_probe_is_enabled(tp))
1170 return;
1171 } else
1172 trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
1173
1174 __probe_event_disable(tp);
1175 uprobe_buffer_disable();
1176 }
1177
static int uprobe_event_define_fields(struct trace_event_call *event_call)
1179 {
1180 int ret, size;
1181 struct uprobe_trace_entry_head field;
1182 struct trace_uprobe *tu;
1183
1184 tu = trace_uprobe_primary_from_call(event_call);
1185 if (unlikely(!tu))
1186 return -ENODEV;
1187
1188 if (is_ret_probe(tu)) {
1189 DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
1190 DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0);
1191 size = SIZEOF_TRACE_ENTRY(true);
1192 } else {
1193 DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
1194 size = SIZEOF_TRACE_ENTRY(false);
1195 }
1196
1197 return traceprobe_define_arg_fields(event_call, size, &tu->tp);
1198 }
1199
1200 #ifdef CONFIG_PERF_EVENTS
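/* Return true if a system-wide event exists or some attached event targets @mm. */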
1201 static bool
__uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
1203 {
1204 struct perf_event *event;
1205
1206 if (filter->nr_systemwide)
1207 return true;
1208
1209 list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
1210 if (event->hw.target->mm == mm)
1211 return true;
1212 }
1213
1214 return false;
1215 }
1216
1217 static inline bool
trace_uprobe_filter_event(struct trace_uprobe_filter *filter,
1219 struct perf_event *event)
1220 {
1221 return __uprobe_perf_filter(filter, event->hw.target->mm);
1222 }
1223
static bool trace_uprobe_filter_remove(struct trace_uprobe_filter *filter,
1225 struct perf_event *event)
1226 {
1227 bool done;
1228
1229 write_lock(&filter->rwlock);
1230 if (event->hw.target) {
1231 list_del(&event->hw.tp_list);
1232 done = filter->nr_systemwide ||
1233 (event->hw.target->flags & PF_EXITING) ||
1234 trace_uprobe_filter_event(filter, event);
1235 } else {
1236 filter->nr_systemwide--;
1237 done = filter->nr_systemwide;
1238 }
1239 write_unlock(&filter->rwlock);
1240
1241 return done;
1242 }
1243
1244 /* This returns true if the filter always covers target mm */
static bool trace_uprobe_filter_add(struct trace_uprobe_filter *filter,
1246 struct perf_event *event)
1247 {
1248 bool done;
1249
1250 write_lock(&filter->rwlock);
1251 if (event->hw.target) {
1252 /*
1253 * event->parent != NULL means copy_process(), we can avoid
1254 * uprobe_apply(). current->mm must be probed and we can rely
1255 * on dup_mmap() which preserves the already installed bp's.
1256 *
1257 * attr.enable_on_exec means that exec/mmap will install the
1258 * breakpoints we need.
1259 */
1260 done = filter->nr_systemwide ||
1261 event->parent || event->attr.enable_on_exec ||
1262 trace_uprobe_filter_event(filter, event);
1263 list_add(&event->hw.tp_list, &filter->perf_events);
1264 } else {
1265 done = filter->nr_systemwide;
1266 filter->nr_systemwide++;
1267 }
1268 write_unlock(&filter->rwlock);
1269
1270 return done;
1271 }
1272
static int uprobe_perf_close(struct trace_event_call *call,
1274 struct perf_event *event)
1275 {
1276 struct trace_probe *tp;
1277 struct trace_uprobe *tu;
1278 int ret = 0;
1279
1280 tp = trace_probe_primary_from_call(call);
1281 if (WARN_ON_ONCE(!tp))
1282 return -ENODEV;
1283
1284 tu = container_of(tp, struct trace_uprobe, tp);
1285 if (trace_uprobe_filter_remove(tu->tp.event->filter, event))
1286 return 0;
1287
1288 list_for_each_entry(tu, trace_probe_probe_list(tp), tp.list) {
1289 ret = uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
1290 if (ret)
1291 break;
1292 }
1293
1294 return ret;
1295 }
1296
static int uprobe_perf_open(struct trace_event_call *call,
1298 struct perf_event *event)
1299 {
1300 struct trace_probe *tp;
1301 struct trace_uprobe *tu;
1302 int err = 0;
1303
1304 tp = trace_probe_primary_from_call(call);
1305 if (WARN_ON_ONCE(!tp))
1306 return -ENODEV;
1307
1308 tu = container_of(tp, struct trace_uprobe, tp);
1309 if (trace_uprobe_filter_add(tu->tp.event->filter, event))
1310 return 0;
1311
1312 list_for_each_entry(tu, trace_probe_probe_list(tp), tp.list) {
1313 err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
1314 if (err) {
1315 uprobe_perf_close(call, event);
1316 break;
1317 }
1318 }
1319
1320 return err;
1321 }
1322
static bool uprobe_perf_filter(struct uprobe_consumer *uc,
1324 enum uprobe_filter_ctx ctx, struct mm_struct *mm)
1325 {
1326 struct trace_uprobe_filter *filter;
1327 struct trace_uprobe *tu;
1328 int ret;
1329
1330 tu = container_of(uc, struct trace_uprobe, consumer);
1331 filter = tu->tp.event->filter;
1332
1333 read_lock(&filter->rwlock);
1334 ret = __uprobe_perf_filter(filter, mm);
1335 read_unlock(&filter->rwlock);
1336
1337 return ret;
1338 }
1339
static void __uprobe_perf_func(struct trace_uprobe *tu,
1341 unsigned long func, struct pt_regs *regs,
1342 struct uprobe_cpu_buffer *ucb, int dsize)
1343 {
1344 struct trace_event_call *call = trace_probe_event_call(&tu->tp);
1345 struct uprobe_trace_entry_head *entry;
1346 struct hlist_head *head;
1347 void *data;
1348 int size, esize;
1349 int rctx;
1350
1351 #ifdef CONFIG_BPF_EVENTS
1352 if (bpf_prog_array_valid(call)) {
1353 u32 ret;
1354
1355 ret = bpf_prog_run_array_sleepable(call->prog_array, regs, bpf_prog_run);
1356 if (!ret)
1357 return;
1358 }
1359 #endif /* CONFIG_BPF_EVENTS */
1360
1361 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1362
1363 size = esize + tu->tp.size + dsize;
1364 size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
1365 if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
1366 return;
1367
1368 preempt_disable();
1369 head = this_cpu_ptr(call->perf_events);
1370 if (hlist_empty(head))
1371 goto out;
1372
1373 entry = perf_trace_buf_alloc(size, NULL, &rctx);
1374 if (!entry)
1375 goto out;
1376
1377 if (is_ret_probe(tu)) {
1378 entry->vaddr[0] = func;
1379 entry->vaddr[1] = instruction_pointer(regs);
1380 data = DATAOF_TRACE_ENTRY(entry, true);
1381 } else {
1382 entry->vaddr[0] = instruction_pointer(regs);
1383 data = DATAOF_TRACE_ENTRY(entry, false);
1384 }
1385
1386 memcpy(data, ucb->buf, tu->tp.size + dsize);
1387
1388 if (size - esize > tu->tp.size + dsize) {
1389 int len = tu->tp.size + dsize;
1390
1391 memset(data + len, 0, size - esize - len);
1392 }
1393
1394 perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
1395 head, NULL);
1396 out:
1397 preempt_enable();
1398 }
1399
1400 /* uprobe profile handler */
static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs,
1402 struct uprobe_cpu_buffer *ucb, int dsize)
1403 {
1404 if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
1405 return UPROBE_HANDLER_REMOVE;
1406
1407 if (!is_ret_probe(tu))
1408 __uprobe_perf_func(tu, 0, regs, ucb, dsize);
1409 return 0;
1410 }
1411
static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
1413 struct pt_regs *regs,
1414 struct uprobe_cpu_buffer *ucb, int dsize)
1415 {
1416 __uprobe_perf_func(tu, func, regs, ucb, dsize);
1417 }
1418
int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type,
1420 const char **filename, u64 *probe_offset,
1421 u64 *probe_addr, bool perf_type_tracepoint)
1422 {
1423 const char *pevent = trace_event_name(event->tp_event);
1424 const char *group = event->tp_event->class->system;
1425 struct trace_uprobe *tu;
1426
1427 if (perf_type_tracepoint)
1428 tu = find_probe_event(pevent, group);
1429 else
1430 tu = trace_uprobe_primary_from_call(event->tp_event);
1431 if (!tu)
1432 return -EINVAL;
1433
1434 *fd_type = is_ret_probe(tu) ? BPF_FD_TYPE_URETPROBE
1435 : BPF_FD_TYPE_UPROBE;
1436 *filename = tu->filename;
1437 *probe_offset = tu->offset;
1438 *probe_addr = 0;
1439 return 0;
1440 }
1441 #endif /* CONFIG_PERF_EVENTS */
1442
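/*
 * ->reg callback for uprobe event calls: multiplexes ftrace and perf
 * register/unregister and perf open/close requests.
 */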
1443 static int
trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
1445 void *data)
1446 {
1447 struct trace_event_file *file = data;
1448
1449 switch (type) {
1450 case TRACE_REG_REGISTER:
1451 return probe_event_enable(event, file, NULL);
1452
1453 case TRACE_REG_UNREGISTER:
1454 probe_event_disable(event, file);
1455 return 0;
1456
1457 #ifdef CONFIG_PERF_EVENTS
1458 case TRACE_REG_PERF_REGISTER:
1459 return probe_event_enable(event, NULL, uprobe_perf_filter);
1460
1461 case TRACE_REG_PERF_UNREGISTER:
1462 probe_event_disable(event, NULL);
1463 return 0;
1464
1465 case TRACE_REG_PERF_OPEN:
1466 return uprobe_perf_open(event, data);
1467
1468 case TRACE_REG_PERF_CLOSE:
1469 return uprobe_perf_close(event, data);
1470
1471 #endif
1472 default:
1473 return 0;
1474 }
1475 }
1476
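/*
 * Breakpoint handler: stash dispatch data for the fetch code, snapshot the
 * probe arguments into a per-cpu buffer, then hand the hit to the ftrace
 * and/or perf paths according to the flags that are set.
 */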
static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
1478 {
1479 struct trace_uprobe *tu;
1480 struct uprobe_dispatch_data udd;
1481 struct uprobe_cpu_buffer *ucb;
1482 int dsize, esize;
1483 int ret = 0;
1484
1485
1486 tu = container_of(con, struct trace_uprobe, consumer);
1487 tu->nhit++;
1488
1489 udd.tu = tu;
1490 udd.bp_addr = instruction_pointer(regs);
1491
1492 current->utask->vaddr = (unsigned long) &udd;
1493
1494 if (WARN_ON_ONCE(!uprobe_cpu_buffer))
1495 return 0;
1496
1497 dsize = __get_data_size(&tu->tp, regs);
1498 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1499
1500 ucb = uprobe_buffer_get();
1501 store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);
1502
1503 if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
1504 ret |= uprobe_trace_func(tu, regs, ucb, dsize);
1505
1506 #ifdef CONFIG_PERF_EVENTS
1507 if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
1508 ret |= uprobe_perf_func(tu, regs, ucb, dsize);
1509 #endif
1510 uprobe_buffer_put(ucb);
1511 return ret;
1512 }
1513
static int uretprobe_dispatcher(struct uprobe_consumer *con,
1515 unsigned long func, struct pt_regs *regs)
1516 {
1517 struct trace_uprobe *tu;
1518 struct uprobe_dispatch_data udd;
1519 struct uprobe_cpu_buffer *ucb;
1520 int dsize, esize;
1521
1522 tu = container_of(con, struct trace_uprobe, consumer);
1523
1524 udd.tu = tu;
1525 udd.bp_addr = func;
1526
1527 current->utask->vaddr = (unsigned long) &udd;
1528
1529 if (WARN_ON_ONCE(!uprobe_cpu_buffer))
1530 return 0;
1531
1532 dsize = __get_data_size(&tu->tp, regs);
1533 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1534
1535 ucb = uprobe_buffer_get();
1536 store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);
1537
1538 if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
1539 uretprobe_trace_func(tu, func, regs, ucb, dsize);
1540
1541 #ifdef CONFIG_PERF_EVENTS
1542 if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
1543 uretprobe_perf_func(tu, func, regs, ucb, dsize);
1544 #endif
1545 uprobe_buffer_put(ucb);
1546 return 0;
1547 }
1548
1549 static struct trace_event_functions uprobe_funcs = {
1550 .trace = print_uprobe_event
1551 };
1552
1553 static struct trace_event_fields uprobe_fields_array[] = {
1554 { .type = TRACE_FUNCTION_TYPE,
1555 .define_fields = uprobe_event_define_fields },
1556 {}
1557 };
1558
static inline void init_trace_event_call(struct trace_uprobe *tu)
1560 {
1561 struct trace_event_call *call = trace_probe_event_call(&tu->tp);
1562 call->event.funcs = &uprobe_funcs;
1563 call->class->fields_array = uprobe_fields_array;
1564
1565 call->flags = TRACE_EVENT_FL_UPROBE | TRACE_EVENT_FL_CAP_ANY;
1566 call->class->reg = trace_uprobe_register;
1567 }
1568
static int register_uprobe_event(struct trace_uprobe *tu)
1570 {
1571 init_trace_event_call(tu);
1572
1573 return trace_probe_register_event_call(&tu->tp);
1574 }
1575
static int unregister_uprobe_event(struct trace_uprobe *tu)
1577 {
1578 return trace_probe_unregister_event_call(&tu->tp);
1579 }
1580
1581 #ifdef CONFIG_PERF_EVENTS
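/*
 * Create an event call for a local (perf_event_open() based) uprobe; the
 * event is not added to dyn_event and never shows up in uprobe_events.
 */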
1582 struct trace_event_call *
create_local_trace_uprobe(char *name, unsigned long offs,
1584 unsigned long ref_ctr_offset, bool is_return)
1585 {
1586 enum probe_print_type ptype;
1587 struct trace_uprobe *tu;
1588 struct path path;
1589 int ret;
1590
1591 ret = kern_path(name, LOOKUP_FOLLOW, &path);
1592 if (ret)
1593 return ERR_PTR(ret);
1594
1595 if (!d_is_reg(path.dentry)) {
1596 path_put(&path);
1597 return ERR_PTR(-EINVAL);
1598 }
1599
1600 /*
* local trace_uprobes are not added to dyn_event, so they are never
* searched in find_probe_event(). Therefore, there is no concern of
1603 * duplicated name "DUMMY_EVENT" here.
1604 */
1605 tu = alloc_trace_uprobe(UPROBE_EVENT_SYSTEM, "DUMMY_EVENT", 0,
1606 is_return);
1607
1608 if (IS_ERR(tu)) {
1609 pr_info("Failed to allocate trace_uprobe.(%d)\n",
1610 (int)PTR_ERR(tu));
1611 path_put(&path);
1612 return ERR_CAST(tu);
1613 }
1614
1615 tu->offset = offs;
1616 tu->path = path;
1617 tu->ref_ctr_offset = ref_ctr_offset;
1618 tu->filename = kstrdup(name, GFP_KERNEL);
1619 if (!tu->filename) {
1620 ret = -ENOMEM;
1621 goto error;
1622 }
1623
1624 init_trace_event_call(tu);
1625
1626 ptype = is_ret_probe(tu) ? PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL;
1627 if (traceprobe_set_print_fmt(&tu->tp, ptype) < 0) {
1628 ret = -ENOMEM;
1629 goto error;
1630 }
1631
1632 return trace_probe_event_call(&tu->tp);
1633 error:
1634 free_trace_uprobe(tu);
1635 return ERR_PTR(ret);
1636 }
1637
void destroy_local_trace_uprobe(struct trace_event_call *event_call)
1639 {
1640 struct trace_uprobe *tu;
1641
1642 tu = trace_uprobe_primary_from_call(event_call);
1643
1644 free_trace_uprobe(tu);
1645 }
1646 #endif /* CONFIG_PERF_EVENTS */
1647
1648 /* Make a trace interface for controlling probe points */
static __init int init_uprobe_trace(void)
1650 {
1651 int ret;
1652
1653 ret = dyn_event_register(&trace_uprobe_ops);
1654 if (ret)
1655 return ret;
1656
1657 ret = tracing_init_dentry();
1658 if (ret)
1659 return 0;
1660
1661 trace_create_file("uprobe_events", TRACE_MODE_WRITE, NULL,
1662 NULL, &uprobe_events_ops);
1663 /* Profile interface */
1664 trace_create_file("uprobe_profile", TRACE_MODE_READ, NULL,
1665 NULL, &uprobe_profile_ops);
1666 return 0;
1667 }
1668
1669 fs_initcall(init_uprobe_trace);
1670