// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, Microsoft Corporation.
 *
 * Authors:
 *   Beau Belgrave <beaub@linux.microsoft.com>
 */

#include <linux/bitmap.h>
#include <linux/cdev.h>
#include <linux/hashtable.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/uio.h>
#include <linux/ioctl.h>
#include <linux/jhash.h>
#include <linux/refcount.h>
#include <linux/trace_events.h>
#include <linux/tracefs.h>
#include <linux/types.h>
#include <linux/uaccess.h>
/* Reminder to move to uapi when everything works */
#ifdef CONFIG_COMPILE_TEST
#include <linux/user_events.h>
#else
#include <uapi/linux/user_events.h>
#endif
#include "trace.h"
#include "trace_dynevent.h"

#define USER_EVENTS_PREFIX_LEN (sizeof(USER_EVENTS_PREFIX)-1)

#define FIELD_DEPTH_TYPE 0
#define FIELD_DEPTH_NAME 1
#define FIELD_DEPTH_SIZE 2

/*
 * Limits how many trace_event calls user processes can create:
 * Must be a power-of-two number of pages.
 */
#define MAX_PAGE_ORDER 0
#define MAX_PAGES (1 << MAX_PAGE_ORDER)
#define MAX_BYTES (MAX_PAGES * PAGE_SIZE)
#define MAX_EVENTS (MAX_BYTES * 8)
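
/*
 * For illustration: on a system with PAGE_SIZE == 4096 and the default
 * MAX_PAGE_ORDER of 0, this works out to MAX_PAGES == 1, MAX_BYTES == 4096
 * and MAX_EVENTS == 32768, since each event consumes one status bit in the
 * shared page.
 */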

/* Limit how long an event name plus args can be within the subsystem. */
#define MAX_EVENT_DESC 512
#define EVENT_NAME(user_event) ((user_event)->tracepoint.name)
#define MAX_FIELD_ARRAY_SIZE 1024

/*
 * The MAP_STATUS_* macros are used for taking an index and determining the
 * appropriate byte and the bit in the byte to set/reset for an event.
 *
 * The lower 3 bits of the index decide which bit to set.
 * The remaining upper bits of the index decide which byte to use for the bit.
 *
 * This is used when an event has a probe attached/removed to reflect the
 * live status of the event (tracing wanted or not) to user programs via
 * shared memory maps.
 */
#define MAP_STATUS_BYTE(index) ((index) >> 3)
#define MAP_STATUS_MASK(index) BIT((index) & 7)
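
/*
 * For illustration: an event with index 10 has its status in byte
 * MAP_STATUS_BYTE(10) == 1 of the mapped page, under the mask
 * MAP_STATUS_MASK(10) == BIT(2) == 0x04.
 */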

/*
 * Internal bits (kernel side only) to keep track of connected probes:
 * These are used when status is requested in text form about an event. These
 * bits are compared against an internal byte on the event to determine which
 * probes to print out to the user.
 *
 * These do not reflect the mapped bytes between the user and kernel space.
 */
#define EVENT_STATUS_FTRACE BIT(0)
#define EVENT_STATUS_PERF BIT(1)
#define EVENT_STATUS_OTHER BIT(7)

/*
 * Stores the pages, tables, and locks for a group of events.
 * Each logical grouping of events has its own group, with a
 * matching page for status checks within user programs. This
 * allows for isolation of events to user programs by various
 * means.
 */
struct user_event_group {
        struct page *pages;
        char *register_page_data;
        char *system_name;
        struct hlist_node node;
        struct mutex reg_mutex;
        DECLARE_HASHTABLE(register_table, 8);
        DECLARE_BITMAP(page_bitmap, MAX_EVENTS);
};

/* Group for init_user_ns mapping, top-most group */
static struct user_event_group *init_group;

/*
 * Stores per-event properties. As users register events
 * within a file, a user_event might be created if it does not
 * already exist. These are globally used and their lifetime
 * is tied to the refcnt member. These cannot go away until the
 * refcnt reaches one.
 */
struct user_event {
        struct user_event_group *group;
        struct tracepoint tracepoint;
        struct trace_event_call call;
        struct trace_event_class class;
        struct dyn_event devent;
        struct hlist_node node;
        struct list_head fields;
        struct list_head validators;
        refcount_t refcnt;
        int index;
        int flags;
        int min_size;
        char status;
};

/*
 * Stores per-file event references. As users register events
 * within a file, this structure is modified and freed via RCU.
 * The lifetime of this struct is tied to the lifetime of the file.
 * These are not shared and only accessible by the file that created it.
 */
struct user_event_refs {
        struct rcu_head rcu;
        int count;
        struct user_event *events[];
};

struct user_event_file_info {
        struct user_event_group *group;
        struct user_event_refs *refs;
};

#define VALIDATOR_ENSURE_NULL (1 << 0)
#define VALIDATOR_REL (1 << 1)

struct user_event_validator {
        struct list_head link;
        int offset;
        int flags;
};

typedef void (*user_event_func_t) (struct user_event *user, struct iov_iter *i,
                                   void *tpdata, bool *faulted);

static int user_event_parse(struct user_event_group *group, char *name,
                            char *args, char *flags,
                            struct user_event **newuser);

static u32 user_event_key(char *name)
{
        return jhash(name, strlen(name), 0);
}

static void set_page_reservations(char *pages, bool set)
{
        int page;

        for (page = 0; page < MAX_PAGES; ++page) {
                void *addr = pages + (PAGE_SIZE * page);

                if (set)
                        SetPageReserved(virt_to_page(addr));
                else
                        ClearPageReserved(virt_to_page(addr));
        }
}

static void user_event_group_destroy(struct user_event_group *group)
{
        if (group->register_page_data)
                set_page_reservations(group->register_page_data, false);

        if (group->pages)
                __free_pages(group->pages, MAX_PAGE_ORDER);

        kfree(group->system_name);
        kfree(group);
}

static char *user_event_group_system_name(struct user_namespace *user_ns)
{
        char *system_name;
        int len = sizeof(USER_EVENTS_SYSTEM) + 1;

        if (user_ns != &init_user_ns) {
                /*
                 * Unexpected at this point:
                 * We only currently support init_user_ns.
                 * When we enable more, this will trigger a failure so log.
                 */
                pr_warn("user_events: Namespace other than init_user_ns!\n");
                return NULL;
        }

        system_name = kmalloc(len, GFP_KERNEL);

        if (!system_name)
                return NULL;

        snprintf(system_name, len, "%s", USER_EVENTS_SYSTEM);

        return system_name;
}

static inline struct user_event_group
*user_event_group_from_user_ns(struct user_namespace *user_ns)
{
        if (user_ns == &init_user_ns)
                return init_group;

        return NULL;
}

static struct user_event_group *current_user_event_group(void)
{
        struct user_namespace *user_ns = current_user_ns();
        struct user_event_group *group = NULL;

        while (user_ns) {
                group = user_event_group_from_user_ns(user_ns);

                if (group)
                        break;

                user_ns = user_ns->parent;
        }

        return group;
}

static struct user_event_group
*user_event_group_create(struct user_namespace *user_ns)
{
        struct user_event_group *group;

        group = kzalloc(sizeof(*group), GFP_KERNEL);

        if (!group)
                return NULL;

        group->system_name = user_event_group_system_name(user_ns);

        if (!group->system_name)
                goto error;

        group->pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, MAX_PAGE_ORDER);

        if (!group->pages)
                goto error;

        group->register_page_data = page_address(group->pages);

        set_page_reservations(group->register_page_data, true);

        /* Zero all bits besides bit 0 (which is reserved for failures) */
        bitmap_zero(group->page_bitmap, MAX_EVENTS);
        set_bit(0, group->page_bitmap);

        mutex_init(&group->reg_mutex);
        hash_init(group->register_table);

        return group;
error:
        if (group)
                user_event_group_destroy(group);

        return NULL;
}

static __always_inline
void user_event_register_set(struct user_event *user)
{
        int i = user->index;

        user->group->register_page_data[MAP_STATUS_BYTE(i)] |= MAP_STATUS_MASK(i);
}

static __always_inline
void user_event_register_clear(struct user_event *user)
{
        int i = user->index;

        user->group->register_page_data[MAP_STATUS_BYTE(i)] &= ~MAP_STATUS_MASK(i);
}

static __always_inline __must_check
bool user_event_last_ref(struct user_event *user)
{
        return refcount_read(&user->refcnt) == 1;
}

static __always_inline __must_check
size_t copy_nofault(void *addr, size_t bytes, struct iov_iter *i)
{
        size_t ret;

        pagefault_disable();

        ret = copy_from_iter_nocache(addr, bytes, i);

        pagefault_enable();

        return ret;
}

static struct list_head *user_event_get_fields(struct trace_event_call *call)
{
        struct user_event *user = (struct user_event *)call->data;

        return &user->fields;
}

/*
 * Parses a register command for user_events
 * Format: event_name[:FLAG1[,FLAG2...]] [field1[;field2...]]
 *
 * Example event named 'test' with a 20 char 'msg' field with an unsigned int
 * 'id' field after:
 * test char[20] msg;unsigned int id
 *
 * NOTE: Offsets are from the user data perspective, they are not from the
 * trace_entry/buffer perspective. We automatically add the common properties
 * sizes to the offset for the user.
 *
 * Upon success user_event has its ref count increased by 1.
 */
static int user_event_parse_cmd(struct user_event_group *group,
                                char *raw_command, struct user_event **newuser)
{
        char *name = raw_command;
        char *args = strpbrk(name, " ");
        char *flags;

        if (args)
                *args++ = '\0';

        flags = strpbrk(name, ":");

        if (flags)
                *flags++ = '\0';

        return user_event_parse(group, name, args, flags, newuser);
}
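
/*
 * For illustration: the raw command "test:FLAG char[20] msg" is split in
 * place into name "test", flags "FLAG" and args "char[20] msg" before being
 * handed to user_event_parse(). ("FLAG" is a placeholder here, not a real
 * flag name.)
 */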

static int user_field_array_size(const char *type)
{
        const char *start = strchr(type, '[');
        char val[8];
        char *bracket;
        int size = 0;

        if (start == NULL)
                return -EINVAL;

        if (strscpy(val, start + 1, sizeof(val)) <= 0)
                return -EINVAL;

        bracket = strchr(val, ']');

        if (!bracket)
                return -EINVAL;

        *bracket = '\0';

        if (kstrtouint(val, 0, &size))
                return -EINVAL;

        if (size > MAX_FIELD_ARRAY_SIZE)
                return -EINVAL;

        return size;
}
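
/*
 * For illustration: user_field_array_size("char[20]") copies the text after
 * '[', trims at ']' and returns 20. Anything non-numeric, missing a bracket,
 * or above MAX_FIELD_ARRAY_SIZE (1024) yields -EINVAL.
 */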

static int user_field_size(const char *type)
{
        /* long is not allowed from a user, since it's ambiguous in size */
        if (strcmp(type, "s64") == 0)
                return sizeof(s64);
        if (strcmp(type, "u64") == 0)
                return sizeof(u64);
        if (strcmp(type, "s32") == 0)
                return sizeof(s32);
        if (strcmp(type, "u32") == 0)
                return sizeof(u32);
        if (strcmp(type, "int") == 0)
                return sizeof(int);
        if (strcmp(type, "unsigned int") == 0)
                return sizeof(unsigned int);
        if (strcmp(type, "s16") == 0)
                return sizeof(s16);
        if (strcmp(type, "u16") == 0)
                return sizeof(u16);
        if (strcmp(type, "short") == 0)
                return sizeof(short);
        if (strcmp(type, "unsigned short") == 0)
                return sizeof(unsigned short);
        if (strcmp(type, "s8") == 0)
                return sizeof(s8);
        if (strcmp(type, "u8") == 0)
                return sizeof(u8);
        if (strcmp(type, "char") == 0)
                return sizeof(char);
        if (strcmp(type, "unsigned char") == 0)
                return sizeof(unsigned char);
        if (str_has_prefix(type, "char["))
                return user_field_array_size(type);
        if (str_has_prefix(type, "unsigned char["))
                return user_field_array_size(type);
        if (str_has_prefix(type, "__data_loc "))
                return sizeof(u32);
        if (str_has_prefix(type, "__rel_loc "))
                return sizeof(u32);

        /* Unknown basic type, error */
        return -EINVAL;
}

static void user_event_destroy_validators(struct user_event *user)
{
        struct user_event_validator *validator, *next;
        struct list_head *head = &user->validators;

        list_for_each_entry_safe(validator, next, head, link) {
                list_del(&validator->link);
                kfree(validator);
        }
}

static void user_event_destroy_fields(struct user_event *user)
{
        struct ftrace_event_field *field, *next;
        struct list_head *head = &user->fields;

        list_for_each_entry_safe(field, next, head, link) {
                list_del(&field->link);
                kfree(field);
        }
}

static int user_event_add_field(struct user_event *user, const char *type,
                                const char *name, int offset, int size,
                                int is_signed, int filter_type)
{
        struct user_event_validator *validator;
        struct ftrace_event_field *field;
        int validator_flags = 0;

        field = kmalloc(sizeof(*field), GFP_KERNEL);

        if (!field)
                return -ENOMEM;

        if (str_has_prefix(type, "__data_loc "))
                goto add_validator;

        if (str_has_prefix(type, "__rel_loc ")) {
                validator_flags |= VALIDATOR_REL;
                goto add_validator;
        }

        goto add_field;

add_validator:
        if (strstr(type, "char") != NULL)
                validator_flags |= VALIDATOR_ENSURE_NULL;

        validator = kmalloc(sizeof(*validator), GFP_KERNEL);

        if (!validator) {
                kfree(field);
                return -ENOMEM;
        }

        validator->flags = validator_flags;
        validator->offset = offset;

        /* Want sequential access when validating */
        list_add_tail(&validator->link, &user->validators);

add_field:
        field->type = type;
        field->name = name;
        field->offset = offset;
        field->size = size;
        field->is_signed = is_signed;
        field->filter_type = filter_type;

        list_add(&field->link, &user->fields);

        /*
         * Min size from user writes that are required, this does not include
         * the size of trace_entry (common fields).
         */
        user->min_size = (offset + size) - sizeof(struct trace_entry);

        return 0;
}

/*
 * Parses the values of a field within the description
 * Format: type name [size]
 */
static int user_event_parse_field(char *field, struct user_event *user,
                                  u32 *offset)
{
        char *part, *type, *name;
        u32 depth = 0, saved_offset = *offset;
        int len, size = -EINVAL;
        bool is_struct = false;

        field = skip_spaces(field);

        if (*field == '\0')
                return 0;

        /* Handle types that have a space within */
        len = str_has_prefix(field, "unsigned ");
        if (len)
                goto skip_next;

        len = str_has_prefix(field, "struct ");
        if (len) {
                is_struct = true;
                goto skip_next;
        }

        len = str_has_prefix(field, "__data_loc unsigned ");
        if (len)
                goto skip_next;

        len = str_has_prefix(field, "__data_loc ");
        if (len)
                goto skip_next;

        len = str_has_prefix(field, "__rel_loc unsigned ");
        if (len)
                goto skip_next;

        len = str_has_prefix(field, "__rel_loc ");
        if (len)
                goto skip_next;

        goto parse;
skip_next:
        type = field;
        field = strpbrk(field + len, " ");

        if (field == NULL)
                return -EINVAL;

        *field++ = '\0';
        depth++;
parse:
        name = NULL;

        while ((part = strsep(&field, " ")) != NULL) {
                switch (depth++) {
                case FIELD_DEPTH_TYPE:
                        type = part;
                        break;
                case FIELD_DEPTH_NAME:
                        name = part;
                        break;
                case FIELD_DEPTH_SIZE:
                        if (!is_struct)
                                return -EINVAL;

                        if (kstrtou32(part, 10, &size))
                                return -EINVAL;
                        break;
                default:
                        return -EINVAL;
                }
        }

        if (depth < FIELD_DEPTH_SIZE || !name)
                return -EINVAL;

        if (depth == FIELD_DEPTH_SIZE)
                size = user_field_size(type);

        if (size == 0)
                return -EINVAL;

        if (size < 0)
                return size;

        *offset = saved_offset + size;

        return user_event_add_field(user, type, name, saved_offset, size,
                                    type[0] != 'u', FILTER_OTHER);
}
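
/*
 * For illustration: parsing "struct mystruct var 8" walks depth
 * TYPE -> NAME -> SIZE, yielding type "struct mystruct", name "var" and an
 * explicit size of 8 bytes, while "u32 id" stops at NAME and derives the
 * size from user_field_size(). (The identifiers here are made up for the
 * example.)
 */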

static int user_event_parse_fields(struct user_event *user, char *args)
{
        char *field;
        u32 offset = sizeof(struct trace_entry);
        int ret = -EINVAL;

        if (args == NULL)
                return 0;

        while ((field = strsep(&args, ";")) != NULL) {
                ret = user_event_parse_field(field, user, &offset);

                if (ret)
                        break;
        }

        return ret;
}

static struct trace_event_fields user_event_fields_array[1];

static const char *user_field_format(const char *type)
{
        if (strcmp(type, "s64") == 0)
                return "%lld";
        if (strcmp(type, "u64") == 0)
                return "%llu";
        if (strcmp(type, "s32") == 0)
                return "%d";
        if (strcmp(type, "u32") == 0)
                return "%u";
        if (strcmp(type, "int") == 0)
                return "%d";
        if (strcmp(type, "unsigned int") == 0)
                return "%u";
        if (strcmp(type, "s16") == 0)
                return "%d";
        if (strcmp(type, "u16") == 0)
                return "%u";
        if (strcmp(type, "short") == 0)
                return "%d";
        if (strcmp(type, "unsigned short") == 0)
                return "%u";
        if (strcmp(type, "s8") == 0)
                return "%d";
        if (strcmp(type, "u8") == 0)
                return "%u";
        if (strcmp(type, "char") == 0)
                return "%d";
        if (strcmp(type, "unsigned char") == 0)
                return "%u";
        if (strstr(type, "char[") != NULL)
                return "%s";

        /* Unknown, likely struct; allowed, treat as 64-bit */
        return "%llu";
}

static bool user_field_is_dyn_string(const char *type, const char **str_func)
{
        if (str_has_prefix(type, "__data_loc ")) {
                *str_func = "__get_str";
                goto check;
        }

        if (str_has_prefix(type, "__rel_loc ")) {
                *str_func = "__get_rel_str";
                goto check;
        }

        return false;
check:
        return strstr(type, "char") != NULL;
}

#define LEN_OR_ZERO (len ? len - pos : 0)
static int user_dyn_field_set_string(int argc, const char **argv, int *iout,
                                     char *buf, int len, bool *colon)
{
        int pos = 0, i = *iout;

        *colon = false;

        for (; i < argc; ++i) {
                if (i != *iout)
                        pos += snprintf(buf + pos, LEN_OR_ZERO, " ");

                pos += snprintf(buf + pos, LEN_OR_ZERO, "%s", argv[i]);

                if (strchr(argv[i], ';')) {
                        ++i;
                        *colon = true;
                        break;
                }
        }

        /* Actual set, advance i */
        if (len != 0)
                *iout = i;

        return pos + 1;
}

static int user_field_set_string(struct ftrace_event_field *field,
                                 char *buf, int len, bool colon)
{
        int pos = 0;

        pos += snprintf(buf + pos, LEN_OR_ZERO, "%s", field->type);
        pos += snprintf(buf + pos, LEN_OR_ZERO, " ");
        pos += snprintf(buf + pos, LEN_OR_ZERO, "%s", field->name);

        if (str_has_prefix(field->type, "struct "))
                pos += snprintf(buf + pos, LEN_OR_ZERO, " %d", field->size);

        if (colon)
                pos += snprintf(buf + pos, LEN_OR_ZERO, ";");

        return pos + 1;
}

static int user_event_set_print_fmt(struct user_event *user, char *buf, int len)
{
        struct ftrace_event_field *field, *next;
        struct list_head *head = &user->fields;
        int pos = 0, depth = 0;
        const char *str_func;

        pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");

        list_for_each_entry_safe_reverse(field, next, head, link) {
                if (depth != 0)
                        pos += snprintf(buf + pos, LEN_OR_ZERO, " ");

                pos += snprintf(buf + pos, LEN_OR_ZERO, "%s=%s",
                                field->name, user_field_format(field->type));

                depth++;
        }

        pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");

        list_for_each_entry_safe_reverse(field, next, head, link) {
                if (user_field_is_dyn_string(field->type, &str_func))
                        pos += snprintf(buf + pos, LEN_OR_ZERO,
                                        ", %s(%s)", str_func, field->name);
                else
                        pos += snprintf(buf + pos, LEN_OR_ZERO,
                                        ", REC->%s", field->name);
        }

        return pos + 1;
}
#undef LEN_OR_ZERO

static int user_event_create_print_fmt(struct user_event *user)
{
        char *print_fmt;
        int len;

        len = user_event_set_print_fmt(user, NULL, 0);

        print_fmt = kmalloc(len, GFP_KERNEL);

        if (!print_fmt)
                return -ENOMEM;

        user_event_set_print_fmt(user, print_fmt, len);

        user->call.print_fmt = print_fmt;

        return 0;
}

static enum print_line_t user_event_print_trace(struct trace_iterator *iter,
                                                int flags,
                                                struct trace_event *event)
{
        /* Unsafe to try to decode user provided print_fmt, use hex */
        trace_print_hex_dump_seq(&iter->seq, "", DUMP_PREFIX_OFFSET, 16,
                                 1, iter->ent, iter->ent_size, true);

        return trace_handle_return(&iter->seq);
}

static struct trace_event_functions user_event_funcs = {
        .trace = user_event_print_trace,
};

static int user_event_set_call_visible(struct user_event *user, bool visible)
{
        int ret;
        const struct cred *old_cred;
        struct cred *cred;

        cred = prepare_creds();

        if (!cred)
                return -ENOMEM;

        /*
         * While by default tracefs is locked down, systems can be configured
         * to allow user_event files to be less locked down. The extreme case
         * being "other" has read/write access to user_events_data/status.
         *
         * When not locked down, processes may not have permissions to
         * add/remove calls themselves to tracefs. We need to temporarily
         * switch to root file permission to allow for this scenario.
         */
        cred->fsuid = GLOBAL_ROOT_UID;

        old_cred = override_creds(cred);

        if (visible)
                ret = trace_add_event_call(&user->call);
        else
                ret = trace_remove_event_call(&user->call);

        revert_creds(old_cred);
        put_cred(cred);

        return ret;
}

static int destroy_user_event(struct user_event *user)
{
        int ret = 0;

        /* Must destroy fields before call removal */
        user_event_destroy_fields(user);

        ret = user_event_set_call_visible(user, false);

        if (ret)
                return ret;

        dyn_event_remove(&user->devent);

        user_event_register_clear(user);
        clear_bit(user->index, user->group->page_bitmap);
        hash_del(&user->node);

        user_event_destroy_validators(user);
        kfree(user->call.print_fmt);
        kfree(EVENT_NAME(user));
        kfree(user);

        return ret;
}

static struct user_event *find_user_event(struct user_event_group *group,
                                          char *name, u32 *outkey)
{
        struct user_event *user;
        u32 key = user_event_key(name);

        *outkey = key;

        hash_for_each_possible(group->register_table, user, node, key)
                if (!strcmp(EVENT_NAME(user), name)) {
                        refcount_inc(&user->refcnt);
                        return user;
                }

        return NULL;
}

static int user_event_validate(struct user_event *user, void *data, int len)
{
        struct list_head *head = &user->validators;
        struct user_event_validator *validator;
        void *pos, *end = data + len;
        u32 loc, offset, size;

        list_for_each_entry(validator, head, link) {
                pos = data + validator->offset;

                /* Already done min_size check, no bounds check here */
                loc = *(u32 *)pos;
                offset = loc & 0xffff;
                size = loc >> 16;

                if (likely(validator->flags & VALIDATOR_REL))
                        pos += offset + sizeof(loc);
                else
                        pos = data + offset;

                pos += size;

                if (unlikely(pos > end))
                        return -EFAULT;

                if (likely(validator->flags & VALIDATOR_ENSURE_NULL))
                        if (unlikely(*(char *)(pos - 1) != '\0'))
                                return -EFAULT;
        }

        return 0;
}
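
/*
 * For illustration: a dynamic-string field encodes its location as a u32
 * "loc" with the size in the upper 16 bits and the offset in the lower 16,
 * i.e. loc = (size << 16) | offset. A 5 byte string at offset 24 is thus
 * 0x00050018; the validator checks that offset + size stays within the
 * payload and, for char data, that the final byte is NUL.
 */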

/*
 * Writes the user supplied payload out to a trace file.
 */
static void user_event_ftrace(struct user_event *user, struct iov_iter *i,
                              void *tpdata, bool *faulted)
{
        struct trace_event_file *file;
        struct trace_entry *entry;
        struct trace_event_buffer event_buffer;
        size_t size = sizeof(*entry) + i->count;

        file = (struct trace_event_file *)tpdata;

        if (!file ||
            !(file->flags & EVENT_FILE_FL_ENABLED) ||
            trace_trigger_soft_disabled(file))
                return;

        /* Allocates and fills trace_entry, the data payload starts at entry + 1 */
        entry = trace_event_buffer_reserve(&event_buffer, file, size);

        if (unlikely(!entry))
                return;

        if (unlikely(!copy_nofault(entry + 1, i->count, i)))
                goto discard;

        if (!list_empty(&user->validators) &&
            unlikely(user_event_validate(user, entry, size)))
                goto discard;

        trace_event_buffer_commit(&event_buffer);

        return;
discard:
        *faulted = true;
        __trace_event_discard_commit(event_buffer.buffer,
                                     event_buffer.event);
}

#ifdef CONFIG_PERF_EVENTS
/*
 * Writes the user supplied payload out to perf ring buffer.
 */
static void user_event_perf(struct user_event *user, struct iov_iter *i,
                            void *tpdata, bool *faulted)
{
        struct hlist_head *perf_head;

        perf_head = this_cpu_ptr(user->call.perf_events);

        if (perf_head && !hlist_empty(perf_head)) {
                struct trace_entry *perf_entry;
                struct pt_regs *regs;
                size_t size = sizeof(*perf_entry) + i->count;
                int context;

                perf_entry = perf_trace_buf_alloc(ALIGN(size, 8),
                                                  &regs, &context);

                if (unlikely(!perf_entry))
                        return;

                perf_fetch_caller_regs(regs);

                if (unlikely(!copy_nofault(perf_entry + 1, i->count, i)))
                        goto discard;

                if (!list_empty(&user->validators) &&
                    unlikely(user_event_validate(user, perf_entry, size)))
                        goto discard;

                perf_trace_buf_submit(perf_entry, size, context,
                                      user->call.event.type, 1, regs,
                                      perf_head, NULL);

                return;
discard:
                *faulted = true;
                perf_swevent_put_recursion_context(context);
        }
}
#endif

/*
 * Update the register page that is shared between user processes.
 */
static void update_reg_page_for(struct user_event *user)
{
        struct tracepoint *tp = &user->tracepoint;
        char status = 0;

        if (atomic_read(&tp->key.enabled) > 0) {
                struct tracepoint_func *probe_func_ptr;
                user_event_func_t probe_func;

                rcu_read_lock_sched();

                probe_func_ptr = rcu_dereference_sched(tp->funcs);

                if (probe_func_ptr) {
                        do {
                                probe_func = probe_func_ptr->func;

                                if (probe_func == user_event_ftrace)
                                        status |= EVENT_STATUS_FTRACE;
#ifdef CONFIG_PERF_EVENTS
                                else if (probe_func == user_event_perf)
                                        status |= EVENT_STATUS_PERF;
#endif
                                else
                                        status |= EVENT_STATUS_OTHER;
                        } while ((++probe_func_ptr)->func);
                }

                rcu_read_unlock_sched();
        }

        if (status)
                user_event_register_set(user);
        else
                user_event_register_clear(user);

        user->status = status;
}

/*
 * Register callback for our events from tracing sub-systems.
 */
static int user_event_reg(struct trace_event_call *call,
                          enum trace_reg type,
                          void *data)
{
        struct user_event *user = (struct user_event *)call->data;
        int ret = 0;

        if (!user)
                return -ENOENT;

        switch (type) {
        case TRACE_REG_REGISTER:
                ret = tracepoint_probe_register(call->tp,
                                                call->class->probe,
                                                data);
                if (!ret)
                        goto inc;
                break;

        case TRACE_REG_UNREGISTER:
                tracepoint_probe_unregister(call->tp,
                                            call->class->probe,
                                            data);
                goto dec;

#ifdef CONFIG_PERF_EVENTS
        case TRACE_REG_PERF_REGISTER:
                ret = tracepoint_probe_register(call->tp,
                                                call->class->perf_probe,
                                                data);
                if (!ret)
                        goto inc;
                break;

        case TRACE_REG_PERF_UNREGISTER:
                tracepoint_probe_unregister(call->tp,
                                            call->class->perf_probe,
                                            data);
                goto dec;

        case TRACE_REG_PERF_OPEN:
        case TRACE_REG_PERF_CLOSE:
        case TRACE_REG_PERF_ADD:
        case TRACE_REG_PERF_DEL:
                break;
#endif
        }

        return ret;
inc:
        refcount_inc(&user->refcnt);
        update_reg_page_for(user);
        return 0;
dec:
        update_reg_page_for(user);
        refcount_dec(&user->refcnt);
        return 0;
}

static int user_event_create(const char *raw_command)
{
        struct user_event_group *group;
        struct user_event *user;
        char *name;
        int ret;

        if (!str_has_prefix(raw_command, USER_EVENTS_PREFIX))
                return -ECANCELED;

        raw_command += USER_EVENTS_PREFIX_LEN;
        raw_command = skip_spaces(raw_command);

        name = kstrdup(raw_command, GFP_KERNEL);

        if (!name)
                return -ENOMEM;

        group = current_user_event_group();

        if (!group) {
                kfree(name);
                return -ENOENT;
        }

        mutex_lock(&group->reg_mutex);

        ret = user_event_parse_cmd(group, name, &user);

        if (!ret)
                refcount_dec(&user->refcnt);

        mutex_unlock(&group->reg_mutex);

        if (ret)
                kfree(name);

        return ret;
}

static int user_event_show(struct seq_file *m, struct dyn_event *ev)
{
        struct user_event *user = container_of(ev, struct user_event, devent);
        struct ftrace_event_field *field, *next;
        struct list_head *head;
        int depth = 0;

        seq_printf(m, "%s%s", USER_EVENTS_PREFIX, EVENT_NAME(user));

        head = trace_get_fields(&user->call);

        list_for_each_entry_safe_reverse(field, next, head, link) {
                if (depth == 0)
                        seq_puts(m, " ");
                else
                        seq_puts(m, "; ");

                seq_printf(m, "%s %s", field->type, field->name);

                if (str_has_prefix(field->type, "struct "))
                        seq_printf(m, " %d", field->size);

                depth++;
        }

        seq_puts(m, "\n");

        return 0;
}

static bool user_event_is_busy(struct dyn_event *ev)
{
        struct user_event *user = container_of(ev, struct user_event, devent);

        return !user_event_last_ref(user);
}

static int user_event_free(struct dyn_event *ev)
{
        struct user_event *user = container_of(ev, struct user_event, devent);

        if (!user_event_last_ref(user))
                return -EBUSY;

        return destroy_user_event(user);
}

static bool user_field_match(struct ftrace_event_field *field, int argc,
                             const char **argv, int *iout)
{
        char *field_name = NULL, *dyn_field_name = NULL;
        bool colon = false, match = false;
        int dyn_len, len;

        if (*iout >= argc)
                return false;

        dyn_len = user_dyn_field_set_string(argc, argv, iout, dyn_field_name,
                                            0, &colon);

        len = user_field_set_string(field, field_name, 0, colon);

        if (dyn_len != len)
                return false;

        dyn_field_name = kmalloc(dyn_len, GFP_KERNEL);
        field_name = kmalloc(len, GFP_KERNEL);

        if (!dyn_field_name || !field_name)
                goto out;

        user_dyn_field_set_string(argc, argv, iout, dyn_field_name,
                                  dyn_len, &colon);

        user_field_set_string(field, field_name, len, colon);

        match = strcmp(dyn_field_name, field_name) == 0;
out:
        kfree(dyn_field_name);
        kfree(field_name);

        return match;
}

static bool user_fields_match(struct user_event *user, int argc,
                              const char **argv)
{
        struct ftrace_event_field *field, *next;
        struct list_head *head = &user->fields;
        int i = 0;

        list_for_each_entry_safe_reverse(field, next, head, link)
                if (!user_field_match(field, argc, argv, &i))
                        return false;

        if (i != argc)
                return false;

        return true;
}

static bool user_event_match(const char *system, const char *event,
                             int argc, const char **argv, struct dyn_event *ev)
{
        struct user_event *user = container_of(ev, struct user_event, devent);
        bool match;

        match = strcmp(EVENT_NAME(user), event) == 0 &&
                (!system || strcmp(system, USER_EVENTS_SYSTEM) == 0);

        if (match && argc > 0)
                match = user_fields_match(user, argc, argv);

        return match;
}

static struct dyn_event_operations user_event_dops = {
        .create = user_event_create,
        .show = user_event_show,
        .is_busy = user_event_is_busy,
        .free = user_event_free,
        .match = user_event_match,
};

static int user_event_trace_register(struct user_event *user)
{
        int ret;

        ret = register_trace_event(&user->call.event);

        if (!ret)
                return -ENODEV;

        ret = user_event_set_call_visible(user, true);

        if (ret)
                unregister_trace_event(&user->call.event);

        return ret;
}

/*
 * Parses the event name, arguments and flags then registers if successful.
 * The name buffer lifetime is owned by this method for success cases only.
 * Upon success the returned user_event has its ref count increased by 1.
 */
static int user_event_parse(struct user_event_group *group, char *name,
                            char *args, char *flags,
                            struct user_event **newuser)
{
        int ret;
        int index;
        u32 key;
        struct user_event *user;

        /* Prevent dyn_event from racing */
        mutex_lock(&event_mutex);
        user = find_user_event(group, name, &key);
        mutex_unlock(&event_mutex);

        if (user) {
                *newuser = user;
                /*
                 * Name is allocated by caller, free it since it already exists.
                 * Caller only worries about failure cases for freeing.
                 */
                kfree(name);
                return 0;
        }

        index = find_first_zero_bit(group->page_bitmap, MAX_EVENTS);

        if (index == MAX_EVENTS)
                return -EMFILE;

        user = kzalloc(sizeof(*user), GFP_KERNEL);

        if (!user)
                return -ENOMEM;

        INIT_LIST_HEAD(&user->class.fields);
        INIT_LIST_HEAD(&user->fields);
        INIT_LIST_HEAD(&user->validators);

        user->group = group;
        user->tracepoint.name = name;

        ret = user_event_parse_fields(user, args);

        if (ret)
                goto put_user;

        ret = user_event_create_print_fmt(user);

        if (ret)
                goto put_user;

        user->call.data = user;
        user->call.class = &user->class;
        user->call.name = name;
        user->call.flags = TRACE_EVENT_FL_TRACEPOINT;
        user->call.tp = &user->tracepoint;
        user->call.event.funcs = &user_event_funcs;
        user->class.system = group->system_name;

        user->class.fields_array = user_event_fields_array;
        user->class.get_fields = user_event_get_fields;
        user->class.reg = user_event_reg;
        user->class.probe = user_event_ftrace;
#ifdef CONFIG_PERF_EVENTS
        user->class.perf_probe = user_event_perf;
#endif

        mutex_lock(&event_mutex);

        ret = user_event_trace_register(user);

        if (ret)
                goto put_user_lock;

        user->index = index;

        /* Ensure we track self ref and caller ref (2) */
        refcount_set(&user->refcnt, 2);

        dyn_event_init(&user->devent, &user_event_dops);
        dyn_event_add(&user->devent, &user->call);
        set_bit(user->index, group->page_bitmap);
        hash_add(group->register_table, &user->node, key);

        mutex_unlock(&event_mutex);

        *newuser = user;
        return 0;
put_user_lock:
        mutex_unlock(&event_mutex);
put_user:
        user_event_destroy_fields(user);
        user_event_destroy_validators(user);
        kfree(user->call.print_fmt);
        kfree(user);
        return ret;
}

/*
 * Deletes a previously created event if it is no longer being used.
 */
static int delete_user_event(struct user_event_group *group, char *name)
{
        u32 key;
        struct user_event *user = find_user_event(group, name, &key);

        if (!user)
                return -ENOENT;

        refcount_dec(&user->refcnt);

        if (!user_event_last_ref(user))
                return -EBUSY;

        return destroy_user_event(user);
}

/*
 * Validates the user payload and writes via iterator.
 */
static ssize_t user_events_write_core(struct file *file, struct iov_iter *i)
{
        struct user_event_file_info *info = file->private_data;
        struct user_event_refs *refs;
        struct user_event *user = NULL;
        struct tracepoint *tp;
        ssize_t ret = i->count;
        int idx;

        if (unlikely(copy_from_iter(&idx, sizeof(idx), i) != sizeof(idx)))
                return -EFAULT;

        if (idx < 0)
                return -EINVAL;

        rcu_read_lock_sched();

        refs = rcu_dereference_sched(info->refs);

        /*
         * The refs->events array is protected by RCU, and new items may be
         * added. But the user retrieved from indexing into the events array
         * shall be immutable while the file is opened.
         */
        if (likely(refs && idx < refs->count))
                user = refs->events[idx];

        rcu_read_unlock_sched();

        if (unlikely(user == NULL))
                return -ENOENT;

        if (unlikely(i->count < user->min_size))
                return -EINVAL;

        tp = &user->tracepoint;

        /*
         * It's possible key.enabled disables after this check, however
         * we don't mind if a few events are included in this condition.
         */
        if (likely(atomic_read(&tp->key.enabled) > 0)) {
                struct tracepoint_func *probe_func_ptr;
                user_event_func_t probe_func;
                struct iov_iter copy;
                void *tpdata;
                bool faulted;

                if (unlikely(fault_in_iov_iter_readable(i, i->count)))
                        return -EFAULT;

                faulted = false;

                rcu_read_lock_sched();

                probe_func_ptr = rcu_dereference_sched(tp->funcs);

                if (probe_func_ptr) {
                        do {
                                copy = *i;
                                probe_func = probe_func_ptr->func;
                                tpdata = probe_func_ptr->data;
                                probe_func(user, &copy, tpdata, &faulted);
                        } while ((++probe_func_ptr)->func);
                }

                rcu_read_unlock_sched();

                if (unlikely(faulted))
                        return -EFAULT;
        } else
                return -EBADF;

        return ret;
}
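
/*
 * For illustration, a user-space write is expected to begin with the s32
 * write_index returned from DIAG_IOCSREG, followed by the payload fields.
 * A minimal sketch (user-space code, not part of this file; the event
 * layout shown is hypothetical):
 *
 *	struct { __s32 index; __u32 id; char msg[20]; } buf = {
 *		.index = write_index,	// from the register ioctl
 *		.id = 42,
 *		.msg = "hello",
 *	};
 *	write(data_fd, &buf, sizeof(buf));	// data_fd: user_events_data
 */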

static int user_events_open(struct inode *node, struct file *file)
{
        struct user_event_group *group;
        struct user_event_file_info *info;

        group = current_user_event_group();

        if (!group)
                return -ENOENT;

        info = kzalloc(sizeof(*info), GFP_KERNEL);

        if (!info)
                return -ENOMEM;

        info->group = group;

        file->private_data = info;

        return 0;
}

static ssize_t user_events_write(struct file *file, const char __user *ubuf,
                                 size_t count, loff_t *ppos)
{
        struct iovec iov;
        struct iov_iter i;

        if (unlikely(*ppos != 0))
                return -EFAULT;

        if (unlikely(import_single_range(ITER_SOURCE, (char __user *)ubuf,
                                         count, &iov, &i)))
                return -EFAULT;

        return user_events_write_core(file, &i);
}

static ssize_t user_events_write_iter(struct kiocb *kp, struct iov_iter *i)
{
        return user_events_write_core(kp->ki_filp, i);
}

static int user_events_ref_add(struct user_event_file_info *info,
                               struct user_event *user)
{
        struct user_event_group *group = info->group;
        struct user_event_refs *refs, *new_refs;
        int i, size, count = 0;

        refs = rcu_dereference_protected(info->refs,
                                         lockdep_is_held(&group->reg_mutex));

        if (refs) {
                count = refs->count;

                for (i = 0; i < count; ++i)
                        if (refs->events[i] == user)
                                return i;
        }

        size = struct_size(refs, events, count + 1);

        new_refs = kzalloc(size, GFP_KERNEL);

        if (!new_refs)
                return -ENOMEM;

        new_refs->count = count + 1;

        for (i = 0; i < count; ++i)
                new_refs->events[i] = refs->events[i];

        new_refs->events[i] = user;

        refcount_inc(&user->refcnt);

        rcu_assign_pointer(info->refs, new_refs);

        if (refs)
                kfree_rcu(refs, rcu);

        return i;
}

static long user_reg_get(struct user_reg __user *ureg, struct user_reg *kreg)
{
        u32 size;
        long ret;

        ret = get_user(size, &ureg->size);

        if (ret)
                return ret;

        if (size > PAGE_SIZE)
                return -E2BIG;

        if (size < offsetofend(struct user_reg, write_index))
                return -EINVAL;

        ret = copy_struct_from_user(kreg, sizeof(*kreg), ureg, size);

        if (ret)
                return ret;

        kreg->size = size;

        return 0;
}

/*
 * Registers a user_event on behalf of a user process.
 */
static long user_events_ioctl_reg(struct user_event_file_info *info,
                                  unsigned long uarg)
{
        struct user_reg __user *ureg = (struct user_reg __user *)uarg;
        struct user_reg reg;
        struct user_event *user;
        char *name;
        long ret;

        ret = user_reg_get(ureg, &reg);

        if (ret)
                return ret;

        name = strndup_user((const char __user *)(uintptr_t)reg.name_args,
                            MAX_EVENT_DESC);

        if (IS_ERR(name)) {
                ret = PTR_ERR(name);
                return ret;
        }

        ret = user_event_parse_cmd(info->group, name, &user);

        if (ret) {
                kfree(name);
                return ret;
        }

        ret = user_events_ref_add(info, user);

        /* No longer need parse ref, ref_add either worked or not */
        refcount_dec(&user->refcnt);

        /* Positive number is index and valid */
        if (ret < 0)
                return ret;

        put_user((u32)ret, &ureg->write_index);
        put_user(user->index, &ureg->status_bit);

        return 0;
}
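
/*
 * For illustration, a minimal user-space registration sketch, assuming the
 * struct user_reg layout from uapi/linux/user_events.h (user-space code,
 * not part of this file; "test u32 id" is an example command):
 *
 *	struct user_reg reg = { 0 };
 *
 *	reg.size = sizeof(reg);
 *	reg.name_args = (__u64)"test u32 id";
 *
 *	if (ioctl(data_fd, DIAG_IOCSREG, &reg) == 0) {
 *		// reg.write_index prefixes every write() payload
 *		// reg.status_bit indexes the mmap'd status page
 *	}
 */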

/*
 * Deletes a user_event on behalf of a user process.
 */
static long user_events_ioctl_del(struct user_event_file_info *info,
                                  unsigned long uarg)
{
        void __user *ubuf = (void __user *)uarg;
        char *name;
        long ret;

        name = strndup_user(ubuf, MAX_EVENT_DESC);

        if (IS_ERR(name))
                return PTR_ERR(name);

        /* event_mutex prevents dyn_event from racing */
        mutex_lock(&event_mutex);
        ret = delete_user_event(info->group, name);
        mutex_unlock(&event_mutex);

        kfree(name);

        return ret;
}

/*
 * Handles the ioctl from user mode to register or alter operations.
 */
static long user_events_ioctl(struct file *file, unsigned int cmd,
                              unsigned long uarg)
{
        struct user_event_file_info *info = file->private_data;
        struct user_event_group *group = info->group;
        long ret = -ENOTTY;

        switch (cmd) {
        case DIAG_IOCSREG:
                mutex_lock(&group->reg_mutex);
                ret = user_events_ioctl_reg(info, uarg);
                mutex_unlock(&group->reg_mutex);
                break;

        case DIAG_IOCSDEL:
                mutex_lock(&group->reg_mutex);
                ret = user_events_ioctl_del(info, uarg);
                mutex_unlock(&group->reg_mutex);
                break;
        }

        return ret;
}

/*
 * Handles the final close of the file from user mode.
 */
static int user_events_release(struct inode *node, struct file *file)
{
        struct user_event_file_info *info = file->private_data;
        struct user_event_group *group;
        struct user_event_refs *refs;
        struct user_event *user;
        int i;

        if (!info)
                return -EINVAL;

        group = info->group;

        /*
         * Ensure refs cannot change under any situation by taking the
         * register mutex during the final freeing of the references.
         */
        mutex_lock(&group->reg_mutex);

        refs = info->refs;

        if (!refs)
                goto out;

        /*
         * The lifetime of refs has reached an end, it's tied to this file.
         * The underlying user_events are ref counted, and cannot be freed.
         * After this decrement, the user_events may be freed elsewhere.
         */
        for (i = 0; i < refs->count; ++i) {
                user = refs->events[i];

                if (user)
                        refcount_dec(&user->refcnt);
        }
out:
        file->private_data = NULL;

        mutex_unlock(&group->reg_mutex);

        kfree(refs);
        kfree(info);

        return 0;
}

static const struct file_operations user_data_fops = {
        .open = user_events_open,
        .write = user_events_write,
        .write_iter = user_events_write_iter,
        .unlocked_ioctl = user_events_ioctl,
        .release = user_events_release,
};

static struct user_event_group *user_status_group(struct file *file)
{
        struct seq_file *m = file->private_data;

        if (!m)
                return NULL;

        return m->private;
}

/*
 * Maps the shared page into the user process for checking if event is enabled.
 */
static int user_status_mmap(struct file *file, struct vm_area_struct *vma)
{
        char *pages;
        struct user_event_group *group = user_status_group(file);
        unsigned long size = vma->vm_end - vma->vm_start;

        if (size != MAX_BYTES)
                return -EINVAL;

        if (!group)
                return -EINVAL;

        pages = group->register_page_data;

        return remap_pfn_range(vma, vma->vm_start,
                               virt_to_phys(pages) >> PAGE_SHIFT,
                               size, vm_get_page_prot(VM_READ));
}
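
/*
 * For illustration, a user-space status check using the status_bit returned
 * from DIAG_IOCSREG and the MAP_STATUS_* byte/bit split described above
 * (user-space sketch, not part of this file; the mapping length must equal
 * MAX_BYTES, i.e. one page with the default order):
 *
 *	char *status = mmap(NULL, page_size, PROT_READ, MAP_SHARED,
 *			    status_fd, 0);	// status_fd: user_events_status
 *
 *	if (status[status_bit >> 3] & (1 << (status_bit & 7)))
 *		;	// event enabled, worth paying the write() cost
 */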

static void *user_seq_start(struct seq_file *m, loff_t *pos)
{
        if (*pos)
                return NULL;

        return (void *)1;
}

static void *user_seq_next(struct seq_file *m, void *p, loff_t *pos)
{
        ++*pos;
        return NULL;
}

static void user_seq_stop(struct seq_file *m, void *p)
{
}

static int user_seq_show(struct seq_file *m, void *p)
{
        struct user_event_group *group = m->private;
        struct user_event *user;
        char status;
        int i, active = 0, busy = 0, flags;

        if (!group)
                return -EINVAL;

        mutex_lock(&group->reg_mutex);

        hash_for_each(group->register_table, i, user, node) {
                status = user->status;
                flags = user->flags;

                seq_printf(m, "%d:%s", user->index, EVENT_NAME(user));

                if (flags != 0 || status != 0)
                        seq_puts(m, " #");

                if (status != 0) {
                        seq_puts(m, " Used by");
                        if (status & EVENT_STATUS_FTRACE)
                                seq_puts(m, " ftrace");
                        if (status & EVENT_STATUS_PERF)
                                seq_puts(m, " perf");
                        if (status & EVENT_STATUS_OTHER)
                                seq_puts(m, " other");
                        busy++;
                }

                seq_puts(m, "\n");
                active++;
        }

        mutex_unlock(&group->reg_mutex);

        seq_puts(m, "\n");
        seq_printf(m, "Active: %d\n", active);
        seq_printf(m, "Busy: %d\n", busy);
        seq_printf(m, "Max: %ld\n", MAX_EVENTS);

        return 0;
}

static const struct seq_operations user_seq_ops = {
        .start = user_seq_start,
        .next = user_seq_next,
        .stop = user_seq_stop,
        .show = user_seq_show,
};

static int user_status_open(struct inode *node, struct file *file)
{
        struct user_event_group *group;
        int ret;

        group = current_user_event_group();

        if (!group)
                return -ENOENT;

        ret = seq_open(file, &user_seq_ops);

        if (!ret) {
                /* Chain group to seq_file */
                struct seq_file *m = file->private_data;

                m->private = group;
        }

        return ret;
}

static const struct file_operations user_status_fops = {
        .open = user_status_open,
        .mmap = user_status_mmap,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release,
};

/*
 * Creates a set of tracefs files to allow user mode interactions.
 */
static int create_user_tracefs(void)
{
        struct dentry *edata, *emmap;

        edata = tracefs_create_file("user_events_data", TRACE_MODE_WRITE,
                                    NULL, NULL, &user_data_fops);

        if (!edata) {
                pr_warn("Could not create tracefs 'user_events_data' entry\n");
                goto err;
        }

        /* mmap with MAP_SHARED requires writable fd */
        emmap = tracefs_create_file("user_events_status", TRACE_MODE_WRITE,
                                    NULL, NULL, &user_status_fops);

        if (!emmap) {
                tracefs_remove(edata);
                pr_warn("Could not create tracefs 'user_events_status' entry\n");
                goto err;
        }

        return 0;
err:
        return -ENODEV;
}

static int __init trace_events_user_init(void)
{
        int ret;

        init_group = user_event_group_create(&init_user_ns);

        if (!init_group)
                return -ENOMEM;

        ret = create_user_tracefs();

        if (ret) {
                pr_warn("user_events could not register with tracefs\n");
                user_event_group_destroy(init_group);
                init_group = NULL;
                return ret;
        }

        if (dyn_event_register(&user_event_dops))
                pr_warn("user_events could not register with dyn_events\n");

        return 0;
}

fs_initcall(trace_events_user_init);