1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * trace_events_synth - synthetic trace events
4  *
5  * Copyright (C) 2015, 2020 Tom Zanussi <tom.zanussi@linux.intel.com>
6  */
7 
8 #include <linux/module.h>
9 #include <linux/kallsyms.h>
10 #include <linux/security.h>
11 #include <linux/mutex.h>
12 #include <linux/slab.h>
13 #include <linux/stacktrace.h>
14 #include <linux/rculist.h>
15 #include <linux/tracefs.h>
16 
17 /* for gfp flag names */
18 #include <linux/trace_events.h>
19 #include <trace/events/mmflags.h>
20 #include "trace_probe.h"
21 #include "trace_probe_kernel.h"
22 
23 #include "trace_synth.h"
24 
25 #undef ERRORS
26 #define ERRORS	\
27 	C(BAD_NAME,		"Illegal name"),		\
28 	C(INVALID_CMD,		"Command must be of the form: <name> field[;field] ..."),\
29 	C(INVALID_DYN_CMD,	"Command must be of the form: s or -:[synthetic/]<name> field[;field] ..."),\
30 	C(EVENT_EXISTS,		"Event already exists"),	\
31 	C(TOO_MANY_FIELDS,	"Too many fields"),		\
32 	C(INCOMPLETE_TYPE,	"Incomplete type"),		\
33 	C(INVALID_TYPE,		"Invalid type"),		\
34 	C(INVALID_FIELD,        "Invalid field"),		\
35 	C(INVALID_ARRAY_SPEC,	"Invalid array specification"),
36 
37 #undef C
38 #define C(a, b)		SYNTH_ERR_##a
39 
40 enum { ERRORS };
41 
42 #undef C
43 #define C(a, b)		b
44 
45 static const char *err_text[] = { ERRORS };
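/*
 * The C() x-macro in ERRORS is expanded twice above: first to generate
 * the SYNTH_ERR_* enum values, then to build the parallel err_text[]
 * string table that synth_err() passes to tracing_log_err().
 */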
46 
47 static char last_cmd[MAX_FILTER_STR_VAL];
48 
49 static int errpos(const char *str)
50 {
51 	return err_pos(last_cmd, str);
52 }
53 
54 static void last_cmd_set(const char *str)
55 {
56 	if (!str)
57 		return;
58 
59 	strncpy(last_cmd, str, MAX_FILTER_STR_VAL - 1);
60 }
61 
62 static void synth_err(u8 err_type, u8 err_pos)
63 {
64 	tracing_log_err(NULL, "synthetic_events", last_cmd, err_text,
65 			err_type, err_pos);
66 }
67 
68 static int create_synth_event(const char *raw_command);
69 static int synth_event_show(struct seq_file *m, struct dyn_event *ev);
70 static int synth_event_release(struct dyn_event *ev);
71 static bool synth_event_is_busy(struct dyn_event *ev);
72 static bool synth_event_match(const char *system, const char *event,
73 			int argc, const char **argv, struct dyn_event *ev);
74 
75 static struct dyn_event_operations synth_event_ops = {
76 	.create = create_synth_event,
77 	.show = synth_event_show,
78 	.is_busy = synth_event_is_busy,
79 	.free = synth_event_release,
80 	.match = synth_event_match,
81 };
82 
83 static bool is_synth_event(struct dyn_event *ev)
84 {
85 	return ev->ops == &synth_event_ops;
86 }
87 
88 static struct synth_event *to_synth_event(struct dyn_event *ev)
89 {
90 	return container_of(ev, struct synth_event, devent);
91 }
92 
93 static bool synth_event_is_busy(struct dyn_event *ev)
94 {
95 	struct synth_event *event = to_synth_event(ev);
96 
97 	return event->ref != 0;
98 }
99 
100 static bool synth_event_match(const char *system, const char *event,
101 			int argc, const char **argv, struct dyn_event *ev)
102 {
103 	struct synth_event *sev = to_synth_event(ev);
104 
105 	return strcmp(sev->name, event) == 0 &&
106 		(!system || strcmp(system, SYNTH_SYSTEM) == 0);
107 }
108 
109 struct synth_trace_event {
110 	struct trace_entry	ent;
111 	u64			fields[];
112 };
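/*
 * Layout note: fields[] is an array of u64 slots.  Numeric fields use
 * one slot each, statically sized strings occupy STR_VAR_LEN_MAX bytes
 * worth of slots, and dynamic strings/stacktraces store an offset/length
 * word in their slot with the actual data appended after the fixed-size
 * slots (see trace_string() and trace_stack() below).
 */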
113 
114 static int synth_event_define_fields(struct trace_event_call *call)
115 {
116 	struct synth_trace_event trace;
117 	int offset = offsetof(typeof(trace), fields);
118 	struct synth_event *event = call->data;
119 	unsigned int i, size, n_u64;
120 	char *name, *type;
121 	bool is_signed;
122 	int ret = 0;
123 
124 	for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
125 		size = event->fields[i]->size;
126 		is_signed = event->fields[i]->is_signed;
127 		type = event->fields[i]->type;
128 		name = event->fields[i]->name;
129 		ret = trace_define_field(call, type, name, offset, size,
130 					 is_signed, FILTER_OTHER);
131 		if (ret)
132 			break;
133 
134 		event->fields[i]->offset = n_u64;
135 
136 		if (event->fields[i]->is_string && !event->fields[i]->is_dynamic) {
137 			offset += STR_VAR_LEN_MAX;
138 			n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
139 		} else {
140 			offset += sizeof(u64);
141 			n_u64++;
142 		}
143 	}
144 
145 	event->n_u64 = n_u64;
146 
147 	return ret;
148 }
149 
150 static bool synth_field_signed(char *type)
151 {
152 	if (str_has_prefix(type, "u"))
153 		return false;
154 	if (strcmp(type, "gfp_t") == 0)
155 		return false;
156 
157 	return true;
158 }
159 
160 static int synth_field_is_string(char *type)
161 {
162 	if (strstr(type, "char[") != NULL)
163 		return true;
164 
165 	return false;
166 }
167 
168 static int synth_field_is_stack(char *type)
169 {
170 	if (strstr(type, "long[") != NULL)
171 		return true;
172 
173 	return false;
174 }
175 
176 static int synth_field_string_size(char *type)
177 {
178 	char buf[4], *end, *start;
179 	unsigned int len;
180 	int size, err;
181 
182 	start = strstr(type, "char[");
183 	if (start == NULL)
184 		return -EINVAL;
185 	start += sizeof("char[") - 1;
186 
187 	end = strchr(type, ']');
188 	if (!end || end < start || type + strlen(type) > end + 1)
189 		return -EINVAL;
190 
191 	len = end - start;
192 	if (len > 3)
193 		return -EINVAL;
194 
195 	if (len == 0)
196 		return 0; /* variable-length string */
197 
198 	strncpy(buf, start, len);
199 	buf[len] = '\0';
200 
201 	err = kstrtouint(buf, 0, &size);
202 	if (err)
203 		return err;
204 
205 	if (size > STR_VAR_LEN_MAX)
206 		return -EINVAL;
207 
208 	return size;
209 }
210 
211 static int synth_field_size(char *type)
212 {
213 	int size = 0;
214 
215 	if (strcmp(type, "s64") == 0)
216 		size = sizeof(s64);
217 	else if (strcmp(type, "u64") == 0)
218 		size = sizeof(u64);
219 	else if (strcmp(type, "s32") == 0)
220 		size = sizeof(s32);
221 	else if (strcmp(type, "u32") == 0)
222 		size = sizeof(u32);
223 	else if (strcmp(type, "s16") == 0)
224 		size = sizeof(s16);
225 	else if (strcmp(type, "u16") == 0)
226 		size = sizeof(u16);
227 	else if (strcmp(type, "s8") == 0)
228 		size = sizeof(s8);
229 	else if (strcmp(type, "u8") == 0)
230 		size = sizeof(u8);
231 	else if (strcmp(type, "char") == 0)
232 		size = sizeof(char);
233 	else if (strcmp(type, "unsigned char") == 0)
234 		size = sizeof(unsigned char);
235 	else if (strcmp(type, "int") == 0)
236 		size = sizeof(int);
237 	else if (strcmp(type, "unsigned int") == 0)
238 		size = sizeof(unsigned int);
239 	else if (strcmp(type, "long") == 0)
240 		size = sizeof(long);
241 	else if (strcmp(type, "unsigned long") == 0)
242 		size = sizeof(unsigned long);
243 	else if (strcmp(type, "bool") == 0)
244 		size = sizeof(bool);
245 	else if (strcmp(type, "pid_t") == 0)
246 		size = sizeof(pid_t);
247 	else if (strcmp(type, "gfp_t") == 0)
248 		size = sizeof(gfp_t);
249 	else if (synth_field_is_string(type))
250 		size = synth_field_string_size(type);
251 	else if (synth_field_is_stack(type))
252 		size = 0;
253 
254 	return size;
255 }
256 
257 static const char *synth_field_fmt(char *type)
258 {
259 	const char *fmt = "%llu";
260 
261 	if (strcmp(type, "s64") == 0)
262 		fmt = "%lld";
263 	else if (strcmp(type, "u64") == 0)
264 		fmt = "%llu";
265 	else if (strcmp(type, "s32") == 0)
266 		fmt = "%d";
267 	else if (strcmp(type, "u32") == 0)
268 		fmt = "%u";
269 	else if (strcmp(type, "s16") == 0)
270 		fmt = "%d";
271 	else if (strcmp(type, "u16") == 0)
272 		fmt = "%u";
273 	else if (strcmp(type, "s8") == 0)
274 		fmt = "%d";
275 	else if (strcmp(type, "u8") == 0)
276 		fmt = "%u";
277 	else if (strcmp(type, "char") == 0)
278 		fmt = "%d";
279 	else if (strcmp(type, "unsigned char") == 0)
280 		fmt = "%u";
281 	else if (strcmp(type, "int") == 0)
282 		fmt = "%d";
283 	else if (strcmp(type, "unsigned int") == 0)
284 		fmt = "%u";
285 	else if (strcmp(type, "long") == 0)
286 		fmt = "%ld";
287 	else if (strcmp(type, "unsigned long") == 0)
288 		fmt = "%lu";
289 	else if (strcmp(type, "bool") == 0)
290 		fmt = "%d";
291 	else if (strcmp(type, "pid_t") == 0)
292 		fmt = "%d";
293 	else if (strcmp(type, "gfp_t") == 0)
294 		fmt = "%x";
295 	else if (synth_field_is_string(type))
296 		fmt = "%.*s";
297 	else if (synth_field_is_stack(type))
298 		fmt = "%s";
299 
300 	return fmt;
301 }
302 
303 static void print_synth_event_num_val(struct trace_seq *s,
304 				      char *print_fmt, char *name,
305 				      int size, u64 val, char *space)
306 {
307 	switch (size) {
308 	case 1:
309 		trace_seq_printf(s, print_fmt, name, (u8)val, space);
310 		break;
311 
312 	case 2:
313 		trace_seq_printf(s, print_fmt, name, (u16)val, space);
314 		break;
315 
316 	case 4:
317 		trace_seq_printf(s, print_fmt, name, (u32)val, space);
318 		break;
319 
320 	default:
321 		trace_seq_printf(s, print_fmt, name, val, space);
322 		break;
323 	}
324 }
325 
326 static enum print_line_t print_synth_event(struct trace_iterator *iter,
327 					   int flags,
328 					   struct trace_event *event)
329 {
330 	struct trace_array *tr = iter->tr;
331 	struct trace_seq *s = &iter->seq;
332 	struct synth_trace_event *entry;
333 	struct synth_event *se;
334 	unsigned int i, n_u64;
335 	char print_fmt[32];
336 	const char *fmt;
337 
338 	entry = (struct synth_trace_event *)iter->ent;
339 	se = container_of(event, struct synth_event, call.event);
340 
341 	trace_seq_printf(s, "%s: ", se->name);
342 
343 	for (i = 0, n_u64 = 0; i < se->n_fields; i++) {
344 		if (trace_seq_has_overflowed(s))
345 			goto end;
346 
347 		fmt = synth_field_fmt(se->fields[i]->type);
348 
349 		/* parameter types */
350 		if (tr && tr->trace_flags & TRACE_ITER_VERBOSE)
351 			trace_seq_printf(s, "%s ", fmt);
352 
353 		snprintf(print_fmt, sizeof(print_fmt), "%%s=%s%%s", fmt);
354 
355 		/* parameter values */
356 		if (se->fields[i]->is_string) {
357 			if (se->fields[i]->is_dynamic) {
358 				u32 offset, data_offset;
359 				char *str_field;
360 
361 				offset = (u32)entry->fields[n_u64];
362 				data_offset = offset & 0xffff;
363 
364 				str_field = (char *)entry + data_offset;
365 
366 				trace_seq_printf(s, print_fmt, se->fields[i]->name,
367 						 STR_VAR_LEN_MAX,
368 						 str_field,
369 						 i == se->n_fields - 1 ? "" : " ");
370 				n_u64++;
371 			} else {
372 				trace_seq_printf(s, print_fmt, se->fields[i]->name,
373 						 STR_VAR_LEN_MAX,
374 						 (char *)&entry->fields[n_u64],
375 						 i == se->n_fields - 1 ? "" : " ");
376 				n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
377 			}
378 		} else if (se->fields[i]->is_stack) {
379 			u32 offset, data_offset, len;
380 			unsigned long *p, *end;
381 
382 			offset = (u32)entry->fields[n_u64];
383 			data_offset = offset & 0xffff;
384 			len = offset >> 16;
385 
386 			p = (void *)entry + data_offset;
387 			end = (void *)p + len - (sizeof(long) - 1);
388 
389 			trace_seq_printf(s, "%s=STACK:\n", se->fields[i]->name);
390 
391 			for (; *p && p < end; p++)
392 				trace_seq_printf(s, "=> %pS\n", (void *)*p);
393 			n_u64++;
394 
395 		} else {
396 			struct trace_print_flags __flags[] = {
397 			    __def_gfpflag_names, {-1, NULL} };
398 			char *space = (i == se->n_fields - 1 ? "" : " ");
399 
400 			print_synth_event_num_val(s, print_fmt,
401 						  se->fields[i]->name,
402 						  se->fields[i]->size,
403 						  entry->fields[n_u64],
404 						  space);
405 
406 			if (strcmp(se->fields[i]->type, "gfp_t") == 0) {
407 				trace_seq_puts(s, " (");
408 				trace_print_flags_seq(s, "|",
409 						      entry->fields[n_u64],
410 						      __flags);
411 				trace_seq_putc(s, ')');
412 			}
413 			n_u64++;
414 		}
415 	}
416 end:
417 	trace_seq_putc(s, '\n');
418 
419 	return trace_handle_return(s);
420 }
421 
422 static struct trace_event_functions synth_event_funcs = {
423 	.trace		= print_synth_event
424 };
425 
426 static unsigned int trace_string(struct synth_trace_event *entry,
427 				 struct synth_event *event,
428 				 char *str_val,
429 				 bool is_dynamic,
430 				 unsigned int data_size,
431 				 unsigned int *n_u64)
432 {
433 	unsigned int len = 0;
434 	char *str_field;
435 	int ret;
436 
437 	if (is_dynamic) {
438 		u32 data_offset;
439 
440 		data_offset = offsetof(typeof(*entry), fields);
441 		data_offset += event->n_u64 * sizeof(u64);
442 		data_offset += data_size;
443 
444 		len = kern_fetch_store_strlen((unsigned long)str_val);
445 
446 		data_offset |= len << 16;
447 		*(u32 *)&entry->fields[*n_u64] = data_offset;
448 
449 		ret = kern_fetch_store_string((unsigned long)str_val, &entry->fields[*n_u64], entry);
450 
451 		(*n_u64)++;
452 	} else {
453 		str_field = (char *)&entry->fields[*n_u64];
454 
455 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
456 		if ((unsigned long)str_val < TASK_SIZE)
457 			ret = strncpy_from_user_nofault(str_field, (const void __user *)str_val, STR_VAR_LEN_MAX);
458 		else
459 #endif
460 			ret = strncpy_from_kernel_nofault(str_field, str_val, STR_VAR_LEN_MAX);
461 
462 		if (ret < 0)
463 			strcpy(str_field, FAULT_STRING);
464 
465 		(*n_u64) += STR_VAR_LEN_MAX / sizeof(u64);
466 	}
467 
468 	return len;
469 }
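/*
 * For the dynamic case above, the slot written into fields[] packs the
 * data offset in the low 16 bits and the string length in the high 16
 * bits; print_synth_event() unpacks it the same way.
 */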
470 
471 static unsigned int trace_stack(struct synth_trace_event *entry,
472 				 struct synth_event *event,
473 				 long *stack,
474 				 unsigned int data_size,
475 				 unsigned int *n_u64)
476 {
477 	unsigned int len;
478 	u32 data_offset;
479 	void *data_loc;
480 
481 	data_offset = struct_size(entry, fields, event->n_u64);
482 	data_offset += data_size;
483 
484 	for (len = 0; len < HIST_STACKTRACE_DEPTH; len++) {
485 		if (!stack[len])
486 			break;
487 	}
488 
489 	/* Include the zero'd element if it fits */
490 	if (len < HIST_STACKTRACE_DEPTH)
491 		len++;
492 
493 	len *= sizeof(long);
494 
495 	/* Find the dynamic section to copy the stack into. */
496 	data_loc = (void *)entry + data_offset;
497 	memcpy(data_loc, stack, len);
498 
499 	/* Fill in the field that holds the offset/len combo */
500 	data_offset |= len << 16;
501 	*(u32 *)&entry->fields[*n_u64] = data_offset;
502 
503 	(*n_u64)++;
504 
505 	return len;
506 }
507 
508 static notrace void trace_event_raw_event_synth(void *__data,
509 						u64 *var_ref_vals,
510 						unsigned int *var_ref_idx)
511 {
512 	unsigned int i, n_u64, val_idx, len, data_size = 0;
513 	struct trace_event_file *trace_file = __data;
514 	struct synth_trace_event *entry;
515 	struct trace_event_buffer fbuffer;
516 	struct trace_buffer *buffer;
517 	struct synth_event *event;
518 	int fields_size = 0;
519 
520 	event = trace_file->event_call->data;
521 
522 	if (trace_trigger_soft_disabled(trace_file))
523 		return;
524 
525 	fields_size = event->n_u64 * sizeof(u64);
526 
527 	for (i = 0; i < event->n_dynamic_fields; i++) {
528 		unsigned int field_pos = event->dynamic_fields[i]->field_pos;
529 		char *str_val;
530 
531 		val_idx = var_ref_idx[field_pos];
532 		str_val = (char *)(long)var_ref_vals[val_idx];
533 
534 		len = kern_fetch_store_strlen((unsigned long)str_val);
535 
536 		fields_size += len;
537 	}
538 
539 	/*
540 	 * Avoid ring buffer recursion detection, as this event
541 	 * is being performed within another event.
542 	 */
543 	buffer = trace_file->tr->array_buffer.buffer;
544 	ring_buffer_nest_start(buffer);
545 
546 	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
547 					   sizeof(*entry) + fields_size);
548 	if (!entry)
549 		goto out;
550 
551 	for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
552 		val_idx = var_ref_idx[i];
553 		if (event->fields[i]->is_string) {
554 			char *str_val = (char *)(long)var_ref_vals[val_idx];
555 
556 			len = trace_string(entry, event, str_val,
557 					   event->fields[i]->is_dynamic,
558 					   data_size, &n_u64);
559 			data_size += len; /* only dynamic string increments */
560 		} else if (event->fields[i]->is_stack) {
561 			long *stack = (long *)(long)var_ref_vals[val_idx];
562 
563 			len = trace_stack(entry, event, stack,
564 					   data_size, &n_u64);
565 			data_size += len;
566 		} else {
567 			struct synth_field *field = event->fields[i];
568 			u64 val = var_ref_vals[val_idx];
569 
570 			switch (field->size) {
571 			case 1:
572 				*(u8 *)&entry->fields[n_u64] = (u8)val;
573 				break;
574 
575 			case 2:
576 				*(u16 *)&entry->fields[n_u64] = (u16)val;
577 				break;
578 
579 			case 4:
580 				*(u32 *)&entry->fields[n_u64] = (u32)val;
581 				break;
582 
583 			default:
584 				entry->fields[n_u64] = val;
585 				break;
586 			}
587 			n_u64++;
588 		}
589 	}
590 
591 	trace_event_buffer_commit(&fbuffer);
592 out:
593 	ring_buffer_nest_end(buffer);
594 }
595 
596 static void free_synth_event_print_fmt(struct trace_event_call *call)
597 {
598 	if (call) {
599 		kfree(call->print_fmt);
600 		call->print_fmt = NULL;
601 	}
602 }
603 
604 static int __set_synth_event_print_fmt(struct synth_event *event,
605 				       char *buf, int len)
606 {
607 	const char *fmt;
608 	int pos = 0;
609 	int i;
610 
611 	/* When len=0, we just calculate the needed length */
612 #define LEN_OR_ZERO (len ? len - pos : 0)
613 
614 	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
615 	for (i = 0; i < event->n_fields; i++) {
616 		fmt = synth_field_fmt(event->fields[i]->type);
617 		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s=%s%s",
618 				event->fields[i]->name, fmt,
619 				i == event->n_fields - 1 ? "" : ", ");
620 	}
621 	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
622 
623 	for (i = 0; i < event->n_fields; i++) {
624 		if (event->fields[i]->is_string &&
625 		    event->fields[i]->is_dynamic)
626 			pos += snprintf(buf + pos, LEN_OR_ZERO,
627 				", __get_str(%s)", event->fields[i]->name);
628 		else if (event->fields[i]->is_stack)
629 			pos += snprintf(buf + pos, LEN_OR_ZERO,
630 				", __get_stacktrace(%s)", event->fields[i]->name);
631 		else
632 			pos += snprintf(buf + pos, LEN_OR_ZERO,
633 					", REC->%s", event->fields[i]->name);
634 	}
635 
636 #undef LEN_OR_ZERO
637 
638 	/* return the length of print_fmt */
639 	return pos;
640 }
641 
642 static int set_synth_event_print_fmt(struct trace_event_call *call)
643 {
644 	struct synth_event *event = call->data;
645 	char *print_fmt;
646 	int len;
647 
648 	/* First: called with 0 length to calculate the needed length */
649 	len = __set_synth_event_print_fmt(event, NULL, 0);
650 
651 	print_fmt = kmalloc(len + 1, GFP_KERNEL);
652 	if (!print_fmt)
653 		return -ENOMEM;
654 
655 	/* Second: actually write the @print_fmt */
656 	__set_synth_event_print_fmt(event, print_fmt, len + 1);
657 	call->print_fmt = print_fmt;
658 
659 	return 0;
660 }
661 
662 static void free_synth_field(struct synth_field *field)
663 {
664 	kfree(field->type);
665 	kfree(field->name);
666 	kfree(field);
667 }
668 
669 static int check_field_version(const char *prefix, const char *field_type,
670 			       const char *field_name)
671 {
672 	/*
673 	 * For backward compatibility, the old synthetic event command
674 	 * format did not require semicolons, and in order to not
675 	 * break user space, that old format must still work. If a new
676 	 * feature is added, then the format that uses the new feature
677 	 * will be required to have semicolons, as nothing that uses
678 	 * the old format would be using the new, yet to be created,
679 	 * feature. When a new feature is added, this will detect it,
680 	 * and return a number greater than 1, and require the format
681 	 * to use semicolons.
682 	 */
683 	return 1;
684 }
685 
686 static struct synth_field *parse_synth_field(int argc, char **argv,
687 					     int *consumed, int *field_version)
688 {
689 	const char *prefix = NULL, *field_type = argv[0], *field_name, *array;
690 	struct synth_field *field;
691 	int len, ret = -ENOMEM;
692 	struct seq_buf s;
693 	ssize_t size;
694 
695 	if (!strcmp(field_type, "unsigned")) {
696 		if (argc < 3) {
697 			synth_err(SYNTH_ERR_INCOMPLETE_TYPE, errpos(field_type));
698 			return ERR_PTR(-EINVAL);
699 		}
700 		prefix = "unsigned ";
701 		field_type = argv[1];
702 		field_name = argv[2];
703 		*consumed += 3;
704 	} else {
705 		field_name = argv[1];
706 		*consumed += 2;
707 	}
708 
709 	if (!field_name) {
710 		synth_err(SYNTH_ERR_INVALID_FIELD, errpos(field_type));
711 		return ERR_PTR(-EINVAL);
712 	}
713 
714 	*field_version = check_field_version(prefix, field_type, field_name);
715 
716 	field = kzalloc(sizeof(*field), GFP_KERNEL);
717 	if (!field)
718 		return ERR_PTR(-ENOMEM);
719 
720 	len = strlen(field_name);
721 	array = strchr(field_name, '[');
722 	if (array)
723 		len -= strlen(array);
724 
725 	field->name = kmemdup_nul(field_name, len, GFP_KERNEL);
726 	if (!field->name)
727 		goto free;
728 
729 	if (!is_good_name(field->name)) {
730 		synth_err(SYNTH_ERR_BAD_NAME, errpos(field_name));
731 		ret = -EINVAL;
732 		goto free;
733 	}
734 
735 	len = strlen(field_type) + 1;
736 
737 	if (array)
738 		len += strlen(array);
739 
740 	if (prefix)
741 		len += strlen(prefix);
742 
743 	field->type = kzalloc(len, GFP_KERNEL);
744 	if (!field->type)
745 		goto free;
746 
747 	seq_buf_init(&s, field->type, len);
748 	if (prefix)
749 		seq_buf_puts(&s, prefix);
750 	seq_buf_puts(&s, field_type);
751 	if (array)
752 		seq_buf_puts(&s, array);
753 	if (WARN_ON_ONCE(!seq_buf_buffer_left(&s)))
754 		goto free;
755 
756 	s.buffer[s.len] = '\0';
757 
758 	size = synth_field_size(field->type);
759 	if (size < 0) {
760 		if (array)
761 			synth_err(SYNTH_ERR_INVALID_ARRAY_SPEC, errpos(field_name));
762 		else
763 			synth_err(SYNTH_ERR_INVALID_TYPE, errpos(field_type));
764 		ret = -EINVAL;
765 		goto free;
766 	} else if (size == 0) {
767 		if (synth_field_is_string(field->type) ||
768 		    synth_field_is_stack(field->type)) {
769 			char *type;
770 
771 			len = sizeof("__data_loc ") + strlen(field->type) + 1;
772 			type = kzalloc(len, GFP_KERNEL);
773 			if (!type)
774 				goto free;
775 
776 			seq_buf_init(&s, type, len);
777 			seq_buf_puts(&s, "__data_loc ");
778 			seq_buf_puts(&s, field->type);
779 
780 			if (WARN_ON_ONCE(!seq_buf_buffer_left(&s)))
781 				goto free;
782 			s.buffer[s.len] = '\0';
783 
784 			kfree(field->type);
785 			field->type = type;
786 
787 			field->is_dynamic = true;
788 			size = sizeof(u64);
789 		} else {
790 			synth_err(SYNTH_ERR_INVALID_TYPE, errpos(field_type));
791 			ret = -EINVAL;
792 			goto free;
793 		}
794 	}
795 	field->size = size;
796 
797 	if (synth_field_is_string(field->type))
798 		field->is_string = true;
799 	else if (synth_field_is_stack(field->type))
800 		field->is_stack = true;
801 
802 	field->is_signed = synth_field_signed(field->type);
803  out:
804 	return field;
805  free:
806 	free_synth_field(field);
807 	field = ERR_PTR(ret);
808 	goto out;
809 }
810 
811 static void free_synth_tracepoint(struct tracepoint *tp)
812 {
813 	if (!tp)
814 		return;
815 
816 	kfree(tp->name);
817 	kfree(tp);
818 }
819 
820 static struct tracepoint *alloc_synth_tracepoint(char *name)
821 {
822 	struct tracepoint *tp;
823 
824 	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
825 	if (!tp)
826 		return ERR_PTR(-ENOMEM);
827 
828 	tp->name = kstrdup(name, GFP_KERNEL);
829 	if (!tp->name) {
830 		kfree(tp);
831 		return ERR_PTR(-ENOMEM);
832 	}
833 
834 	return tp;
835 }
836 
837 struct synth_event *find_synth_event(const char *name)
838 {
839 	struct dyn_event *pos;
840 	struct synth_event *event;
841 
842 	for_each_dyn_event(pos) {
843 		if (!is_synth_event(pos))
844 			continue;
845 		event = to_synth_event(pos);
846 		if (strcmp(event->name, name) == 0)
847 			return event;
848 	}
849 
850 	return NULL;
851 }
852 
853 static struct trace_event_fields synth_event_fields_array[] = {
854 	{ .type = TRACE_FUNCTION_TYPE,
855 	  .define_fields = synth_event_define_fields },
856 	{}
857 };
858 
859 static int register_synth_event(struct synth_event *event)
860 {
861 	struct trace_event_call *call = &event->call;
862 	int ret = 0;
863 
864 	event->call.class = &event->class;
865 	event->class.system = kstrdup(SYNTH_SYSTEM, GFP_KERNEL);
866 	if (!event->class.system) {
867 		ret = -ENOMEM;
868 		goto out;
869 	}
870 
871 	event->tp = alloc_synth_tracepoint(event->name);
872 	if (IS_ERR(event->tp)) {
873 		ret = PTR_ERR(event->tp);
874 		event->tp = NULL;
875 		goto out;
876 	}
877 
878 	INIT_LIST_HEAD(&call->class->fields);
879 	call->event.funcs = &synth_event_funcs;
880 	call->class->fields_array = synth_event_fields_array;
881 
882 	ret = register_trace_event(&call->event);
883 	if (!ret) {
884 		ret = -ENODEV;
885 		goto out;
886 	}
887 	call->flags = TRACE_EVENT_FL_TRACEPOINT;
888 	call->class->reg = trace_event_reg;
889 	call->class->probe = trace_event_raw_event_synth;
890 	call->data = event;
891 	call->tp = event->tp;
892 
893 	ret = trace_add_event_call(call);
894 	if (ret) {
895 		pr_warn("Failed to register synthetic event: %s\n",
896 			trace_event_name(call));
897 		goto err;
898 	}
899 
900 	ret = set_synth_event_print_fmt(call);
901 	/* unregister_trace_event() will be called inside */
902 	if (ret < 0)
903 		trace_remove_event_call(call);
904  out:
905 	return ret;
906  err:
907 	unregister_trace_event(&call->event);
908 	goto out;
909 }
910 
911 static int unregister_synth_event(struct synth_event *event)
912 {
913 	struct trace_event_call *call = &event->call;
914 	int ret;
915 
916 	ret = trace_remove_event_call(call);
917 
918 	return ret;
919 }
920 
921 static void free_synth_event(struct synth_event *event)
922 {
923 	unsigned int i;
924 
925 	if (!event)
926 		return;
927 
928 	for (i = 0; i < event->n_fields; i++)
929 		free_synth_field(event->fields[i]);
930 
931 	kfree(event->fields);
932 	kfree(event->dynamic_fields);
933 	kfree(event->name);
934 	kfree(event->class.system);
935 	free_synth_tracepoint(event->tp);
936 	free_synth_event_print_fmt(&event->call);
937 	kfree(event);
938 }
939 
940 static struct synth_event *alloc_synth_event(const char *name, int n_fields,
941 					     struct synth_field **fields)
942 {
943 	unsigned int i, j, n_dynamic_fields = 0;
944 	struct synth_event *event;
945 
946 	event = kzalloc(sizeof(*event), GFP_KERNEL);
947 	if (!event) {
948 		event = ERR_PTR(-ENOMEM);
949 		goto out;
950 	}
951 
952 	event->name = kstrdup(name, GFP_KERNEL);
953 	if (!event->name) {
954 		kfree(event);
955 		event = ERR_PTR(-ENOMEM);
956 		goto out;
957 	}
958 
959 	event->fields = kcalloc(n_fields, sizeof(*event->fields), GFP_KERNEL);
960 	if (!event->fields) {
961 		free_synth_event(event);
962 		event = ERR_PTR(-ENOMEM);
963 		goto out;
964 	}
965 
966 	for (i = 0; i < n_fields; i++)
967 		if (fields[i]->is_dynamic)
968 			n_dynamic_fields++;
969 
970 	if (n_dynamic_fields) {
971 		event->dynamic_fields = kcalloc(n_dynamic_fields,
972 						sizeof(*event->dynamic_fields),
973 						GFP_KERNEL);
974 		if (!event->dynamic_fields) {
975 			free_synth_event(event);
976 			event = ERR_PTR(-ENOMEM);
977 			goto out;
978 		}
979 	}
980 
981 	dyn_event_init(&event->devent, &synth_event_ops);
982 
983 	for (i = 0, j = 0; i < n_fields; i++) {
984 		fields[i]->field_pos = i;
985 		event->fields[i] = fields[i];
986 
987 		if (fields[i]->is_dynamic)
988 			event->dynamic_fields[j++] = fields[i];
989 	}
990 	event->n_dynamic_fields = j;
991 	event->n_fields = n_fields;
992  out:
993 	return event;
994 }
995 
996 static int synth_event_check_arg_fn(void *data)
997 {
998 	struct dynevent_arg_pair *arg_pair = data;
999 	int size;
1000 
1001 	size = synth_field_size((char *)arg_pair->lhs);
1002 	if (size == 0) {
1003 		if (strstr((char *)arg_pair->lhs, "["))
1004 			return 0;
1005 	}
1006 
1007 	return size ? 0 : -EINVAL;
1008 }
1009 
1010 /**
1011  * synth_event_add_field - Add a new field to a synthetic event cmd
1012  * @cmd: A pointer to the dynevent_cmd struct representing the new event
1013  * @type: The type of the new field to add
1014  * @name: The name of the new field to add
1015  *
1016  * Add a new field to a synthetic event cmd object.  Field ordering is in
1017  * the same order the fields are added.
1018  *
1019  * See synth_field_size() for available types. If field_name contains
1020  * [n] the field is considered to be an array.
1021  *
1022  * Return: 0 if successful, error otherwise.
1023  */
1024 int synth_event_add_field(struct dynevent_cmd *cmd, const char *type,
1025 			  const char *name)
1026 {
1027 	struct dynevent_arg_pair arg_pair;
1028 	int ret;
1029 
1030 	if (cmd->type != DYNEVENT_TYPE_SYNTH)
1031 		return -EINVAL;
1032 
1033 	if (!type || !name)
1034 		return -EINVAL;
1035 
1036 	dynevent_arg_pair_init(&arg_pair, 0, ';');
1037 
1038 	arg_pair.lhs = type;
1039 	arg_pair.rhs = name;
1040 
1041 	ret = dynevent_arg_pair_add(cmd, &arg_pair, synth_event_check_arg_fn);
1042 	if (ret)
1043 		return ret;
1044 
1045 	if (++cmd->n_fields > SYNTH_FIELDS_MAX)
1046 		ret = -EINVAL;
1047 
1048 	return ret;
1049 }
1050 EXPORT_SYMBOL_GPL(synth_event_add_field);
1051 
1052 /**
1053  * synth_event_add_field_str - Add a new field to a synthetic event cmd
1054  * @cmd: A pointer to the dynevent_cmd struct representing the new event
1055  * @type_name: The type and name of the new field to add, as a single string
1056  *
1057  * Add a new field to a synthetic event cmd object, as a single
1058  * string.  The @type_name string is expected to be of the form 'type
1059  * name', which will be appended by ';'.  No sanity checking is done -
1060  * what's passed in is assumed to already be well-formed.  Field
1061  * ordering is in the same order the fields are added.
1062  *
1063  * See synth_field_size() for available types. If field_name contains
1064  * [n] the field is considered to be an array.
1065  *
1066  * Return: 0 if successful, error otherwise.
1067  */
1068 int synth_event_add_field_str(struct dynevent_cmd *cmd, const char *type_name)
1069 {
1070 	struct dynevent_arg arg;
1071 	int ret;
1072 
1073 	if (cmd->type != DYNEVENT_TYPE_SYNTH)
1074 		return -EINVAL;
1075 
1076 	if (!type_name)
1077 		return -EINVAL;
1078 
1079 	dynevent_arg_init(&arg, ';');
1080 
1081 	arg.str = type_name;
1082 
1083 	ret = dynevent_arg_add(cmd, &arg, NULL);
1084 	if (ret)
1085 		return ret;
1086 
1087 	if (++cmd->n_fields > SYNTH_FIELDS_MAX)
1088 		ret = -EINVAL;
1089 
1090 	return ret;
1091 }
1092 EXPORT_SYMBOL_GPL(synth_event_add_field_str);
1093 
1094 /**
1095  * synth_event_add_fields - Add multiple fields to a synthetic event cmd
1096  * @cmd: A pointer to the dynevent_cmd struct representing the new event
1097  * @fields: An array of type/name field descriptions
1098  * @n_fields: The number of field descriptions contained in the fields array
1099  *
1100  * Add a new set of fields to a synthetic event cmd object.  The event
1101  * fields that will be defined for the event should be passed in as an
1102  * array of struct synth_field_desc, and the number of elements in the
1103  * array passed in as n_fields.  Field ordering will retain the
1104  * ordering given in the fields array.
1105  *
1106  * See synth_field_size() for available types. If field_name contains
1107  * [n] the field is considered to be an array.
1108  *
1109  * Return: 0 if successful, error otherwise.
1110  */
1111 int synth_event_add_fields(struct dynevent_cmd *cmd,
1112 			   struct synth_field_desc *fields,
1113 			   unsigned int n_fields)
1114 {
1115 	unsigned int i;
1116 	int ret = 0;
1117 
1118 	for (i = 0; i < n_fields; i++) {
1119 		if (fields[i].type == NULL || fields[i].name == NULL) {
1120 			ret = -EINVAL;
1121 			break;
1122 		}
1123 
1124 		ret = synth_event_add_field(cmd, fields[i].type, fields[i].name);
1125 		if (ret)
1126 			break;
1127 	}
1128 
1129 	return ret;
1130 }
1131 EXPORT_SYMBOL_GPL(synth_event_add_fields);
1132 
1133 /**
1134  * __synth_event_gen_cmd_start - Start a synthetic event command from arg list
1135  * @cmd: A pointer to the dynevent_cmd struct representing the new event
1136  * @name: The name of the synthetic event
1137  * @mod: The module creating the event, NULL if not created from a module
1138  * @args: Variable number of arg (pairs), one pair for each field
1139  *
1140  * NOTE: Users normally won't want to call this function directly, but
1141  * rather use the synth_event_gen_cmd_start() wrapper, which
1142  * automatically adds a NULL to the end of the arg list.  If this
1143  * function is used directly, make sure the last arg in the variable
1144  * arg list is NULL.
1145  *
1146  * Generate a synthetic event command to be executed by
1147  * synth_event_gen_cmd_end().  This function can be used to generate
1148  * the complete command or only the first part of it; in the latter
1149  * case, synth_event_add_field(), synth_event_add_field_str(), or
1150  * synth_event_add_fields() can be used to add more fields following
1151  * this.
1152  *
1153  * There should be an even number of variable args, each pair consisting
1154  * of a type followed by a field name.
1155  *
1156  * See synth_field_size() for available types. If field_name contains
1157  * [n] the field is considered to be an array.
1158  *
1159  * Return: 0 if successful, error otherwise.
1160  */
1161 int __synth_event_gen_cmd_start(struct dynevent_cmd *cmd, const char *name,
1162 				struct module *mod, ...)
1163 {
1164 	struct dynevent_arg arg;
1165 	va_list args;
1166 	int ret;
1167 
1168 	cmd->event_name = name;
1169 	cmd->private_data = mod;
1170 
1171 	if (cmd->type != DYNEVENT_TYPE_SYNTH)
1172 		return -EINVAL;
1173 
1174 	dynevent_arg_init(&arg, 0);
1175 	arg.str = name;
1176 	ret = dynevent_arg_add(cmd, &arg, NULL);
1177 	if (ret)
1178 		return ret;
1179 
1180 	va_start(args, mod);
1181 	for (;;) {
1182 		const char *type, *name;
1183 
1184 		type = va_arg(args, const char *);
1185 		if (!type)
1186 			break;
1187 		name = va_arg(args, const char *);
1188 		if (!name)
1189 			break;
1190 
1191 		if (++cmd->n_fields > SYNTH_FIELDS_MAX) {
1192 			ret = -EINVAL;
1193 			break;
1194 		}
1195 
1196 		ret = synth_event_add_field(cmd, type, name);
1197 		if (ret)
1198 			break;
1199 	}
1200 	va_end(args);
1201 
1202 	return ret;
1203 }
1204 EXPORT_SYMBOL_GPL(__synth_event_gen_cmd_start);
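/*
 * Illustrative sketch of the command flow described above, using the
 * synth_event_gen_cmd_start() wrapper (the "gen_synth" event and its
 * fields are hypothetical; error handling abbreviated):
 *
 *	struct dynevent_cmd cmd;
 *	char *buf;
 *	int ret;
 *
 *	buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
 *
 *	ret = synth_event_gen_cmd_start(&cmd, "gen_synth", THIS_MODULE,
 *					"pid_t", "pid", "u64", "lat_ns");
 *	if (!ret)
 *		ret = synth_event_add_field_str(&cmd, "char msg[16]");
 *	if (!ret)
 *		ret = synth_event_gen_cmd_end(&cmd);
 *	kfree(buf);
 */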
1205 
1206 /**
1207  * synth_event_gen_cmd_array_start - Start synthetic event command from an array
1208  * @cmd: A pointer to the dynevent_cmd struct representing the new event
1209  * @name: The name of the synthetic event
1210  * @fields: An array of type/name field descriptions
1211  * @n_fields: The number of field descriptions contained in the fields array
1212  *
1213  * Generate a synthetic event command to be executed by
1214  * synth_event_gen_cmd_end().  This function can be used to generate
1215  * the complete command or only the first part of it; in the latter
1216  * case, synth_event_add_field(), synth_event_add_field_str(), or
1217  * synth_event_add_fields() can be used to add more fields following
1218  * this.
1219  *
1220  * The event fields that will be defined for the event should be
1221  * passed in as an array of struct synth_field_desc, and the number of
1222  * elements in the array passed in as n_fields.  Field ordering will
1223  * retain the ordering given in the fields array.
1224  *
1225  * See synth_field_size() for available types. If field_name contains
1226  * [n] the field is considered to be an array.
1227  *
1228  * Return: 0 if successful, error otherwise.
1229  */
1230 int synth_event_gen_cmd_array_start(struct dynevent_cmd *cmd, const char *name,
1231 				    struct module *mod,
1232 				    struct synth_field_desc *fields,
1233 				    unsigned int n_fields)
1234 {
1235 	struct dynevent_arg arg;
1236 	unsigned int i;
1237 	int ret = 0;
1238 
1239 	cmd->event_name = name;
1240 	cmd->private_data = mod;
1241 
1242 	if (cmd->type != DYNEVENT_TYPE_SYNTH)
1243 		return -EINVAL;
1244 
1245 	if (n_fields > SYNTH_FIELDS_MAX)
1246 		return -EINVAL;
1247 
1248 	dynevent_arg_init(&arg, 0);
1249 	arg.str = name;
1250 	ret = dynevent_arg_add(cmd, &arg, NULL);
1251 	if (ret)
1252 		return ret;
1253 
1254 	for (i = 0; i < n_fields; i++) {
1255 		if (fields[i].type == NULL || fields[i].name == NULL)
1256 			return -EINVAL;
1257 
1258 		ret = synth_event_add_field(cmd, fields[i].type, fields[i].name);
1259 		if (ret)
1260 			break;
1261 	}
1262 
1263 	return ret;
1264 }
1265 EXPORT_SYMBOL_GPL(synth_event_gen_cmd_array_start);
1266 
1267 static int __create_synth_event(const char *name, const char *raw_fields)
1268 {
1269 	char **argv, *field_str, *tmp_fields, *saved_fields = NULL;
1270 	struct synth_field *field, *fields[SYNTH_FIELDS_MAX];
1271 	int consumed, cmd_version = 1, n_fields_this_loop;
1272 	int i, argc, n_fields = 0, ret = 0;
1273 	struct synth_event *event = NULL;
1274 
1275 	/*
1276 	 * Argument syntax:
1277 	 *  - Add synthetic event: <event_name> field[;field] ...
1278 	 *  - Remove synthetic event: !<event_name> field[;field] ...
1279 	 *      where 'field' = type field_name
1280 	 */
1281 
1282 	if (name[0] == '\0') {
1283 		synth_err(SYNTH_ERR_INVALID_CMD, 0);
1284 		return -EINVAL;
1285 	}
1286 
1287 	if (!is_good_name(name)) {
1288 		synth_err(SYNTH_ERR_BAD_NAME, errpos(name));
1289 		return -EINVAL;
1290 	}
1291 
1292 	mutex_lock(&event_mutex);
1293 
1294 	event = find_synth_event(name);
1295 	if (event) {
1296 		synth_err(SYNTH_ERR_EVENT_EXISTS, errpos(name));
1297 		ret = -EEXIST;
1298 		goto err;
1299 	}
1300 
1301 	tmp_fields = saved_fields = kstrdup(raw_fields, GFP_KERNEL);
1302 	if (!tmp_fields) {
1303 		ret = -ENOMEM;
1304 		goto err;
1305 	}
1306 
1307 	while ((field_str = strsep(&tmp_fields, ";")) != NULL) {
1308 		argv = argv_split(GFP_KERNEL, field_str, &argc);
1309 		if (!argv) {
1310 			ret = -ENOMEM;
1311 			goto err;
1312 		}
1313 
1314 		if (!argc) {
1315 			argv_free(argv);
1316 			continue;
1317 		}
1318 
1319 		n_fields_this_loop = 0;
1320 		consumed = 0;
1321 		while (argc > consumed) {
1322 			int field_version;
1323 
1324 			field = parse_synth_field(argc - consumed,
1325 						  argv + consumed, &consumed,
1326 						  &field_version);
1327 			if (IS_ERR(field)) {
1328 				argv_free(argv);
1329 				ret = PTR_ERR(field);
1330 				goto err;
1331 			}
1332 
1333 			/*
1334 			 * Track the highest version of any field we
1335 			 * found in the command.
1336 			 */
1337 			if (field_version > cmd_version)
1338 				cmd_version = field_version;
1339 
1340 			/*
1341 			 * Now sort out what is and isn't valid for
1342 			 * each supported version.
1343 			 *
1344 			 * If we see more than 1 field per loop, it
1345 			 * means we have multiple fields between
1346 			 * semicolons, and that's something we no
1347 			 * longer support in a version 2 or greater
1348 			 * command.
1349 			 */
1350 			if (cmd_version > 1 && n_fields_this_loop >= 1) {
1351 				synth_err(SYNTH_ERR_INVALID_CMD, errpos(field_str));
1352 				ret = -EINVAL;
1353 				goto err;
1354 			}
1355 
1356 			if (n_fields == SYNTH_FIELDS_MAX) {
1357 				synth_err(SYNTH_ERR_TOO_MANY_FIELDS, 0);
1358 				ret = -EINVAL;
1359 				goto err;
1360 			}
1361 			fields[n_fields++] = field;
1362 
1363 			n_fields_this_loop++;
1364 		}
1365 
1366 		if (consumed < argc) {
1367 			synth_err(SYNTH_ERR_INVALID_CMD, 0);
1368 			ret = -EINVAL;
1369 			goto err;
1370 		}
1371 
1372 		argv_free(argv);
1373 	}
1374 
1375 	if (n_fields == 0) {
1376 		synth_err(SYNTH_ERR_INVALID_CMD, 0);
1377 		ret = -EINVAL;
1378 		goto err;
1379 	}
1380 
1381 	event = alloc_synth_event(name, n_fields, fields);
1382 	if (IS_ERR(event)) {
1383 		ret = PTR_ERR(event);
1384 		event = NULL;
1385 		goto err;
1386 	}
1387 	ret = register_synth_event(event);
1388 	if (!ret)
1389 		dyn_event_add(&event->devent, &event->call);
1390 	else
1391 		free_synth_event(event);
1392  out:
1393 	mutex_unlock(&event_mutex);
1394 
1395 	kfree(saved_fields);
1396 
1397 	return ret;
1398  err:
1399 	for (i = 0; i < n_fields; i++)
1400 		free_synth_field(fields[i]);
1401 
1402 	goto out;
1403 }
1404 
1405 /**
1406  * synth_event_create - Create a new synthetic event
1407  * @name: The name of the new synthetic event
1408  * @fields: An array of type/name field descriptions
1409  * @n_fields: The number of field descriptions contained in the fields array
1410  * @mod: The module creating the event, NULL if not created from a module
1411  *
1412  * Create a new synthetic event with the given name under the
1413  * trace/events/synthetic/ directory.  The event fields that will be
1414  * defined for the event should be passed in as an array of struct
1415  * synth_field_desc, and the number of elements in the array passed in as
1416  * n_fields. Field ordering will retain the ordering given in the
1417  * fields array.
1418  *
1419  * If the new synthetic event is being created from a module, the mod
1420  * param must be non-NULL.  This will ensure that the trace buffer
1421  * won't contain unreadable events.
1422  *
1423  * The new synth event should be deleted using synth_event_delete().
1424  * The new synthetic event can be generated from modules or other
1425  * kernel code using synth_event_trace() and related functions.
1426  *
1427  * Return: 0 if successful, error otherwise.
1428  */
1429 int synth_event_create(const char *name, struct synth_field_desc *fields,
1430 		       unsigned int n_fields, struct module *mod)
1431 {
1432 	struct dynevent_cmd cmd;
1433 	char *buf;
1434 	int ret;
1435 
1436 	buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
1437 	if (!buf)
1438 		return -ENOMEM;
1439 
1440 	synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
1441 
1442 	ret = synth_event_gen_cmd_array_start(&cmd, name, mod,
1443 					      fields, n_fields);
1444 	if (ret)
1445 		goto out;
1446 
1447 	ret = synth_event_gen_cmd_end(&cmd);
1448  out:
1449 	kfree(buf);
1450 
1451 	return ret;
1452 }
1453 EXPORT_SYMBOL_GPL(synth_event_create);
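/*
 * Minimal usage sketch for the array-based API above (the "sched_lat"
 * event and its fields are hypothetical):
 *
 *	static struct synth_field_desc sched_lat_fields[] = {
 *		{ .type = "pid_t",	.name = "pid" },
 *		{ .type = "u64",	.name = "lat_ns" },
 *		{ .type = "char",	.name = "comm[16]" },
 *	};
 *
 *	ret = synth_event_create("sched_lat", sched_lat_fields,
 *				 ARRAY_SIZE(sched_lat_fields), THIS_MODULE);
 *	...
 *	ret = synth_event_delete("sched_lat");
 */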
1454 
1455 static int destroy_synth_event(struct synth_event *se)
1456 {
1457 	int ret;
1458 
1459 	if (se->ref)
1460 		return -EBUSY;
1461 
1462 	if (trace_event_dyn_busy(&se->call))
1463 		return -EBUSY;
1464 
1465 	ret = unregister_synth_event(se);
1466 	if (!ret) {
1467 		dyn_event_remove(&se->devent);
1468 		free_synth_event(se);
1469 	}
1470 
1471 	return ret;
1472 }
1473 
1474 /**
1475  * synth_event_delete - Delete a synthetic event
1476  * @event_name: The name of the synthetic event to delete
1477  *
1478  * Delete a synthetic event that was created with synth_event_create().
1479  *
1480  * Return: 0 if successful, error otherwise.
1481  */
1482 int synth_event_delete(const char *event_name)
1483 {
1484 	struct synth_event *se = NULL;
1485 	struct module *mod = NULL;
1486 	int ret = -ENOENT;
1487 
1488 	mutex_lock(&event_mutex);
1489 	se = find_synth_event(event_name);
1490 	if (se) {
1491 		mod = se->mod;
1492 		ret = destroy_synth_event(se);
1493 	}
1494 	mutex_unlock(&event_mutex);
1495 
1496 	if (mod) {
1497 		/*
1498 		 * It is safest to reset the ring buffer if the module
1499 		 * being unloaded registered any events that were
1500 		 * used. The only worry is if a new module gets
1501 		 * loaded, and takes on the same id as the events of
1502 		 * this module. When printing out the buffer, traced
1503 		 * events left over from this module may be passed to
1504 		 * the new module events and unexpected results may
1505 		 * occur.
1506 		 */
1507 		tracing_reset_all_online_cpus();
1508 	}
1509 
1510 	return ret;
1511 }
1512 EXPORT_SYMBOL_GPL(synth_event_delete);
1513 
1514 static int check_command(const char *raw_command)
1515 {
1516 	char **argv = NULL, *cmd, *saved_cmd, *name_and_field;
1517 	int argc, ret = 0;
1518 
1519 	cmd = saved_cmd = kstrdup(raw_command, GFP_KERNEL);
1520 	if (!cmd)
1521 		return -ENOMEM;
1522 
1523 	name_and_field = strsep(&cmd, ";");
1524 	if (!name_and_field) {
1525 		ret = -EINVAL;
1526 		goto free;
1527 	}
1528 
1529 	if (name_and_field[0] == '!')
1530 		goto free;
1531 
1532 	argv = argv_split(GFP_KERNEL, name_and_field, &argc);
1533 	if (!argv) {
1534 		ret = -ENOMEM;
1535 		goto free;
1536 	}
1537 	argv_free(argv);
1538 
1539 	if (argc < 3)
1540 		ret = -EINVAL;
1541 free:
1542 	kfree(saved_cmd);
1543 
1544 	return ret;
1545 }
1546 
1547 static int create_or_delete_synth_event(const char *raw_command)
1548 {
1549 	char *name = NULL, *fields, *p;
1550 	int ret = 0;
1551 
1552 	raw_command = skip_spaces(raw_command);
1553 	if (raw_command[0] == '\0')
1554 		return ret;
1555 
1556 	last_cmd_set(raw_command);
1557 
1558 	ret = check_command(raw_command);
1559 	if (ret) {
1560 		synth_err(SYNTH_ERR_INVALID_CMD, 0);
1561 		return ret;
1562 	}
1563 
1564 	p = strpbrk(raw_command, " \t");
1565 	if (!p && raw_command[0] != '!') {
1566 		synth_err(SYNTH_ERR_INVALID_CMD, 0);
1567 		ret = -EINVAL;
1568 		goto free;
1569 	}
1570 
1571 	name = kmemdup_nul(raw_command, p ? p - raw_command : strlen(raw_command), GFP_KERNEL);
1572 	if (!name)
1573 		return -ENOMEM;
1574 
1575 	if (name[0] == '!') {
1576 		ret = synth_event_delete(name + 1);
1577 		goto free;
1578 	}
1579 
1580 	fields = skip_spaces(p);
1581 
1582 	ret = __create_synth_event(name, fields);
1583 free:
1584 	kfree(name);
1585 
1586 	return ret;
1587 }
1588 
1589 static int synth_event_run_command(struct dynevent_cmd *cmd)
1590 {
1591 	struct synth_event *se;
1592 	int ret;
1593 
1594 	ret = create_or_delete_synth_event(cmd->seq.buffer);
1595 	if (ret)
1596 		return ret;
1597 
1598 	se = find_synth_event(cmd->event_name);
1599 	if (WARN_ON(!se))
1600 		return -ENOENT;
1601 
1602 	se->mod = cmd->private_data;
1603 
1604 	return ret;
1605 }
1606 
1607 /**
1608  * synth_event_cmd_init - Initialize a synthetic event command object
1609  * @cmd: A pointer to the dynevent_cmd struct representing the new event
1610  * @buf: A pointer to the buffer used to build the command
1611  * @maxlen: The length of the buffer passed in @buf
1612  *
1613  * Initialize a synthetic event command object.  Use this before
1614  * calling any of the other dynevent_cmd functions.
1615  */
1616 void synth_event_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen)
1617 {
1618 	dynevent_cmd_init(cmd, buf, maxlen, DYNEVENT_TYPE_SYNTH,
1619 			  synth_event_run_command);
1620 }
1621 EXPORT_SYMBOL_GPL(synth_event_cmd_init);
1622 
1623 static inline int
1624 __synth_event_trace_init(struct trace_event_file *file,
1625 			 struct synth_event_trace_state *trace_state)
1626 {
1627 	int ret = 0;
1628 
1629 	memset(trace_state, '\0', sizeof(*trace_state));
1630 
1631 	/*
1632 	 * Normal event tracing doesn't get called at all unless the
1633 	 * ENABLED bit is set (which attaches the probe thus allowing
1634 	 * this code to be called, etc).  Because this is called
1635 	 * directly by the user, we don't have that but we still need
1636 	 * to honor not logging when disabled.  For the iterated
1637 	 * trace case, we save the enabled state upon start and just
1638 	 * ignore the following data calls.
1639 	 */
1640 	if (!(file->flags & EVENT_FILE_FL_ENABLED) ||
1641 	    trace_trigger_soft_disabled(file)) {
1642 		trace_state->disabled = true;
1643 		ret = -ENOENT;
1644 		goto out;
1645 	}
1646 
1647 	trace_state->event = file->event_call->data;
1648 out:
1649 	return ret;
1650 }
1651 
1652 static inline int
1653 __synth_event_trace_start(struct trace_event_file *file,
1654 			  struct synth_event_trace_state *trace_state,
1655 			  int dynamic_fields_size)
1656 {
1657 	int entry_size, fields_size = 0;
1658 	int ret = 0;
1659 
1660 	fields_size = trace_state->event->n_u64 * sizeof(u64);
1661 	fields_size += dynamic_fields_size;
1662 
1663 	/*
1664 	 * Avoid ring buffer recursion detection, as this event
1665 	 * is being performed within another event.
1666 	 */
1667 	trace_state->buffer = file->tr->array_buffer.buffer;
1668 	ring_buffer_nest_start(trace_state->buffer);
1669 
1670 	entry_size = sizeof(*trace_state->entry) + fields_size;
1671 	trace_state->entry = trace_event_buffer_reserve(&trace_state->fbuffer,
1672 							file,
1673 							entry_size);
1674 	if (!trace_state->entry) {
1675 		ring_buffer_nest_end(trace_state->buffer);
1676 		ret = -EINVAL;
1677 	}
1678 
1679 	return ret;
1680 }
1681 
1682 static inline void
1683 __synth_event_trace_end(struct synth_event_trace_state *trace_state)
1684 {
1685 	trace_event_buffer_commit(&trace_state->fbuffer);
1686 
1687 	ring_buffer_nest_end(trace_state->buffer);
1688 }
1689 
1690 /**
1691  * synth_event_trace - Trace a synthetic event
1692  * @file: The trace_event_file representing the synthetic event
1693  * @n_vals: The number of values in vals
1694  * @args: Variable number of args containing the event values
1695  *
1696  * Trace a synthetic event using the values passed in the variable
1697  * argument list.
1698  *
1699  * The argument list should be a list of 'n_vals' u64 values.  The number
1700  * of vals must match the number of fields in the synthetic event, and
1701  * must be in the same order as the synthetic event fields.
1702  *
1703  * All vals should be cast to u64, and string vals are just pointers
1704  * to strings, cast to u64.  Strings will be copied into space
1705  * reserved in the event for the string, using these pointers.
1706  *
1707  * Return: 0 on success, err otherwise.
1708  */
1709 int synth_event_trace(struct trace_event_file *file, unsigned int n_vals, ...)
1710 {
1711 	unsigned int i, n_u64, len, data_size = 0;
1712 	struct synth_event_trace_state state;
1713 	va_list args;
1714 	int ret;
1715 
1716 	ret = __synth_event_trace_init(file, &state);
1717 	if (ret) {
1718 		if (ret == -ENOENT)
1719 			ret = 0; /* just disabled, not really an error */
1720 		return ret;
1721 	}
1722 
1723 	if (state.event->n_dynamic_fields) {
1724 		va_start(args, n_vals);
1725 
1726 		for (i = 0; i < state.event->n_fields; i++) {
1727 			u64 val = va_arg(args, u64);
1728 
1729 			if (state.event->fields[i]->is_string &&
1730 			    state.event->fields[i]->is_dynamic) {
1731 				char *str_val = (char *)(long)val;
1732 
1733 				data_size += strlen(str_val) + 1;
1734 			}
1735 		}
1736 
1737 		va_end(args);
1738 	}
1739 
1740 	ret = __synth_event_trace_start(file, &state, data_size);
1741 	if (ret)
1742 		return ret;
1743 
1744 	if (n_vals != state.event->n_fields) {
1745 		ret = -EINVAL;
1746 		goto out;
1747 	}
1748 
1749 	data_size = 0;
1750 
1751 	va_start(args, n_vals);
1752 	for (i = 0, n_u64 = 0; i < state.event->n_fields; i++) {
1753 		u64 val;
1754 
1755 		val = va_arg(args, u64);
1756 
1757 		if (state.event->fields[i]->is_string) {
1758 			char *str_val = (char *)(long)val;
1759 
1760 			len = trace_string(state.entry, state.event, str_val,
1761 					   state.event->fields[i]->is_dynamic,
1762 					   data_size, &n_u64);
1763 			data_size += len; /* only dynamic string increments */
1764 		} else {
1765 			struct synth_field *field = state.event->fields[i];
1766 
1767 			switch (field->size) {
1768 			case 1:
1769 				*(u8 *)&state.entry->fields[n_u64] = (u8)val;
1770 				break;
1771 
1772 			case 2:
1773 				*(u16 *)&state.entry->fields[n_u64] = (u16)val;
1774 				break;
1775 
1776 			case 4:
1777 				*(u32 *)&state.entry->fields[n_u64] = (u32)val;
1778 				break;
1779 
1780 			default:
1781 				state.entry->fields[n_u64] = val;
1782 				break;
1783 			}
1784 			n_u64++;
1785 		}
1786 	}
1787 	va_end(args);
1788 out:
1789 	__synth_event_trace_end(&state);
1790 
1791 	return ret;
1792 }
1793 EXPORT_SYMBOL_GPL(synth_event_trace);
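/*
 * Illustrative call, assuming a synthetic event with fields
 * (pid_t pid; u64 lat_ns; char comm[16]) and a trace_event_file looked
 * up beforehand, e.g. with trace_get_event_file() (names are
 * hypothetical):
 *
 *	ret = synth_event_trace(file, 3,
 *				(u64)current->pid,
 *				(u64)lat_ns,
 *				(u64)(long)current->comm);
 */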
1794 
1795 /**
1796  * synth_event_trace_array - Trace a synthetic event from an array
1797  * @file: The trace_event_file representing the synthetic event
1798  * @vals: Array of values
1799  * @n_vals: The number of values in vals
1800  *
1801  * Trace a synthetic event using the values passed in as 'vals'.
1802  *
1803  * The 'vals' array is just an array of 'n_vals' u64.  The number of
1804  * vals must match the number of fields in the synthetic event, and
1805  * must be in the same order as the synthetic event fields.
1806  *
1807  * All vals should be cast to u64, and string vals are just pointers
1808  * to strings, cast to u64.  Strings will be copied into space
1809  * reserved in the event for the string, using these pointers.
1810  *
1811  * Return: 0 on success, err otherwise.
1812  */
1813 int synth_event_trace_array(struct trace_event_file *file, u64 *vals,
1814 			    unsigned int n_vals)
1815 {
1816 	unsigned int i, n_u64, field_pos, len, data_size = 0;
1817 	struct synth_event_trace_state state;
1818 	char *str_val;
1819 	int ret;
1820 
1821 	ret = __synth_event_trace_init(file, &state);
1822 	if (ret) {
1823 		if (ret == -ENOENT)
1824 			ret = 0; /* just disabled, not really an error */
1825 		return ret;
1826 	}
1827 
1828 	if (state.event->n_dynamic_fields) {
1829 		for (i = 0; i < state.event->n_dynamic_fields; i++) {
1830 			field_pos = state.event->dynamic_fields[i]->field_pos;
1831 			str_val = (char *)(long)vals[field_pos];
1832 			len = strlen(str_val) + 1;
1833 			data_size += len;
1834 		}
1835 	}
1836 
1837 	ret = __synth_event_trace_start(file, &state, data_size);
1838 	if (ret)
1839 		return ret;
1840 
1841 	if (n_vals != state.event->n_fields) {
1842 		ret = -EINVAL;
1843 		goto out;
1844 	}
1845 
1846 	data_size = 0;
1847 
1848 	for (i = 0, n_u64 = 0; i < state.event->n_fields; i++) {
1849 		if (state.event->fields[i]->is_string) {
1850 			char *str_val = (char *)(long)vals[i];
1851 
1852 			len = trace_string(state.entry, state.event, str_val,
1853 					   state.event->fields[i]->is_dynamic,
1854 					   data_size, &n_u64);
1855 			data_size += len; /* only dynamic string increments */
1856 		} else {
1857 			struct synth_field *field = state.event->fields[i];
1858 			u64 val = vals[i];
1859 
1860 			switch (field->size) {
1861 			case 1:
1862 				*(u8 *)&state.entry->fields[n_u64] = (u8)val;
1863 				break;
1864 
1865 			case 2:
1866 				*(u16 *)&state.entry->fields[n_u64] = (u16)val;
1867 				break;
1868 
1869 			case 4:
1870 				*(u32 *)&state.entry->fields[n_u64] = (u32)val;
1871 				break;
1872 
1873 			default:
1874 				state.entry->fields[n_u64] = val;
1875 				break;
1876 			}
1877 			n_u64++;
1878 		}
1879 	}
1880 out:
1881 	__synth_event_trace_end(&state);
1882 
1883 	return ret;
1884 }
1885 EXPORT_SYMBOL_GPL(synth_event_trace_array);
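/*
 * Example (illustrative sketch, not part of this file): tracing a
 * synthetic event from an array of values.  It assumes a synthetic
 * event named "example_synth" with the fields "u64 lat; pid_t pid;
 * char comm[16]" has already been created; the event name and field
 * layout are hypothetical.
 */
static int __maybe_unused example_synth_trace_array(void)
{
	struct trace_event_file *file;
	u64 vals[3];
	int ret;

	/* look up the trace_event_file for the synthetic event */
	file = trace_get_event_file(NULL, "synthetic", "example_synth");
	if (IS_ERR(file))
		return PTR_ERR(file);

	vals[0] = 500;			/* lat */
	vals[1] = 1234;			/* pid */
	vals[2] = (u64)(long)"cat";	/* comm: string passed as pointer */

	/* values must be in the same order as the event's fields */
	ret = synth_event_trace_array(file, vals, ARRAY_SIZE(vals));

	trace_put_event_file(file);

	return ret;
}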
1886 
1887 /**
1888  * synth_event_trace_start - Start piecewise synthetic event trace
1889  * @file: The trace_event_file representing the synthetic event
1890  * @trace_state: A pointer to object tracking the piecewise trace state
1891  *
1892  * Start the trace of a synthetic event field-by-field rather than all
1893  * at once.
1894  *
1895  * This function 'opens' an event trace, which means space is reserved
1896  * for the event in the trace buffer, after which the event's
1897  * individual field values can be set through either
1898  * synth_event_add_next_val() or synth_event_add_val().
1899  *
1900  * A pointer to a trace_state object is passed in, which will keep
1901  * track of the current event trace state until the event trace is
1902  * closed (and the event finally traced) using
1903  * synth_event_trace_end().
1904  *
1905  * Note that synth_event_trace_end() must be called after all values
1906  * have been added for each event trace, regardless of whether adding
1907  * all field values succeeded or not.
1908  *
1909  * Note also that for a given event trace, all fields must be added
1910  * using either synth_event_add_next_val() or synth_event_add_val()
1911  * but not both together or interleaved.
1912  *
1913  * Return: 0 on success, err otherwise.
1914  */
1915 int synth_event_trace_start(struct trace_event_file *file,
1916 			    struct synth_event_trace_state *trace_state)
1917 {
1918 	int ret;
1919 
1920 	if (!trace_state)
1921 		return -EINVAL;
1922 
1923 	ret = __synth_event_trace_init(file, trace_state);
1924 	if (ret) {
1925 		if (ret == -ENOENT)
1926 			ret = 0; /* just disabled, not really an error */
1927 		return ret;
1928 	}
1929 
1930 	if (trace_state->event->n_dynamic_fields)
1931 		return -ENOTSUPP;
1932 
1933 	ret = __synth_event_trace_start(file, trace_state, 0);
1934 
1935 	return ret;
1936 }
1937 EXPORT_SYMBOL_GPL(synth_event_trace_start);
1938 
1939 static int __synth_event_add_val(const char *field_name, u64 val,
1940 				 struct synth_event_trace_state *trace_state)
1941 {
1942 	struct synth_field *field = NULL;
1943 	struct synth_trace_event *entry;
1944 	struct synth_event *event;
1945 	int i, ret = 0;
1946 
1947 	if (!trace_state) {
1948 		ret = -EINVAL;
1949 		goto out;
1950 	}
1951 
1952 	/* can't mix synth_event_add_next_val() with synth_event_add_val() */
1953 	if (field_name) {
1954 		if (trace_state->add_next) {
1955 			ret = -EINVAL;
1956 			goto out;
1957 		}
1958 		trace_state->add_name = true;
1959 	} else {
1960 		if (trace_state->add_name) {
1961 			ret = -EINVAL;
1962 			goto out;
1963 		}
1964 		trace_state->add_next = true;
1965 	}
1966 
1967 	if (trace_state->disabled)
1968 		goto out;
1969 
1970 	event = trace_state->event;
1971 	if (trace_state->add_name) {
1972 		for (i = 0; i < event->n_fields; i++) {
1973 			field = event->fields[i];
1974 			if (strcmp(field->name, field_name) == 0)
1975 				break;
1976 		}
1977 		if (!field) {
1978 			ret = -EINVAL;
1979 			goto out;
1980 		}
1981 	} else {
1982 		if (trace_state->cur_field >= event->n_fields) {
1983 			ret = -EINVAL;
1984 			goto out;
1985 		}
1986 		field = event->fields[trace_state->cur_field++];
1987 	}
1988 
1989 	entry = trace_state->entry;
1990 	if (field->is_string) {
1991 		char *str_val = (char *)(long)val;
1992 		char *str_field;
1993 
1994 		if (field->is_dynamic) { /* add_val can't do dynamic strings */
1995 			ret = -EINVAL;
1996 			goto out;
1997 		}
1998 
1999 		if (!str_val) {
2000 			ret = -EINVAL;
2001 			goto out;
2002 		}
2003 
2004 		str_field = (char *)&entry->fields[field->offset];
2005 		strscpy(str_field, str_val, STR_VAR_LEN_MAX);
2006 	} else {
2007 		switch (field->size) {
2008 		case 1:
2009 			*(u8 *)&trace_state->entry->fields[field->offset] = (u8)val;
2010 			break;
2011 
2012 		case 2:
2013 			*(u16 *)&trace_state->entry->fields[field->offset] = (u16)val;
2014 			break;
2015 
2016 		case 4:
2017 			*(u32 *)&trace_state->entry->fields[field->offset] = (u32)val;
2018 			break;
2019 
2020 		default:
2021 			trace_state->entry->fields[field->offset] = val;
2022 			break;
2023 		}
2024 	}
2025  out:
2026 	return ret;
2027 }
2028 
2029 /**
2030  * synth_event_add_next_val - Add the next field's value to an open synth trace
2031  * @val: The value to set the next field to
2032  * @trace_state: A pointer to object tracking the piecewise trace state
2033  *
2034  * Set the value of the next field in an event that's been opened by
2035  * synth_event_trace_start().
2036  *
2037  * The val param should be the value cast to u64.  If the value points
2038  * to a string, the val param should be a char * cast to u64.
2039  *
2040  * This function assumes all the fields in an event are to be set one
2041  * after another - successive calls to this function are made, one for
2042  * each field, in the order of the fields in the event, until all
2043  * fields have been set.  If you'd rather set each field individually
2044  * without regard to ordering, synth_event_add_val() can be used
2045  * instead.
2046  *
2047  * Note however that synth_event_add_next_val() and
2048  * synth_event_add_val() can't be intermixed for a given event trace -
2049  * one or the other but not both can be used at the same time.
2050  *
2051  * Note also that synth_event_trace_end() must be called after all
2052  * values have been added for each event trace, regardless of whether
2053  * adding all field values succeeded or not.
2054  *
2055  * Return: 0 on success, err otherwise.
2056  */
2057 int synth_event_add_next_val(u64 val,
2058 			     struct synth_event_trace_state *trace_state)
2059 {
2060 	return __synth_event_add_val(NULL, val, trace_state);
2061 }
2062 EXPORT_SYMBOL_GPL(synth_event_add_next_val);
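/*
 * Example (illustrative sketch, not part of this file): a piecewise
 * trace using synth_event_trace_start(), synth_event_add_next_val()
 * and synth_event_trace_end().  The "example_synth" event and its
 * field layout "u64 lat; pid_t pid; char comm[16]" are hypothetical;
 * the piecewise interface requires that the event have no dynamic
 * string fields.
 */
static int __maybe_unused
example_synth_trace_piecewise(struct trace_event_file *file)
{
	struct synth_event_trace_state state;
	int ret;

	/* reserve space for the event in the trace buffer */
	ret = synth_event_trace_start(file, &state);
	if (ret)
		return ret;

	/* fields are set in declaration order, one call per field */
	ret = synth_event_add_next_val(500, &state);		/* lat */
	if (ret)
		goto out;
	ret = synth_event_add_next_val(1234, &state);		/* pid */
	if (ret)
		goto out;
	ret = synth_event_add_next_val((u64)(long)"cat", &state); /* comm */
out:
	/* must always be called, even if adding a value failed */
	synth_event_trace_end(&state);

	return ret;
}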
2063 
2064 /**
2065  * synth_event_add_val - Add a named field's value to an open synth trace
2066  * @field_name: The name of the synthetic event field value to set
2067  * @val: The value to set the named field to
2068  * @trace_state: A pointer to object tracking the piecewise trace state
2069  *
2070  * Set the value of the named field in an event that's been opened by
2071  * synth_event_trace_start().
2072  *
2073  * The val param should be the value cast to u64.  If the value points
2074  * to a string, the val param should be a char * cast to u64.
2075  *
2076  * This function looks up the field name, and if found, sets the field
2077  * to the specified value.  This lookup makes this function more
2078  * expensive than synth_event_add_next_val(), so use that or the
2079  * non-piecewise synth_event_trace() instead if efficiency is more
2080  * important.
2081  *
2082  * Note however that synth_event_add_next_val() and
2083  * synth_event_add_val() can't be intermixed for a given event trace -
2084  * one or the other but not both can be used at the same time.
2085  *
2086  * Note also that synth_event_trace_end() must be called after all
2087  * values have been added for each event trace, regardless of whether
2088  * adding all field values succeeded or not.
2089  *
2090  * Return: 0 on success, err otherwise.
2091  */
2092 int synth_event_add_val(const char *field_name, u64 val,
2093 			struct synth_event_trace_state *trace_state)
2094 {
2095 	return __synth_event_add_val(field_name, val, trace_state);
2096 }
2097 EXPORT_SYMBOL_GPL(synth_event_add_val);
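/*
 * Example (illustrative sketch, not part of this file): the same
 * piecewise trace as above, but setting fields by name with
 * synth_event_add_val().  The field names are hypothetical; by-name
 * and by-position calls must not be mixed within a single trace.
 */
static int __maybe_unused
example_synth_trace_by_name(struct trace_event_file *file)
{
	struct synth_event_trace_state state;
	int ret;

	ret = synth_event_trace_start(file, &state);
	if (ret)
		return ret;

	/* order doesn't matter when fields are addressed by name */
	ret = synth_event_add_val("pid", 1234, &state);
	if (!ret)
		ret = synth_event_add_val("lat", 500, &state);

	/* must always be called, regardless of earlier failures */
	synth_event_trace_end(&state);

	return ret;
}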
2098 
2099 /**
2100  * synth_event_trace_end - End piecewise synthetic event trace
2101  * @trace_state: A pointer to object tracking the piecewise trace state
2102  *
2103  * End the trace of a synthetic event opened by
2104  * synth_event_trace_start().
2105  *
2106  * This function 'closes' an event trace: it commits the reserved
2107  * event to the trace buffer and cleans up other loose ends.
2108  *
2109  * A pointer to a trace_state object is passed in, which will keep
2110  * track of the current event trace state opened with
2111  * synth_event_trace_start().
2112  *
2113  * Note that this function must be called after all values have been
2114  * added for each event trace, regardless of whether adding all field
2115  * values succeeded or not.
2116  *
2117  * Return: 0 on success, err otherwise.
2118  */
2119 int synth_event_trace_end(struct synth_event_trace_state *trace_state)
2120 {
2121 	if (!trace_state)
2122 		return -EINVAL;
2123 
2124 	__synth_event_trace_end(trace_state);
2125 
2126 	return 0;
2127 }
2128 EXPORT_SYMBOL_GPL(synth_event_trace_end);
2129 
2130 static int create_synth_event(const char *raw_command)
2131 {
2132 	char *fields, *p;
2133 	const char *name;
2134 	int len, ret = 0;
2135 
2136 	raw_command = skip_spaces(raw_command);
2137 	if (raw_command[0] == '\0')
2138 		return ret;
2139 
2140 	last_cmd_set(raw_command);
2141 
2142 	name = raw_command;
2143 
2144 	/* Don't try to process if not our system */
2145 	if (name[0] != 's' || name[1] != ':')
2146 		return -ECANCELED;
2147 	name += 2;
2148 
2149 	p = strpbrk(raw_command, " \t");
2150 	if (!p) {
2151 		synth_err(SYNTH_ERR_INVALID_CMD, 0);
2152 		return -EINVAL;
2153 	}
2154 
2155 	fields = skip_spaces(p);
2156 
2157 	/* This interface accepts a group name prefix */
2158 	if (strchr(name, '/')) {
2159 		len = str_has_prefix(name, SYNTH_SYSTEM "/");
2160 		if (len == 0) {
2161 			synth_err(SYNTH_ERR_INVALID_DYN_CMD, 0);
2162 			return -EINVAL;
2163 		}
2164 		name += len;
2165 	}
2166 
2167 	len = name - raw_command;
2168 
2169 	ret = check_command(raw_command + len);
2170 	if (ret) {
2171 		synth_err(SYNTH_ERR_INVALID_CMD, 0);
2172 		return ret;
2173 	}
2174 
2175 	name = kmemdup_nul(raw_command + len, p - raw_command - len, GFP_KERNEL);
2176 	if (!name)
2177 		return -ENOMEM;
2178 
2179 	ret = __create_synth_event(name, fields);
2180 
2181 	kfree(name);
2182 
2183 	return ret;
2184 }
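/*
 * Illustrative usage of the command format parsed above, as written to
 * the tracefs dynamic_events file (the event and field names are
 * hypothetical); removal via the '-:' prefix is handled by the
 * dyn_event core:
 *
 *   echo 's:example_synth u64 lat; pid_t pid; char comm[16]' >> dynamic_events
 *   echo 's:synthetic/example_synth u64 lat' >> dynamic_events
 *   echo '-:synthetic/example_synth' >> dynamic_events
 */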
2185 
2186 static int synth_event_release(struct dyn_event *ev)
2187 {
2188 	struct synth_event *event = to_synth_event(ev);
2189 	int ret;
2190 
2191 	if (event->ref)
2192 		return -EBUSY;
2193 
2194 	if (trace_event_dyn_busy(&event->call))
2195 		return -EBUSY;
2196 
2197 	ret = unregister_synth_event(event);
2198 	if (ret)
2199 		return ret;
2200 
2201 	dyn_event_remove(ev);
2202 	free_synth_event(event);
2203 	return 0;
2204 }
2205 
2206 static int __synth_event_show(struct seq_file *m, struct synth_event *event)
2207 {
2208 	struct synth_field *field;
2209 	unsigned int i;
2210 	char *type, *t;
2211 
2212 	seq_printf(m, "%s\t", event->name);
2213 
2214 	for (i = 0; i < event->n_fields; i++) {
2215 		field = event->fields[i];
2216 
2217 		type = field->type;
2218 		t = strstr(type, "__data_loc");
2219 		if (t) { /* __data_loc belongs in format but not event desc */
2220 			t += sizeof("__data_loc");
2221 			type = t;
2222 		}
2223 
2224 		/* parameter values */
2225 		seq_printf(m, "%s %s%s", type, field->name,
2226 			   i == event->n_fields - 1 ? "" : "; ");
2227 	}
2228 
2229 	seq_putc(m, '\n');
2230 
2231 	return 0;
2232 }
2233 
2234 static int synth_event_show(struct seq_file *m, struct dyn_event *ev)
2235 {
2236 	struct synth_event *event = to_synth_event(ev);
2237 
2238 	seq_printf(m, "s:%s/", event->class.system);
2239 
2240 	return __synth_event_show(m, event);
2241 }
2242 
2243 static int synth_events_seq_show(struct seq_file *m, void *v)
2244 {
2245 	struct dyn_event *ev = v;
2246 
2247 	if (!is_synth_event(ev))
2248 		return 0;
2249 
2250 	return __synth_event_show(m, to_synth_event(ev));
2251 }
2252 
2253 static const struct seq_operations synth_events_seq_op = {
2254 	.start	= dyn_event_seq_start,
2255 	.next	= dyn_event_seq_next,
2256 	.stop	= dyn_event_seq_stop,
2257 	.show	= synth_events_seq_show,
2258 };
2259 
2260 static int synth_events_open(struct inode *inode, struct file *file)
2261 {
2262 	int ret;
2263 
2264 	ret = security_locked_down(LOCKDOWN_TRACEFS);
2265 	if (ret)
2266 		return ret;
2267 
2268 	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
2269 		ret = dyn_events_release_all(&synth_event_ops);
2270 		if (ret < 0)
2271 			return ret;
2272 	}
2273 
2274 	return seq_open(file, &synth_events_seq_op);
2275 }
2276 
2277 static ssize_t synth_events_write(struct file *file,
2278 				  const char __user *buffer,
2279 				  size_t count, loff_t *ppos)
2280 {
2281 	return trace_parse_run_command(file, buffer, count, ppos,
2282 				       create_or_delete_synth_event);
2283 }
2284 
2285 static const struct file_operations synth_events_fops = {
2286 	.open           = synth_events_open,
2287 	.write		= synth_events_write,
2288 	.read           = seq_read,
2289 	.llseek         = seq_lseek,
2290 	.release        = seq_release,
2291 };
2292 
2293 /*
2294  * Register dynevent at core_initcall. This allows the kernel to set up
2295  * synthetic events in postcore_initcall without tracefs.
2296  */
2297 static __init int trace_events_synth_init_early(void)
2298 {
2299 	int err = 0;
2300 
2301 	err = dyn_event_register(&synth_event_ops);
2302 	if (err)
2303 		pr_warn("Could not register synth_event_ops\n");
2304 
2305 	return err;
2306 }
2307 core_initcall(trace_events_synth_init_early);
2308 
2309 static __init int trace_events_synth_init(void)
2310 {
2311 	struct dentry *entry = NULL;
2312 	int err = 0;
2313 	err = tracing_init_dentry();
2314 	if (err)
2315 		goto err;
2316 
2317 	entry = tracefs_create_file("synthetic_events", 0644, NULL,
2318 				    NULL, &synth_events_fops);
2319 	if (!entry) {
2320 		err = -ENODEV;
2321 		goto err;
2322 	}
2323 
2324 	return err;
2325  err:
2326 	pr_warn("Could not create tracefs 'synthetic_events' entry\n");
2327 
2328 	return err;
2329 }
2330 
2331 fs_initcall(trace_events_synth_init);
2332