1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * trace_events_hist - trace event hist triggers
4 *
5 * Copyright (C) 2015 Tom Zanussi <tom.zanussi@linux.intel.com>
6 */
7
8 #include <linux/module.h>
9 #include <linux/kallsyms.h>
10 #include <linux/security.h>
11 #include <linux/mutex.h>
12 #include <linux/slab.h>
13 #include <linux/stacktrace.h>
14 #include <linux/rculist.h>
15 #include <linux/tracefs.h>
16
17 /* for gfp flag names */
18 #include <linux/trace_events.h>
19 #include <trace/events/mmflags.h>
20
21 #include "tracing_map.h"
22 #include "trace.h"
23 #include "trace_dynevent.h"
24
25 #define SYNTH_SYSTEM "synthetic"
26 #define SYNTH_FIELDS_MAX 16
27
28 #define STR_VAR_LEN_MAX 32 /* must be multiple of sizeof(u64) */
29
30 #define ERRORS \
31 C(NONE, "No error"), \
32 C(DUPLICATE_VAR, "Variable already defined"), \
33 C(VAR_NOT_UNIQUE, "Variable name not unique, need to use fully qualified name (subsys.event.var) for variable"), \
34 C(TOO_MANY_VARS, "Too many variables defined"), \
35 C(MALFORMED_ASSIGNMENT, "Malformed assignment"), \
36 C(NAMED_MISMATCH, "Named hist trigger doesn't match existing named trigger (includes variables)"), \
37 C(TRIGGER_EEXIST, "Hist trigger already exists"), \
38 C(TRIGGER_ENOENT_CLEAR, "Can't clear or continue a nonexistent hist trigger"), \
39 C(SET_CLOCK_FAIL, "Couldn't set trace_clock"), \
40 C(BAD_FIELD_MODIFIER, "Invalid field modifier"), \
41 C(TOO_MANY_SUBEXPR, "Too many subexpressions (3 max)"), \
42 C(TIMESTAMP_MISMATCH, "Timestamp units in expression don't match"), \
43 C(TOO_MANY_FIELD_VARS, "Too many field variables defined"), \
44 C(EVENT_FILE_NOT_FOUND, "Event file not found"), \
45 C(HIST_NOT_FOUND, "Matching event histogram not found"), \
46 C(HIST_CREATE_FAIL, "Couldn't create histogram for field"), \
47 C(SYNTH_VAR_NOT_FOUND, "Couldn't find synthetic variable"), \
48 C(SYNTH_EVENT_NOT_FOUND,"Couldn't find synthetic event"), \
49 C(SYNTH_TYPE_MISMATCH, "Param type doesn't match synthetic event field type"), \
50 C(SYNTH_COUNT_MISMATCH, "Param count doesn't match synthetic event field count"), \
51 C(FIELD_VAR_PARSE_FAIL, "Couldn't parse field variable"), \
52 C(VAR_CREATE_FIND_FAIL, "Couldn't create or find variable"), \
53 C(ONX_NOT_VAR, "For onmax(x) or onchange(x), x must be a variable"), \
54 C(ONX_VAR_NOT_FOUND, "Couldn't find onmax or onchange variable"), \
55 C(ONX_VAR_CREATE_FAIL, "Couldn't create onmax or onchange variable"), \
56 C(FIELD_VAR_CREATE_FAIL,"Couldn't create field variable"), \
57 C(TOO_MANY_PARAMS, "Too many action params"), \
58 C(PARAM_NOT_FOUND, "Couldn't find param"), \
59 C(INVALID_PARAM, "Invalid action param"), \
60 C(ACTION_NOT_FOUND, "No action found"), \
61 C(NO_SAVE_PARAMS, "No params found for save()"), \
62 C(TOO_MANY_SAVE_ACTIONS,"Can't have more than one save() action per hist"), \
63 C(ACTION_MISMATCH, "Handler doesn't support action"), \
64 C(NO_CLOSING_PAREN, "No closing paren found"), \
65 C(SUBSYS_NOT_FOUND, "Missing subsystem"), \
66 C(INVALID_SUBSYS_EVENT, "Invalid subsystem or event name"), \
67 C(INVALID_REF_KEY, "Using variable references in keys not supported"), \
68 C(VAR_NOT_FOUND, "Couldn't find variable"), \
69 C(FIELD_NOT_FOUND, "Couldn't find field"), \
70 C(INVALID_STR_OPERAND, "String type can not be an operand in expression"),
71
72 #undef C
73 #define C(a, b) HIST_ERR_##a
74
75 enum { ERRORS };
76
77 #undef C
78 #define C(a, b) b
79
80 static const char *err_text[] = { ERRORS };
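
/*
 * Illustrative expansion of the C() x-macro above: ERRORS is expanded
 * twice, first with C(a, b) defined as HIST_ERR_##a and then as b, i.e.
 *
 *   enum { HIST_ERR_NONE, HIST_ERR_DUPLICATE_VAR, ... };
 *   static const char *err_text[] = { "No error", "Variable already defined", ... };
 */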
81
82 struct hist_field;
83
84 typedef u64 (*hist_field_fn_t) (struct hist_field *field,
85 struct tracing_map_elt *elt,
86 struct ring_buffer_event *rbe,
87 void *event);
88
89 #define HIST_FIELD_OPERANDS_MAX 2
90 #define HIST_FIELDS_MAX (TRACING_MAP_FIELDS_MAX + TRACING_MAP_VARS_MAX)
91 #define HIST_ACTIONS_MAX 8
92
93 enum field_op_id {
94 FIELD_OP_NONE,
95 FIELD_OP_PLUS,
96 FIELD_OP_MINUS,
97 FIELD_OP_UNARY_MINUS,
98 };
99
100 /*
101 * A hist_var (histogram variable) contains variable information for
102 * hist_fields having the HIST_FIELD_FL_VAR or HIST_FIELD_FL_VAR_REF
103 * flag set. A hist_var has a variable name, e.g. ts0, and is
104 * associated with a given histogram trigger, as specified by
105 * hist_data. The hist_var idx is the unique index assigned to the
106 * variable by the hist trigger's tracing_map. The idx is what is
107 * used to set a variable's value and, by a variable reference, to
108 * retrieve it.
109 */
110 struct hist_var {
111 char *name;
112 struct hist_trigger_data *hist_data;
113 unsigned int idx;
114 };
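
/*
 * Illustrative example (trigger syntax as documented in
 * Documentation/trace/histogram.rst): a command such as
 *
 *   echo 'hist:keys=pid:ts0=common_timestamp.usecs' >> \
 *       events/sched/sched_waking/trigger
 *
 * defines a hist_var named "ts0"; its per-entry value is stored in the
 * trigger's tracing_map at the slot recorded in hist_var.idx.
 */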
115
116 struct hist_field {
117 struct ftrace_event_field *field;
118 unsigned long flags;
119 hist_field_fn_t fn;
120 unsigned int ref;
121 unsigned int size;
122 unsigned int offset;
123 unsigned int is_signed;
124 const char *type;
125 struct hist_field *operands[HIST_FIELD_OPERANDS_MAX];
126 struct hist_trigger_data *hist_data;
127
128 /*
129 * Variable fields contain variable-specific info in var.
130 */
131 struct hist_var var;
132 enum field_op_id operator;
133 char *system;
134 char *event_name;
135
136 /*
137 * The name field is used for EXPR and VAR_REF fields. VAR
138 * fields contain the variable name in var.name.
139 */
140 char *name;
141
142 /*
143 * When a histogram trigger is hit, if it has any references
144 * to variables, the values of those variables are collected
145 * into a var_ref_vals array by resolve_var_refs(). The
146 * current value of each variable is read from the tracing_map
147 * using the hist field's hist_var.idx and entered into the
148 * var_ref_idx entry, i.e. var_ref_vals[var_ref_idx].
149 */
150 unsigned int var_ref_idx;
151 bool read_once;
152
153 unsigned int var_str_idx;
154 };
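
/*
 * Illustrative example: in an expression such as
 * 'wakeup_lat=common_timestamp.usecs-$ts0', the '$ts0' reference is a
 * VAR_REF hist_field. When the trigger hits, resolve_var_refs() reads
 * the current value of ts0 via its hist_var.idx and stores it at
 * var_ref_vals[var_ref_idx], where the expression (and any actions)
 * pick it up.
 */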
155
156 static u64 hist_field_none(struct hist_field *field,
157 struct tracing_map_elt *elt,
158 struct ring_buffer_event *rbe,
159 void *event)
160 {
161 return 0;
162 }
163
164 static u64 hist_field_counter(struct hist_field *field,
165 struct tracing_map_elt *elt,
166 struct ring_buffer_event *rbe,
167 void *event)
168 {
169 return 1;
170 }
171
172 static u64 hist_field_string(struct hist_field *hist_field,
173 struct tracing_map_elt *elt,
174 struct ring_buffer_event *rbe,
175 void *event)
176 {
177 char *addr = (char *)(event + hist_field->field->offset);
178
179 return (u64)(unsigned long)addr;
180 }
181
182 static u64 hist_field_dynstring(struct hist_field *hist_field,
183 struct tracing_map_elt *elt,
184 struct ring_buffer_event *rbe,
185 void *event)
186 {
187 u32 str_item = *(u32 *)(event + hist_field->field->offset);
188 int str_loc = str_item & 0xffff;
189 char *addr = (char *)(event + str_loc);
190
191 return (u64)(unsigned long)addr;
192 }
193
194 static u64 hist_field_pstring(struct hist_field *hist_field,
195 struct tracing_map_elt *elt,
196 struct ring_buffer_event *rbe,
197 void *event)
198 {
199 char **addr = (char **)(event + hist_field->field->offset);
200
201 return (u64)(unsigned long)*addr;
202 }
203
204 static u64 hist_field_log2(struct hist_field *hist_field,
205 struct tracing_map_elt *elt,
206 struct ring_buffer_event *rbe,
207 void *event)
208 {
209 struct hist_field *operand = hist_field->operands[0];
210
211 u64 val = operand->fn(operand, elt, rbe, event);
212
213 return (u64) ilog2(roundup_pow_of_two(val));
214 }
215
216 static u64 hist_field_plus(struct hist_field *hist_field,
217 struct tracing_map_elt *elt,
218 struct ring_buffer_event *rbe,
219 void *event)
220 {
221 struct hist_field *operand1 = hist_field->operands[0];
222 struct hist_field *operand2 = hist_field->operands[1];
223
224 u64 val1 = operand1->fn(operand1, elt, rbe, event);
225 u64 val2 = operand2->fn(operand2, elt, rbe, event);
226
227 return val1 + val2;
228 }
229
230 static u64 hist_field_minus(struct hist_field *hist_field,
231 struct tracing_map_elt *elt,
232 struct ring_buffer_event *rbe,
233 void *event)
234 {
235 struct hist_field *operand1 = hist_field->operands[0];
236 struct hist_field *operand2 = hist_field->operands[1];
237
238 u64 val1 = operand1->fn(operand1, elt, rbe, event);
239 u64 val2 = operand2->fn(operand2, elt, rbe, event);
240
241 return val1 - val2;
242 }
243
244 static u64 hist_field_unary_minus(struct hist_field *hist_field,
245 struct tracing_map_elt *elt,
246 struct ring_buffer_event *rbe,
247 void *event)
248 {
249 struct hist_field *operand = hist_field->operands[0];
250
251 s64 sval = (s64)operand->fn(operand, elt, rbe, event);
252 u64 val = (u64)-sval;
253
254 return val;
255 }
256
257 #define DEFINE_HIST_FIELD_FN(type) \
258 static u64 hist_field_##type(struct hist_field *hist_field, \
259 struct tracing_map_elt *elt, \
260 struct ring_buffer_event *rbe, \
261 void *event) \
262 { \
263 type *addr = (type *)(event + hist_field->field->offset); \
264 \
265 return (u64)(unsigned long)*addr; \
266 }
267
268 DEFINE_HIST_FIELD_FN(s64);
269 DEFINE_HIST_FIELD_FN(u64);
270 DEFINE_HIST_FIELD_FN(s32);
271 DEFINE_HIST_FIELD_FN(u32);
272 DEFINE_HIST_FIELD_FN(s16);
273 DEFINE_HIST_FIELD_FN(u16);
274 DEFINE_HIST_FIELD_FN(s8);
275 DEFINE_HIST_FIELD_FN(u8);
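
/*
 * For reference, DEFINE_HIST_FIELD_FN(u32) above expands to:
 *
 *   static u64 hist_field_u32(struct hist_field *hist_field,
 *                             struct tracing_map_elt *elt,
 *                             struct ring_buffer_event *rbe,
 *                             void *event)
 *   {
 *           u32 *addr = (u32 *)(event + hist_field->field->offset);
 *
 *           return (u64)(unsigned long)*addr;
 *   }
 */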
276
277 #define for_each_hist_field(i, hist_data) \
278 for ((i) = 0; (i) < (hist_data)->n_fields; (i)++)
279
280 #define for_each_hist_val_field(i, hist_data) \
281 for ((i) = 0; (i) < (hist_data)->n_vals; (i)++)
282
283 #define for_each_hist_key_field(i, hist_data) \
284 for ((i) = (hist_data)->n_vals; (i) < (hist_data)->n_fields; (i)++)
285
286 #define HIST_STACKTRACE_DEPTH 16
287 #define HIST_STACKTRACE_SIZE (HIST_STACKTRACE_DEPTH * sizeof(unsigned long))
288 #define HIST_STACKTRACE_SKIP 5
289
290 #define HITCOUNT_IDX 0
291 #define HIST_KEY_SIZE_MAX (MAX_FILTER_STR_VAL + HIST_STACKTRACE_SIZE)
292
293 enum hist_field_flags {
294 HIST_FIELD_FL_HITCOUNT = 1 << 0,
295 HIST_FIELD_FL_KEY = 1 << 1,
296 HIST_FIELD_FL_STRING = 1 << 2,
297 HIST_FIELD_FL_HEX = 1 << 3,
298 HIST_FIELD_FL_SYM = 1 << 4,
299 HIST_FIELD_FL_SYM_OFFSET = 1 << 5,
300 HIST_FIELD_FL_EXECNAME = 1 << 6,
301 HIST_FIELD_FL_SYSCALL = 1 << 7,
302 HIST_FIELD_FL_STACKTRACE = 1 << 8,
303 HIST_FIELD_FL_LOG2 = 1 << 9,
304 HIST_FIELD_FL_TIMESTAMP = 1 << 10,
305 HIST_FIELD_FL_TIMESTAMP_USECS = 1 << 11,
306 HIST_FIELD_FL_VAR = 1 << 12,
307 HIST_FIELD_FL_EXPR = 1 << 13,
308 HIST_FIELD_FL_VAR_REF = 1 << 14,
309 HIST_FIELD_FL_CPU = 1 << 15,
310 HIST_FIELD_FL_ALIAS = 1 << 16,
311 };
312
313 struct var_defs {
314 unsigned int n_vars;
315 char *name[TRACING_MAP_VARS_MAX];
316 char *expr[TRACING_MAP_VARS_MAX];
317 };
318
319 struct hist_trigger_attrs {
320 char *keys_str;
321 char *vals_str;
322 char *sort_key_str;
323 char *name;
324 char *clock;
325 bool pause;
326 bool cont;
327 bool clear;
328 bool ts_in_usecs;
329 unsigned int map_bits;
330
331 char *assignment_str[TRACING_MAP_VARS_MAX];
332 unsigned int n_assignments;
333
334 char *action_str[HIST_ACTIONS_MAX];
335 unsigned int n_actions;
336
337 struct var_defs var_defs;
338 };
339
340 struct field_var {
341 struct hist_field *var;
342 struct hist_field *val;
343 };
344
345 struct field_var_hist {
346 struct hist_trigger_data *hist_data;
347 char *cmd;
348 };
349
350 struct hist_trigger_data {
351 struct hist_field *fields[HIST_FIELDS_MAX];
352 unsigned int n_vals;
353 unsigned int n_keys;
354 unsigned int n_fields;
355 unsigned int n_vars;
356 unsigned int n_var_str;
357 unsigned int key_size;
358 struct tracing_map_sort_key sort_keys[TRACING_MAP_SORT_KEYS_MAX];
359 unsigned int n_sort_keys;
360 struct trace_event_file *event_file;
361 struct hist_trigger_attrs *attrs;
362 struct tracing_map *map;
363 bool enable_timestamps;
364 bool remove;
365 struct hist_field *var_refs[TRACING_MAP_VARS_MAX];
366 unsigned int n_var_refs;
367
368 struct action_data *actions[HIST_ACTIONS_MAX];
369 unsigned int n_actions;
370
371 struct field_var *field_vars[SYNTH_FIELDS_MAX];
372 unsigned int n_field_vars;
373 unsigned int n_field_var_str;
374 struct field_var_hist *field_var_hists[SYNTH_FIELDS_MAX];
375 unsigned int n_field_var_hists;
376
377 struct field_var *save_vars[SYNTH_FIELDS_MAX];
378 unsigned int n_save_vars;
379 unsigned int n_save_var_str;
380 };
381
382 static int synth_event_create(int argc, const char **argv);
383 static int synth_event_show(struct seq_file *m, struct dyn_event *ev);
384 static int synth_event_release(struct dyn_event *ev);
385 static bool synth_event_is_busy(struct dyn_event *ev);
386 static bool synth_event_match(const char *system, const char *event,
387 int argc, const char **argv, struct dyn_event *ev);
388
389 static struct dyn_event_operations synth_event_ops = {
390 .create = synth_event_create,
391 .show = synth_event_show,
392 .is_busy = synth_event_is_busy,
393 .free = synth_event_release,
394 .match = synth_event_match,
395 };
396
397 struct synth_field {
398 char *type;
399 char *name;
400 size_t size;
401 bool is_signed;
402 bool is_string;
403 };
404
405 struct synth_event {
406 struct dyn_event devent;
407 int ref;
408 char *name;
409 struct synth_field **fields;
410 unsigned int n_fields;
411 unsigned int n_u64;
412 struct trace_event_class class;
413 struct trace_event_call call;
414 struct tracepoint *tp;
415 };
416
417 static bool is_synth_event(struct dyn_event *ev)
418 {
419 return ev->ops == &synth_event_ops;
420 }
421
422 static struct synth_event *to_synth_event(struct dyn_event *ev)
423 {
424 return container_of(ev, struct synth_event, devent);
425 }
426
427 static bool synth_event_is_busy(struct dyn_event *ev)
428 {
429 struct synth_event *event = to_synth_event(ev);
430
431 return event->ref != 0;
432 }
433
434 static bool synth_event_match(const char *system, const char *event,
435 int argc, const char **argv, struct dyn_event *ev)
436 {
437 struct synth_event *sev = to_synth_event(ev);
438
439 return strcmp(sev->name, event) == 0 &&
440 (!system || strcmp(system, SYNTH_SYSTEM) == 0);
441 }
442
443 struct action_data;
444
445 typedef void (*action_fn_t) (struct hist_trigger_data *hist_data,
446 struct tracing_map_elt *elt, void *rec,
447 struct ring_buffer_event *rbe, void *key,
448 struct action_data *data, u64 *var_ref_vals);
449
450 typedef bool (*check_track_val_fn_t) (u64 track_val, u64 var_val);
451
452 enum handler_id {
453 HANDLER_ONMATCH = 1,
454 HANDLER_ONMAX,
455 HANDLER_ONCHANGE,
456 };
457
458 enum action_id {
459 ACTION_SAVE = 1,
460 ACTION_TRACE,
461 ACTION_SNAPSHOT,
462 };
463
464 struct action_data {
465 enum handler_id handler;
466 enum action_id action;
467 char *action_name;
468 action_fn_t fn;
469
470 unsigned int n_params;
471 char *params[SYNTH_FIELDS_MAX];
472
473 /*
474 * When a histogram trigger is hit, the values of any
475 * references to variables, including variables being passed
476 * as parameters to synthetic events, are collected into a
477 * var_ref_vals array. This var_ref_idx array is an array of
478 * indices into the var_ref_vals array, one for each synthetic
479 * event param, and is passed to the synthetic event
480 * invocation.
481 */
482 unsigned int var_ref_idx[SYNTH_FIELDS_MAX];
483 struct synth_event *synth_event;
484 bool use_trace_keyword;
485 char *synth_event_name;
486
487 union {
488 struct {
489 char *event;
490 char *event_system;
491 } match_data;
492
493 struct {
494 /*
495 * var_str contains the $-unstripped variable
496 * name referenced by var_ref, and used when
497 * printing the action. Because var_ref
498 * creation is deferred to create_actions(),
499 * we need a per-action way to save it until
500 * then, thus var_str.
501 */
502 char *var_str;
503
504 /*
505 * var_ref refers to the variable being
506 * tracked, e.g. onmax($var).
507 */
508 struct hist_field *var_ref;
509
510 /*
511 * track_var contains the 'invisible' tracking
512 * variable created to keep the current
513 * e.g. max value.
514 */
515 struct hist_field *track_var;
516
517 check_track_val_fn_t check_val;
518 action_fn_t save_data;
519 } track_data;
520 };
521 };
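
/*
 * Illustrative example: for an action such as
 * onmatch(sched.sched_switch).wakeup_latency($wakeup_lat,next_pid),
 * params[] holds "$wakeup_lat" and "next_pid", and var_ref_idx[0] and
 * var_ref_idx[1] locate the corresponding values in var_ref_vals when
 * the wakeup_latency synthetic event is generated.
 */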
522
523 struct track_data {
524 u64 track_val;
525 bool updated;
526
527 unsigned int key_len;
528 void *key;
529 struct tracing_map_elt elt;
530
531 struct action_data *action_data;
532 struct hist_trigger_data *hist_data;
533 };
534
535 struct hist_elt_data {
536 char *comm;
537 u64 *var_ref_vals;
538 char *field_var_str[SYNTH_FIELDS_MAX];
539 };
540
541 struct snapshot_context {
542 struct tracing_map_elt *elt;
543 void *key;
544 };
545
546 static void track_data_free(struct track_data *track_data)
547 {
548 struct hist_elt_data *elt_data;
549
550 if (!track_data)
551 return;
552
553 kfree(track_data->key);
554
555 elt_data = track_data->elt.private_data;
556 if (elt_data) {
557 kfree(elt_data->comm);
558 kfree(elt_data);
559 }
560
561 kfree(track_data);
562 }
563
564 static struct track_data *track_data_alloc(unsigned int key_len,
565 struct action_data *action_data,
566 struct hist_trigger_data *hist_data)
567 {
568 struct track_data *data = kzalloc(sizeof(*data), GFP_KERNEL);
569 struct hist_elt_data *elt_data;
570
571 if (!data)
572 return ERR_PTR(-ENOMEM);
573
574 data->key = kzalloc(key_len, GFP_KERNEL);
575 if (!data->key) {
576 track_data_free(data);
577 return ERR_PTR(-ENOMEM);
578 }
579
580 data->key_len = key_len;
581 data->action_data = action_data;
582 data->hist_data = hist_data;
583
584 elt_data = kzalloc(sizeof(*elt_data), GFP_KERNEL);
585 if (!elt_data) {
586 track_data_free(data);
587 return ERR_PTR(-ENOMEM);
588 }
589 data->elt.private_data = elt_data;
590
591 elt_data->comm = kzalloc(TASK_COMM_LEN, GFP_KERNEL);
592 if (!elt_data->comm) {
593 track_data_free(data);
594 return ERR_PTR(-ENOMEM);
595 }
596
597 return data;
598 }
599
600 static char last_cmd[MAX_FILTER_STR_VAL];
601 static char last_cmd_loc[MAX_FILTER_STR_VAL];
602
603 static int errpos(char *str)
604 {
605 return err_pos(last_cmd, str);
606 }
607
608 static void last_cmd_set(struct trace_event_file *file, char *str)
609 {
610 const char *system = NULL, *name = NULL;
611 struct trace_event_call *call;
612
613 if (!str)
614 return;
615
616 strncpy(last_cmd, str, MAX_FILTER_STR_VAL - 1);
617
618 if (file) {
619 call = file->event_call;
620
621 system = call->class->system;
622 if (system) {
623 name = trace_event_name(call);
624 if (!name)
625 system = NULL;
626 }
627 }
628
629 if (system)
630 snprintf(last_cmd_loc, MAX_FILTER_STR_VAL, "hist:%s:%s", system, name);
631 }
632
633 static void hist_err(struct trace_array *tr, u8 err_type, u8 err_pos)
634 {
635 tracing_log_err(tr, last_cmd_loc, last_cmd, err_text,
636 err_type, err_pos);
637 }
638
639 static void hist_err_clear(void)
640 {
641 last_cmd[0] = '\0';
642 last_cmd_loc[0] = '\0';
643 }
644
645 struct synth_trace_event {
646 struct trace_entry ent;
647 u64 fields[];
648 };
649
650 static int synth_event_define_fields(struct trace_event_call *call)
651 {
652 struct synth_trace_event trace;
653 int offset = offsetof(typeof(trace), fields);
654 struct synth_event *event = call->data;
655 unsigned int i, size, n_u64;
656 char *name, *type;
657 bool is_signed;
658 int ret = 0;
659
660 for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
661 size = event->fields[i]->size;
662 is_signed = event->fields[i]->is_signed;
663 type = event->fields[i]->type;
664 name = event->fields[i]->name;
665 ret = trace_define_field(call, type, name, offset, size,
666 is_signed, FILTER_OTHER);
667 if (ret)
668 break;
669
670 if (event->fields[i]->is_string) {
671 offset += STR_VAR_LEN_MAX;
672 n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
673 } else {
674 offset += sizeof(u64);
675 n_u64++;
676 }
677 }
678
679 event->n_u64 = n_u64;
680
681 return ret;
682 }
683
684 static bool synth_field_signed(char *type)
685 {
686 if (str_has_prefix(type, "u"))
687 return false;
688 if (strcmp(type, "gfp_t") == 0)
689 return false;
690
691 return true;
692 }
693
694 static int synth_field_is_string(char *type)
695 {
696 if (strstr(type, "char[") != NULL)
697 return true;
698
699 return false;
700 }
701
702 static int synth_field_string_size(char *type)
703 {
704 char buf[4], *end, *start;
705 unsigned int len;
706 int size, err;
707
708 start = strstr(type, "char[");
709 if (start == NULL)
710 return -EINVAL;
711 start += sizeof("char[") - 1;
712
713 end = strchr(type, ']');
714 if (!end || end < start)
715 return -EINVAL;
716
717 len = end - start;
718 if (len > 3)
719 return -EINVAL;
720
721 strncpy(buf, start, len);
722 buf[len] = '\0';
723
724 err = kstrtouint(buf, 0, &size);
725 if (err)
726 return err;
727
728 if (size > STR_VAR_LEN_MAX)
729 return -EINVAL;
730
731 return size;
732 }
733
734 static int synth_field_size(char *type)
735 {
736 int size = 0;
737
738 if (strcmp(type, "s64") == 0)
739 size = sizeof(s64);
740 else if (strcmp(type, "u64") == 0)
741 size = sizeof(u64);
742 else if (strcmp(type, "s32") == 0)
743 size = sizeof(s32);
744 else if (strcmp(type, "u32") == 0)
745 size = sizeof(u32);
746 else if (strcmp(type, "s16") == 0)
747 size = sizeof(s16);
748 else if (strcmp(type, "u16") == 0)
749 size = sizeof(u16);
750 else if (strcmp(type, "s8") == 0)
751 size = sizeof(s8);
752 else if (strcmp(type, "u8") == 0)
753 size = sizeof(u8);
754 else if (strcmp(type, "char") == 0)
755 size = sizeof(char);
756 else if (strcmp(type, "unsigned char") == 0)
757 size = sizeof(unsigned char);
758 else if (strcmp(type, "int") == 0)
759 size = sizeof(int);
760 else if (strcmp(type, "unsigned int") == 0)
761 size = sizeof(unsigned int);
762 else if (strcmp(type, "long") == 0)
763 size = sizeof(long);
764 else if (strcmp(type, "unsigned long") == 0)
765 size = sizeof(unsigned long);
766 else if (strcmp(type, "pid_t") == 0)
767 size = sizeof(pid_t);
768 else if (strcmp(type, "gfp_t") == 0)
769 size = sizeof(gfp_t);
770 else if (synth_field_is_string(type))
771 size = synth_field_string_size(type);
772
773 return size;
774 }
775
776 static const char *synth_field_fmt(char *type)
777 {
778 const char *fmt = "%llu";
779
780 if (strcmp(type, "s64") == 0)
781 fmt = "%lld";
782 else if (strcmp(type, "u64") == 0)
783 fmt = "%llu";
784 else if (strcmp(type, "s32") == 0)
785 fmt = "%d";
786 else if (strcmp(type, "u32") == 0)
787 fmt = "%u";
788 else if (strcmp(type, "s16") == 0)
789 fmt = "%d";
790 else if (strcmp(type, "u16") == 0)
791 fmt = "%u";
792 else if (strcmp(type, "s8") == 0)
793 fmt = "%d";
794 else if (strcmp(type, "u8") == 0)
795 fmt = "%u";
796 else if (strcmp(type, "char") == 0)
797 fmt = "%d";
798 else if (strcmp(type, "unsigned char") == 0)
799 fmt = "%u";
800 else if (strcmp(type, "int") == 0)
801 fmt = "%d";
802 else if (strcmp(type, "unsigned int") == 0)
803 fmt = "%u";
804 else if (strcmp(type, "long") == 0)
805 fmt = "%ld";
806 else if (strcmp(type, "unsigned long") == 0)
807 fmt = "%lu";
808 else if (strcmp(type, "pid_t") == 0)
809 fmt = "%d";
810 else if (strcmp(type, "gfp_t") == 0)
811 fmt = "%x";
812 else if (synth_field_is_string(type))
813 fmt = "%s";
814
815 return fmt;
816 }
817
818 static void print_synth_event_num_val(struct trace_seq *s,
819 char *print_fmt, char *name,
820 int size, u64 val, char *space)
821 {
822 switch (size) {
823 case 1:
824 trace_seq_printf(s, print_fmt, name, (u8)val, space);
825 break;
826
827 case 2:
828 trace_seq_printf(s, print_fmt, name, (u16)val, space);
829 break;
830
831 case 4:
832 trace_seq_printf(s, print_fmt, name, (u32)val, space);
833 break;
834
835 default:
836 trace_seq_printf(s, print_fmt, name, val, space);
837 break;
838 }
839 }
840
841 static enum print_line_t print_synth_event(struct trace_iterator *iter,
842 int flags,
843 struct trace_event *event)
844 {
845 struct trace_array *tr = iter->tr;
846 struct trace_seq *s = &iter->seq;
847 struct synth_trace_event *entry;
848 struct synth_event *se;
849 unsigned int i, n_u64;
850 char print_fmt[32];
851 const char *fmt;
852
853 entry = (struct synth_trace_event *)iter->ent;
854 se = container_of(event, struct synth_event, call.event);
855
856 trace_seq_printf(s, "%s: ", se->name);
857
858 for (i = 0, n_u64 = 0; i < se->n_fields; i++) {
859 if (trace_seq_has_overflowed(s))
860 goto end;
861
862 fmt = synth_field_fmt(se->fields[i]->type);
863
864 /* parameter types */
865 if (tr->trace_flags & TRACE_ITER_VERBOSE)
866 trace_seq_printf(s, "%s ", fmt);
867
868 snprintf(print_fmt, sizeof(print_fmt), "%%s=%s%%s", fmt);
869
870 /* parameter values */
871 if (se->fields[i]->is_string) {
872 trace_seq_printf(s, print_fmt, se->fields[i]->name,
873 (char *)&entry->fields[n_u64],
874 i == se->n_fields - 1 ? "" : " ");
875 n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
876 } else {
877 struct trace_print_flags __flags[] = {
878 __def_gfpflag_names, {-1, NULL} };
879 char *space = (i == se->n_fields - 1 ? "" : " ");
880
881 print_synth_event_num_val(s, print_fmt,
882 se->fields[i]->name,
883 se->fields[i]->size,
884 entry->fields[n_u64],
885 space);
886
887 if (strcmp(se->fields[i]->type, "gfp_t") == 0) {
888 trace_seq_puts(s, " (");
889 trace_print_flags_seq(s, "|",
890 entry->fields[n_u64],
891 __flags);
892 trace_seq_putc(s, ')');
893 }
894 n_u64++;
895 }
896 }
897 end:
898 trace_seq_putc(s, '\n');
899
900 return trace_handle_return(s);
901 }
902
903 static struct trace_event_functions synth_event_funcs = {
904 .trace = print_synth_event
905 };
906
907 static notrace void trace_event_raw_event_synth(void *__data,
908 u64 *var_ref_vals,
909 unsigned int *var_ref_idx)
910 {
911 struct trace_event_file *trace_file = __data;
912 struct synth_trace_event *entry;
913 struct trace_event_buffer fbuffer;
914 struct ring_buffer *buffer;
915 struct synth_event *event;
916 unsigned int i, n_u64, val_idx;
917 int fields_size = 0;
918
919 event = trace_file->event_call->data;
920
921 if (trace_trigger_soft_disabled(trace_file))
922 return;
923
924 fields_size = event->n_u64 * sizeof(u64);
925
926 /*
927 * Avoid ring buffer recursion detection, as this event
928 * is being performed within another event.
929 */
930 buffer = trace_file->tr->trace_buffer.buffer;
931 ring_buffer_nest_start(buffer);
932
933 entry = trace_event_buffer_reserve(&fbuffer, trace_file,
934 sizeof(*entry) + fields_size);
935 if (!entry)
936 goto out;
937
938 for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
939 val_idx = var_ref_idx[i];
940 if (event->fields[i]->is_string) {
941 char *str_val = (char *)(long)var_ref_vals[val_idx];
942 char *str_field = (char *)&entry->fields[n_u64];
943
944 strscpy(str_field, str_val, STR_VAR_LEN_MAX);
945 n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
946 } else {
947 struct synth_field *field = event->fields[i];
948 u64 val = var_ref_vals[val_idx];
949
950 switch (field->size) {
951 case 1:
952 *(u8 *)&entry->fields[n_u64] = (u8)val;
953 break;
954
955 case 2:
956 *(u16 *)&entry->fields[n_u64] = (u16)val;
957 break;
958
959 case 4:
960 *(u32 *)&entry->fields[n_u64] = (u32)val;
961 break;
962
963 default:
964 entry->fields[n_u64] = val;
965 break;
966 }
967 n_u64++;
968 }
969 }
970
971 trace_event_buffer_commit(&fbuffer);
972 out:
973 ring_buffer_nest_end(buffer);
974 }
975
976 static void free_synth_event_print_fmt(struct trace_event_call *call)
977 {
978 if (call) {
979 kfree(call->print_fmt);
980 call->print_fmt = NULL;
981 }
982 }
983
984 static int __set_synth_event_print_fmt(struct synth_event *event,
985 char *buf, int len)
986 {
987 const char *fmt;
988 int pos = 0;
989 int i;
990
991 /* When len=0, we just calculate the needed length */
992 #define LEN_OR_ZERO (len ? len - pos : 0)
993
994 pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
995 for (i = 0; i < event->n_fields; i++) {
996 fmt = synth_field_fmt(event->fields[i]->type);
997 pos += snprintf(buf + pos, LEN_OR_ZERO, "%s=%s%s",
998 event->fields[i]->name, fmt,
999 i == event->n_fields - 1 ? "" : ", ");
1000 }
1001 pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
1002
1003 for (i = 0; i < event->n_fields; i++) {
1004 pos += snprintf(buf + pos, LEN_OR_ZERO,
1005 ", REC->%s", event->fields[i]->name);
1006 }
1007
1008 #undef LEN_OR_ZERO
1009
1010 /* return the length of print_fmt */
1011 return pos;
1012 }
1013
1014 static int set_synth_event_print_fmt(struct trace_event_call *call)
1015 {
1016 struct synth_event *event = call->data;
1017 char *print_fmt;
1018 int len;
1019
1020 /* First: called with 0 length to calculate the needed length */
1021 len = __set_synth_event_print_fmt(event, NULL, 0);
1022
1023 print_fmt = kmalloc(len + 1, GFP_KERNEL);
1024 if (!print_fmt)
1025 return -ENOMEM;
1026
1027 /* Second: actually write the @print_fmt */
1028 __set_synth_event_print_fmt(event, print_fmt, len + 1);
1029 call->print_fmt = print_fmt;
1030
1031 return 0;
1032 }
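
/*
 * Illustrative example: for a synthetic event defined as
 * "wakeup_latency u64 lat; pid_t pid", the print_fmt built above is
 *
 *   "lat=%llu, pid=%d", REC->lat, REC->pid
 */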
1033
1034 static void free_synth_field(struct synth_field *field)
1035 {
1036 kfree(field->type);
1037 kfree(field->name);
1038 kfree(field);
1039 }
1040
1041 static struct synth_field *parse_synth_field(int argc, const char **argv,
1042 int *consumed)
1043 {
1044 struct synth_field *field;
1045 const char *prefix = NULL, *field_type = argv[0], *field_name, *array;
1046 int len, ret = 0;
1047
1048 if (field_type[0] == ';')
1049 field_type++;
1050
1051 if (!strcmp(field_type, "unsigned")) {
1052 if (argc < 3)
1053 return ERR_PTR(-EINVAL);
1054 prefix = "unsigned ";
1055 field_type = argv[1];
1056 field_name = argv[2];
1057 *consumed = 3;
1058 } else {
1059 field_name = argv[1];
1060 *consumed = 2;
1061 }
1062
1063 field = kzalloc(sizeof(*field), GFP_KERNEL);
1064 if (!field)
1065 return ERR_PTR(-ENOMEM);
1066
1067 len = strlen(field_name);
1068 array = strchr(field_name, '[');
1069 if (array)
1070 len -= strlen(array);
1071 else if (field_name[len - 1] == ';')
1072 len--;
1073
1074 field->name = kmemdup_nul(field_name, len, GFP_KERNEL);
1075 if (!field->name) {
1076 ret = -ENOMEM;
1077 goto free;
1078 }
1079
1080 if (field_type[0] == ';')
1081 field_type++;
1082 len = strlen(field_type) + 1;
1083 if (array)
1084 len += strlen(array);
1085 if (prefix)
1086 len += strlen(prefix);
1087
1088 field->type = kzalloc(len, GFP_KERNEL);
1089 if (!field->type) {
1090 ret = -ENOMEM;
1091 goto free;
1092 }
1093 if (prefix)
1094 strcat(field->type, prefix);
1095 strcat(field->type, field_type);
1096 if (array) {
1097 strcat(field->type, array);
1098 if (field->type[len - 1] == ';')
1099 field->type[len - 1] = '\0';
1100 }
1101
1102 field->size = synth_field_size(field->type);
1103 if (!field->size) {
1104 ret = -EINVAL;
1105 goto free;
1106 }
1107
1108 if (synth_field_is_string(field->type))
1109 field->is_string = true;
1110
1111 field->is_signed = synth_field_signed(field->type);
1112
1113 out:
1114 return field;
1115 free:
1116 free_synth_field(field);
1117 field = ERR_PTR(ret);
1118 goto out;
1119 }
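
/*
 * Illustrative example: the token pair "pid_t" "pid;" parses to a field
 * with name "pid", type "pid_t", size sizeof(pid_t) and is_signed set,
 * while "char[16]" "cmd;" parses to an is_string field of size 16.
 */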
1120
1121 static void free_synth_tracepoint(struct tracepoint *tp)
1122 {
1123 if (!tp)
1124 return;
1125
1126 kfree(tp->name);
1127 kfree(tp);
1128 }
1129
1130 static struct tracepoint *alloc_synth_tracepoint(char *name)
1131 {
1132 struct tracepoint *tp;
1133
1134 tp = kzalloc(sizeof(*tp), GFP_KERNEL);
1135 if (!tp)
1136 return ERR_PTR(-ENOMEM);
1137
1138 tp->name = kstrdup(name, GFP_KERNEL);
1139 if (!tp->name) {
1140 kfree(tp);
1141 return ERR_PTR(-ENOMEM);
1142 }
1143
1144 return tp;
1145 }
1146
1147 typedef void (*synth_probe_func_t) (void *__data, u64 *var_ref_vals,
1148 unsigned int *var_ref_idx);
1149
1150 static inline void trace_synth(struct synth_event *event, u64 *var_ref_vals,
1151 unsigned int *var_ref_idx)
1152 {
1153 struct tracepoint *tp = event->tp;
1154
1155 if (unlikely(atomic_read(&tp->key.enabled) > 0)) {
1156 struct tracepoint_func *probe_func_ptr;
1157 synth_probe_func_t probe_func;
1158 void *__data;
1159
1160 if (!(cpu_online(raw_smp_processor_id())))
1161 return;
1162
1163 probe_func_ptr = rcu_dereference_sched((tp)->funcs);
1164 if (probe_func_ptr) {
1165 do {
1166 probe_func = probe_func_ptr->func;
1167 __data = probe_func_ptr->data;
1168 probe_func(__data, var_ref_vals, var_ref_idx);
1169 } while ((++probe_func_ptr)->func);
1170 }
1171 }
1172 }
1173
1174 static struct synth_event *find_synth_event(const char *name)
1175 {
1176 struct dyn_event *pos;
1177 struct synth_event *event;
1178
1179 for_each_dyn_event(pos) {
1180 if (!is_synth_event(pos))
1181 continue;
1182 event = to_synth_event(pos);
1183 if (strcmp(event->name, name) == 0)
1184 return event;
1185 }
1186
1187 return NULL;
1188 }
1189
1190 static int register_synth_event(struct synth_event *event)
1191 {
1192 struct trace_event_call *call = &event->call;
1193 int ret = 0;
1194
1195 event->call.class = &event->class;
1196 event->class.system = kstrdup(SYNTH_SYSTEM, GFP_KERNEL);
1197 if (!event->class.system) {
1198 ret = -ENOMEM;
1199 goto out;
1200 }
1201
1202 event->tp = alloc_synth_tracepoint(event->name);
1203 if (IS_ERR(event->tp)) {
1204 ret = PTR_ERR(event->tp);
1205 event->tp = NULL;
1206 goto out;
1207 }
1208
1209 INIT_LIST_HEAD(&call->class->fields);
1210 call->event.funcs = &synth_event_funcs;
1211 call->class->define_fields = synth_event_define_fields;
1212
1213 ret = register_trace_event(&call->event);
1214 if (!ret) {
1215 ret = -ENODEV;
1216 goto out;
1217 }
1218 call->flags = TRACE_EVENT_FL_TRACEPOINT;
1219 call->class->reg = trace_event_reg;
1220 call->class->probe = trace_event_raw_event_synth;
1221 call->data = event;
1222 call->tp = event->tp;
1223
1224 ret = trace_add_event_call(call);
1225 if (ret) {
1226 pr_warn("Failed to register synthetic event: %s\n",
1227 trace_event_name(call));
1228 goto err;
1229 }
1230
1231 ret = set_synth_event_print_fmt(call);
1232 if (ret < 0) {
1233 trace_remove_event_call(call);
1234 goto err;
1235 }
1236 out:
1237 return ret;
1238 err:
1239 unregister_trace_event(&call->event);
1240 goto out;
1241 }
1242
1243 static int unregister_synth_event(struct synth_event *event)
1244 {
1245 struct trace_event_call *call = &event->call;
1246 int ret;
1247
1248 ret = trace_remove_event_call(call);
1249
1250 return ret;
1251 }
1252
1253 static void free_synth_event(struct synth_event *event)
1254 {
1255 unsigned int i;
1256
1257 if (!event)
1258 return;
1259
1260 for (i = 0; i < event->n_fields; i++)
1261 free_synth_field(event->fields[i]);
1262
1263 kfree(event->fields);
1264 kfree(event->name);
1265 kfree(event->class.system);
1266 free_synth_tracepoint(event->tp);
1267 free_synth_event_print_fmt(&event->call);
1268 kfree(event);
1269 }
1270
1271 static struct synth_event *alloc_synth_event(const char *name, int n_fields,
1272 struct synth_field **fields)
1273 {
1274 struct synth_event *event;
1275 unsigned int i;
1276
1277 event = kzalloc(sizeof(*event), GFP_KERNEL);
1278 if (!event) {
1279 event = ERR_PTR(-ENOMEM);
1280 goto out;
1281 }
1282
1283 event->name = kstrdup(name, GFP_KERNEL);
1284 if (!event->name) {
1285 kfree(event);
1286 event = ERR_PTR(-ENOMEM);
1287 goto out;
1288 }
1289
1290 event->fields = kcalloc(n_fields, sizeof(*event->fields), GFP_KERNEL);
1291 if (!event->fields) {
1292 free_synth_event(event);
1293 event = ERR_PTR(-ENOMEM);
1294 goto out;
1295 }
1296
1297 dyn_event_init(&event->devent, &synth_event_ops);
1298
1299 for (i = 0; i < n_fields; i++)
1300 event->fields[i] = fields[i];
1301
1302 event->n_fields = n_fields;
1303 out:
1304 return event;
1305 }
1306
1307 static void action_trace(struct hist_trigger_data *hist_data,
1308 struct tracing_map_elt *elt, void *rec,
1309 struct ring_buffer_event *rbe, void *key,
1310 struct action_data *data, u64 *var_ref_vals)
1311 {
1312 struct synth_event *event = data->synth_event;
1313
1314 trace_synth(event, var_ref_vals, data->var_ref_idx);
1315 }
1316
1317 struct hist_var_data {
1318 struct list_head list;
1319 struct hist_trigger_data *hist_data;
1320 };
1321
1322 static int __create_synth_event(int argc, const char *name, const char **argv)
1323 {
1324 struct synth_field *field, *fields[SYNTH_FIELDS_MAX];
1325 struct synth_event *event = NULL;
1326 int i, consumed = 0, n_fields = 0, ret = 0;
1327
1328 /*
1329 * Argument syntax:
1330 * - Add synthetic event: <event_name> field[;field] ...
1331 * - Remove synthetic event: !<event_name> field[;field] ...
1332 * where 'field' = type field_name
1333 */
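/*
 * For example (illustrative), "wakeup_latency u64 lat; pid_t pid"
 * creates a synthetic event named wakeup_latency with a u64 "lat"
 * field and a pid_t "pid" field.
 */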
1334
1335 if (name[0] == '\0' || argc < 1)
1336 return -EINVAL;
1337
1338 mutex_lock(&event_mutex);
1339
1340 event = find_synth_event(name);
1341 if (event) {
1342 ret = -EEXIST;
1343 goto out;
1344 }
1345
1346 for (i = 0; i < argc - 1; i++) {
1347 if (strcmp(argv[i], ";") == 0)
1348 continue;
1349 if (n_fields == SYNTH_FIELDS_MAX) {
1350 ret = -EINVAL;
1351 goto err;
1352 }
1353
1354 field = parse_synth_field(argc - i, &argv[i], &consumed);
1355 if (IS_ERR(field)) {
1356 ret = PTR_ERR(field);
1357 goto err;
1358 }
1359 fields[n_fields++] = field;
1360 i += consumed - 1;
1361 }
1362
1363 if (i < argc && strcmp(argv[i], ";") != 0) {
1364 ret = -EINVAL;
1365 goto err;
1366 }
1367
1368 event = alloc_synth_event(name, n_fields, fields);
1369 if (IS_ERR(event)) {
1370 ret = PTR_ERR(event);
1371 event = NULL;
1372 goto err;
1373 }
1374 ret = register_synth_event(event);
1375 if (!ret)
1376 dyn_event_add(&event->devent);
1377 else
1378 free_synth_event(event);
1379 out:
1380 mutex_unlock(&event_mutex);
1381
1382 return ret;
1383 err:
1384 for (i = 0; i < n_fields; i++)
1385 free_synth_field(fields[i]);
1386
1387 goto out;
1388 }
1389
1390 static int create_or_delete_synth_event(int argc, char **argv)
1391 {
1392 const char *name = argv[0];
1393 struct synth_event *event = NULL;
1394 int ret;
1395
1396 /* trace_run_command() ensures argc != 0 */
1397 if (name[0] == '!') {
1398 mutex_lock(&event_mutex);
1399 event = find_synth_event(name + 1);
1400 if (event) {
1401 if (event->ref)
1402 ret = -EBUSY;
1403 else {
1404 ret = unregister_synth_event(event);
1405 if (!ret) {
1406 dyn_event_remove(&event->devent);
1407 free_synth_event(event);
1408 }
1409 }
1410 } else
1411 ret = -ENOENT;
1412 mutex_unlock(&event_mutex);
1413 return ret;
1414 }
1415
1416 ret = __create_synth_event(argc - 1, name, (const char **)argv + 1);
1417 return ret == -ECANCELED ? -EINVAL : ret;
1418 }
1419
1420 static int synth_event_create(int argc, const char **argv)
1421 {
1422 const char *name = argv[0];
1423 int len;
1424
1425 if (name[0] != 's' || name[1] != ':')
1426 return -ECANCELED;
1427 name += 2;
1428
1429 /* This interface accepts group name prefix */
1430 if (strchr(name, '/')) {
1431 len = str_has_prefix(name, SYNTH_SYSTEM "/");
1432 if (len == 0)
1433 return -EINVAL;
1434 name += len;
1435 }
1436 return __create_synth_event(argc - 1, name, argv + 1);
1437 }
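
/*
 * Illustrative example: via the dynamic_events interface this accepts
 * an "s:" prefix and an optional "synthetic/" group, e.g.
 *
 *   echo 's:wakeup_latency u64 lat; pid_t pid' >> dynamic_events
 *   echo 's:synthetic/wakeup_latency u64 lat; pid_t pid' >> dynamic_events
 */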
1438
1439 static int synth_event_release(struct dyn_event *ev)
1440 {
1441 struct synth_event *event = to_synth_event(ev);
1442 int ret;
1443
1444 if (event->ref)
1445 return -EBUSY;
1446
1447 ret = unregister_synth_event(event);
1448 if (ret)
1449 return ret;
1450
1451 dyn_event_remove(ev);
1452 free_synth_event(event);
1453 return 0;
1454 }
1455
1456 static int __synth_event_show(struct seq_file *m, struct synth_event *event)
1457 {
1458 struct synth_field *field;
1459 unsigned int i;
1460
1461 seq_printf(m, "%s\t", event->name);
1462
1463 for (i = 0; i < event->n_fields; i++) {
1464 field = event->fields[i];
1465
1466 /* parameter values */
1467 seq_printf(m, "%s %s%s", field->type, field->name,
1468 i == event->n_fields - 1 ? "" : "; ");
1469 }
1470
1471 seq_putc(m, '\n');
1472
1473 return 0;
1474 }
1475
1476 static int synth_event_show(struct seq_file *m, struct dyn_event *ev)
1477 {
1478 struct synth_event *event = to_synth_event(ev);
1479
1480 seq_printf(m, "s:%s/", event->class.system);
1481
1482 return __synth_event_show(m, event);
1483 }
1484
1485 static int synth_events_seq_show(struct seq_file *m, void *v)
1486 {
1487 struct dyn_event *ev = v;
1488
1489 if (!is_synth_event(ev))
1490 return 0;
1491
1492 return __synth_event_show(m, to_synth_event(ev));
1493 }
1494
1495 static const struct seq_operations synth_events_seq_op = {
1496 .start = dyn_event_seq_start,
1497 .next = dyn_event_seq_next,
1498 .stop = dyn_event_seq_stop,
1499 .show = synth_events_seq_show,
1500 };
1501
1502 static int synth_events_open(struct inode *inode, struct file *file)
1503 {
1504 int ret;
1505
1506 ret = security_locked_down(LOCKDOWN_TRACEFS);
1507 if (ret)
1508 return ret;
1509
1510 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
1511 ret = dyn_events_release_all(&synth_event_ops);
1512 if (ret < 0)
1513 return ret;
1514 }
1515
1516 return seq_open(file, &synth_events_seq_op);
1517 }
1518
1519 static ssize_t synth_events_write(struct file *file,
1520 const char __user *buffer,
1521 size_t count, loff_t *ppos)
1522 {
1523 return trace_parse_run_command(file, buffer, count, ppos,
1524 create_or_delete_synth_event);
1525 }
1526
1527 static const struct file_operations synth_events_fops = {
1528 .open = synth_events_open,
1529 .write = synth_events_write,
1530 .read = seq_read,
1531 .llseek = seq_lseek,
1532 .release = seq_release,
1533 };
1534
1535 static u64 hist_field_timestamp(struct hist_field *hist_field,
1536 struct tracing_map_elt *elt,
1537 struct ring_buffer_event *rbe,
1538 void *event)
1539 {
1540 struct hist_trigger_data *hist_data = hist_field->hist_data;
1541 struct trace_array *tr = hist_data->event_file->tr;
1542
1543 u64 ts = ring_buffer_event_time_stamp(rbe);
1544
1545 if (hist_data->attrs->ts_in_usecs && trace_clock_in_ns(tr))
1546 ts = ns2usecs(ts);
1547
1548 return ts;
1549 }
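
/*
 * Illustrative example: using "common_timestamp.usecs" as a key or
 * value sets attrs->ts_in_usecs, so the ring buffer timestamp
 * (nanoseconds with an ns trace clock) is converted to microseconds
 * here.
 */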
1550
1551 static u64 hist_field_cpu(struct hist_field *hist_field,
1552 struct tracing_map_elt *elt,
1553 struct ring_buffer_event *rbe,
1554 void *event)
1555 {
1556 int cpu = smp_processor_id();
1557
1558 return cpu;
1559 }
1560
1561 /**
1562 * check_field_for_var_ref - Check if a VAR_REF field references a variable
1563 * @hist_field: The VAR_REF field to check
1564 * @var_data: The hist trigger that owns the variable
1565 * @var_idx: The trigger variable identifier
1566 *
1567 * Check the given VAR_REF field to see whether or not it references
1568 * the given variable associated with the given trigger.
1569 *
1570 * Return: The VAR_REF field if it does reference the variable, NULL if not
1571 */
1572 static struct hist_field *
1573 check_field_for_var_ref(struct hist_field *hist_field,
1574 struct hist_trigger_data *var_data,
1575 unsigned int var_idx)
1576 {
1577 WARN_ON(!(hist_field && hist_field->flags & HIST_FIELD_FL_VAR_REF));
1578
1579 if (hist_field && hist_field->var.idx == var_idx &&
1580 hist_field->var.hist_data == var_data)
1581 return hist_field;
1582
1583 return NULL;
1584 }
1585
1586 /**
1587 * find_var_ref - Check if a trigger has a reference to a trigger variable
1588 * @hist_data: The hist trigger that might have a reference to the variable
1589 * @var_data: The hist trigger that owns the variable
1590 * @var_idx: The trigger variable identifier
1591 *
1592 * Check the list of var_refs[] on the first hist trigger to see
1593 * whether any of them are references to the variable on the second
1594 * trigger.
1595 *
1596 * Return: The VAR_REF field referencing the variable if so, NULL if not
1597 */
1598 static struct hist_field *find_var_ref(struct hist_trigger_data *hist_data,
1599 struct hist_trigger_data *var_data,
1600 unsigned int var_idx)
1601 {
1602 struct hist_field *hist_field;
1603 unsigned int i;
1604
1605 for (i = 0; i < hist_data->n_var_refs; i++) {
1606 hist_field = hist_data->var_refs[i];
1607 if (check_field_for_var_ref(hist_field, var_data, var_idx))
1608 return hist_field;
1609 }
1610
1611 return NULL;
1612 }
1613
1614 /**
1615 * find_any_var_ref - Check if there is a reference to a given trigger variable
1616 * @hist_data: The hist trigger
1617 * @var_idx: The trigger variable identifier
1618 *
1619 * Check to see whether the given variable is currently referenced by
1620 * any other trigger.
1621 *
1622 * The trigger the variable is defined on is explicitly excluded - the
1623 * assumption being that a self-reference doesn't prevent a trigger
1624 * from being removed.
1625 *
1626 * Return: The VAR_REF field referencing the variable if so, NULL if not
1627 */
1628 static struct hist_field *find_any_var_ref(struct hist_trigger_data *hist_data,
1629 unsigned int var_idx)
1630 {
1631 struct trace_array *tr = hist_data->event_file->tr;
1632 struct hist_field *found = NULL;
1633 struct hist_var_data *var_data;
1634
1635 list_for_each_entry(var_data, &tr->hist_vars, list) {
1636 if (var_data->hist_data == hist_data)
1637 continue;
1638 found = find_var_ref(var_data->hist_data, hist_data, var_idx);
1639 if (found)
1640 break;
1641 }
1642
1643 return found;
1644 }
1645
1646 /**
1647 * check_var_refs - Check if there is a reference to any of trigger's variables
1648 * @hist_data: The hist trigger
1649 *
1650 * A trigger can define one or more variables. If any one of them is
1651 * currently referenced by any other trigger, this function will
1652 * determine that.
1653 *
1654 * Typically used to determine whether or not a trigger can be removed
1655 * - if there are any references to a trigger's variables, it cannot.
1656 *
1657 * Return: True if there is a reference to any of trigger's variables
1658 */
1659 static bool check_var_refs(struct hist_trigger_data *hist_data)
1660 {
1661 struct hist_field *field;
1662 bool found = false;
1663 int i;
1664
1665 for_each_hist_field(i, hist_data) {
1666 field = hist_data->fields[i];
1667 if (field && field->flags & HIST_FIELD_FL_VAR) {
1668 if (find_any_var_ref(hist_data, field->var.idx)) {
1669 found = true;
1670 break;
1671 }
1672 }
1673 }
1674
1675 return found;
1676 }
1677
1678 static struct hist_var_data *find_hist_vars(struct hist_trigger_data *hist_data)
1679 {
1680 struct trace_array *tr = hist_data->event_file->tr;
1681 struct hist_var_data *var_data, *found = NULL;
1682
1683 list_for_each_entry(var_data, &tr->hist_vars, list) {
1684 if (var_data->hist_data == hist_data) {
1685 found = var_data;
1686 break;
1687 }
1688 }
1689
1690 return found;
1691 }
1692
1693 static bool field_has_hist_vars(struct hist_field *hist_field,
1694 unsigned int level)
1695 {
1696 int i;
1697
1698 if (level > 3)
1699 return false;
1700
1701 if (!hist_field)
1702 return false;
1703
1704 if (hist_field->flags & HIST_FIELD_FL_VAR ||
1705 hist_field->flags & HIST_FIELD_FL_VAR_REF)
1706 return true;
1707
1708 for (i = 0; i < HIST_FIELD_OPERANDS_MAX; i++) {
1709 struct hist_field *operand;
1710
1711 operand = hist_field->operands[i];
1712 if (field_has_hist_vars(operand, level + 1))
1713 return true;
1714 }
1715
1716 return false;
1717 }
1718
1719 static bool has_hist_vars(struct hist_trigger_data *hist_data)
1720 {
1721 struct hist_field *hist_field;
1722 int i;
1723
1724 for_each_hist_field(i, hist_data) {
1725 hist_field = hist_data->fields[i];
1726 if (field_has_hist_vars(hist_field, 0))
1727 return true;
1728 }
1729
1730 return false;
1731 }
1732
1733 static int save_hist_vars(struct hist_trigger_data *hist_data)
1734 {
1735 struct trace_array *tr = hist_data->event_file->tr;
1736 struct hist_var_data *var_data;
1737
1738 var_data = find_hist_vars(hist_data);
1739 if (var_data)
1740 return 0;
1741
1742 if (tracing_check_open_get_tr(tr))
1743 return -ENODEV;
1744
1745 var_data = kzalloc(sizeof(*var_data), GFP_KERNEL);
1746 if (!var_data) {
1747 trace_array_put(tr);
1748 return -ENOMEM;
1749 }
1750
1751 var_data->hist_data = hist_data;
1752 list_add(&var_data->list, &tr->hist_vars);
1753
1754 return 0;
1755 }
1756
1757 static void remove_hist_vars(struct hist_trigger_data *hist_data)
1758 {
1759 struct trace_array *tr = hist_data->event_file->tr;
1760 struct hist_var_data *var_data;
1761
1762 var_data = find_hist_vars(hist_data);
1763 if (!var_data)
1764 return;
1765
1766 if (WARN_ON(check_var_refs(hist_data)))
1767 return;
1768
1769 list_del(&var_data->list);
1770
1771 kfree(var_data);
1772
1773 trace_array_put(tr);
1774 }
1775
1776 static struct hist_field *find_var_field(struct hist_trigger_data *hist_data,
1777 const char *var_name)
1778 {
1779 struct hist_field *hist_field, *found = NULL;
1780 int i;
1781
1782 for_each_hist_field(i, hist_data) {
1783 hist_field = hist_data->fields[i];
1784 if (hist_field && hist_field->flags & HIST_FIELD_FL_VAR &&
1785 strcmp(hist_field->var.name, var_name) == 0) {
1786 found = hist_field;
1787 break;
1788 }
1789 }
1790
1791 return found;
1792 }
1793
1794 static struct hist_field *find_var(struct hist_trigger_data *hist_data,
1795 struct trace_event_file *file,
1796 const char *var_name)
1797 {
1798 struct hist_trigger_data *test_data;
1799 struct event_trigger_data *test;
1800 struct hist_field *hist_field;
1801
1802 lockdep_assert_held(&event_mutex);
1803
1804 hist_field = find_var_field(hist_data, var_name);
1805 if (hist_field)
1806 return hist_field;
1807
1808 list_for_each_entry(test, &file->triggers, list) {
1809 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
1810 test_data = test->private_data;
1811 hist_field = find_var_field(test_data, var_name);
1812 if (hist_field)
1813 return hist_field;
1814 }
1815 }
1816
1817 return NULL;
1818 }
1819
1820 static struct trace_event_file *find_var_file(struct trace_array *tr,
1821 char *system,
1822 char *event_name,
1823 char *var_name)
1824 {
1825 struct hist_trigger_data *var_hist_data;
1826 struct hist_var_data *var_data;
1827 struct trace_event_file *file, *found = NULL;
1828
1829 if (system)
1830 return find_event_file(tr, system, event_name);
1831
1832 list_for_each_entry(var_data, &tr->hist_vars, list) {
1833 var_hist_data = var_data->hist_data;
1834 file = var_hist_data->event_file;
1835 if (file == found)
1836 continue;
1837
1838 if (find_var_field(var_hist_data, var_name)) {
1839 if (found) {
1840 hist_err(tr, HIST_ERR_VAR_NOT_UNIQUE, errpos(var_name));
1841 return NULL;
1842 }
1843
1844 found = file;
1845 }
1846 }
1847
1848 return found;
1849 }
1850
1851 static struct hist_field *find_file_var(struct trace_event_file *file,
1852 const char *var_name)
1853 {
1854 struct hist_trigger_data *test_data;
1855 struct event_trigger_data *test;
1856 struct hist_field *hist_field;
1857
1858 lockdep_assert_held(&event_mutex);
1859
1860 list_for_each_entry(test, &file->triggers, list) {
1861 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
1862 test_data = test->private_data;
1863 hist_field = find_var_field(test_data, var_name);
1864 if (hist_field)
1865 return hist_field;
1866 }
1867 }
1868
1869 return NULL;
1870 }
1871
1872 static struct hist_field *
1873 find_match_var(struct hist_trigger_data *hist_data, char *var_name)
1874 {
1875 struct trace_array *tr = hist_data->event_file->tr;
1876 struct hist_field *hist_field, *found = NULL;
1877 struct trace_event_file *file;
1878 unsigned int i;
1879
1880 for (i = 0; i < hist_data->n_actions; i++) {
1881 struct action_data *data = hist_data->actions[i];
1882
1883 if (data->handler == HANDLER_ONMATCH) {
1884 char *system = data->match_data.event_system;
1885 char *event_name = data->match_data.event;
1886
1887 file = find_var_file(tr, system, event_name, var_name);
1888 if (!file)
1889 continue;
1890 hist_field = find_file_var(file, var_name);
1891 if (hist_field) {
1892 if (found) {
1893 hist_err(tr, HIST_ERR_VAR_NOT_UNIQUE,
1894 errpos(var_name));
1895 return ERR_PTR(-EINVAL);
1896 }
1897
1898 found = hist_field;
1899 }
1900 }
1901 }
1902 return found;
1903 }
1904
1905 static struct hist_field *find_event_var(struct hist_trigger_data *hist_data,
1906 char *system,
1907 char *event_name,
1908 char *var_name)
1909 {
1910 struct trace_array *tr = hist_data->event_file->tr;
1911 struct hist_field *hist_field = NULL;
1912 struct trace_event_file *file;
1913
1914 if (!system || !event_name) {
1915 hist_field = find_match_var(hist_data, var_name);
1916 if (IS_ERR(hist_field))
1917 return NULL;
1918 if (hist_field)
1919 return hist_field;
1920 }
1921
1922 file = find_var_file(tr, system, event_name, var_name);
1923 if (!file)
1924 return NULL;
1925
1926 hist_field = find_file_var(file, var_name);
1927
1928 return hist_field;
1929 }
1930
1931 static u64 hist_field_var_ref(struct hist_field *hist_field,
1932 struct tracing_map_elt *elt,
1933 struct ring_buffer_event *rbe,
1934 void *event)
1935 {
1936 struct hist_elt_data *elt_data;
1937 u64 var_val = 0;
1938
1939 if (WARN_ON_ONCE(!elt))
1940 return var_val;
1941
1942 elt_data = elt->private_data;
1943 var_val = elt_data->var_ref_vals[hist_field->var_ref_idx];
1944
1945 return var_val;
1946 }
1947
1948 static bool resolve_var_refs(struct hist_trigger_data *hist_data, void *key,
1949 u64 *var_ref_vals, bool self)
1950 {
1951 struct hist_trigger_data *var_data;
1952 struct tracing_map_elt *var_elt;
1953 struct hist_field *hist_field;
1954 unsigned int i, var_idx;
1955 bool resolved = true;
1956 u64 var_val = 0;
1957
1958 for (i = 0; i < hist_data->n_var_refs; i++) {
1959 hist_field = hist_data->var_refs[i];
1960 var_idx = hist_field->var.idx;
1961 var_data = hist_field->var.hist_data;
1962
1963 if (var_data == NULL) {
1964 resolved = false;
1965 break;
1966 }
1967
1968 if ((self && var_data != hist_data) ||
1969 (!self && var_data == hist_data))
1970 continue;
1971
1972 var_elt = tracing_map_lookup(var_data->map, key);
1973 if (!var_elt) {
1974 resolved = false;
1975 break;
1976 }
1977
1978 if (!tracing_map_var_set(var_elt, var_idx)) {
1979 resolved = false;
1980 break;
1981 }
1982
1983 if (self || !hist_field->read_once)
1984 var_val = tracing_map_read_var(var_elt, var_idx);
1985 else
1986 var_val = tracing_map_read_var_once(var_elt, var_idx);
1987
1988 var_ref_vals[i] = var_val;
1989 }
1990
1991 return resolved;
1992 }
1993
1994 static const char *hist_field_name(struct hist_field *field,
1995 unsigned int level)
1996 {
1997 const char *field_name = "";
1998
1999 if (WARN_ON_ONCE(!field))
2000 return field_name;
2001
2002 if (level > 1)
2003 return field_name;
2004
2005 if (field->field)
2006 field_name = field->field->name;
2007 else if (field->flags & HIST_FIELD_FL_LOG2 ||
2008 field->flags & HIST_FIELD_FL_ALIAS)
2009 field_name = hist_field_name(field->operands[0], ++level);
2010 else if (field->flags & HIST_FIELD_FL_CPU)
2011 field_name = "common_cpu";
2012 else if (field->flags & HIST_FIELD_FL_EXPR ||
2013 field->flags & HIST_FIELD_FL_VAR_REF) {
2014 if (field->system) {
2015 static char full_name[MAX_FILTER_STR_VAL];
2016
2017 strcat(full_name, field->system);
2018 strcat(full_name, ".");
2019 strcat(full_name, field->event_name);
2020 strcat(full_name, ".");
2021 strcat(full_name, field->name);
2022 field_name = full_name;
2023 } else
2024 field_name = field->name;
2025 } else if (field->flags & HIST_FIELD_FL_TIMESTAMP)
2026 field_name = "common_timestamp";
2027
2028 if (field_name == NULL)
2029 field_name = "";
2030
2031 return field_name;
2032 }
2033
2034 static hist_field_fn_t select_value_fn(int field_size, int field_is_signed)
2035 {
2036 hist_field_fn_t fn = NULL;
2037
2038 switch (field_size) {
2039 case 8:
2040 if (field_is_signed)
2041 fn = hist_field_s64;
2042 else
2043 fn = hist_field_u64;
2044 break;
2045 case 4:
2046 if (field_is_signed)
2047 fn = hist_field_s32;
2048 else
2049 fn = hist_field_u32;
2050 break;
2051 case 2:
2052 if (field_is_signed)
2053 fn = hist_field_s16;
2054 else
2055 fn = hist_field_u16;
2056 break;
2057 case 1:
2058 if (field_is_signed)
2059 fn = hist_field_s8;
2060 else
2061 fn = hist_field_u8;
2062 break;
2063 }
2064
2065 return fn;
2066 }
2067
2068 static int parse_map_size(char *str)
2069 {
2070 unsigned long size, map_bits;
2071 int ret;
2072
2073 ret = kstrtoul(str, 0, &size);
2074 if (ret)
2075 goto out;
2076
2077 map_bits = ilog2(roundup_pow_of_two(size));
2078 if (map_bits < TRACING_MAP_BITS_MIN ||
2079 map_bits > TRACING_MAP_BITS_MAX)
2080 ret = -EINVAL;
2081 else
2082 ret = map_bits;
2083 out:
2084 return ret;
2085 }
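/*
 * For example, 'size=2048' maps to 11 (2^11 = 2048 entries); a size that
 * rounds up to fewer than TRACING_MAP_BITS_MIN or more than
 * TRACING_MAP_BITS_MAX bits is rejected with -EINVAL.
 */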
2086
2087 static void destroy_hist_trigger_attrs(struct hist_trigger_attrs *attrs)
2088 {
2089 unsigned int i;
2090
2091 if (!attrs)
2092 return;
2093
2094 for (i = 0; i < attrs->n_assignments; i++)
2095 kfree(attrs->assignment_str[i]);
2096
2097 for (i = 0; i < attrs->n_actions; i++)
2098 kfree(attrs->action_str[i]);
2099
2100 kfree(attrs->name);
2101 kfree(attrs->sort_key_str);
2102 kfree(attrs->keys_str);
2103 kfree(attrs->vals_str);
2104 kfree(attrs->clock);
2105 kfree(attrs);
2106 }
2107
2108 static int parse_action(char *str, struct hist_trigger_attrs *attrs)
2109 {
2110 int ret = -EINVAL;
2111
2112 if (attrs->n_actions >= HIST_ACTIONS_MAX)
2113 return ret;
2114
2115 if ((str_has_prefix(str, "onmatch(")) ||
2116 (str_has_prefix(str, "onmax(")) ||
2117 (str_has_prefix(str, "onchange("))) {
2118 attrs->action_str[attrs->n_actions] = kstrdup(str, GFP_KERNEL);
2119 if (!attrs->action_str[attrs->n_actions]) {
2120 ret = -ENOMEM;
2121 return ret;
2122 }
2123 attrs->n_actions++;
2124 ret = 0;
2125 }
2126 return ret;
2127 }
2128
2129 static int parse_assignment(struct trace_array *tr,
2130 char *str, struct hist_trigger_attrs *attrs)
2131 {
2132 int len, ret = 0;
2133
2134 if ((len = str_has_prefix(str, "key=")) ||
2135 (len = str_has_prefix(str, "keys="))) {
2136 attrs->keys_str = kstrdup(str + len, GFP_KERNEL);
2137 if (!attrs->keys_str) {
2138 ret = -ENOMEM;
2139 goto out;
2140 }
2141 } else if ((len = str_has_prefix(str, "val=")) ||
2142 (len = str_has_prefix(str, "vals=")) ||
2143 (len = str_has_prefix(str, "values="))) {
2144 attrs->vals_str = kstrdup(str + len, GFP_KERNEL);
2145 if (!attrs->vals_str) {
2146 ret = -ENOMEM;
2147 goto out;
2148 }
2149 } else if ((len = str_has_prefix(str, "sort="))) {
2150 attrs->sort_key_str = kstrdup(str + len, GFP_KERNEL);
2151 if (!attrs->sort_key_str) {
2152 ret = -ENOMEM;
2153 goto out;
2154 }
2155 } else if (str_has_prefix(str, "name=")) {
2156 attrs->name = kstrdup(str, GFP_KERNEL);
2157 if (!attrs->name) {
2158 ret = -ENOMEM;
2159 goto out;
2160 }
2161 } else if ((len = str_has_prefix(str, "clock="))) {
2162 str += len;
2163
2164 str = strstrip(str);
2165 attrs->clock = kstrdup(str, GFP_KERNEL);
2166 if (!attrs->clock) {
2167 ret = -ENOMEM;
2168 goto out;
2169 }
2170 } else if ((len = str_has_prefix(str, "size="))) {
2171 int map_bits = parse_map_size(str + len);
2172
2173 if (map_bits < 0) {
2174 ret = map_bits;
2175 goto out;
2176 }
2177 attrs->map_bits = map_bits;
2178 } else {
2179 char *assignment;
2180
2181 if (attrs->n_assignments == TRACING_MAP_VARS_MAX) {
2182 hist_err(tr, HIST_ERR_TOO_MANY_VARS, errpos(str));
2183 ret = -EINVAL;
2184 goto out;
2185 }
2186
2187 assignment = kstrdup(str, GFP_KERNEL);
2188 if (!assignment) {
2189 ret = -ENOMEM;
2190 goto out;
2191 }
2192
2193 attrs->assignment_str[attrs->n_assignments++] = assignment;
2194 }
2195 out:
2196 return ret;
2197 }
2198
2199 static struct hist_trigger_attrs *
2200 parse_hist_trigger_attrs(struct trace_array *tr, char *trigger_str)
2201 {
2202 struct hist_trigger_attrs *attrs;
2203 int ret = 0;
2204
2205 attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
2206 if (!attrs)
2207 return ERR_PTR(-ENOMEM);
2208
2209 while (trigger_str) {
2210 char *str = strsep(&trigger_str, ":");
2211 char *rhs;
2212
2213 rhs = strchr(str, '=');
2214 if (rhs) {
2215 if (!strlen(++rhs)) {
2216 ret = -EINVAL;
2217 goto free;
2218 }
2219 ret = parse_assignment(tr, str, attrs);
2220 if (ret)
2221 goto free;
2222 } else if (strcmp(str, "pause") == 0)
2223 attrs->pause = true;
2224 else if ((strcmp(str, "cont") == 0) ||
2225 (strcmp(str, "continue") == 0))
2226 attrs->cont = true;
2227 else if (strcmp(str, "clear") == 0)
2228 attrs->clear = true;
2229 else {
2230 ret = parse_action(str, attrs);
2231 if (ret)
2232 goto free;
2233 }
2234 }
2235
2236 if (!attrs->keys_str) {
2237 ret = -EINVAL;
2238 goto free;
2239 }
2240
2241 if (!attrs->clock) {
2242 attrs->clock = kstrdup("global", GFP_KERNEL);
2243 if (!attrs->clock) {
2244 ret = -ENOMEM;
2245 goto free;
2246 }
2247 }
2248
2249 return attrs;
2250 free:
2251 destroy_hist_trigger_attrs(attrs);
2252
2253 return ERR_PTR(ret);
2254 }
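/*
 * Illustrative trigger string handled above (field and variable names are
 * hypothetical):
 *
 *   keys=common_pid:vals=hitcount:sort=hitcount:size=2048:onmax($lat).save(prio)
 *
 * Each ':'-separated token containing '=' is routed to parse_assignment(),
 * 'pause'/'cont'/'clear' are handled inline, and anything else is offered
 * to parse_action().
 */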
2255
2256 static inline void save_comm(char *comm, struct task_struct *task)
2257 {
2258 if (!task->pid) {
2259 strcpy(comm, "<idle>");
2260 return;
2261 }
2262
2263 if (WARN_ON_ONCE(task->pid < 0)) {
2264 strcpy(comm, "<XXX>");
2265 return;
2266 }
2267
2268 strncpy(comm, task->comm, TASK_COMM_LEN);
2269 }
2270
2271 static void hist_elt_data_free(struct hist_elt_data *elt_data)
2272 {
2273 unsigned int i;
2274
2275 for (i = 0; i < SYNTH_FIELDS_MAX; i++)
2276 kfree(elt_data->field_var_str[i]);
2277
2278 kfree(elt_data->comm);
2279 kfree(elt_data);
2280 }
2281
2282 static void hist_trigger_elt_data_free(struct tracing_map_elt *elt)
2283 {
2284 struct hist_elt_data *elt_data = elt->private_data;
2285
2286 hist_elt_data_free(elt_data);
2287 }
2288
2289 static int hist_trigger_elt_data_alloc(struct tracing_map_elt *elt)
2290 {
2291 struct hist_trigger_data *hist_data = elt->map->private_data;
2292 unsigned int size = TASK_COMM_LEN;
2293 struct hist_elt_data *elt_data;
2294 struct hist_field *key_field;
2295 unsigned int i, n_str;
2296
2297 elt_data = kzalloc(sizeof(*elt_data), GFP_KERNEL);
2298 if (!elt_data)
2299 return -ENOMEM;
2300
2301 for_each_hist_key_field(i, hist_data) {
2302 key_field = hist_data->fields[i];
2303
2304 if (key_field->flags & HIST_FIELD_FL_EXECNAME) {
2305 elt_data->comm = kzalloc(size, GFP_KERNEL);
2306 if (!elt_data->comm) {
2307 kfree(elt_data);
2308 return -ENOMEM;
2309 }
2310 break;
2311 }
2312 }
2313
2314 n_str = hist_data->n_field_var_str + hist_data->n_save_var_str +
2315 hist_data->n_var_str;
2316 if (n_str > SYNTH_FIELDS_MAX) {
2317 hist_elt_data_free(elt_data);
2318 return -EINVAL;
2319 }
2320
2321 size = STR_VAR_LEN_MAX;
2322
2323 for (i = 0; i < n_str; i++) {
2324 elt_data->field_var_str[i] = kzalloc(size, GFP_KERNEL);
2325 if (!elt_data->field_var_str[i]) {
2326 hist_elt_data_free(elt_data);
2327 return -ENOMEM;
2328 }
2329 }
2330
2331 elt->private_data = elt_data;
2332
2333 return 0;
2334 }
2335
2336 static void hist_trigger_elt_data_init(struct tracing_map_elt *elt)
2337 {
2338 struct hist_elt_data *elt_data = elt->private_data;
2339
2340 if (elt_data->comm)
2341 save_comm(elt_data->comm, current);
2342 }
2343
2344 static const struct tracing_map_ops hist_trigger_elt_data_ops = {
2345 .elt_alloc = hist_trigger_elt_data_alloc,
2346 .elt_free = hist_trigger_elt_data_free,
2347 .elt_init = hist_trigger_elt_data_init,
2348 };
2349
2350 static const char *get_hist_field_flags(struct hist_field *hist_field)
2351 {
2352 const char *flags_str = NULL;
2353
2354 if (hist_field->flags & HIST_FIELD_FL_HEX)
2355 flags_str = "hex";
2356 else if (hist_field->flags & HIST_FIELD_FL_SYM)
2357 flags_str = "sym";
2358 else if (hist_field->flags & HIST_FIELD_FL_SYM_OFFSET)
2359 flags_str = "sym-offset";
2360 else if (hist_field->flags & HIST_FIELD_FL_EXECNAME)
2361 flags_str = "execname";
2362 else if (hist_field->flags & HIST_FIELD_FL_SYSCALL)
2363 flags_str = "syscall";
2364 else if (hist_field->flags & HIST_FIELD_FL_LOG2)
2365 flags_str = "log2";
2366 else if (hist_field->flags & HIST_FIELD_FL_TIMESTAMP_USECS)
2367 flags_str = "usecs";
2368
2369 return flags_str;
2370 }
2371
2372 static void expr_field_str(struct hist_field *field, char *expr)
2373 {
2374 if (field->flags & HIST_FIELD_FL_VAR_REF)
2375 strcat(expr, "$");
2376
2377 strcat(expr, hist_field_name(field, 0));
2378
2379 if (field->flags && !(field->flags & HIST_FIELD_FL_VAR_REF)) {
2380 const char *flags_str = get_hist_field_flags(field);
2381
2382 if (flags_str) {
2383 strcat(expr, ".");
2384 strcat(expr, flags_str);
2385 }
2386 }
2387 }
2388
2389 static char *expr_str(struct hist_field *field, unsigned int level)
2390 {
2391 char *expr;
2392
2393 if (level > 1)
2394 return NULL;
2395
2396 expr = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL);
2397 if (!expr)
2398 return NULL;
2399
2400 if (!field->operands[0]) {
2401 expr_field_str(field, expr);
2402 return expr;
2403 }
2404
2405 if (field->operator == FIELD_OP_UNARY_MINUS) {
2406 char *subexpr;
2407
2408 strcat(expr, "-(");
2409 subexpr = expr_str(field->operands[0], ++level);
2410 if (!subexpr) {
2411 kfree(expr);
2412 return NULL;
2413 }
2414 strcat(expr, subexpr);
2415 strcat(expr, ")");
2416
2417 kfree(subexpr);
2418
2419 return expr;
2420 }
2421
2422 expr_field_str(field->operands[0], expr);
2423
2424 switch (field->operator) {
2425 case FIELD_OP_MINUS:
2426 strcat(expr, "-");
2427 break;
2428 case FIELD_OP_PLUS:
2429 strcat(expr, "+");
2430 break;
2431 default:
2432 kfree(expr);
2433 return NULL;
2434 }
2435
2436 expr_field_str(field->operands[1], expr);
2437
2438 return expr;
2439 }
2440
2441 static int contains_operator(char *str)
2442 {
2443 enum field_op_id field_op = FIELD_OP_NONE;
2444 char *op;
2445
2446 op = strpbrk(str, "+-");
2447 if (!op)
2448 return FIELD_OP_NONE;
2449
2450 switch (*op) {
2451 case '-':
2452 /*
2453 * Unfortunately, the modifier ".sym-offset"
2454 * can confuse things.
2455 */
2456 if (op - str >= 4 && !strncmp(op - 4, ".sym-offset", 11))
2457 return FIELD_OP_NONE;
2458
2459 if (*str == '-')
2460 field_op = FIELD_OP_UNARY_MINUS;
2461 else
2462 field_op = FIELD_OP_MINUS;
2463 break;
2464 case '+':
2465 field_op = FIELD_OP_PLUS;
2466 break;
2467 default:
2468 break;
2469 }
2470
2471 return field_op;
2472 }
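/*
 * Examples (hypothetical names): "a+b" returns FIELD_OP_PLUS, "a-b"
 * returns FIELD_OP_MINUS, "-$lat" returns FIELD_OP_UNARY_MINUS, and
 * "addr.sym-offset" returns FIELD_OP_NONE thanks to the check above.
 */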
2473
2474 static void get_hist_field(struct hist_field *hist_field)
2475 {
2476 hist_field->ref++;
2477 }
2478
2479 static void __destroy_hist_field(struct hist_field *hist_field)
2480 {
2481 if (--hist_field->ref > 1)
2482 return;
2483
2484 kfree(hist_field->var.name);
2485 kfree(hist_field->name);
2486 kfree(hist_field->type);
2487
2488 kfree(hist_field->system);
2489 kfree(hist_field->event_name);
2490
2491 kfree(hist_field);
2492 }
2493
2494 static void destroy_hist_field(struct hist_field *hist_field,
2495 unsigned int level)
2496 {
2497 unsigned int i;
2498
2499 if (level > 3)
2500 return;
2501
2502 if (!hist_field)
2503 return;
2504
2505 if (hist_field->flags & HIST_FIELD_FL_VAR_REF)
2506 return; /* var refs will be destroyed separately */
2507
2508 for (i = 0; i < HIST_FIELD_OPERANDS_MAX; i++)
2509 destroy_hist_field(hist_field->operands[i], level + 1);
2510
2511 __destroy_hist_field(hist_field);
2512 }
2513
2514 static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data,
2515 struct ftrace_event_field *field,
2516 unsigned long flags,
2517 char *var_name)
2518 {
2519 struct hist_field *hist_field;
2520
2521 if (field && is_function_field(field))
2522 return NULL;
2523
2524 hist_field = kzalloc(sizeof(struct hist_field), GFP_KERNEL);
2525 if (!hist_field)
2526 return NULL;
2527
2528 hist_field->ref = 1;
2529
2530 hist_field->hist_data = hist_data;
2531
2532 if (flags & HIST_FIELD_FL_EXPR || flags & HIST_FIELD_FL_ALIAS)
2533 goto out; /* caller will populate */
2534
2535 if (flags & HIST_FIELD_FL_VAR_REF) {
2536 hist_field->fn = hist_field_var_ref;
2537 goto out;
2538 }
2539
2540 if (flags & HIST_FIELD_FL_HITCOUNT) {
2541 hist_field->fn = hist_field_counter;
2542 hist_field->size = sizeof(u64);
2543 hist_field->type = kstrdup("u64", GFP_KERNEL);
2544 if (!hist_field->type)
2545 goto free;
2546 goto out;
2547 }
2548
2549 if (flags & HIST_FIELD_FL_STACKTRACE) {
2550 hist_field->fn = hist_field_none;
2551 goto out;
2552 }
2553
2554 if (flags & HIST_FIELD_FL_LOG2) {
2555 unsigned long fl = flags & ~HIST_FIELD_FL_LOG2;
2556 hist_field->fn = hist_field_log2;
2557 hist_field->operands[0] = create_hist_field(hist_data, field, fl, NULL);
2558 if (!hist_field->operands[0])
2559 goto free;
2560 hist_field->size = hist_field->operands[0]->size;
2561 hist_field->type = kstrdup(hist_field->operands[0]->type, GFP_KERNEL);
2562 if (!hist_field->type)
2563 goto free;
2564 goto out;
2565 }
2566
2567 if (flags & HIST_FIELD_FL_TIMESTAMP) {
2568 hist_field->fn = hist_field_timestamp;
2569 hist_field->size = sizeof(u64);
2570 hist_field->type = kstrdup("u64", GFP_KERNEL);
2571 if (!hist_field->type)
2572 goto free;
2573 goto out;
2574 }
2575
2576 if (flags & HIST_FIELD_FL_CPU) {
2577 hist_field->fn = hist_field_cpu;
2578 hist_field->size = sizeof(int);
2579 hist_field->type = kstrdup("unsigned int", GFP_KERNEL);
2580 if (!hist_field->type)
2581 goto free;
2582 goto out;
2583 }
2584
2585 if (WARN_ON_ONCE(!field))
2586 goto out;
2587
2588 /* Pointers to strings are just pointers and dangerous to dereference */
2589 if (is_string_field(field) &&
2590 (field->filter_type != FILTER_PTR_STRING)) {
2591 flags |= HIST_FIELD_FL_STRING;
2592
2593 hist_field->size = MAX_FILTER_STR_VAL;
2594 hist_field->type = kstrdup(field->type, GFP_KERNEL);
2595 if (!hist_field->type)
2596 goto free;
2597
2598 if (field->filter_type == FILTER_STATIC_STRING) {
2599 hist_field->fn = hist_field_string;
2600 hist_field->size = field->size;
2601 } else if (field->filter_type == FILTER_DYN_STRING)
2602 hist_field->fn = hist_field_dynstring;
2603 else
2604 hist_field->fn = hist_field_pstring;
2605 } else {
2606 hist_field->size = field->size;
2607 hist_field->is_signed = field->is_signed;
2608 hist_field->type = kstrdup(field->type, GFP_KERNEL);
2609 if (!hist_field->type)
2610 goto free;
2611
2612 hist_field->fn = select_value_fn(field->size,
2613 field->is_signed);
2614 if (!hist_field->fn) {
2615 destroy_hist_field(hist_field, 0);
2616 return NULL;
2617 }
2618 }
2619 out:
2620 hist_field->field = field;
2621 hist_field->flags = flags;
2622
2623 if (var_name) {
2624 hist_field->var.name = kstrdup(var_name, GFP_KERNEL);
2625 if (!hist_field->var.name)
2626 goto free;
2627 }
2628
2629 return hist_field;
2630 free:
2631 destroy_hist_field(hist_field, 0);
2632 return NULL;
2633 }
2634
2635 static void destroy_hist_fields(struct hist_trigger_data *hist_data)
2636 {
2637 unsigned int i;
2638
2639 for (i = 0; i < HIST_FIELDS_MAX; i++) {
2640 if (hist_data->fields[i]) {
2641 destroy_hist_field(hist_data->fields[i], 0);
2642 hist_data->fields[i] = NULL;
2643 }
2644 }
2645
2646 for (i = 0; i < hist_data->n_var_refs; i++) {
2647 WARN_ON(!(hist_data->var_refs[i]->flags & HIST_FIELD_FL_VAR_REF));
2648 __destroy_hist_field(hist_data->var_refs[i]);
2649 hist_data->var_refs[i] = NULL;
2650 }
2651 }
2652
2653 static int init_var_ref(struct hist_field *ref_field,
2654 struct hist_field *var_field,
2655 char *system, char *event_name)
2656 {
2657 int err = 0;
2658
2659 ref_field->var.idx = var_field->var.idx;
2660 ref_field->var.hist_data = var_field->hist_data;
2661 ref_field->size = var_field->size;
2662 ref_field->is_signed = var_field->is_signed;
2663 ref_field->flags |= var_field->flags &
2664 (HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS);
2665
2666 if (system) {
2667 ref_field->system = kstrdup(system, GFP_KERNEL);
2668 if (!ref_field->system)
2669 return -ENOMEM;
2670 }
2671
2672 if (event_name) {
2673 ref_field->event_name = kstrdup(event_name, GFP_KERNEL);
2674 if (!ref_field->event_name) {
2675 err = -ENOMEM;
2676 goto free;
2677 }
2678 }
2679
2680 if (var_field->var.name) {
2681 ref_field->name = kstrdup(var_field->var.name, GFP_KERNEL);
2682 if (!ref_field->name) {
2683 err = -ENOMEM;
2684 goto free;
2685 }
2686 } else if (var_field->name) {
2687 ref_field->name = kstrdup(var_field->name, GFP_KERNEL);
2688 if (!ref_field->name) {
2689 err = -ENOMEM;
2690 goto free;
2691 }
2692 }
2693
2694 ref_field->type = kstrdup(var_field->type, GFP_KERNEL);
2695 if (!ref_field->type) {
2696 err = -ENOMEM;
2697 goto free;
2698 }
2699 out:
2700 return err;
2701 free:
2702 kfree(ref_field->system);
2703 ref_field->system = NULL;
2704 kfree(ref_field->event_name);
2705 ref_field->event_name = NULL;
2706 kfree(ref_field->name);
2707 ref_field->name = NULL;
2708
2709 goto out;
2710 }
2711
2712 static int find_var_ref_idx(struct hist_trigger_data *hist_data,
2713 struct hist_field *var_field)
2714 {
2715 struct hist_field *ref_field;
2716 int i;
2717
2718 for (i = 0; i < hist_data->n_var_refs; i++) {
2719 ref_field = hist_data->var_refs[i];
2720 if (ref_field->var.idx == var_field->var.idx &&
2721 ref_field->var.hist_data == var_field->hist_data)
2722 return i;
2723 }
2724
2725 return -ENOENT;
2726 }
2727
2728 /**
2729 * create_var_ref - Create a variable reference and attach it to trigger
2730 * @hist_data: The trigger that will be referencing the variable
2731 * @var_field: The VAR field to create a reference to
2732 * @system: The optional system string
2733 * @event_name: The optional event_name string
2734 *
2735 * Given a variable hist_field, create a VAR_REF hist_field that
2736 * represents a reference to it.
2737 *
2738 * This function also adds the reference to the trigger that
2739 * now references the variable.
2740 *
2741 * Return: The VAR_REF field if successful, NULL if not
2742 */
2743 static struct hist_field *create_var_ref(struct hist_trigger_data *hist_data,
2744 struct hist_field *var_field,
2745 char *system, char *event_name)
2746 {
2747 unsigned long flags = HIST_FIELD_FL_VAR_REF;
2748 struct hist_field *ref_field;
2749 int i;
2750
2751 /* Check if the variable already exists */
2752 for (i = 0; i < hist_data->n_var_refs; i++) {
2753 ref_field = hist_data->var_refs[i];
2754 if (ref_field->var.idx == var_field->var.idx &&
2755 ref_field->var.hist_data == var_field->hist_data) {
2756 get_hist_field(ref_field);
2757 return ref_field;
2758 }
2759 }
2760 /* Sanity check to avoid out-of-bound write on 'hist_data->var_refs' */
2761 if (hist_data->n_var_refs >= TRACING_MAP_VARS_MAX)
2762 return NULL;
2763 ref_field = create_hist_field(var_field->hist_data, NULL, flags, NULL);
2764 if (ref_field) {
2765 if (init_var_ref(ref_field, var_field, system, event_name)) {
2766 destroy_hist_field(ref_field, 0);
2767 return NULL;
2768 }
2769
2770 hist_data->var_refs[hist_data->n_var_refs] = ref_field;
2771 ref_field->var_ref_idx = hist_data->n_var_refs++;
2772 }
2773
2774 return ref_field;
2775 }
2776
2777 static bool is_var_ref(char *var_name)
2778 {
2779 if (!var_name || strlen(var_name) < 2 || var_name[0] != '$')
2780 return false;
2781
2782 return true;
2783 }
2784
2785 static char *field_name_from_var(struct hist_trigger_data *hist_data,
2786 char *var_name)
2787 {
2788 char *name, *field;
2789 unsigned int i;
2790
2791 for (i = 0; i < hist_data->attrs->var_defs.n_vars; i++) {
2792 name = hist_data->attrs->var_defs.name[i];
2793
2794 if (strcmp(var_name, name) == 0) {
2795 field = hist_data->attrs->var_defs.expr[i];
2796 if (contains_operator(field) || is_var_ref(field))
2797 continue;
2798 return field;
2799 }
2800 }
2801
2802 return NULL;
2803 }
2804
2805 static char *local_field_var_ref(struct hist_trigger_data *hist_data,
2806 char *system, char *event_name,
2807 char *var_name)
2808 {
2809 struct trace_event_call *call;
2810
2811 if (system && event_name) {
2812 call = hist_data->event_file->event_call;
2813
2814 if (strcmp(system, call->class->system) != 0)
2815 return NULL;
2816
2817 if (strcmp(event_name, trace_event_name(call)) != 0)
2818 return NULL;
2819 }
2820
2821 if (!!system != !!event_name)
2822 return NULL;
2823
2824 if (!is_var_ref(var_name))
2825 return NULL;
2826
2827 var_name++;
2828
2829 return field_name_from_var(hist_data, var_name);
2830 }
2831
2832 static struct hist_field *parse_var_ref(struct hist_trigger_data *hist_data,
2833 char *system, char *event_name,
2834 char *var_name)
2835 {
2836 struct hist_field *var_field = NULL, *ref_field = NULL;
2837 struct trace_array *tr = hist_data->event_file->tr;
2838
2839 if (!is_var_ref(var_name))
2840 return NULL;
2841
2842 var_name++;
2843
2844 var_field = find_event_var(hist_data, system, event_name, var_name);
2845 if (var_field)
2846 ref_field = create_var_ref(hist_data, var_field,
2847 system, event_name);
2848
2849 if (!ref_field)
2850 hist_err(tr, HIST_ERR_VAR_NOT_FOUND, errpos(var_name));
2851
2852 return ref_field;
2853 }
2854
2855 static struct ftrace_event_field *
2856 parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file,
2857 char *field_str, unsigned long *flags)
2858 {
2859 struct ftrace_event_field *field = NULL;
2860 char *field_name, *modifier, *str;
2861 struct trace_array *tr = file->tr;
2862
2863 modifier = str = kstrdup(field_str, GFP_KERNEL);
2864 if (!modifier)
2865 return ERR_PTR(-ENOMEM);
2866
2867 field_name = strsep(&modifier, ".");
2868 if (modifier) {
2869 if (strcmp(modifier, "hex") == 0)
2870 *flags |= HIST_FIELD_FL_HEX;
2871 else if (strcmp(modifier, "sym") == 0)
2872 *flags |= HIST_FIELD_FL_SYM;
2873 else if (strcmp(modifier, "sym-offset") == 0)
2874 *flags |= HIST_FIELD_FL_SYM_OFFSET;
2875 else if ((strcmp(modifier, "execname") == 0) &&
2876 (strcmp(field_name, "common_pid") == 0))
2877 *flags |= HIST_FIELD_FL_EXECNAME;
2878 else if (strcmp(modifier, "syscall") == 0)
2879 *flags |= HIST_FIELD_FL_SYSCALL;
2880 else if (strcmp(modifier, "log2") == 0)
2881 *flags |= HIST_FIELD_FL_LOG2;
2882 else if (strcmp(modifier, "usecs") == 0)
2883 *flags |= HIST_FIELD_FL_TIMESTAMP_USECS;
2884 else {
2885 hist_err(tr, HIST_ERR_BAD_FIELD_MODIFIER, errpos(modifier));
2886 field = ERR_PTR(-EINVAL);
2887 goto out;
2888 }
2889 }
2890
2891 if (strcmp(field_name, "common_timestamp") == 0) {
2892 *flags |= HIST_FIELD_FL_TIMESTAMP;
2893 hist_data->enable_timestamps = true;
2894 if (*flags & HIST_FIELD_FL_TIMESTAMP_USECS)
2895 hist_data->attrs->ts_in_usecs = true;
2896 } else if (strcmp(field_name, "common_cpu") == 0)
2897 *flags |= HIST_FIELD_FL_CPU;
2898 else {
2899 field = trace_find_event_field(file->event_call, field_name);
2900 if (!field || !field->size) {
2901 /*
2902 * For backward compatibility, if field_name
2903 * was "cpu", then we treat this the same as
2904 * common_cpu. This also works for "CPU".
2905 */
2906 if (field && field->filter_type == FILTER_CPU) {
2907 *flags |= HIST_FIELD_FL_CPU;
2908 } else {
2909 hist_err(tr, HIST_ERR_FIELD_NOT_FOUND,
2910 errpos(field_name));
2911 field = ERR_PTR(-EINVAL);
2912 goto out;
2913 }
2914 }
2915 }
2916 out:
2917 kfree(str);
2918
2919 return field;
2920 }
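/*
 * Example field strings accepted above (event fields are hypothetical):
 * 'common_pid.execname', 'call_site.sym-offset', 'bytes_req.hex',
 * 'common_timestamp.usecs'. An unrecognized modifier is reported as
 * HIST_ERR_BAD_FIELD_MODIFIER.
 */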
2921
2922 static struct hist_field *create_alias(struct hist_trigger_data *hist_data,
2923 struct hist_field *var_ref,
2924 char *var_name)
2925 {
2926 struct hist_field *alias = NULL;
2927 unsigned long flags = HIST_FIELD_FL_ALIAS | HIST_FIELD_FL_VAR;
2928
2929 alias = create_hist_field(hist_data, NULL, flags, var_name);
2930 if (!alias)
2931 return NULL;
2932
2933 alias->fn = var_ref->fn;
2934 alias->operands[0] = var_ref;
2935
2936 if (init_var_ref(alias, var_ref, var_ref->system, var_ref->event_name)) {
2937 destroy_hist_field(alias, 0);
2938 return NULL;
2939 }
2940
2941 alias->var_ref_idx = var_ref->var_ref_idx;
2942
2943 return alias;
2944 }
2945
2946 static struct hist_field *parse_atom(struct hist_trigger_data *hist_data,
2947 struct trace_event_file *file, char *str,
2948 unsigned long *flags, char *var_name)
2949 {
2950 char *s, *ref_system = NULL, *ref_event = NULL, *ref_var = str;
2951 struct ftrace_event_field *field = NULL;
2952 struct hist_field *hist_field = NULL;
2953 int ret = 0;
2954
2955 s = strchr(str, '.');
2956 if (s) {
2957 s = strchr(++s, '.');
2958 if (s) {
2959 ref_system = strsep(&str, ".");
2960 if (!str) {
2961 ret = -EINVAL;
2962 goto out;
2963 }
2964 ref_event = strsep(&str, ".");
2965 if (!str) {
2966 ret = -EINVAL;
2967 goto out;
2968 }
2969 ref_var = str;
2970 }
2971 }
2972
2973 s = local_field_var_ref(hist_data, ref_system, ref_event, ref_var);
2974 if (!s) {
2975 hist_field = parse_var_ref(hist_data, ref_system,
2976 ref_event, ref_var);
2977 if (hist_field) {
2978 if (var_name) {
2979 hist_field = create_alias(hist_data, hist_field, var_name);
2980 if (!hist_field) {
2981 ret = -ENOMEM;
2982 goto out;
2983 }
2984 }
2985 return hist_field;
2986 }
2987 } else
2988 str = s;
2989
2990 field = parse_field(hist_data, file, str, flags);
2991 if (IS_ERR(field)) {
2992 ret = PTR_ERR(field);
2993 goto out;
2994 }
2995
2996 hist_field = create_hist_field(hist_data, field, *flags, var_name);
2997 if (!hist_field) {
2998 ret = -ENOMEM;
2999 goto out;
3000 }
3001
3002 return hist_field;
3003 out:
3004 return ERR_PTR(ret);
3005 }
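/*
 * Rough sketch of the flow above (names are hypothetical): an input with
 * two dots such as 'sched.sched_switch.$wakeup_lat' is split into
 * ref_system/ref_event/ref_var and tried as a variable reference, whereas
 * a plain field expression like 'prio.hex' falls through to parse_field()
 * and create_hist_field().
 */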
3006
3007 static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
3008 struct trace_event_file *file,
3009 char *str, unsigned long flags,
3010 char *var_name, unsigned int level);
3011
3012 static struct hist_field *parse_unary(struct hist_trigger_data *hist_data,
3013 struct trace_event_file *file,
3014 char *str, unsigned long flags,
3015 char *var_name, unsigned int level)
3016 {
3017 struct hist_field *operand1, *expr = NULL;
3018 unsigned long operand_flags;
3019 int ret = 0;
3020 char *s;
3021
3022 /* we support only -(xxx) i.e. explicit parens required */
3023
3024 if (level > 3) {
3025 hist_err(file->tr, HIST_ERR_TOO_MANY_SUBEXPR, errpos(str));
3026 ret = -EINVAL;
3027 goto free;
3028 }
3029
3030 str++; /* skip leading '-' */
3031
3032 s = strchr(str, '(');
3033 if (s)
3034 str++;
3035 else {
3036 ret = -EINVAL;
3037 goto free;
3038 }
3039
3040 s = strrchr(str, ')');
3041 if (s)
3042 *s = '\0';
3043 else {
3044 ret = -EINVAL; /* no closing ')' */
3045 goto free;
3046 }
3047
3048 flags |= HIST_FIELD_FL_EXPR;
3049 expr = create_hist_field(hist_data, NULL, flags, var_name);
3050 if (!expr) {
3051 ret = -ENOMEM;
3052 goto free;
3053 }
3054
3055 operand_flags = 0;
3056 operand1 = parse_expr(hist_data, file, str, operand_flags, NULL, ++level);
3057 if (IS_ERR(operand1)) {
3058 ret = PTR_ERR(operand1);
3059 goto free;
3060 }
3061 if (operand1->flags & HIST_FIELD_FL_STRING) {
3062 /* String type can not be the operand of unary operator. */
3063 hist_err(file->tr, HIST_ERR_INVALID_STR_OPERAND, errpos(str));
3064 destroy_hist_field(operand1, 0);
3065 ret = -EINVAL;
3066 goto free;
3067 }
3068
3069 expr->flags |= operand1->flags &
3070 (HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS);
3071 expr->fn = hist_field_unary_minus;
3072 expr->operands[0] = operand1;
3073 expr->operator = FIELD_OP_UNARY_MINUS;
3074 expr->name = expr_str(expr, 0);
3075 expr->type = kstrdup(operand1->type, GFP_KERNEL);
3076 if (!expr->type) {
3077 ret = -ENOMEM;
3078 goto free;
3079 }
3080
3081 return expr;
3082 free:
3083 destroy_hist_field(expr, 0);
3084 return ERR_PTR(ret);
3085 }
3086
3087 static int check_expr_operands(struct trace_array *tr,
3088 struct hist_field *operand1,
3089 struct hist_field *operand2)
3090 {
3091 unsigned long operand1_flags = operand1->flags;
3092 unsigned long operand2_flags = operand2->flags;
3093
3094 if ((operand1_flags & HIST_FIELD_FL_VAR_REF) ||
3095 (operand1_flags & HIST_FIELD_FL_ALIAS)) {
3096 struct hist_field *var;
3097
3098 var = find_var_field(operand1->var.hist_data, operand1->name);
3099 if (!var)
3100 return -EINVAL;
3101 operand1_flags = var->flags;
3102 }
3103
3104 if ((operand2_flags & HIST_FIELD_FL_VAR_REF) ||
3105 (operand2_flags & HIST_FIELD_FL_ALIAS)) {
3106 struct hist_field *var;
3107
3108 var = find_var_field(operand2->var.hist_data, operand2->name);
3109 if (!var)
3110 return -EINVAL;
3111 operand2_flags = var->flags;
3112 }
3113
3114 if ((operand1_flags & HIST_FIELD_FL_TIMESTAMP_USECS) !=
3115 (operand2_flags & HIST_FIELD_FL_TIMESTAMP_USECS)) {
3116 hist_err(tr, HIST_ERR_TIMESTAMP_MISMATCH, 0);
3117 return -EINVAL;
3118 }
3119
3120 return 0;
3121 }
3122
3123 static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
3124 struct trace_event_file *file,
3125 char *str, unsigned long flags,
3126 char *var_name, unsigned int level)
3127 {
3128 struct hist_field *operand1 = NULL, *operand2 = NULL, *expr = NULL;
3129 unsigned long operand_flags;
3130 int field_op, ret = -EINVAL;
3131 char *sep, *operand1_str;
3132
3133 if (level > 3) {
3134 hist_err(file->tr, HIST_ERR_TOO_MANY_SUBEXPR, errpos(str));
3135 return ERR_PTR(-EINVAL);
3136 }
3137
3138 field_op = contains_operator(str);
3139
3140 if (field_op == FIELD_OP_NONE)
3141 return parse_atom(hist_data, file, str, &flags, var_name);
3142
3143 if (field_op == FIELD_OP_UNARY_MINUS)
3144 return parse_unary(hist_data, file, str, flags, var_name, ++level);
3145
3146 switch (field_op) {
3147 case FIELD_OP_MINUS:
3148 sep = "-";
3149 break;
3150 case FIELD_OP_PLUS:
3151 sep = "+";
3152 break;
3153 default:
3154 goto free;
3155 }
3156
3157 operand1_str = strsep(&str, sep);
3158 if (!operand1_str || !str)
3159 goto free;
3160
3161 operand_flags = 0;
3162 operand1 = parse_atom(hist_data, file, operand1_str,
3163 &operand_flags, NULL);
3164 if (IS_ERR(operand1)) {
3165 ret = PTR_ERR(operand1);
3166 operand1 = NULL;
3167 goto free;
3168 }
3169 if (operand1->flags & HIST_FIELD_FL_STRING) {
3170 hist_err(file->tr, HIST_ERR_INVALID_STR_OPERAND, errpos(operand1_str));
3171 ret = -EINVAL;
3172 goto free;
3173 }
3174
3175 /* rest of string could be another expression e.g. b+c in a+b+c */
3176 operand_flags = 0;
3177 operand2 = parse_expr(hist_data, file, str, operand_flags, NULL, ++level);
3178 if (IS_ERR(operand2)) {
3179 ret = PTR_ERR(operand2);
3180 operand2 = NULL;
3181 goto free;
3182 }
3183 if (operand2->flags & HIST_FIELD_FL_STRING) {
3184 hist_err(file->tr, HIST_ERR_INVALID_STR_OPERAND, errpos(str));
3185 ret = -EINVAL;
3186 goto free;
3187 }
3188
3189 ret = check_expr_operands(file->tr, operand1, operand2);
3190 if (ret)
3191 goto free;
3192
3193 flags |= HIST_FIELD_FL_EXPR;
3194
3195 flags |= operand1->flags &
3196 (HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS);
3197
3198 expr = create_hist_field(hist_data, NULL, flags, var_name);
3199 if (!expr) {
3200 ret = -ENOMEM;
3201 goto free;
3202 }
3203
3204 operand1->read_once = true;
3205 operand2->read_once = true;
3206
3207 expr->operands[0] = operand1;
3208 expr->operands[1] = operand2;
3209
3210 /* The operand sizes should be the same, so just pick one */
3211 expr->size = operand1->size;
3212
3213 expr->operator = field_op;
3214 expr->name = expr_str(expr, 0);
3215 expr->type = kstrdup(operand1->type, GFP_KERNEL);
3216 if (!expr->type) {
3217 ret = -ENOMEM;
3218 goto free;
3219 }
3220
3221 switch (field_op) {
3222 case FIELD_OP_MINUS:
3223 expr->fn = hist_field_minus;
3224 break;
3225 case FIELD_OP_PLUS:
3226 expr->fn = hist_field_plus;
3227 break;
3228 default:
3229 ret = -EINVAL;
3230 goto free;
3231 }
3232
3233 return expr;
3234 free:
3235 destroy_hist_field(operand1, 0);
3236 destroy_hist_field(operand2, 0);
3237 destroy_hist_field(expr, 0);
3238
3239 return ERR_PTR(ret);
3240 }
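/*
 * Example expressions handled above (variables and fields are
 * hypothetical): '$t1-$t0', '-($delta)', 'common_timestamp.usecs-$ts0'.
 * Mixing a usecs timestamp with a non-usecs operand is rejected by
 * check_expr_operands() with HIST_ERR_TIMESTAMP_MISMATCH.
 */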
3241
3242 static char *find_trigger_filter(struct hist_trigger_data *hist_data,
3243 struct trace_event_file *file)
3244 {
3245 struct event_trigger_data *test;
3246
3247 lockdep_assert_held(&event_mutex);
3248
3249 list_for_each_entry(test, &file->triggers, list) {
3250 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
3251 if (test->private_data == hist_data)
3252 return test->filter_str;
3253 }
3254 }
3255
3256 return NULL;
3257 }
3258
3259 static struct event_command trigger_hist_cmd;
3260 static int event_hist_trigger_func(struct event_command *cmd_ops,
3261 struct trace_event_file *file,
3262 char *glob, char *cmd, char *param);
3263
3264 static bool compatible_keys(struct hist_trigger_data *target_hist_data,
3265 struct hist_trigger_data *hist_data,
3266 unsigned int n_keys)
3267 {
3268 struct hist_field *target_hist_field, *hist_field;
3269 unsigned int n, i, j;
3270
3271 if (hist_data->n_fields - hist_data->n_vals != n_keys)
3272 return false;
3273
3274 i = hist_data->n_vals;
3275 j = target_hist_data->n_vals;
3276
3277 for (n = 0; n < n_keys; n++) {
3278 hist_field = hist_data->fields[i + n];
3279 target_hist_field = target_hist_data->fields[j + n];
3280
3281 if (strcmp(hist_field->type, target_hist_field->type) != 0)
3282 return false;
3283 if (hist_field->size != target_hist_field->size)
3284 return false;
3285 if (hist_field->is_signed != target_hist_field->is_signed)
3286 return false;
3287 }
3288
3289 return true;
3290 }
3291
3292 static struct hist_trigger_data *
3293 find_compatible_hist(struct hist_trigger_data *target_hist_data,
3294 struct trace_event_file *file)
3295 {
3296 struct hist_trigger_data *hist_data;
3297 struct event_trigger_data *test;
3298 unsigned int n_keys;
3299
3300 lockdep_assert_held(&event_mutex);
3301
3302 n_keys = target_hist_data->n_fields - target_hist_data->n_vals;
3303
3304 list_for_each_entry(test, &file->triggers, list) {
3305 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
3306 hist_data = test->private_data;
3307
3308 if (compatible_keys(target_hist_data, hist_data, n_keys))
3309 return hist_data;
3310 }
3311 }
3312
3313 return NULL;
3314 }
3315
3316 static struct trace_event_file *event_file(struct trace_array *tr,
3317 char *system, char *event_name)
3318 {
3319 struct trace_event_file *file;
3320
3321 file = __find_event_file(tr, system, event_name);
3322 if (!file)
3323 return ERR_PTR(-EINVAL);
3324
3325 return file;
3326 }
3327
3328 static struct hist_field *
3329 find_synthetic_field_var(struct hist_trigger_data *target_hist_data,
3330 char *system, char *event_name, char *field_name)
3331 {
3332 struct hist_field *event_var;
3333 char *synthetic_name;
3334
3335 synthetic_name = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL);
3336 if (!synthetic_name)
3337 return ERR_PTR(-ENOMEM);
3338
3339 strcpy(synthetic_name, "synthetic_");
3340 strcat(synthetic_name, field_name);
3341
3342 event_var = find_event_var(target_hist_data, system, event_name, synthetic_name);
3343
3344 kfree(synthetic_name);
3345
3346 return event_var;
3347 }
3348
3349 /**
3350 * create_field_var_hist - Automatically create a histogram and var for a field
3351 * @target_hist_data: The target hist trigger
3352 * @subsys_name: Optional subsystem name
3353 * @event_name: Optional event name
3354 * @field_name: The name of the field (and the resulting variable)
3355 *
3356 * Hist trigger actions fetch data from variables, not directly from
3357 * events. However, for convenience, users are allowed to directly
3358 * specify an event field in an action, which will be automatically
3359 * converted into a variable on their behalf.
3360  *
3361 * If a user specifies a field on an event that isn't the event the
3362 * histogram is currently being defined on (the target event histogram), the
3363 * only way that can be accomplished is if a new hist trigger is
3364 * created and the field variable defined on that.
3365 *
3366 * This function creates a new histogram compatible with the target
3367 * event (meaning a histogram with the same key as the target
3368 * histogram), and creates a variable for the specified field, but
3369 * with 'synthetic_' prepended to the variable name in order to avoid
3370 * collision with normal field variables.
3371 *
3372 * Return: The variable created for the field.
3373 */
3374 static struct hist_field *
3375 create_field_var_hist(struct hist_trigger_data *target_hist_data,
3376 char *subsys_name, char *event_name, char *field_name)
3377 {
3378 struct trace_array *tr = target_hist_data->event_file->tr;
3379 struct hist_field *event_var = ERR_PTR(-EINVAL);
3380 struct hist_trigger_data *hist_data;
3381 unsigned int i, n, first = true;
3382 struct field_var_hist *var_hist;
3383 struct trace_event_file *file;
3384 struct hist_field *key_field;
3385 char *saved_filter;
3386 char *cmd;
3387 int ret;
3388
3389 if (target_hist_data->n_field_var_hists >= SYNTH_FIELDS_MAX) {
3390 hist_err(tr, HIST_ERR_TOO_MANY_FIELD_VARS, errpos(field_name));
3391 return ERR_PTR(-EINVAL);
3392 }
3393
3394 file = event_file(tr, subsys_name, event_name);
3395
3396 if (IS_ERR(file)) {
3397 hist_err(tr, HIST_ERR_EVENT_FILE_NOT_FOUND, errpos(field_name));
3398 ret = PTR_ERR(file);
3399 return ERR_PTR(ret);
3400 }
3401
3402 /*
3403 * Look for a histogram compatible with target. We'll use the
3404 * found histogram specification to create a new matching
3405 * histogram with our variable on it. target_hist_data is not
3406 * yet a registered histogram so we can't use that.
3407 */
3408 hist_data = find_compatible_hist(target_hist_data, file);
3409 if (!hist_data) {
3410 hist_err(tr, HIST_ERR_HIST_NOT_FOUND, errpos(field_name));
3411 return ERR_PTR(-EINVAL);
3412 }
3413
3414 /* See if a synthetic field variable has already been created */
3415 event_var = find_synthetic_field_var(target_hist_data, subsys_name,
3416 event_name, field_name);
3417 if (!IS_ERR_OR_NULL(event_var))
3418 return event_var;
3419
3420 var_hist = kzalloc(sizeof(*var_hist), GFP_KERNEL);
3421 if (!var_hist)
3422 return ERR_PTR(-ENOMEM);
3423
3424 cmd = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL);
3425 if (!cmd) {
3426 kfree(var_hist);
3427 return ERR_PTR(-ENOMEM);
3428 }
3429
3430 /* Use the same keys as the compatible histogram */
3431 strcat(cmd, "keys=");
3432
3433 for_each_hist_key_field(i, hist_data) {
3434 key_field = hist_data->fields[i];
3435 if (!first)
3436 strcat(cmd, ",");
3437 strcat(cmd, key_field->field->name);
3438 first = false;
3439 }
3440
3441 /* Create the synthetic field variable specification */
3442 strcat(cmd, ":synthetic_");
3443 strcat(cmd, field_name);
3444 strcat(cmd, "=");
3445 strcat(cmd, field_name);
3446
3447 /* Use the same filter as the compatible histogram */
3448 saved_filter = find_trigger_filter(hist_data, file);
3449 if (saved_filter) {
3450 strcat(cmd, " if ");
3451 strcat(cmd, saved_filter);
3452 }
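	/*
	 * At this point cmd holds something like (hypothetical names):
	 *
	 *   keys=pid:synthetic_prio=prio if prio < 100
	 *
	 * i.e. the compatible histogram's keys plus a 'synthetic_'-prefixed
	 * variable for the requested field, reusing that histogram's filter.
	 */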
3453
3454 var_hist->cmd = kstrdup(cmd, GFP_KERNEL);
3455 if (!var_hist->cmd) {
3456 kfree(cmd);
3457 kfree(var_hist);
3458 return ERR_PTR(-ENOMEM);
3459 }
3460
3461 /* Save the compatible histogram information */
3462 var_hist->hist_data = hist_data;
3463
3464 /* Create the new histogram with our variable */
3465 ret = event_hist_trigger_func(&trigger_hist_cmd, file,
3466 "", "hist", cmd);
3467 if (ret) {
3468 kfree(cmd);
3469 kfree(var_hist->cmd);
3470 kfree(var_hist);
3471 hist_err(tr, HIST_ERR_HIST_CREATE_FAIL, errpos(field_name));
3472 return ERR_PTR(ret);
3473 }
3474
3475 kfree(cmd);
3476
3477 /* If we can't find the variable, something went wrong */
3478 event_var = find_synthetic_field_var(target_hist_data, subsys_name,
3479 event_name, field_name);
3480 if (IS_ERR_OR_NULL(event_var)) {
3481 kfree(var_hist->cmd);
3482 kfree(var_hist);
3483 hist_err(tr, HIST_ERR_SYNTH_VAR_NOT_FOUND, errpos(field_name));
3484 return ERR_PTR(-EINVAL);
3485 }
3486
3487 n = target_hist_data->n_field_var_hists;
3488 target_hist_data->field_var_hists[n] = var_hist;
3489 target_hist_data->n_field_var_hists++;
3490
3491 return event_var;
3492 }
3493
3494 static struct hist_field *
3495 find_target_event_var(struct hist_trigger_data *hist_data,
3496 char *subsys_name, char *event_name, char *var_name)
3497 {
3498 struct trace_event_file *file = hist_data->event_file;
3499 struct hist_field *hist_field = NULL;
3500
3501 if (subsys_name) {
3502 struct trace_event_call *call;
3503
3504 if (!event_name)
3505 return NULL;
3506
3507 call = file->event_call;
3508
3509 if (strcmp(subsys_name, call->class->system) != 0)
3510 return NULL;
3511
3512 if (strcmp(event_name, trace_event_name(call)) != 0)
3513 return NULL;
3514 }
3515
3516 hist_field = find_var_field(hist_data, var_name);
3517
3518 return hist_field;
3519 }
3520
3521 static inline void __update_field_vars(struct tracing_map_elt *elt,
3522 struct ring_buffer_event *rbe,
3523 void *rec,
3524 struct field_var **field_vars,
3525 unsigned int n_field_vars,
3526 unsigned int field_var_str_start)
3527 {
3528 struct hist_elt_data *elt_data = elt->private_data;
3529 unsigned int i, j, var_idx;
3530 u64 var_val;
3531
3532 for (i = 0, j = field_var_str_start; i < n_field_vars; i++) {
3533 struct field_var *field_var = field_vars[i];
3534 struct hist_field *var = field_var->var;
3535 struct hist_field *val = field_var->val;
3536
3537 var_val = val->fn(val, elt, rbe, rec);
3538 var_idx = var->var.idx;
3539
3540 if (val->flags & HIST_FIELD_FL_STRING) {
3541 char *str = elt_data->field_var_str[j++];
3542 char *val_str = (char *)(uintptr_t)var_val;
3543
3544 strscpy(str, val_str, val->size);
3545 var_val = (u64)(uintptr_t)str;
3546 }
3547 tracing_map_set_var(elt, var_idx, var_val);
3548 }
3549 }
3550
3551 static void update_field_vars(struct hist_trigger_data *hist_data,
3552 struct tracing_map_elt *elt,
3553 struct ring_buffer_event *rbe,
3554 void *rec)
3555 {
3556 __update_field_vars(elt, rbe, rec, hist_data->field_vars,
3557 hist_data->n_field_vars, 0);
3558 }
3559
3560 static void save_track_data_vars(struct hist_trigger_data *hist_data,
3561 struct tracing_map_elt *elt, void *rec,
3562 struct ring_buffer_event *rbe, void *key,
3563 struct action_data *data, u64 *var_ref_vals)
3564 {
3565 __update_field_vars(elt, rbe, rec, hist_data->save_vars,
3566 hist_data->n_save_vars, hist_data->n_field_var_str);
3567 }
3568
3569 static struct hist_field *create_var(struct hist_trigger_data *hist_data,
3570 struct trace_event_file *file,
3571 char *name, int size, const char *type)
3572 {
3573 struct hist_field *var;
3574 int idx;
3575
3576 if (find_var(hist_data, file, name) && !hist_data->remove) {
3577 var = ERR_PTR(-EINVAL);
3578 goto out;
3579 }
3580
3581 var = kzalloc(sizeof(struct hist_field), GFP_KERNEL);
3582 if (!var) {
3583 var = ERR_PTR(-ENOMEM);
3584 goto out;
3585 }
3586
3587 idx = tracing_map_add_var(hist_data->map);
3588 if (idx < 0) {
3589 kfree(var);
3590 var = ERR_PTR(-EINVAL);
3591 goto out;
3592 }
3593
3594 var->ref = 1;
3595 var->flags = HIST_FIELD_FL_VAR;
3596 var->var.idx = idx;
3597 var->var.hist_data = var->hist_data = hist_data;
3598 var->size = size;
3599 var->var.name = kstrdup(name, GFP_KERNEL);
3600 var->type = kstrdup(type, GFP_KERNEL);
3601 if (!var->var.name || !var->type) {
3602 kfree(var->var.name);
3603 kfree(var->type);
3604 kfree(var);
3605 var = ERR_PTR(-ENOMEM);
3606 }
3607 out:
3608 return var;
3609 }
3610
3611 static struct field_var *create_field_var(struct hist_trigger_data *hist_data,
3612 struct trace_event_file *file,
3613 char *field_name)
3614 {
3615 struct hist_field *val = NULL, *var = NULL;
3616 unsigned long flags = HIST_FIELD_FL_VAR;
3617 struct trace_array *tr = file->tr;
3618 struct field_var *field_var;
3619 int ret = 0;
3620
3621 if (hist_data->n_field_vars >= SYNTH_FIELDS_MAX) {
3622 hist_err(tr, HIST_ERR_TOO_MANY_FIELD_VARS, errpos(field_name));
3623 ret = -EINVAL;
3624 goto err;
3625 }
3626
3627 val = parse_atom(hist_data, file, field_name, &flags, NULL);
3628 if (IS_ERR(val)) {
3629 hist_err(tr, HIST_ERR_FIELD_VAR_PARSE_FAIL, errpos(field_name));
3630 ret = PTR_ERR(val);
3631 goto err;
3632 }
3633
3634 var = create_var(hist_data, file, field_name, val->size, val->type);
3635 if (IS_ERR(var)) {
3636 hist_err(tr, HIST_ERR_VAR_CREATE_FIND_FAIL, errpos(field_name));
3637 kfree(val);
3638 ret = PTR_ERR(var);
3639 goto err;
3640 }
3641
3642 field_var = kzalloc(sizeof(struct field_var), GFP_KERNEL);
3643 if (!field_var) {
3644 kfree(val);
3645 kfree(var);
3646 ret = -ENOMEM;
3647 goto err;
3648 }
3649
3650 field_var->var = var;
3651 field_var->val = val;
3652 out:
3653 return field_var;
3654 err:
3655 field_var = ERR_PTR(ret);
3656 goto out;
3657 }
3658
3659 /**
3660 * create_target_field_var - Automatically create a variable for a field
3661 * @target_hist_data: The target hist trigger
3662 * @subsys_name: Optional subsystem name
3663 * @event_name: Optional event name
3664 * @var_name: The name of the field (and the resulting variable)
3665 *
3666 * Hist trigger actions fetch data from variables, not directly from
3667 * events. However, for convenience, users are allowed to directly
3668 * specify an event field in an action, which will be automatically
3669 * converted into a variable on their behalf.
3670  *
3671 * This function creates a field variable with the name var_name on
3672 * the hist trigger currently being defined on the target event. If
3673 * subsys_name and event_name are specified, this function simply
3674 * verifies that they do in fact match the target event subsystem and
3675 * event name.
3676 *
3677 * Return: The variable created for the field.
3678 */
3679 static struct field_var *
3680 create_target_field_var(struct hist_trigger_data *target_hist_data,
3681 char *subsys_name, char *event_name, char *var_name)
3682 {
3683 struct trace_event_file *file = target_hist_data->event_file;
3684
3685 if (subsys_name) {
3686 struct trace_event_call *call;
3687
3688 if (!event_name)
3689 return NULL;
3690
3691 call = file->event_call;
3692
3693 if (strcmp(subsys_name, call->class->system) != 0)
3694 return NULL;
3695
3696 if (strcmp(event_name, trace_event_name(call)) != 0)
3697 return NULL;
3698 }
3699
3700 return create_field_var(target_hist_data, file, var_name);
3701 }
3702
3703 static bool check_track_val_max(u64 track_val, u64 var_val)
3704 {
3705 if (var_val <= track_val)
3706 return false;
3707
3708 return true;
3709 }
3710
3711 static bool check_track_val_changed(u64 track_val, u64 var_val)
3712 {
3713 if (var_val == track_val)
3714 return false;
3715
3716 return true;
3717 }
3718
3719 static u64 get_track_val(struct hist_trigger_data *hist_data,
3720 struct tracing_map_elt *elt,
3721 struct action_data *data)
3722 {
3723 unsigned int track_var_idx = data->track_data.track_var->var.idx;
3724 u64 track_val;
3725
3726 track_val = tracing_map_read_var(elt, track_var_idx);
3727
3728 return track_val;
3729 }
3730
3731 static void save_track_val(struct hist_trigger_data *hist_data,
3732 struct tracing_map_elt *elt,
3733 struct action_data *data, u64 var_val)
3734 {
3735 unsigned int track_var_idx = data->track_data.track_var->var.idx;
3736
3737 tracing_map_set_var(elt, track_var_idx, var_val);
3738 }
3739
3740 static void save_track_data(struct hist_trigger_data *hist_data,
3741 struct tracing_map_elt *elt, void *rec,
3742 struct ring_buffer_event *rbe, void *key,
3743 struct action_data *data, u64 *var_ref_vals)
3744 {
3745 if (data->track_data.save_data)
3746 data->track_data.save_data(hist_data, elt, rec, rbe, key, data, var_ref_vals);
3747 }
3748
3749 static bool check_track_val(struct tracing_map_elt *elt,
3750 struct action_data *data,
3751 u64 var_val)
3752 {
3753 struct hist_trigger_data *hist_data;
3754 u64 track_val;
3755
3756 hist_data = data->track_data.track_var->hist_data;
3757 track_val = get_track_val(hist_data, elt, data);
3758
3759 return data->track_data.check_val(track_val, var_val);
3760 }
3761
3762 #ifdef CONFIG_TRACER_SNAPSHOT
3763 static bool cond_snapshot_update(struct trace_array *tr, void *cond_data)
3764 {
3765 /* called with tr->max_lock held */
3766 struct track_data *track_data = tr->cond_snapshot->cond_data;
3767 struct hist_elt_data *elt_data, *track_elt_data;
3768 struct snapshot_context *context = cond_data;
3769 struct action_data *action;
3770 u64 track_val;
3771
3772 if (!track_data)
3773 return false;
3774
3775 action = track_data->action_data;
3776
3777 track_val = get_track_val(track_data->hist_data, context->elt,
3778 track_data->action_data);
3779
3780 if (!action->track_data.check_val(track_data->track_val, track_val))
3781 return false;
3782
3783 track_data->track_val = track_val;
3784 memcpy(track_data->key, context->key, track_data->key_len);
3785
3786 elt_data = context->elt->private_data;
3787 track_elt_data = track_data->elt.private_data;
3788 if (elt_data->comm)
3789 strncpy(track_elt_data->comm, elt_data->comm, TASK_COMM_LEN);
3790
3791 track_data->updated = true;
3792
3793 return true;
3794 }
3795
3796 static void save_track_data_snapshot(struct hist_trigger_data *hist_data,
3797 struct tracing_map_elt *elt, void *rec,
3798 struct ring_buffer_event *rbe, void *key,
3799 struct action_data *data,
3800 u64 *var_ref_vals)
3801 {
3802 struct trace_event_file *file = hist_data->event_file;
3803 struct snapshot_context context;
3804
3805 context.elt = elt;
3806 context.key = key;
3807
3808 tracing_snapshot_cond(file->tr, &context);
3809 }
3810
3811 static void hist_trigger_print_key(struct seq_file *m,
3812 struct hist_trigger_data *hist_data,
3813 void *key,
3814 struct tracing_map_elt *elt);
3815
3816 static struct action_data *snapshot_action(struct hist_trigger_data *hist_data)
3817 {
3818 unsigned int i;
3819
3820 if (!hist_data->n_actions)
3821 return NULL;
3822
3823 for (i = 0; i < hist_data->n_actions; i++) {
3824 struct action_data *data = hist_data->actions[i];
3825
3826 if (data->action == ACTION_SNAPSHOT)
3827 return data;
3828 }
3829
3830 return NULL;
3831 }
3832
3833 static void track_data_snapshot_print(struct seq_file *m,
3834 struct hist_trigger_data *hist_data)
3835 {
3836 struct trace_event_file *file = hist_data->event_file;
3837 struct track_data *track_data;
3838 struct action_data *action;
3839
3840 track_data = tracing_cond_snapshot_data(file->tr);
3841 if (!track_data)
3842 return;
3843
3844 if (!track_data->updated)
3845 return;
3846
3847 action = snapshot_action(hist_data);
3848 if (!action)
3849 return;
3850
3851 seq_puts(m, "\nSnapshot taken (see tracing/snapshot). Details:\n");
3852 seq_printf(m, "\ttriggering value { %s(%s) }: %10llu",
3853 action->handler == HANDLER_ONMAX ? "onmax" : "onchange",
3854 action->track_data.var_str, track_data->track_val);
3855
3856 seq_puts(m, "\ttriggered by event with key: ");
3857 hist_trigger_print_key(m, hist_data, track_data->key, &track_data->elt);
3858 seq_putc(m, '\n');
3859 }
3860 #else
3861 static bool cond_snapshot_update(struct trace_array *tr, void *cond_data)
3862 {
3863 return false;
3864 }
3865 static void save_track_data_snapshot(struct hist_trigger_data *hist_data,
3866 struct tracing_map_elt *elt, void *rec,
3867 struct ring_buffer_event *rbe, void *key,
3868 struct action_data *data,
3869 u64 *var_ref_vals) {}
3870 static void track_data_snapshot_print(struct seq_file *m,
3871 struct hist_trigger_data *hist_data) {}
3872 #endif /* CONFIG_TRACER_SNAPSHOT */
3873
3874 static void track_data_print(struct seq_file *m,
3875 struct hist_trigger_data *hist_data,
3876 struct tracing_map_elt *elt,
3877 struct action_data *data)
3878 {
3879 u64 track_val = get_track_val(hist_data, elt, data);
3880 unsigned int i, save_var_idx;
3881
3882 if (data->handler == HANDLER_ONMAX)
3883 seq_printf(m, "\n\tmax: %10llu", track_val);
3884 else if (data->handler == HANDLER_ONCHANGE)
3885 seq_printf(m, "\n\tchanged: %10llu", track_val);
3886
3887 if (data->action == ACTION_SNAPSHOT)
3888 return;
3889
3890 for (i = 0; i < hist_data->n_save_vars; i++) {
3891 struct hist_field *save_val = hist_data->save_vars[i]->val;
3892 struct hist_field *save_var = hist_data->save_vars[i]->var;
3893 u64 val;
3894
3895 save_var_idx = save_var->var.idx;
3896
3897 val = tracing_map_read_var(elt, save_var_idx);
3898
3899 if (save_val->flags & HIST_FIELD_FL_STRING) {
3900 seq_printf(m, " %s: %-32s", save_var->var.name,
3901 (char *)(uintptr_t)(val));
3902 } else
3903 seq_printf(m, " %s: %10llu", save_var->var.name, val);
3904 }
3905 }
3906
3907 static void ontrack_action(struct hist_trigger_data *hist_data,
3908 struct tracing_map_elt *elt, void *rec,
3909 struct ring_buffer_event *rbe, void *key,
3910 struct action_data *data, u64 *var_ref_vals)
3911 {
3912 u64 var_val = var_ref_vals[data->track_data.var_ref->var_ref_idx];
3913
3914 if (check_track_val(elt, data, var_val)) {
3915 save_track_val(hist_data, elt, data, var_val);
3916 save_track_data(hist_data, elt, rec, rbe, key, data, var_ref_vals);
3917 }
3918 }
3919
3920 static void action_data_destroy(struct action_data *data)
3921 {
3922 unsigned int i;
3923
3924 lockdep_assert_held(&event_mutex);
3925
3926 kfree(data->action_name);
3927
3928 for (i = 0; i < data->n_params; i++)
3929 kfree(data->params[i]);
3930
3931 if (data->synth_event)
3932 data->synth_event->ref--;
3933
3934 kfree(data->synth_event_name);
3935
3936 kfree(data);
3937 }
3938
3939 static void track_data_destroy(struct hist_trigger_data *hist_data,
3940 struct action_data *data)
3941 {
3942 struct trace_event_file *file = hist_data->event_file;
3943
3944 destroy_hist_field(data->track_data.track_var, 0);
3945
3946 if (data->action == ACTION_SNAPSHOT) {
3947 struct track_data *track_data;
3948
3949 track_data = tracing_cond_snapshot_data(file->tr);
3950 if (track_data && track_data->hist_data == hist_data) {
3951 tracing_snapshot_cond_disable(file->tr);
3952 track_data_free(track_data);
3953 }
3954 }
3955
3956 kfree(data->track_data.var_str);
3957
3958 action_data_destroy(data);
3959 }
3960
3961 static int action_create(struct hist_trigger_data *hist_data,
3962 struct action_data *data);
3963
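/*
 * Set up the tracking state for an onmax()/onchange() action: the
 * argument must be a $variable defined on the target event; a reference
 * to it is created along with an internal "__max" or "__change" variable
 * used to remember the last tracked value, then the action itself is
 * created.  Illustrative usage: onmax($wakeup_lat).save(pid,prio).
 */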
3964 static int track_data_create(struct hist_trigger_data *hist_data,
3965 struct action_data *data)
3966 {
3967 struct hist_field *var_field, *ref_field, *track_var = NULL;
3968 struct trace_event_file *file = hist_data->event_file;
3969 struct trace_array *tr = file->tr;
3970 char *track_data_var_str;
3971 int ret = 0;
3972
3973 track_data_var_str = data->track_data.var_str;
3974 if (track_data_var_str[0] != '$') {
3975 hist_err(tr, HIST_ERR_ONX_NOT_VAR, errpos(track_data_var_str));
3976 return -EINVAL;
3977 }
3978 track_data_var_str++;
3979
3980 var_field = find_target_event_var(hist_data, NULL, NULL, track_data_var_str);
3981 if (!var_field) {
3982 hist_err(tr, HIST_ERR_ONX_VAR_NOT_FOUND, errpos(track_data_var_str));
3983 return -EINVAL;
3984 }
3985
3986 ref_field = create_var_ref(hist_data, var_field, NULL, NULL);
3987 if (!ref_field)
3988 return -ENOMEM;
3989
3990 data->track_data.var_ref = ref_field;
3991
3992 if (data->handler == HANDLER_ONMAX)
3993 track_var = create_var(hist_data, file, "__max", sizeof(u64), "u64");
3994 if (IS_ERR(track_var)) {
3995 hist_err(tr, HIST_ERR_ONX_VAR_CREATE_FAIL, 0);
3996 ret = PTR_ERR(track_var);
3997 goto out;
3998 }
3999
4000 if (data->handler == HANDLER_ONCHANGE)
4001 track_var = create_var(hist_data, file, "__change", sizeof(u64), "u64");
4002 if (IS_ERR(track_var)) {
4003 hist_err(tr, HIST_ERR_ONX_VAR_CREATE_FAIL, 0);
4004 ret = PTR_ERR(track_var);
4005 goto out;
4006 }
4007 data->track_data.track_var = track_var;
4008
4009 ret = action_create(hist_data, data);
4010 out:
4011 return ret;
4012 }
4013
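/*
 * Split the comma-separated action parameter list, capping the number of
 * params at SYNTH_FIELDS_MAX.  When the trace() keyword is used, the
 * first parameter is the synthetic event name rather than a field param
 * and is stored separately.
 */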
4014 static int parse_action_params(struct trace_array *tr, char *params,
4015 struct action_data *data)
4016 {
4017 char *param, *saved_param;
4018 bool first_param = true;
4019 int ret = 0;
4020
4021 while (params) {
4022 if (data->n_params >= SYNTH_FIELDS_MAX) {
4023 hist_err(tr, HIST_ERR_TOO_MANY_PARAMS, 0);
4024 ret = -EINVAL;
4025 goto out;
4026 }
4027
4028 param = strsep(&params, ",");
4029 if (!param) {
4030 hist_err(tr, HIST_ERR_PARAM_NOT_FOUND, 0);
4031 ret = -EINVAL;
4032 goto out;
4033 }
4034
4035 param = strstrip(param);
4036 if (strlen(param) < 2) {
4037 hist_err(tr, HIST_ERR_INVALID_PARAM, errpos(param));
4038 ret = -EINVAL;
4039 goto out;
4040 }
4041
4042 saved_param = kstrdup(param, GFP_KERNEL);
4043 if (!saved_param) {
4044 ret = -ENOMEM;
4045 goto out;
4046 }
4047
4048 if (first_param && data->use_trace_keyword) {
4049 data->synth_event_name = saved_param;
4050 first_param = false;
4051 continue;
4052 }
4053 first_param = false;
4054
4055 data->params[data->n_params++] = saved_param;
4056 }
4057 out:
4058 return ret;
4059 }
4060
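/*
 * Parse the ".action(params)" part of a handler specification.
 * save(...) and snapshot() are only valid with onmax()/onchange();
 * anything else (either trace(<synth_event>,params) or a bare synthetic
 * event name) becomes an ACTION_TRACE that generates that synthetic
 * event.
 */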
4061 static int action_parse(struct trace_array *tr, char *str, struct action_data *data,
4062 enum handler_id handler)
4063 {
4064 char *action_name;
4065 int ret = 0;
4066
4067 strsep(&str, ".");
4068 if (!str) {
4069 hist_err(tr, HIST_ERR_ACTION_NOT_FOUND, 0);
4070 ret = -EINVAL;
4071 goto out;
4072 }
4073
4074 action_name = strsep(&str, "(");
4075 if (!action_name || !str) {
4076 hist_err(tr, HIST_ERR_ACTION_NOT_FOUND, 0);
4077 ret = -EINVAL;
4078 goto out;
4079 }
4080
4081 if (str_has_prefix(action_name, "save")) {
4082 char *params = strsep(&str, ")");
4083
4084 if (!params) {
4085 hist_err(tr, HIST_ERR_NO_SAVE_PARAMS, 0);
4086 ret = -EINVAL;
4087 goto out;
4088 }
4089
4090 ret = parse_action_params(tr, params, data);
4091 if (ret)
4092 goto out;
4093
4094 if (handler == HANDLER_ONMAX)
4095 data->track_data.check_val = check_track_val_max;
4096 else if (handler == HANDLER_ONCHANGE)
4097 data->track_data.check_val = check_track_val_changed;
4098 else {
4099 hist_err(tr, HIST_ERR_ACTION_MISMATCH, errpos(action_name));
4100 ret = -EINVAL;
4101 goto out;
4102 }
4103
4104 data->track_data.save_data = save_track_data_vars;
4105 data->fn = ontrack_action;
4106 data->action = ACTION_SAVE;
4107 } else if (str_has_prefix(action_name, "snapshot")) {
4108 char *params = strsep(&str, ")");
4109
4110 if (!str) {
4111 hist_err(tr, HIST_ERR_NO_CLOSING_PAREN, errpos(params));
4112 ret = -EINVAL;
4113 goto out;
4114 }
4115
4116 if (handler == HANDLER_ONMAX)
4117 data->track_data.check_val = check_track_val_max;
4118 else if (handler == HANDLER_ONCHANGE)
4119 data->track_data.check_val = check_track_val_changed;
4120 else {
4121 hist_err(tr, HIST_ERR_ACTION_MISMATCH, errpos(action_name));
4122 ret = -EINVAL;
4123 goto out;
4124 }
4125
4126 data->track_data.save_data = save_track_data_snapshot;
4127 data->fn = ontrack_action;
4128 data->action = ACTION_SNAPSHOT;
4129 } else {
4130 char *params = strsep(&str, ")");
4131
4132 if (str_has_prefix(action_name, "trace"))
4133 data->use_trace_keyword = true;
4134
4135 if (params) {
4136 ret = parse_action_params(tr, params, data);
4137 if (ret)
4138 goto out;
4139 }
4140
4141 if (handler == HANDLER_ONMAX)
4142 data->track_data.check_val = check_track_val_max;
4143 else if (handler == HANDLER_ONCHANGE)
4144 data->track_data.check_val = check_track_val_changed;
4145
4146 if (handler != HANDLER_ONMATCH) {
4147 data->track_data.save_data = action_trace;
4148 data->fn = ontrack_action;
4149 } else
4150 data->fn = action_trace;
4151
4152 data->action = ACTION_TRACE;
4153 }
4154
4155 data->action_name = kstrdup(action_name, GFP_KERNEL);
4156 if (!data->action_name) {
4157 ret = -ENOMEM;
4158 goto out;
4159 }
4160
4161 data->handler = handler;
4162 out:
4163 return ret;
4164 }
4165
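/*
 * Parse an "onmax(var).action(...)" or "onchange(var).action(...)"
 * string: extract the variable name up to the closing paren, save it,
 * and hand the remainder to action_parse().
 */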
4166 static struct action_data *track_data_parse(struct hist_trigger_data *hist_data,
4167 char *str, enum handler_id handler)
4168 {
4169 struct action_data *data;
4170 int ret = -EINVAL;
4171 char *var_str;
4172
4173 data = kzalloc(sizeof(*data), GFP_KERNEL);
4174 if (!data)
4175 return ERR_PTR(-ENOMEM);
4176
4177 var_str = strsep(&str, ")");
4178 if (!var_str || !str) {
4179 ret = -EINVAL;
4180 goto free;
4181 }
4182
4183 data->track_data.var_str = kstrdup(var_str, GFP_KERNEL);
4184 if (!data->track_data.var_str) {
4185 ret = -ENOMEM;
4186 goto free;
4187 }
4188
4189 ret = action_parse(hist_data->event_file->tr, str, data, handler);
4190 if (ret)
4191 goto free;
4192 out:
4193 return data;
4194 free:
4195 track_data_destroy(hist_data, data);
4196 data = ERR_PTR(ret);
4197 goto out;
4198 }
4199
4200 static void onmatch_destroy(struct action_data *data)
4201 {
4202 kfree(data->match_data.event);
4203 kfree(data->match_data.event_system);
4204
4205 action_data_destroy(data);
4206 }
4207
4208 static void destroy_field_var(struct field_var *field_var)
4209 {
4210 if (!field_var)
4211 return;
4212
4213 destroy_hist_field(field_var->var, 0);
4214 destroy_hist_field(field_var->val, 0);
4215
4216 kfree(field_var);
4217 }
4218
4219 static void destroy_field_vars(struct hist_trigger_data *hist_data)
4220 {
4221 unsigned int i;
4222
4223 for (i = 0; i < hist_data->n_field_vars; i++)
4224 destroy_field_var(hist_data->field_vars[i]);
4225
4226 for (i = 0; i < hist_data->n_save_vars; i++)
4227 destroy_field_var(hist_data->save_vars[i]);
4228 }
4229
4230 static void save_field_var(struct hist_trigger_data *hist_data,
4231 struct field_var *field_var)
4232 {
4233 hist_data->field_vars[hist_data->n_field_vars++] = field_var;
4234
4235 if (field_var->val->flags & HIST_FIELD_FL_STRING)
4236 hist_data->n_field_var_str++;
4237 }
4238
4239
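/*
 * Verify that the hist field supplying the next synthetic event
 * parameter has exactly the same type string as the synthetic event
 * field in that position.
 */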
4240 static int check_synth_field(struct synth_event *event,
4241 struct hist_field *hist_field,
4242 unsigned int field_pos)
4243 {
4244 struct synth_field *field;
4245
4246 if (field_pos >= event->n_fields)
4247 return -EINVAL;
4248
4249 field = event->fields[field_pos];
4250
4251 if (strcmp(field->type, hist_field->type) != 0)
4252 return -EINVAL;
4253
4254 return 0;
4255 }
4256
4257 static struct hist_field *
4258 trace_action_find_var(struct hist_trigger_data *hist_data,
4259 struct action_data *data,
4260 char *system, char *event, char *var)
4261 {
4262 struct trace_array *tr = hist_data->event_file->tr;
4263 struct hist_field *hist_field;
4264
4265 var++; /* skip '$' */
4266
4267 hist_field = find_target_event_var(hist_data, system, event, var);
4268 if (!hist_field) {
4269 if (!system && data->handler == HANDLER_ONMATCH) {
4270 system = data->match_data.event_system;
4271 event = data->match_data.event;
4272 }
4273
4274 hist_field = find_event_var(hist_data, system, event, var);
4275 }
4276
4277 if (!hist_field)
4278 hist_err(tr, HIST_ERR_PARAM_NOT_FOUND, errpos(var));
4279
4280 return hist_field;
4281 }
4282
4283 static struct hist_field *
4284 trace_action_create_field_var(struct hist_trigger_data *hist_data,
4285 struct action_data *data, char *system,
4286 char *event, char *var)
4287 {
4288 struct hist_field *hist_field = NULL;
4289 struct field_var *field_var;
4290
4291 /*
4292 * First try to create a field var on the target event (the
4293 * event currently being defined). This will create a variable for
4294 * unqualified fields on the target event, or if qualified,
4295 * target fields that have qualified names matching the target.
4296 */
4297 field_var = create_target_field_var(hist_data, system, event, var);
4298
4299 if (field_var && !IS_ERR(field_var)) {
4300 save_field_var(hist_data, field_var);
4301 hist_field = field_var->var;
4302 } else {
4303 field_var = NULL;
4304 /*
4305 * If no explicit system.event is specified, default to
4306 * looking for fields on the onmatch(system.event.xxx)
4307 * event.
4308 */
4309 if (!system && data->handler == HANDLER_ONMATCH) {
4310 system = data->match_data.event_system;
4311 event = data->match_data.event;
4312 }
4313
4314 if (!event)
4315 goto free;
4316 /*
4317 * At this point, we're looking at a field on another
4318 * event. Because we can't modify a hist trigger on
4319 * another event to add a variable for a field, we need
4320 * to create a new trigger on that event and create the
4321 * variable at the same time.
4322 */
4323 hist_field = create_field_var_hist(hist_data, system, event, var);
4324 if (IS_ERR(hist_field))
4325 goto free;
4326 }
4327 out:
4328 return hist_field;
4329 free:
4330 destroy_field_var(field_var);
4331 hist_field = NULL;
4332 goto out;
4333 }
4334
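/*
 * Resolve each action param for a synthetic-event-generating action: a
 * leading '$' means an existing variable (looked up on this event or on
 * the onmatch() event), otherwise a field variable is created on the
 * fly.  A variable reference index is recorded for each param so its
 * value can be collected when the synthetic event is generated, and the
 * param types and count are checked against the synthetic event
 * definition.
 */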
4335 static int trace_action_create(struct hist_trigger_data *hist_data,
4336 struct action_data *data)
4337 {
4338 struct trace_array *tr = hist_data->event_file->tr;
4339 char *event_name, *param, *system = NULL;
4340 struct hist_field *hist_field, *var_ref;
4341 unsigned int i;
4342 unsigned int field_pos = 0;
4343 struct synth_event *event;
4344 char *synth_event_name;
4345 int var_ref_idx, ret = 0;
4346
4347 lockdep_assert_held(&event_mutex);
4348
4349 /* Sanity check to avoid an out-of-bounds write on 'data->var_ref_idx' */
4350 if (data->n_params > SYNTH_FIELDS_MAX)
4351 return -EINVAL;
4352
4353 if (data->use_trace_keyword)
4354 synth_event_name = data->synth_event_name;
4355 else
4356 synth_event_name = data->action_name;
4357
4358 event = find_synth_event(synth_event_name);
4359 if (!event) {
4360 hist_err(tr, HIST_ERR_SYNTH_EVENT_NOT_FOUND, errpos(synth_event_name));
4361 return -EINVAL;
4362 }
4363
4364 event->ref++;
4365
4366 for (i = 0; i < data->n_params; i++) {
4367 char *p;
4368
4369 p = param = kstrdup(data->params[i], GFP_KERNEL);
4370 if (!param) {
4371 ret = -ENOMEM;
4372 goto err;
4373 }
4374
4375 system = strsep(&param, ".");
4376 if (!param) {
4377 param = (char *)system;
4378 system = event_name = NULL;
4379 } else {
4380 event_name = strsep(&param, ".");
4381 if (!param) {
4382 kfree(p);
4383 ret = -EINVAL;
4384 goto err;
4385 }
4386 }
4387
4388 if (param[0] == '$')
4389 hist_field = trace_action_find_var(hist_data, data,
4390 system, event_name,
4391 param);
4392 else
4393 hist_field = trace_action_create_field_var(hist_data,
4394 data,
4395 system,
4396 event_name,
4397 param);
4398
4399 if (!hist_field) {
4400 kfree(p);
4401 ret = -EINVAL;
4402 goto err;
4403 }
4404
4405 if (check_synth_field(event, hist_field, field_pos) == 0) {
4406 var_ref = create_var_ref(hist_data, hist_field,
4407 system, event_name);
4408 if (!var_ref) {
4409 kfree(p);
4410 ret = -ENOMEM;
4411 goto err;
4412 }
4413
4414 var_ref_idx = find_var_ref_idx(hist_data, var_ref);
4415 if (WARN_ON(var_ref_idx < 0)) {
4416 kfree(p);
4417 ret = var_ref_idx;
4418 goto err;
4419 }
4420
4421 data->var_ref_idx[i] = var_ref_idx;
4422
4423 field_pos++;
4424 kfree(p);
4425 continue;
4426 }
4427
4428 hist_err(tr, HIST_ERR_SYNTH_TYPE_MISMATCH, errpos(param));
4429 kfree(p);
4430 ret = -EINVAL;
4431 goto err;
4432 }
4433
4434 if (field_pos != event->n_fields) {
4435 hist_err(tr, HIST_ERR_SYNTH_COUNT_MISMATCH, errpos(event->name));
4436 ret = -EINVAL;
4437 goto err;
4438 }
4439
4440 data->synth_event = event;
4441 out:
4442 return ret;
4443 err:
4444 event->ref--;
4445
4446 goto out;
4447 }
4448
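/*
 * Create the runtime pieces for a parsed action: trace actions are
 * delegated to trace_action_create(), snapshot actions enable a
 * conditional snapshot on the trace array, and save() actions create
 * one field variable per named field (at most one save() per hist).
 */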
4449 static int action_create(struct hist_trigger_data *hist_data,
4450 struct action_data *data)
4451 {
4452 struct trace_event_file *file = hist_data->event_file;
4453 struct trace_array *tr = file->tr;
4454 struct track_data *track_data;
4455 struct field_var *field_var;
4456 unsigned int i;
4457 char *param;
4458 int ret = 0;
4459
4460 if (data->action == ACTION_TRACE)
4461 return trace_action_create(hist_data, data);
4462
4463 if (data->action == ACTION_SNAPSHOT) {
4464 track_data = track_data_alloc(hist_data->key_size, data, hist_data);
4465 if (IS_ERR(track_data)) {
4466 ret = PTR_ERR(track_data);
4467 goto out;
4468 }
4469
4470 ret = tracing_snapshot_cond_enable(file->tr, track_data,
4471 cond_snapshot_update);
4472 if (ret)
4473 track_data_free(track_data);
4474
4475 goto out;
4476 }
4477
4478 if (data->action == ACTION_SAVE) {
4479 if (hist_data->n_save_vars) {
4480 ret = -EEXIST;
4481 hist_err(tr, HIST_ERR_TOO_MANY_SAVE_ACTIONS, 0);
4482 goto out;
4483 }
4484
4485 for (i = 0; i < data->n_params; i++) {
4486 param = kstrdup(data->params[i], GFP_KERNEL);
4487 if (!param) {
4488 ret = -ENOMEM;
4489 goto out;
4490 }
4491
4492 field_var = create_target_field_var(hist_data, NULL, NULL, param);
4493 if (IS_ERR(field_var)) {
4494 hist_err(tr, HIST_ERR_FIELD_VAR_CREATE_FAIL,
4495 errpos(param));
4496 ret = PTR_ERR(field_var);
4497 kfree(param);
4498 goto out;
4499 }
4500
4501 hist_data->save_vars[hist_data->n_save_vars++] = field_var;
4502 if (field_var->val->flags & HIST_FIELD_FL_STRING)
4503 hist_data->n_save_var_str++;
4504 kfree(param);
4505 }
4506 }
4507 out:
4508 return ret;
4509 }
4510
4511 static int onmatch_create(struct hist_trigger_data *hist_data,
4512 struct action_data *data)
4513 {
4514 return action_create(hist_data, data);
4515 }
4516
4517 static struct action_data *onmatch_parse(struct trace_array *tr, char *str)
4518 {
4519 char *match_event, *match_event_system;
4520 struct action_data *data;
4521 int ret = -EINVAL;
4522
4523 data = kzalloc(sizeof(*data), GFP_KERNEL);
4524 if (!data)
4525 return ERR_PTR(-ENOMEM);
4526
4527 match_event = strsep(&str, ")");
4528 if (!match_event || !str) {
4529 hist_err(tr, HIST_ERR_NO_CLOSING_PAREN, errpos(match_event));
4530 goto free;
4531 }
4532
4533 match_event_system = strsep(&match_event, ".");
4534 if (!match_event) {
4535 hist_err(tr, HIST_ERR_SUBSYS_NOT_FOUND, errpos(match_event_system));
4536 goto free;
4537 }
4538
4539 if (IS_ERR(event_file(tr, match_event_system, match_event))) {
4540 hist_err(tr, HIST_ERR_INVALID_SUBSYS_EVENT, errpos(match_event));
4541 goto free;
4542 }
4543
4544 data->match_data.event = kstrdup(match_event, GFP_KERNEL);
4545 if (!data->match_data.event) {
4546 ret = -ENOMEM;
4547 goto free;
4548 }
4549
4550 data->match_data.event_system = kstrdup(match_event_system, GFP_KERNEL);
4551 if (!data->match_data.event_system) {
4552 ret = -ENOMEM;
4553 goto free;
4554 }
4555
4556 ret = action_parse(tr, str, data, HANDLER_ONMATCH);
4557 if (ret)
4558 goto free;
4559 out:
4560 return data;
4561 free:
4562 onmatch_destroy(data);
4563 data = ERR_PTR(ret);
4564 goto out;
4565 }
4566
4567 static int create_hitcount_val(struct hist_trigger_data *hist_data)
4568 {
4569 hist_data->fields[HITCOUNT_IDX] =
4570 create_hist_field(hist_data, NULL, HIST_FIELD_FL_HITCOUNT, NULL);
4571 if (!hist_data->fields[HITCOUNT_IDX])
4572 return -ENOMEM;
4573
4574 hist_data->n_vals++;
4575 hist_data->n_fields++;
4576
4577 if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX))
4578 return -EINVAL;
4579
4580 return 0;
4581 }
4582
4583 static int __create_val_field(struct hist_trigger_data *hist_data,
4584 unsigned int val_idx,
4585 struct trace_event_file *file,
4586 char *var_name, char *field_str,
4587 unsigned long flags)
4588 {
4589 struct hist_field *hist_field;
4590 int ret = 0;
4591
4592 hist_field = parse_expr(hist_data, file, field_str, flags, var_name, 0);
4593 if (IS_ERR(hist_field)) {
4594 ret = PTR_ERR(hist_field);
4595 goto out;
4596 }
4597
4598 hist_data->fields[val_idx] = hist_field;
4599
4600 ++hist_data->n_vals;
4601 ++hist_data->n_fields;
4602
4603 if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX + TRACING_MAP_VARS_MAX))
4604 ret = -EINVAL;
4605 out:
4606 return ret;
4607 }
4608
4609 static int create_val_field(struct hist_trigger_data *hist_data,
4610 unsigned int val_idx,
4611 struct trace_event_file *file,
4612 char *field_str)
4613 {
4614 if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX))
4615 return -EINVAL;
4616
4617 return __create_val_field(hist_data, val_idx, file, NULL, field_str, 0);
4618 }
4619
4620 static int create_var_field(struct hist_trigger_data *hist_data,
4621 unsigned int val_idx,
4622 struct trace_event_file *file,
4623 char *var_name, char *expr_str)
4624 {
4625 struct trace_array *tr = hist_data->event_file->tr;
4626 unsigned long flags = 0;
4627 int ret;
4628
4629 if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX + TRACING_MAP_VARS_MAX))
4630 return -EINVAL;
4631
4632 if (find_var(hist_data, file, var_name) && !hist_data->remove) {
4633 hist_err(tr, HIST_ERR_DUPLICATE_VAR, errpos(var_name));
4634 return -EINVAL;
4635 }
4636
4637 flags |= HIST_FIELD_FL_VAR;
4638 hist_data->n_vars++;
4639 if (WARN_ON(hist_data->n_vars > TRACING_MAP_VARS_MAX))
4640 return -EINVAL;
4641
4642 ret = __create_val_field(hist_data, val_idx, file, var_name, expr_str, flags);
4643
4644 if (hist_data->fields[val_idx]->flags & HIST_FIELD_FL_STRING)
4645 hist_data->fields[val_idx]->var_str_idx = hist_data->n_var_str++;
4646
4647 return ret;
4648 }
4649
4650 static int create_val_fields(struct hist_trigger_data *hist_data,
4651 struct trace_event_file *file)
4652 {
4653 char *fields_str, *field_str;
4654 unsigned int i, j = 1;
4655 int ret;
4656
4657 ret = create_hitcount_val(hist_data);
4658 if (ret)
4659 goto out;
4660
4661 fields_str = hist_data->attrs->vals_str;
4662 if (!fields_str)
4663 goto out;
4664
4665 for (i = 0, j = 1; i < TRACING_MAP_VALS_MAX &&
4666 j < TRACING_MAP_VALS_MAX; i++) {
4667 field_str = strsep(&fields_str, ",");
4668 if (!field_str)
4669 break;
4670
4671 if (strcmp(field_str, "hitcount") == 0)
4672 continue;
4673
4674 ret = create_val_field(hist_data, j++, file, field_str);
4675 if (ret)
4676 goto out;
4677 }
4678
4679 if (fields_str && (strcmp(fields_str, "hitcount") != 0))
4680 ret = -EINVAL;
4681 out:
4682 return ret;
4683 }
4684
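/*
 * Create one key field from the keys= list.  "stacktrace" gets a
 * fixed-size array of return addresses; anything else is parsed as an
 * expression (variable references are rejected in keys).  The key size
 * is rounded up to a u64 multiple and returned so the caller can
 * compute the offset of the next key.
 */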
4685 static int create_key_field(struct hist_trigger_data *hist_data,
4686 unsigned int key_idx,
4687 unsigned int key_offset,
4688 struct trace_event_file *file,
4689 char *field_str)
4690 {
4691 struct trace_array *tr = hist_data->event_file->tr;
4692 struct hist_field *hist_field = NULL;
4693 unsigned long flags = 0;
4694 unsigned int key_size;
4695 int ret = 0;
4696
4697 if (WARN_ON(key_idx >= HIST_FIELDS_MAX))
4698 return -EINVAL;
4699
4700 flags |= HIST_FIELD_FL_KEY;
4701
4702 if (strcmp(field_str, "stacktrace") == 0) {
4703 flags |= HIST_FIELD_FL_STACKTRACE;
4704 key_size = sizeof(unsigned long) * HIST_STACKTRACE_DEPTH;
4705 hist_field = create_hist_field(hist_data, NULL, flags, NULL);
4706 } else {
4707 hist_field = parse_expr(hist_data, file, field_str, flags,
4708 NULL, 0);
4709 if (IS_ERR(hist_field)) {
4710 ret = PTR_ERR(hist_field);
4711 goto out;
4712 }
4713
4714 if (field_has_hist_vars(hist_field, 0)) {
4715 hist_err(tr, HIST_ERR_INVALID_REF_KEY, errpos(field_str));
4716 destroy_hist_field(hist_field, 0);
4717 ret = -EINVAL;
4718 goto out;
4719 }
4720
4721 key_size = hist_field->size;
4722 }
4723
4724 hist_data->fields[key_idx] = hist_field;
4725
4726 key_size = ALIGN(key_size, sizeof(u64));
4727 hist_data->fields[key_idx]->size = key_size;
4728 hist_data->fields[key_idx]->offset = key_offset;
4729
4730 hist_data->key_size += key_size;
4731
4732 if (hist_data->key_size > HIST_KEY_SIZE_MAX) {
4733 ret = -EINVAL;
4734 goto out;
4735 }
4736
4737 hist_data->n_keys++;
4738 hist_data->n_fields++;
4739
4740 if (WARN_ON(hist_data->n_keys > TRACING_MAP_KEYS_MAX))
4741 return -EINVAL;
4742
4743 ret = key_size;
4744 out:
4745 return ret;
4746 }
4747
4748 static int create_key_fields(struct hist_trigger_data *hist_data,
4749 struct trace_event_file *file)
4750 {
4751 unsigned int i, key_offset = 0, n_vals = hist_data->n_vals;
4752 char *fields_str, *field_str;
4753 int ret = -EINVAL;
4754
4755 fields_str = hist_data->attrs->keys_str;
4756 if (!fields_str)
4757 goto out;
4758
4759 for (i = n_vals; i < n_vals + TRACING_MAP_KEYS_MAX; i++) {
4760 field_str = strsep(&fields_str, ",");
4761 if (!field_str)
4762 break;
4763 ret = create_key_field(hist_data, i, key_offset,
4764 file, field_str);
4765 if (ret < 0)
4766 goto out;
4767 key_offset += ret;
4768 }
4769 if (fields_str) {
4770 ret = -EINVAL;
4771 goto out;
4772 }
4773 ret = 0;
4774 out:
4775 return ret;
4776 }
4777
4778 static int create_var_fields(struct hist_trigger_data *hist_data,
4779 struct trace_event_file *file)
4780 {
4781 unsigned int i, j = hist_data->n_vals;
4782 int ret = 0;
4783
4784 unsigned int n_vars = hist_data->attrs->var_defs.n_vars;
4785
4786 for (i = 0; i < n_vars; i++) {
4787 char *var_name = hist_data->attrs->var_defs.name[i];
4788 char *expr = hist_data->attrs->var_defs.expr[i];
4789
4790 ret = create_var_field(hist_data, j++, file, var_name, expr);
4791 if (ret)
4792 goto out;
4793 }
4794 out:
4795 return ret;
4796 }
4797
4798 static void free_var_defs(struct hist_trigger_data *hist_data)
4799 {
4800 unsigned int i;
4801
4802 for (i = 0; i < hist_data->attrs->var_defs.n_vars; i++) {
4803 kfree(hist_data->attrs->var_defs.name[i]);
4804 kfree(hist_data->attrs->var_defs.expr[i]);
4805 }
4806
4807 hist_data->attrs->var_defs.n_vars = 0;
4808 }
4809
4810 static int parse_var_defs(struct hist_trigger_data *hist_data)
4811 {
4812 struct trace_array *tr = hist_data->event_file->tr;
4813 char *s, *str, *var_name, *field_str;
4814 unsigned int i, j, n_vars = 0;
4815 int ret = 0;
4816
4817 for (i = 0; i < hist_data->attrs->n_assignments; i++) {
4818 str = hist_data->attrs->assignment_str[i];
4819 for (j = 0; j < TRACING_MAP_VARS_MAX; j++) {
4820 field_str = strsep(&str, ",");
4821 if (!field_str)
4822 break;
4823
4824 var_name = strsep(&field_str, "=");
4825 if (!var_name || !field_str) {
4826 hist_err(tr, HIST_ERR_MALFORMED_ASSIGNMENT,
4827 errpos(var_name));
4828 ret = -EINVAL;
4829 goto free;
4830 }
4831
4832 if (n_vars == TRACING_MAP_VARS_MAX) {
4833 hist_err(tr, HIST_ERR_TOO_MANY_VARS, errpos(var_name));
4834 ret = -EINVAL;
4835 goto free;
4836 }
4837
4838 s = kstrdup(var_name, GFP_KERNEL);
4839 if (!s) {
4840 ret = -ENOMEM;
4841 goto free;
4842 }
4843 hist_data->attrs->var_defs.name[n_vars] = s;
4844
4845 s = kstrdup(field_str, GFP_KERNEL);
4846 if (!s) {
4847 kfree(hist_data->attrs->var_defs.name[n_vars]);
4848 hist_data->attrs->var_defs.name[n_vars] = NULL;
4849 ret = -ENOMEM;
4850 goto free;
4851 }
4852 hist_data->attrs->var_defs.expr[n_vars++] = s;
4853
4854 hist_data->attrs->var_defs.n_vars = n_vars;
4855 }
4856 }
4857
4858 return ret;
4859 free:
4860 free_var_defs(hist_data);
4861
4862 return ret;
4863 }
4864
4865 static int create_hist_fields(struct hist_trigger_data *hist_data,
4866 struct trace_event_file *file)
4867 {
4868 int ret;
4869
4870 ret = parse_var_defs(hist_data);
4871 if (ret)
4872 goto out;
4873
4874 ret = create_val_fields(hist_data, file);
4875 if (ret)
4876 goto out;
4877
4878 ret = create_var_fields(hist_data, file);
4879 if (ret)
4880 goto out;
4881
4882 ret = create_key_fields(hist_data, file);
4883 if (ret)
4884 goto out;
4885 out:
4886 free_var_defs(hist_data);
4887
4888 return ret;
4889 }
4890
4891 static int is_descending(const char *str)
4892 {
4893 if (!str)
4894 return 0;
4895
4896 if (strcmp(str, "descending") == 0)
4897 return 1;
4898
4899 if (strcmp(str, "ascending") == 0)
4900 return 0;
4901
4902 return -EINVAL;
4903 }
4904
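/*
 * Translate the sort= specification into tracing_map sort keys.
 * hitcount is always available; other names are matched against the
 * histogram's non-variable fields, and an optional
 * ".descending"/".ascending" suffix selects the sort direction.
 */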
4905 static int create_sort_keys(struct hist_trigger_data *hist_data)
4906 {
4907 char *fields_str = hist_data->attrs->sort_key_str;
4908 struct tracing_map_sort_key *sort_key;
4909 int descending, ret = 0;
4910 unsigned int i, j, k;
4911
4912 hist_data->n_sort_keys = 1; /* we always have at least one, hitcount */
4913
4914 if (!fields_str)
4915 goto out;
4916
4917 for (i = 0; i < TRACING_MAP_SORT_KEYS_MAX; i++) {
4918 struct hist_field *hist_field;
4919 char *field_str, *field_name;
4920 const char *test_name;
4921
4922 sort_key = &hist_data->sort_keys[i];
4923
4924 field_str = strsep(&fields_str, ",");
4925 if (!field_str)
4926 break;
4927
4928 if (!*field_str) {
4929 ret = -EINVAL;
4930 break;
4931 }
4932
4933 if ((i == TRACING_MAP_SORT_KEYS_MAX - 1) && fields_str) {
4934 ret = -EINVAL;
4935 break;
4936 }
4937
4938 field_name = strsep(&field_str, ".");
4939 if (!field_name || !*field_name) {
4940 ret = -EINVAL;
4941 break;
4942 }
4943
4944 if (strcmp(field_name, "hitcount") == 0) {
4945 descending = is_descending(field_str);
4946 if (descending < 0) {
4947 ret = descending;
4948 break;
4949 }
4950 sort_key->descending = descending;
4951 continue;
4952 }
4953
4954 for (j = 1, k = 1; j < hist_data->n_fields; j++) {
4955 unsigned int idx;
4956
4957 hist_field = hist_data->fields[j];
4958 if (hist_field->flags & HIST_FIELD_FL_VAR)
4959 continue;
4960
4961 idx = k++;
4962
4963 test_name = hist_field_name(hist_field, 0);
4964
4965 if (strcmp(field_name, test_name) == 0) {
4966 sort_key->field_idx = idx;
4967 descending = is_descending(field_str);
4968 if (descending < 0) {
4969 ret = descending;
4970 goto out;
4971 }
4972 sort_key->descending = descending;
4973 break;
4974 }
4975 }
4976 if (j == hist_data->n_fields) {
4977 ret = -EINVAL;
4978 break;
4979 }
4980 }
4981
4982 hist_data->n_sort_keys = i;
4983 out:
4984 return ret;
4985 }
4986
4987 static void destroy_actions(struct hist_trigger_data *hist_data)
4988 {
4989 unsigned int i;
4990
4991 for (i = 0; i < hist_data->n_actions; i++) {
4992 struct action_data *data = hist_data->actions[i];
4993
4994 if (data->handler == HANDLER_ONMATCH)
4995 onmatch_destroy(data);
4996 else if (data->handler == HANDLER_ONMAX ||
4997 data->handler == HANDLER_ONCHANGE)
4998 track_data_destroy(hist_data, data);
4999 else
5000 kfree(data);
5001 }
5002 }
5003
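/*
 * Parse each action string attached to the trigger.  Supported forms
 * (illustrative, see Documentation/trace/histogram.rst):
 *
 *   onmatch(sys.event).trace(synth_event,$var1,field2)
 *   onmax($var).save(field1,field2)
 *   onchange($var).snapshot()
 */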
5004 static int parse_actions(struct hist_trigger_data *hist_data)
5005 {
5006 struct trace_array *tr = hist_data->event_file->tr;
5007 struct action_data *data;
5008 unsigned int i;
5009 int ret = 0;
5010 char *str;
5011 int len;
5012
5013 for (i = 0; i < hist_data->attrs->n_actions; i++) {
5014 str = hist_data->attrs->action_str[i];
5015
5016 if ((len = str_has_prefix(str, "onmatch("))) {
5017 char *action_str = str + len;
5018
5019 data = onmatch_parse(tr, action_str);
5020 if (IS_ERR(data)) {
5021 ret = PTR_ERR(data);
5022 break;
5023 }
5024 } else if ((len = str_has_prefix(str, "onmax("))) {
5025 char *action_str = str + len;
5026
5027 data = track_data_parse(hist_data, action_str,
5028 HANDLER_ONMAX);
5029 if (IS_ERR(data)) {
5030 ret = PTR_ERR(data);
5031 break;
5032 }
5033 } else if ((len = str_has_prefix(str, "onchange("))) {
5034 char *action_str = str + len;
5035
5036 data = track_data_parse(hist_data, action_str,
5037 HANDLER_ONCHANGE);
5038 if (IS_ERR(data)) {
5039 ret = PTR_ERR(data);
5040 break;
5041 }
5042 } else {
5043 ret = -EINVAL;
5044 break;
5045 }
5046
5047 hist_data->actions[hist_data->n_actions++] = data;
5048 }
5049
5050 return ret;
5051 }
5052
5053 static int create_actions(struct hist_trigger_data *hist_data)
5054 {
5055 struct action_data *data;
5056 unsigned int i;
5057 int ret = 0;
5058
5059 for (i = 0; i < hist_data->attrs->n_actions; i++) {
5060 data = hist_data->actions[i];
5061
5062 if (data->handler == HANDLER_ONMATCH) {
5063 ret = onmatch_create(hist_data, data);
5064 if (ret)
5065 break;
5066 } else if (data->handler == HANDLER_ONMAX ||
5067 data->handler == HANDLER_ONCHANGE) {
5068 ret = track_data_create(hist_data, data);
5069 if (ret)
5070 break;
5071 } else {
5072 ret = -EINVAL;
5073 break;
5074 }
5075 }
5076
5077 return ret;
5078 }
5079
5080 static void print_actions(struct seq_file *m,
5081 struct hist_trigger_data *hist_data,
5082 struct tracing_map_elt *elt)
5083 {
5084 unsigned int i;
5085
5086 for (i = 0; i < hist_data->n_actions; i++) {
5087 struct action_data *data = hist_data->actions[i];
5088
5089 if (data->action == ACTION_SNAPSHOT)
5090 continue;
5091
5092 if (data->handler == HANDLER_ONMAX ||
5093 data->handler == HANDLER_ONCHANGE)
5094 track_data_print(m, hist_data, elt, data);
5095 }
5096 }
5097
5098 static void print_action_spec(struct seq_file *m,
5099 struct hist_trigger_data *hist_data,
5100 struct action_data *data)
5101 {
5102 unsigned int i;
5103
5104 if (data->action == ACTION_SAVE) {
5105 for (i = 0; i < hist_data->n_save_vars; i++) {
5106 seq_printf(m, "%s", hist_data->save_vars[i]->var->var.name);
5107 if (i < hist_data->n_save_vars - 1)
5108 seq_puts(m, ",");
5109 }
5110 } else if (data->action == ACTION_TRACE) {
5111 if (data->use_trace_keyword)
5112 seq_printf(m, "%s", data->synth_event_name);
5113 for (i = 0; i < data->n_params; i++) {
5114 if (i || data->use_trace_keyword)
5115 seq_puts(m, ",");
5116 seq_printf(m, "%s", data->params[i]);
5117 }
5118 }
5119 }
5120
5121 static void print_track_data_spec(struct seq_file *m,
5122 struct hist_trigger_data *hist_data,
5123 struct action_data *data)
5124 {
5125 if (data->handler == HANDLER_ONMAX)
5126 seq_puts(m, ":onmax(");
5127 else if (data->handler == HANDLER_ONCHANGE)
5128 seq_puts(m, ":onchange(");
5129 seq_printf(m, "%s", data->track_data.var_str);
5130 seq_printf(m, ").%s(", data->action_name);
5131
5132 print_action_spec(m, hist_data, data);
5133
5134 seq_puts(m, ")");
5135 }
5136
5137 static void print_onmatch_spec(struct seq_file *m,
5138 struct hist_trigger_data *hist_data,
5139 struct action_data *data)
5140 {
5141 seq_printf(m, ":onmatch(%s.%s).", data->match_data.event_system,
5142 data->match_data.event);
5143
5144 seq_printf(m, "%s(", data->action_name);
5145
5146 print_action_spec(m, hist_data, data);
5147
5148 seq_puts(m, ")");
5149 }
5150
5151 static bool actions_match(struct hist_trigger_data *hist_data,
5152 struct hist_trigger_data *hist_data_test)
5153 {
5154 unsigned int i, j;
5155
5156 if (hist_data->n_actions != hist_data_test->n_actions)
5157 return false;
5158
5159 for (i = 0; i < hist_data->n_actions; i++) {
5160 struct action_data *data = hist_data->actions[i];
5161 struct action_data *data_test = hist_data_test->actions[i];
5162 char *action_name, *action_name_test;
5163
5164 if (data->handler != data_test->handler)
5165 return false;
5166 if (data->action != data_test->action)
5167 return false;
5168
5169 if (data->n_params != data_test->n_params)
5170 return false;
5171
5172 for (j = 0; j < data->n_params; j++) {
5173 if (strcmp(data->params[j], data_test->params[j]) != 0)
5174 return false;
5175 }
5176
5177 if (data->use_trace_keyword)
5178 action_name = data->synth_event_name;
5179 else
5180 action_name = data->action_name;
5181
5182 if (data_test->use_trace_keyword)
5183 action_name_test = data_test->synth_event_name;
5184 else
5185 action_name_test = data_test->action_name;
5186
5187 if (strcmp(action_name, action_name_test) != 0)
5188 return false;
5189
5190 if (data->handler == HANDLER_ONMATCH) {
5191 if (strcmp(data->match_data.event_system,
5192 data_test->match_data.event_system) != 0)
5193 return false;
5194 if (strcmp(data->match_data.event,
5195 data_test->match_data.event) != 0)
5196 return false;
5197 } else if (data->handler == HANDLER_ONMAX ||
5198 data->handler == HANDLER_ONCHANGE) {
5199 if (strcmp(data->track_data.var_str,
5200 data_test->track_data.var_str) != 0)
5201 return false;
5202 }
5203 }
5204
5205 return true;
5206 }
5207
5208
5209 static void print_actions_spec(struct seq_file *m,
5210 struct hist_trigger_data *hist_data)
5211 {
5212 unsigned int i;
5213
5214 for (i = 0; i < hist_data->n_actions; i++) {
5215 struct action_data *data = hist_data->actions[i];
5216
5217 if (data->handler == HANDLER_ONMATCH)
5218 print_onmatch_spec(m, hist_data, data);
5219 else if (data->handler == HANDLER_ONMAX ||
5220 data->handler == HANDLER_ONCHANGE)
5221 print_track_data_spec(m, hist_data, data);
5222 }
5223 }
5224
5225 static void destroy_field_var_hists(struct hist_trigger_data *hist_data)
5226 {
5227 unsigned int i;
5228
5229 for (i = 0; i < hist_data->n_field_var_hists; i++) {
5230 kfree(hist_data->field_var_hists[i]->cmd);
5231 kfree(hist_data->field_var_hists[i]);
5232 }
5233 }
5234
5235 static void destroy_hist_data(struct hist_trigger_data *hist_data)
5236 {
5237 if (!hist_data)
5238 return;
5239
5240 destroy_hist_trigger_attrs(hist_data->attrs);
5241 destroy_hist_fields(hist_data);
5242 tracing_map_destroy(hist_data->map);
5243
5244 destroy_actions(hist_data);
5245 destroy_field_vars(hist_data);
5246 destroy_field_var_hists(hist_data);
5247
5248 kfree(hist_data);
5249 }
5250
5251 static int create_tracing_map_fields(struct hist_trigger_data *hist_data)
5252 {
5253 struct tracing_map *map = hist_data->map;
5254 struct ftrace_event_field *field;
5255 struct hist_field *hist_field;
5256 int i, idx = 0;
5257
5258 for_each_hist_field(i, hist_data) {
5259 hist_field = hist_data->fields[i];
5260 if (hist_field->flags & HIST_FIELD_FL_KEY) {
5261 tracing_map_cmp_fn_t cmp_fn;
5262
5263 field = hist_field->field;
5264
5265 if (hist_field->flags & HIST_FIELD_FL_STACKTRACE)
5266 cmp_fn = tracing_map_cmp_none;
5267 else if (!field || hist_field->flags & HIST_FIELD_FL_CPU)
5268 cmp_fn = tracing_map_cmp_num(hist_field->size,
5269 hist_field->is_signed);
5270 else if (is_string_field(field))
5271 cmp_fn = tracing_map_cmp_string;
5272 else
5273 cmp_fn = tracing_map_cmp_num(field->size,
5274 field->is_signed);
5275 idx = tracing_map_add_key_field(map,
5276 hist_field->offset,
5277 cmp_fn);
5278 } else if (!(hist_field->flags & HIST_FIELD_FL_VAR))
5279 idx = tracing_map_add_sum_field(map);
5280
5281 if (idx < 0)
5282 return idx;
5283
5284 if (hist_field->flags & HIST_FIELD_FL_VAR) {
5285 idx = tracing_map_add_var(map);
5286 if (idx < 0)
5287 return idx;
5288 hist_field->var.idx = idx;
5289 hist_field->var.hist_data = hist_data;
5290 }
5291 }
5292
5293 return 0;
5294 }
5295
5296 static struct hist_trigger_data *
5297 create_hist_data(unsigned int map_bits,
5298 struct hist_trigger_attrs *attrs,
5299 struct trace_event_file *file,
5300 bool remove)
5301 {
5302 const struct tracing_map_ops *map_ops = NULL;
5303 struct hist_trigger_data *hist_data;
5304 int ret = 0;
5305
5306 hist_data = kzalloc(sizeof(*hist_data), GFP_KERNEL);
5307 if (!hist_data)
5308 return ERR_PTR(-ENOMEM);
5309
5310 hist_data->attrs = attrs;
5311 hist_data->remove = remove;
5312 hist_data->event_file = file;
5313
5314 ret = parse_actions(hist_data);
5315 if (ret)
5316 goto free;
5317
5318 ret = create_hist_fields(hist_data, file);
5319 if (ret)
5320 goto free;
5321
5322 ret = create_sort_keys(hist_data);
5323 if (ret)
5324 goto free;
5325
5326 map_ops = &hist_trigger_elt_data_ops;
5327
5328 hist_data->map = tracing_map_create(map_bits, hist_data->key_size,
5329 map_ops, hist_data);
5330 if (IS_ERR(hist_data->map)) {
5331 ret = PTR_ERR(hist_data->map);
5332 hist_data->map = NULL;
5333 goto free;
5334 }
5335
5336 ret = create_tracing_map_fields(hist_data);
5337 if (ret)
5338 goto free;
5339 out:
5340 return hist_data;
5341 free:
5342 hist_data->attrs = NULL;
5343
5344 destroy_hist_data(hist_data);
5345
5346 hist_data = ERR_PTR(ret);
5347
5348 goto out;
5349 }
5350
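/*
 * Update the map element for this event occurrence: evaluate each val
 * field and either add it to the corresponding sum or, for variables,
 * store it in the element's variable slot (string values are copied
 * into per-element storage first), then do the same for key-attached
 * variables and any field variables.
 */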
5351 static void hist_trigger_elt_update(struct hist_trigger_data *hist_data,
5352 struct tracing_map_elt *elt, void *rec,
5353 struct ring_buffer_event *rbe,
5354 u64 *var_ref_vals)
5355 {
5356 struct hist_elt_data *elt_data;
5357 struct hist_field *hist_field;
5358 unsigned int i, var_idx;
5359 u64 hist_val;
5360
5361 elt_data = elt->private_data;
5362 elt_data->var_ref_vals = var_ref_vals;
5363
5364 for_each_hist_val_field(i, hist_data) {
5365 hist_field = hist_data->fields[i];
5366 hist_val = hist_field->fn(hist_field, elt, rbe, rec);
5367 if (hist_field->flags & HIST_FIELD_FL_VAR) {
5368 var_idx = hist_field->var.idx;
5369
5370 if (hist_field->flags & HIST_FIELD_FL_STRING) {
5371 unsigned int str_start, var_str_idx, idx;
5372 char *str, *val_str;
5373
5374 str_start = hist_data->n_field_var_str +
5375 hist_data->n_save_var_str;
5376 var_str_idx = hist_field->var_str_idx;
5377 idx = str_start + var_str_idx;
5378
5379 str = elt_data->field_var_str[idx];
5380 val_str = (char *)(uintptr_t)hist_val;
5381 strscpy(str, val_str, hist_field->size);
5382
5383 hist_val = (u64)(uintptr_t)str;
5384 }
5385 tracing_map_set_var(elt, var_idx, hist_val);
5386 continue;
5387 }
5388 tracing_map_update_sum(elt, i, hist_val);
5389 }
5390
5391 for_each_hist_key_field(i, hist_data) {
5392 hist_field = hist_data->fields[i];
5393 if (hist_field->flags & HIST_FIELD_FL_VAR) {
5394 hist_val = hist_field->fn(hist_field, elt, rbe, rec);
5395 var_idx = hist_field->var.idx;
5396 tracing_map_set_var(elt, var_idx, hist_val);
5397 }
5398 }
5399
5400 update_field_vars(hist_data, elt, rbe, rec);
5401 }
5402
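/*
 * Copy one key field's value into the compound key buffer at its
 * offset.  For string keys the copied length is limited to the actual
 * string length (dynamic strings encode their length in the upper 16
 * bits of the field word) and clamped so room remains for a
 * terminating NUL in the zeroed compound key.
 */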
5403 static inline void add_to_key(char *compound_key, void *key,
5404 struct hist_field *key_field, void *rec)
5405 {
5406 size_t size = key_field->size;
5407
5408 if (key_field->flags & HIST_FIELD_FL_STRING) {
5409 struct ftrace_event_field *field;
5410
5411 field = key_field->field;
5412 if (field->filter_type == FILTER_DYN_STRING)
5413 size = *(u32 *)(rec + field->offset) >> 16;
5414 else if (field->filter_type == FILTER_STATIC_STRING)
5415 size = field->size;
5416
5417 /* ensure NULL-termination */
5418 if (size > key_field->size - 1)
5419 size = key_field->size - 1;
5420
5421 strncpy(compound_key + key_field->offset, (char *)key, size);
5422 } else
5423 memcpy(compound_key + key_field->offset, key, size);
5424 }
5425
5426 static void
5427 hist_trigger_actions(struct hist_trigger_data *hist_data,
5428 struct tracing_map_elt *elt, void *rec,
5429 struct ring_buffer_event *rbe, void *key,
5430 u64 *var_ref_vals)
5431 {
5432 struct action_data *data;
5433 unsigned int i;
5434
5435 for (i = 0; i < hist_data->n_actions; i++) {
5436 data = hist_data->actions[i];
5437 data->fn(hist_data, elt, rec, rbe, key, data, var_ref_vals);
5438 }
5439 }
5440
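/*
 * Per-event fast path: build the lookup key (a saved stacktrace, a
 * single field value, or a compound key when there are multiple keys or
 * a string key), resolve any variable references the trigger uses,
 * insert/update the tracing_map element, and finally run any attached
 * actions.
 */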
5441 static void event_hist_trigger(struct event_trigger_data *data, void *rec,
5442 struct ring_buffer_event *rbe)
5443 {
5444 struct hist_trigger_data *hist_data = data->private_data;
5445 bool use_compound_key = (hist_data->n_keys > 1);
5446 unsigned long entries[HIST_STACKTRACE_DEPTH];
5447 u64 var_ref_vals[TRACING_MAP_VARS_MAX];
5448 char compound_key[HIST_KEY_SIZE_MAX];
5449 struct tracing_map_elt *elt = NULL;
5450 struct hist_field *key_field;
5451 u64 field_contents;
5452 void *key = NULL;
5453 unsigned int i;
5454
5455 memset(compound_key, 0, hist_data->key_size);
5456
5457 for_each_hist_key_field(i, hist_data) {
5458 key_field = hist_data->fields[i];
5459
5460 if (key_field->flags & HIST_FIELD_FL_STACKTRACE) {
5461 memset(entries, 0, HIST_STACKTRACE_SIZE);
5462 stack_trace_save(entries, HIST_STACKTRACE_DEPTH,
5463 HIST_STACKTRACE_SKIP);
5464 key = entries;
5465 } else {
5466 field_contents = key_field->fn(key_field, elt, rbe, rec);
5467 if (key_field->flags & HIST_FIELD_FL_STRING) {
5468 key = (void *)(unsigned long)field_contents;
5469 use_compound_key = true;
5470 } else
5471 key = (void *)&field_contents;
5472 }
5473
5474 if (use_compound_key)
5475 add_to_key(compound_key, key, key_field, rec);
5476 }
5477
5478 if (use_compound_key)
5479 key = compound_key;
5480
5481 if (hist_data->n_var_refs &&
5482 !resolve_var_refs(hist_data, key, var_ref_vals, false))
5483 return;
5484
5485 elt = tracing_map_insert(hist_data->map, key);
5486 if (!elt)
5487 return;
5488
5489 hist_trigger_elt_update(hist_data, elt, rec, rbe, var_ref_vals);
5490
5491 if (resolve_var_refs(hist_data, key, var_ref_vals, true))
5492 hist_trigger_actions(hist_data, elt, rec, rbe, key, var_ref_vals);
5493 }
5494
5495 static void hist_trigger_stacktrace_print(struct seq_file *m,
5496 unsigned long *stacktrace_entries,
5497 unsigned int max_entries)
5498 {
5499 char str[KSYM_SYMBOL_LEN];
5500 unsigned int spaces = 8;
5501 unsigned int i;
5502
5503 for (i = 0; i < max_entries; i++) {
5504 if (!stacktrace_entries[i])
5505 return;
5506
5507 seq_printf(m, "%*c", 1 + spaces, ' ');
5508 sprint_symbol(str, stacktrace_entries[i]);
5509 seq_printf(m, "%s\n", str);
5510 }
5511 }
5512
5513 static void hist_trigger_print_key(struct seq_file *m,
5514 struct hist_trigger_data *hist_data,
5515 void *key,
5516 struct tracing_map_elt *elt)
5517 {
5518 struct hist_field *key_field;
5519 char str[KSYM_SYMBOL_LEN];
5520 bool multiline = false;
5521 const char *field_name;
5522 unsigned int i;
5523 u64 uval;
5524
5525 seq_puts(m, "{ ");
5526
5527 for_each_hist_key_field(i, hist_data) {
5528 key_field = hist_data->fields[i];
5529
5530 if (i > hist_data->n_vals)
5531 seq_puts(m, ", ");
5532
5533 field_name = hist_field_name(key_field, 0);
5534
5535 if (key_field->flags & HIST_FIELD_FL_HEX) {
5536 uval = *(u64 *)(key + key_field->offset);
5537 seq_printf(m, "%s: %llx", field_name, uval);
5538 } else if (key_field->flags & HIST_FIELD_FL_SYM) {
5539 uval = *(u64 *)(key + key_field->offset);
5540 sprint_symbol_no_offset(str, uval);
5541 seq_printf(m, "%s: [%llx] %-45s", field_name,
5542 uval, str);
5543 } else if (key_field->flags & HIST_FIELD_FL_SYM_OFFSET) {
5544 uval = *(u64 *)(key + key_field->offset);
5545 sprint_symbol(str, uval);
5546 seq_printf(m, "%s: [%llx] %-55s", field_name,
5547 uval, str);
5548 } else if (key_field->flags & HIST_FIELD_FL_EXECNAME) {
5549 struct hist_elt_data *elt_data = elt->private_data;
5550 char *comm;
5551
5552 if (WARN_ON_ONCE(!elt_data))
5553 return;
5554
5555 comm = elt_data->comm;
5556
5557 uval = *(u64 *)(key + key_field->offset);
5558 seq_printf(m, "%s: %-16s[%10llu]", field_name,
5559 comm, uval);
5560 } else if (key_field->flags & HIST_FIELD_FL_SYSCALL) {
5561 const char *syscall_name;
5562
5563 uval = *(u64 *)(key + key_field->offset);
5564 syscall_name = get_syscall_name(uval);
5565 if (!syscall_name)
5566 syscall_name = "unknown_syscall";
5567
5568 seq_printf(m, "%s: %-30s[%3llu]", field_name,
5569 syscall_name, uval);
5570 } else if (key_field->flags & HIST_FIELD_FL_STACKTRACE) {
5571 seq_puts(m, "stacktrace:\n");
5572 hist_trigger_stacktrace_print(m,
5573 key + key_field->offset,
5574 HIST_STACKTRACE_DEPTH);
5575 multiline = true;
5576 } else if (key_field->flags & HIST_FIELD_FL_LOG2) {
5577 seq_printf(m, "%s: ~ 2^%-2llu", field_name,
5578 *(u64 *)(key + key_field->offset));
5579 } else if (key_field->flags & HIST_FIELD_FL_STRING) {
5580 seq_printf(m, "%s: %-50s", field_name,
5581 (char *)(key + key_field->offset));
5582 } else {
5583 uval = *(u64 *)(key + key_field->offset);
5584 seq_printf(m, "%s: %10llu", field_name, uval);
5585 }
5586 }
5587
5588 if (!multiline)
5589 seq_puts(m, " ");
5590
5591 seq_puts(m, "}");
5592 }
5593
5594 static void hist_trigger_entry_print(struct seq_file *m,
5595 struct hist_trigger_data *hist_data,
5596 void *key,
5597 struct tracing_map_elt *elt)
5598 {
5599 const char *field_name;
5600 unsigned int i;
5601
5602 hist_trigger_print_key(m, hist_data, key, elt);
5603
5604 seq_printf(m, " hitcount: %10llu",
5605 tracing_map_read_sum(elt, HITCOUNT_IDX));
5606
5607 for (i = 1; i < hist_data->n_vals; i++) {
5608 field_name = hist_field_name(hist_data->fields[i], 0);
5609
5610 if (hist_data->fields[i]->flags & HIST_FIELD_FL_VAR ||
5611 hist_data->fields[i]->flags & HIST_FIELD_FL_EXPR)
5612 continue;
5613
5614 if (hist_data->fields[i]->flags & HIST_FIELD_FL_HEX) {
5615 seq_printf(m, " %s: %10llx", field_name,
5616 tracing_map_read_sum(elt, i));
5617 } else {
5618 seq_printf(m, " %s: %10llu", field_name,
5619 tracing_map_read_sum(elt, i));
5620 }
5621 }
5622
5623 print_actions(m, hist_data, elt);
5624
5625 seq_puts(m, "\n");
5626 }
5627
5628 static int print_entries(struct seq_file *m,
5629 struct hist_trigger_data *hist_data)
5630 {
5631 struct tracing_map_sort_entry **sort_entries = NULL;
5632 struct tracing_map *map = hist_data->map;
5633 int i, n_entries;
5634
5635 n_entries = tracing_map_sort_entries(map, hist_data->sort_keys,
5636 hist_data->n_sort_keys,
5637 &sort_entries);
5638 if (n_entries < 0)
5639 return n_entries;
5640
5641 for (i = 0; i < n_entries; i++)
5642 hist_trigger_entry_print(m, hist_data,
5643 sort_entries[i]->key,
5644 sort_entries[i]->elt);
5645
5646 tracing_map_destroy_sort_entries(sort_entries, n_entries);
5647
5648 return n_entries;
5649 }
5650
5651 static void hist_trigger_show(struct seq_file *m,
5652 struct event_trigger_data *data, int n)
5653 {
5654 struct hist_trigger_data *hist_data;
5655 int n_entries;
5656
5657 if (n > 0)
5658 seq_puts(m, "\n\n");
5659
5660 seq_puts(m, "# event histogram\n#\n# trigger info: ");
5661 data->ops->print(m, data->ops, data);
5662 seq_puts(m, "#\n\n");
5663
5664 hist_data = data->private_data;
5665 n_entries = print_entries(m, hist_data);
5666 if (n_entries < 0)
5667 n_entries = 0;
5668
5669 track_data_snapshot_print(m, hist_data);
5670
5671 seq_printf(m, "\nTotals:\n Hits: %llu\n Entries: %u\n Dropped: %llu\n",
5672 (u64)atomic64_read(&hist_data->map->hits),
5673 n_entries, (u64)atomic64_read(&hist_data->map->drops));
5674 }
5675
5676 static int hist_show(struct seq_file *m, void *v)
5677 {
5678 struct event_trigger_data *data;
5679 struct trace_event_file *event_file;
5680 int n = 0, ret = 0;
5681
5682 mutex_lock(&event_mutex);
5683
5684 event_file = event_file_data(m->private);
5685 if (unlikely(!event_file)) {
5686 ret = -ENODEV;
5687 goto out_unlock;
5688 }
5689
5690 list_for_each_entry(data, &event_file->triggers, list) {
5691 if (data->cmd_ops->trigger_type == ETT_EVENT_HIST)
5692 hist_trigger_show(m, data, n++);
5693 }
5694
5695 out_unlock:
5696 mutex_unlock(&event_mutex);
5697
5698 return ret;
5699 }
5700
5701 static int event_hist_open(struct inode *inode, struct file *file)
5702 {
5703 int ret;
5704
5705 ret = security_locked_down(LOCKDOWN_TRACEFS);
5706 if (ret)
5707 return ret;
5708
5709 return single_open(file, hist_show, file);
5710 }
5711
5712 const struct file_operations event_hist_fops = {
5713 .open = event_hist_open,
5714 .read = seq_read,
5715 .llseek = seq_lseek,
5716 .release = single_release,
5717 };
5718
5719 static void hist_field_print(struct seq_file *m, struct hist_field *hist_field)
5720 {
5721 const char *field_name = hist_field_name(hist_field, 0);
5722
5723 if (hist_field->var.name)
5724 seq_printf(m, "%s=", hist_field->var.name);
5725
5726 if (hist_field->flags & HIST_FIELD_FL_CPU)
5727 seq_puts(m, "common_cpu");
5728 else if (field_name) {
5729 if (hist_field->flags & HIST_FIELD_FL_VAR_REF ||
5730 hist_field->flags & HIST_FIELD_FL_ALIAS)
5731 seq_putc(m, '$');
5732 seq_printf(m, "%s", field_name);
5733 } else if (hist_field->flags & HIST_FIELD_FL_TIMESTAMP)
5734 seq_puts(m, "common_timestamp");
5735
5736 if (hist_field->flags) {
5737 if (!(hist_field->flags & HIST_FIELD_FL_VAR_REF) &&
5738 !(hist_field->flags & HIST_FIELD_FL_EXPR)) {
5739 const char *flags = get_hist_field_flags(hist_field);
5740
5741 if (flags)
5742 seq_printf(m, ".%s", flags);
5743 }
5744 }
5745 }
5746
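/*
 * Reconstruct the trigger spec shown after "# trigger info:" in the
 * 'hist' file and in the 'trigger' file, e.g. (illustrative):
 *
 *   hist:keys=common_pid:vals=hitcount:sort=hitcount:size=2048 [active]
 */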
static int event_hist_trigger_print(struct seq_file *m,
				    struct event_trigger_ops *ops,
				    struct event_trigger_data *data)
{
	struct hist_trigger_data *hist_data = data->private_data;
	struct hist_field *field;
	bool have_var = false;
	unsigned int i;

	seq_puts(m, "hist:");

	if (data->name)
		seq_printf(m, "%s:", data->name);

	seq_puts(m, "keys=");

	for_each_hist_key_field(i, hist_data) {
		field = hist_data->fields[i];

		if (i > hist_data->n_vals)
			seq_puts(m, ",");

		if (field->flags & HIST_FIELD_FL_STACKTRACE)
			seq_puts(m, "stacktrace");
		else
			hist_field_print(m, field);
	}

	seq_puts(m, ":vals=");

	for_each_hist_val_field(i, hist_data) {
		field = hist_data->fields[i];
		if (field->flags & HIST_FIELD_FL_VAR) {
			have_var = true;
			continue;
		}

		if (i == HITCOUNT_IDX)
			seq_puts(m, "hitcount");
		else {
			seq_puts(m, ",");
			hist_field_print(m, field);
		}
	}

	if (have_var) {
		unsigned int n = 0;

		seq_puts(m, ":");

		for_each_hist_val_field(i, hist_data) {
			field = hist_data->fields[i];

			if (field->flags & HIST_FIELD_FL_VAR) {
				if (n++)
					seq_puts(m, ",");
				hist_field_print(m, field);
			}
		}
	}

	seq_puts(m, ":sort=");

	for (i = 0; i < hist_data->n_sort_keys; i++) {
		struct tracing_map_sort_key *sort_key;
		unsigned int idx, first_key_idx;

		/* skip VAR vals */
		first_key_idx = hist_data->n_vals - hist_data->n_vars;

		sort_key = &hist_data->sort_keys[i];
		idx = sort_key->field_idx;

		if (WARN_ON(idx >= HIST_FIELDS_MAX))
			return -EINVAL;

		if (i > 0)
			seq_puts(m, ",");

		if (idx == HITCOUNT_IDX)
			seq_puts(m, "hitcount");
		else {
			if (idx >= first_key_idx)
				idx += hist_data->n_vars;
			hist_field_print(m, hist_data->fields[idx]);
		}

		if (sort_key->descending)
			seq_puts(m, ".descending");
	}
	seq_printf(m, ":size=%u", (1 << hist_data->map->map_bits));
	if (hist_data->enable_timestamps)
		seq_printf(m, ":clock=%s", hist_data->attrs->clock);

	print_actions_spec(m, hist_data);

	if (data->filter_str)
		seq_printf(m, " if %s", data->filter_str);

	if (data->paused)
		seq_puts(m, " [paused]");
	else
		seq_puts(m, " [active]");

	seq_putc(m, '\n');

	return 0;
}

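/*
 * Take a reference on the trigger; the first reference to a trigger
 * created with a ":name" attribute also registers it as a named
 * trigger so other events can share its histogram data.
 */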
static int event_hist_trigger_init(struct event_trigger_ops *ops,
				   struct event_trigger_data *data)
{
	struct hist_trigger_data *hist_data = data->private_data;

	if (!data->ref && hist_data->attrs->name)
		save_named_trigger(hist_data->attrs->name, data);

	data->ref++;

	return 0;
}

static void unregister_field_var_hists(struct hist_trigger_data *hist_data)
{
	struct trace_event_file *file;
	unsigned int i;
	char *cmd;
	int ret;

	for (i = 0; i < hist_data->n_field_var_hists; i++) {
		file = hist_data->field_var_hists[i]->hist_data->event_file;
		cmd = hist_data->field_var_hists[i]->cmd;
		ret = event_hist_trigger_func(&trigger_hist_cmd, file,
					      "!hist", "hist", cmd);
	}
}

static void event_hist_trigger_free(struct event_trigger_ops *ops,
				    struct event_trigger_data *data)
{
	struct hist_trigger_data *hist_data = data->private_data;

	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	data->ref--;
	if (!data->ref) {
		if (data->name)
			del_named_trigger(data);

		trigger_data_free(data);

		remove_hist_vars(hist_data);

		unregister_field_var_hists(hist_data);

		destroy_hist_data(hist_data);
	}
}

static struct event_trigger_ops event_hist_trigger_ops = {
	.func = event_hist_trigger,
	.print = event_hist_trigger_print,
	.init = event_hist_trigger_init,
	.free = event_hist_trigger_free,
};

static int event_hist_trigger_named_init(struct event_trigger_ops *ops,
					 struct event_trigger_data *data)
{
	data->ref++;

	save_named_trigger(data->named_data->name, data);

	event_hist_trigger_init(ops, data->named_data);

	return 0;
}

static void event_hist_trigger_named_free(struct event_trigger_ops *ops,
					  struct event_trigger_data *data)
{
	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	event_hist_trigger_free(ops, data->named_data);

	data->ref--;
	if (!data->ref) {
		del_named_trigger(data);
		trigger_data_free(data);
	}
}

static struct event_trigger_ops event_hist_trigger_named_ops = {
	.func = event_hist_trigger,
	.print = event_hist_trigger_print,
	.init = event_hist_trigger_named_init,
	.free = event_hist_trigger_named_free,
};

static struct event_trigger_ops *event_hist_get_trigger_ops(char *cmd,
							    char *param)
{
	return &event_hist_trigger_ops;
}

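/*
 * Reset the histogram map for an existing trigger (the ":clear"
 * attribute).  Named triggers are paused around the clear, and
 * tracepoint_synchronize_unregister() waits for in-flight probes
 * before the map is cleared.
 */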
static void hist_clear(struct event_trigger_data *data)
{
	struct hist_trigger_data *hist_data = data->private_data;

	if (data->name)
		pause_named_trigger(data);

	tracepoint_synchronize_unregister();

	tracing_map_clear(hist_data->map);

	if (data->name)
		unpause_named_trigger(data);
}

static bool compatible_field(struct ftrace_event_field *field,
			     struct ftrace_event_field *test_field)
{
	if (field == test_field)
		return true;
	if (field == NULL || test_field == NULL)
		return false;
	if (strcmp(field->name, test_field->name) != 0)
		return false;
	if (strcmp(field->type, test_field->type) != 0)
		return false;
	if (field->size != test_field->size)
		return false;
	if (field->is_signed != test_field->is_signed)
		return false;

	return true;
}

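/*
 * Two hist triggers match when their keys, values, variables, sort
 * keys, actions and (unless ignore_filter) filters are all
 * equivalent.  Used to find an existing trigger for ":pause",
 * ":continue", ":clear" and for '!hist' removal.
 */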
static bool hist_trigger_match(struct event_trigger_data *data,
			       struct event_trigger_data *data_test,
			       struct event_trigger_data *named_data,
			       bool ignore_filter)
{
	struct tracing_map_sort_key *sort_key, *sort_key_test;
	struct hist_trigger_data *hist_data, *hist_data_test;
	struct hist_field *key_field, *key_field_test;
	unsigned int i;

	if (named_data && (named_data != data_test) &&
	    (named_data != data_test->named_data))
		return false;

	if (!named_data && is_named_trigger(data_test))
		return false;

	hist_data = data->private_data;
	hist_data_test = data_test->private_data;

	if (hist_data->n_vals != hist_data_test->n_vals ||
	    hist_data->n_fields != hist_data_test->n_fields ||
	    hist_data->n_sort_keys != hist_data_test->n_sort_keys)
		return false;

	if (!ignore_filter) {
		if ((data->filter_str && !data_test->filter_str) ||
		    (!data->filter_str && data_test->filter_str))
			return false;
	}

	for_each_hist_field(i, hist_data) {
		key_field = hist_data->fields[i];
		key_field_test = hist_data_test->fields[i];

		if (key_field->flags != key_field_test->flags)
			return false;
		if (!compatible_field(key_field->field, key_field_test->field))
			return false;
		if (key_field->offset != key_field_test->offset)
			return false;
		if (key_field->size != key_field_test->size)
			return false;
		if (key_field->is_signed != key_field_test->is_signed)
			return false;
		if (!!key_field->var.name != !!key_field_test->var.name)
			return false;
		if (key_field->var.name &&
		    strcmp(key_field->var.name, key_field_test->var.name) != 0)
			return false;
	}

	for (i = 0; i < hist_data->n_sort_keys; i++) {
		sort_key = &hist_data->sort_keys[i];
		sort_key_test = &hist_data_test->sort_keys[i];

		if (sort_key->field_idx != sort_key_test->field_idx ||
		    sort_key->descending != sort_key_test->descending)
			return false;
	}

	if (!ignore_filter && data->filter_str &&
	    (strcmp(data->filter_str, data_test->filter_str) != 0))
		return false;

	if (!actions_match(hist_data, hist_data_test))
		return false;

	return true;
}

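/*
 * Attach a hist trigger to an event file.  If an equivalent trigger
 * already exists, ":pause", ":continue" and ":clear" are applied to
 * it instead and nothing new is registered; an exact duplicate
 * without one of those attributes is rejected with -EEXIST.
 */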
static int hist_register_trigger(char *glob, struct event_trigger_ops *ops,
				 struct event_trigger_data *data,
				 struct trace_event_file *file)
{
	struct hist_trigger_data *hist_data = data->private_data;
	struct event_trigger_data *test, *named_data = NULL;
	struct trace_array *tr = file->tr;
	int ret = 0;

	if (hist_data->attrs->name) {
		named_data = find_named_trigger(hist_data->attrs->name);
		if (named_data) {
			if (!hist_trigger_match(data, named_data, named_data,
						true)) {
				hist_err(tr, HIST_ERR_NAMED_MISMATCH, errpos(hist_data->attrs->name));
				ret = -EINVAL;
				goto out;
			}
		}
	}

	if (hist_data->attrs->name && !named_data)
		goto new;

	lockdep_assert_held(&event_mutex);

	list_for_each_entry(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			if (!hist_trigger_match(data, test, named_data, false))
				continue;
			if (hist_data->attrs->pause)
				test->paused = true;
			else if (hist_data->attrs->cont)
				test->paused = false;
			else if (hist_data->attrs->clear)
				hist_clear(test);
			else {
				hist_err(tr, HIST_ERR_TRIGGER_EEXIST, 0);
				ret = -EEXIST;
			}
			goto out;
		}
	}
 new:
	if (hist_data->attrs->cont || hist_data->attrs->clear) {
		hist_err(tr, HIST_ERR_TRIGGER_ENOENT_CLEAR, 0);
		ret = -ENOENT;
		goto out;
	}

	if (hist_data->attrs->pause)
		data->paused = true;

	if (named_data) {
		data->private_data = named_data->private_data;
		set_named_trigger_data(data, named_data);
		data->ops = &event_hist_trigger_named_ops;
	}

	if (data->ops->init) {
		ret = data->ops->init(data->ops, data);
		if (ret < 0)
			goto out;
	}

	if (hist_data->enable_timestamps) {
		char *clock = hist_data->attrs->clock;

		ret = tracing_set_clock(file->tr, hist_data->attrs->clock);
		if (ret) {
			hist_err(tr, HIST_ERR_SET_CLOCK_FAIL, errpos(clock));
			goto out;
		}

		tracing_set_time_stamp_abs(file->tr, true);
	}

	if (named_data)
		destroy_hist_data(hist_data);

	ret++;
 out:
	return ret;
}

static int hist_trigger_enable(struct event_trigger_data *data,
			       struct trace_event_file *file)
{
	int ret = 0;

	list_add_tail_rcu(&data->list, &file->triggers);

	update_cond_flag(file);

	if (trace_event_trigger_enable_disable(file, 1) < 0) {
		list_del_rcu(&data->list);
		update_cond_flag(file);
		ret--;
	}

	return ret;
}

static bool have_hist_trigger_match(struct event_trigger_data *data,
				    struct trace_event_file *file)
{
	struct hist_trigger_data *hist_data = data->private_data;
	struct event_trigger_data *test, *named_data = NULL;
	bool match = false;

	lockdep_assert_held(&event_mutex);

	if (hist_data->attrs->name)
		named_data = find_named_trigger(hist_data->attrs->name);

	list_for_each_entry(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			if (hist_trigger_match(data, test, named_data, false)) {
				match = true;
				break;
			}
		}
	}

	return match;
}

static bool hist_trigger_check_refs(struct event_trigger_data *data,
				    struct trace_event_file *file)
{
	struct hist_trigger_data *hist_data = data->private_data;
	struct event_trigger_data *test, *named_data = NULL;

	lockdep_assert_held(&event_mutex);

	if (hist_data->attrs->name)
		named_data = find_named_trigger(hist_data->attrs->name);

	list_for_each_entry(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			if (!hist_trigger_match(data, test, named_data, false))
				continue;
			hist_data = test->private_data;
			if (check_var_refs(hist_data))
				return true;
			break;
		}
	}

	return false;
}

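/*
 * Remove a matching hist trigger from the event file and drop its
 * reference via ops->free().  If this histogram had switched the
 * trace buffer to absolute timestamps, that setting is released
 * here as well.
 */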
static void hist_unregister_trigger(char *glob, struct event_trigger_ops *ops,
				    struct event_trigger_data *data,
				    struct trace_event_file *file)
{
	struct hist_trigger_data *hist_data = data->private_data;
	struct event_trigger_data *test, *named_data = NULL;
	bool unregistered = false;

	lockdep_assert_held(&event_mutex);

	if (hist_data->attrs->name)
		named_data = find_named_trigger(hist_data->attrs->name);

	list_for_each_entry(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			if (!hist_trigger_match(data, test, named_data, false))
				continue;
			unregistered = true;
			list_del_rcu(&test->list);
			trace_event_trigger_enable_disable(file, 0);
			update_cond_flag(file);
			break;
		}
	}

	if (unregistered && test->ops->free)
		test->ops->free(test->ops, test);

	if (hist_data->enable_timestamps) {
		if (!hist_data->remove || unregistered)
			tracing_set_time_stamp_abs(file->tr, false);
	}
}

static bool hist_file_check_refs(struct trace_event_file *file)
{
	struct hist_trigger_data *hist_data;
	struct event_trigger_data *test;

	lockdep_assert_held(&event_mutex);

	list_for_each_entry(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			hist_data = test->private_data;
			if (check_var_refs(hist_data))
				return true;
		}
	}

	return false;
}

static void hist_unreg_all(struct trace_event_file *file)
{
	struct event_trigger_data *test, *n;
	struct hist_trigger_data *hist_data;
	struct synth_event *se;
	const char *se_name;

	lockdep_assert_held(&event_mutex);

	if (hist_file_check_refs(file))
		return;

	list_for_each_entry_safe(test, n, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			hist_data = test->private_data;
			list_del_rcu(&test->list);
			trace_event_trigger_enable_disable(file, 0);

			se_name = trace_event_name(file->event_call);
			se = find_synth_event(se_name);
			if (se)
				se->ref--;

			update_cond_flag(file);
			if (hist_data->enable_timestamps)
				tracing_set_time_stamp_abs(file->tr, false);
			if (test->ops->free)
				test->ops->free(test->ops, test);
		}
	}
}

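/*
 * Parse and apply a 'hist' trigger command written to an event's
 * 'trigger' file, e.g. (illustrative):
 *
 *   echo 'hist:keys=common_pid:vals=hitcount:sort=hitcount.descending if bytes_req > 128' > \
 *         /sys/kernel/tracing/events/kmem/kmalloc/trigger
 *
 * A leading '!' removes a previously established trigger instead.
 */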
static int event_hist_trigger_func(struct event_command *cmd_ops,
				   struct trace_event_file *file,
				   char *glob, char *cmd, char *param)
{
	unsigned int hist_trigger_bits = TRACING_MAP_BITS_DEFAULT;
	struct event_trigger_data *trigger_data;
	struct hist_trigger_attrs *attrs;
	struct event_trigger_ops *trigger_ops;
	struct hist_trigger_data *hist_data;
	struct synth_event *se;
	const char *se_name;
	bool remove = false;
	char *trigger, *p;
	int ret = 0;

	lockdep_assert_held(&event_mutex);

	if (glob && strlen(glob)) {
		hist_err_clear();
		last_cmd_set(file, param);
	}

	if (!param)
		return -EINVAL;

	if (glob[0] == '!')
		remove = true;

	/*
	 * separate the trigger from the filter (k:v [if filter])
	 * allowing for whitespace in the trigger
	 */
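	/*
	 * For example (illustrative), "keys=common_pid if bytes_req > 128"
	 * is split into the trigger "keys=common_pid" and the filter
	 * "bytes_req > 128"; only an "if" surrounded by whitespace acts
	 * as the separator.
	 */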
	p = trigger = param;
	do {
		p = strstr(p, "if");
		if (!p)
			break;
		if (p == param)
			return -EINVAL;
		if (*(p - 1) != ' ' && *(p - 1) != '\t') {
			p++;
			continue;
		}
		if (p >= param + strlen(param) - (sizeof("if") - 1) - 1)
			return -EINVAL;
		if (*(p + sizeof("if") - 1) != ' ' && *(p + sizeof("if") - 1) != '\t') {
			p++;
			continue;
		}
		break;
	} while (p);

	if (!p)
		param = NULL;
	else {
		*(p - 1) = '\0';
		param = strstrip(p);
		trigger = strstrip(trigger);
	}

	attrs = parse_hist_trigger_attrs(file->tr, trigger);
	if (IS_ERR(attrs))
		return PTR_ERR(attrs);

	if (attrs->map_bits)
		hist_trigger_bits = attrs->map_bits;

	hist_data = create_hist_data(hist_trigger_bits, attrs, file, remove);
	if (IS_ERR(hist_data)) {
		destroy_hist_trigger_attrs(attrs);
		return PTR_ERR(hist_data);
	}

	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);

	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
	if (!trigger_data) {
		ret = -ENOMEM;
		goto out_free;
	}

	trigger_data->count = -1;
	trigger_data->ops = trigger_ops;
	trigger_data->cmd_ops = cmd_ops;

	INIT_LIST_HEAD(&trigger_data->list);
	RCU_INIT_POINTER(trigger_data->filter, NULL);

	trigger_data->private_data = hist_data;

	/* if param is non-empty, it's supposed to be a filter */
	if (param && cmd_ops->set_filter) {
		ret = cmd_ops->set_filter(param, trigger_data, file);
		if (ret < 0)
			goto out_free;
	}

	if (remove) {
		if (!have_hist_trigger_match(trigger_data, file))
			goto out_free;

		if (hist_trigger_check_refs(trigger_data, file)) {
			ret = -EBUSY;
			goto out_free;
		}

		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
		se_name = trace_event_name(file->event_call);
		se = find_synth_event(se_name);
		if (se)
			se->ref--;
		ret = 0;
		goto out_free;
	}

	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
	/*
	 * The above returns on success the # of triggers registered,
	 * but if it didn't register any it returns zero.  Consider no
	 * triggers registered a failure too.
	 */
	if (!ret) {
		if (!(attrs->pause || attrs->cont || attrs->clear))
			ret = -ENOENT;
		goto out_free;
	} else if (ret < 0)
		goto out_free;

	if (get_named_trigger_data(trigger_data))
		goto enable;

	ret = create_actions(hist_data);
	if (ret)
		goto out_unreg;

	if (has_hist_vars(hist_data) || hist_data->n_var_refs) {
		ret = save_hist_vars(hist_data);
		if (ret)
			goto out_unreg;
	}

	ret = tracing_map_init(hist_data->map);
	if (ret)
		goto out_unreg;
 enable:
	ret = hist_trigger_enable(trigger_data, file);
	if (ret)
		goto out_unreg;

	se_name = trace_event_name(file->event_call);
	se = find_synth_event(se_name);
	if (se)
		se->ref++;
	/* Just return zero, not the number of registered triggers */
	ret = 0;
 out:
	if (ret == 0 && glob[0])
		hist_err_clear();

	return ret;
 out_unreg:
	cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
 out_free:
	if (cmd_ops->set_filter)
		cmd_ops->set_filter(NULL, trigger_data, NULL);

	remove_hist_vars(hist_data);

	kfree(trigger_data);

	destroy_hist_data(hist_data);
	goto out;
}

static struct event_command trigger_hist_cmd = {
	.name = "hist",
	.trigger_type = ETT_EVENT_HIST,
	.flags = EVENT_CMD_FL_NEEDS_REC,
	.func = event_hist_trigger_func,
	.reg = hist_register_trigger,
	.unreg = hist_unregister_trigger,
	.unreg_all = hist_unreg_all,
	.get_trigger_ops = event_hist_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

__init int register_trigger_hist_cmd(void)
{
	int ret;

	ret = register_event_command(&trigger_hist_cmd);
	WARN_ON(ret < 0);

	return ret;
}

static void
hist_enable_trigger(struct event_trigger_data *data, void *rec,
		    struct ring_buffer_event *event)
{
	struct enable_trigger_data *enable_data = data->private_data;
	struct event_trigger_data *test;

	list_for_each_entry_rcu(test, &enable_data->file->triggers, list,
				lockdep_is_held(&event_mutex)) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			if (enable_data->enable)
				test->paused = false;
			else
				test->paused = true;
		}
	}
}

static void
hist_enable_count_trigger(struct event_trigger_data *data, void *rec,
			  struct ring_buffer_event *event)
{
	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	hist_enable_trigger(data, rec, event);
}

static struct event_trigger_ops hist_enable_trigger_ops = {
	.func = hist_enable_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};

static struct event_trigger_ops hist_enable_count_trigger_ops = {
	.func = hist_enable_count_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};

static struct event_trigger_ops hist_disable_trigger_ops = {
	.func = hist_enable_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};

static struct event_trigger_ops hist_disable_count_trigger_ops = {
	.func = hist_enable_count_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};

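/*
 * Select the ops for an enable_hist/disable_hist trigger; the
 * counted variants are used when an optional ":count" parameter was
 * given with the command.
 */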
static struct event_trigger_ops *
hist_enable_get_trigger_ops(char *cmd, char *param)
{
	struct event_trigger_ops *ops;
	bool enable;

	enable = (strcmp(cmd, ENABLE_HIST_STR) == 0);

	if (enable)
		ops = param ? &hist_enable_count_trigger_ops :
			&hist_enable_trigger_ops;
	else
		ops = param ? &hist_disable_count_trigger_ops :
			&hist_disable_trigger_ops;

	return ops;
}

static void hist_enable_unreg_all(struct trace_event_file *file)
{
	struct event_trigger_data *test, *n;

	list_for_each_entry_safe(test, n, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_HIST_ENABLE) {
			list_del_rcu(&test->list);
			update_cond_flag(file);
			trace_event_trigger_enable_disable(file, 0);
			if (test->ops->free)
				test->ops->free(test->ops, test);
		}
	}
}

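/*
 * The enable_hist/disable_hist commands, e.g. (illustrative):
 *
 *   echo 'enable_hist:kmem:kmalloc' > \
 *         /sys/kernel/tracing/events/sched/sched_switch/trigger
 *
 * un-pause or pause the hist triggers of another event rather than
 * registering a histogram of their own.
 */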
static struct event_command trigger_hist_enable_cmd = {
	.name = ENABLE_HIST_STR,
	.trigger_type = ETT_HIST_ENABLE,
	.func = event_enable_trigger_func,
	.reg = event_enable_register_trigger,
	.unreg = event_enable_unregister_trigger,
	.unreg_all = hist_enable_unreg_all,
	.get_trigger_ops = hist_enable_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

static struct event_command trigger_hist_disable_cmd = {
	.name = DISABLE_HIST_STR,
	.trigger_type = ETT_HIST_ENABLE,
	.func = event_enable_trigger_func,
	.reg = event_enable_register_trigger,
	.unreg = event_enable_unregister_trigger,
	.unreg_all = hist_enable_unreg_all,
	.get_trigger_ops = hist_enable_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

static __init void unregister_trigger_hist_enable_disable_cmds(void)
{
	unregister_event_command(&trigger_hist_enable_cmd);
	unregister_event_command(&trigger_hist_disable_cmd);
}

__init int register_trigger_hist_enable_disable_cmds(void)
{
	int ret;

	ret = register_event_command(&trigger_hist_enable_cmd);
	if (WARN_ON(ret < 0))
		return ret;
	ret = register_event_command(&trigger_hist_disable_cmd);
	if (WARN_ON(ret < 0))
		unregister_trigger_hist_enable_disable_cmds();

	return ret;
}

static __init int trace_events_hist_init(void)
{
	struct dentry *entry = NULL;
	struct dentry *d_tracer;
	int err = 0;

	err = dyn_event_register(&synth_event_ops);
	if (err) {
		pr_warn("Could not register synth_event_ops\n");
		return err;
	}

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer)) {
		err = PTR_ERR(d_tracer);
		goto err;
	}

	entry = tracefs_create_file("synthetic_events", 0644, d_tracer,
				    NULL, &synth_events_fops);
	if (!entry) {
		err = -ENODEV;
		goto err;
	}

	return err;
 err:
	pr_warn("Could not create tracefs 'synthetic_events' entry\n");

	return err;
}

fs_initcall(trace_events_hist_init);