// SPDX-License-Identifier: GPL-2.0
/*
 * trace_events_hist - trace event hist triggers
 *
 * Copyright (C) 2015 Tom Zanussi <tom.zanussi@linux.intel.com>
 */

#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/security.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/rculist.h>
#include <linux/tracefs.h>

/* for gfp flag names */
#include <linux/trace_events.h>
#include <trace/events/mmflags.h>

#include "tracing_map.h"
#include "trace_synth.h"

#define ERRORS \
	C(NONE, "No error"), \
	C(DUPLICATE_VAR, "Variable already defined"), \
	C(VAR_NOT_UNIQUE, "Variable name not unique, need to use fully qualified name (subsys.event.var) for variable"), \
	C(TOO_MANY_VARS, "Too many variables defined"), \
	C(MALFORMED_ASSIGNMENT, "Malformed assignment"), \
	C(NAMED_MISMATCH, "Named hist trigger doesn't match existing named trigger (includes variables)"), \
	C(TRIGGER_EEXIST, "Hist trigger already exists"), \
	C(TRIGGER_ENOENT_CLEAR, "Can't clear or continue a nonexistent hist trigger"), \
	C(SET_CLOCK_FAIL, "Couldn't set trace_clock"), \
	C(BAD_FIELD_MODIFIER, "Invalid field modifier"), \
	C(TOO_MANY_SUBEXPR, "Too many subexpressions (3 max)"), \
	C(TIMESTAMP_MISMATCH, "Timestamp units in expression don't match"), \
	C(TOO_MANY_FIELD_VARS, "Too many field variables defined"), \
	C(EVENT_FILE_NOT_FOUND, "Event file not found"), \
	C(HIST_NOT_FOUND, "Matching event histogram not found"), \
	C(HIST_CREATE_FAIL, "Couldn't create histogram for field"), \
	C(SYNTH_VAR_NOT_FOUND, "Couldn't find synthetic variable"), \
	C(SYNTH_EVENT_NOT_FOUND, "Couldn't find synthetic event"), \
	C(SYNTH_TYPE_MISMATCH, "Param type doesn't match synthetic event field type"), \
	C(SYNTH_COUNT_MISMATCH, "Param count doesn't match synthetic event field count"), \
	C(FIELD_VAR_PARSE_FAIL, "Couldn't parse field variable"), \
	C(VAR_CREATE_FIND_FAIL, "Couldn't create or find variable"), \
	C(ONX_NOT_VAR, "For onmax(x) or onchange(x), x must be a variable"), \
	C(ONX_VAR_NOT_FOUND, "Couldn't find onmax or onchange variable"), \
	C(ONX_VAR_CREATE_FAIL, "Couldn't create onmax or onchange variable"), \
	C(FIELD_VAR_CREATE_FAIL, "Couldn't create field variable"), \
	C(TOO_MANY_PARAMS, "Too many action params"), \
	C(PARAM_NOT_FOUND, "Couldn't find param"), \
	C(INVALID_PARAM, "Invalid action param"), \
	C(ACTION_NOT_FOUND, "No action found"), \
	C(NO_SAVE_PARAMS, "No params found for save()"), \
	C(TOO_MANY_SAVE_ACTIONS, "Can't have more than one save() action per hist"), \
	C(ACTION_MISMATCH, "Handler doesn't support action"), \
	C(NO_CLOSING_PAREN, "No closing paren found"), \
	C(SUBSYS_NOT_FOUND, "Missing subsystem"), \
	C(INVALID_SUBSYS_EVENT, "Invalid subsystem or event name"), \
	C(INVALID_REF_KEY, "Using variable references in keys not supported"), \
	C(VAR_NOT_FOUND, "Couldn't find variable"), \
	C(FIELD_NOT_FOUND, "Couldn't find field"), \
	C(EMPTY_ASSIGNMENT, "Empty assignment"), \
	C(INVALID_SORT_MODIFIER, "Invalid sort modifier"), \
	C(EMPTY_SORT_FIELD, "Empty sort field"), \
	C(TOO_MANY_SORT_FIELDS, "Too many sort fields (Max = 2)"), \
	C(INVALID_SORT_FIELD, "Sort field must be a key or a val"), \
	C(INVALID_STR_OPERAND, "String type can not be an operand in expression"),

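/*
 * The ERRORS list is expanded twice with different C() definitions: once
 * to generate the HIST_ERR_* enum values and once to generate the matching
 * strings in err_text[], keeping the two in sync by construction.
 */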
#undef C
#define C(a, b) HIST_ERR_##a

enum { ERRORS };

#undef C
#define C(a, b) b

static const char *err_text[] = { ERRORS };

struct hist_field;

typedef u64 (*hist_field_fn_t) (struct hist_field *field,
			struct tracing_map_elt *elt,
			struct ring_buffer_event *rbe,
			void *event);

#define HIST_FIELD_OPERANDS_MAX 2
#define HIST_FIELDS_MAX (TRACING_MAP_FIELDS_MAX + TRACING_MAP_VARS_MAX)
#define HIST_ACTIONS_MAX 8

enum field_op_id {
	FIELD_OP_NONE,
	FIELD_OP_PLUS,
	FIELD_OP_MINUS,
	FIELD_OP_UNARY_MINUS,
};

/*
 * A hist_var (histogram variable) contains variable information for
 * hist_fields having the HIST_FIELD_FL_VAR or HIST_FIELD_FL_VAR_REF
 * flag set. A hist_var has a variable name e.g. ts0, and is
 * associated with a given histogram trigger, as specified by
 * hist_data. The hist_var idx is the unique index assigned to the
 * variable by the hist trigger's tracing_map. The idx is what is
 * used to set a variable's value and, by a variable reference, to
 * retrieve it.
 */
struct hist_var {
	char *name;
	struct hist_trigger_data *hist_data;
	unsigned int idx;
};

struct hist_field {
	struct ftrace_event_field *field;
	unsigned long flags;
	hist_field_fn_t fn;
	unsigned int ref;
	unsigned int size;
	unsigned int offset;
	unsigned int is_signed;
	const char *type;
	struct hist_field *operands[HIST_FIELD_OPERANDS_MAX];
	struct hist_trigger_data *hist_data;

	/*
	 * Variable fields contain variable-specific info in var.
	 */
	struct hist_var var;
	enum field_op_id operator;
	char *system;
	char *event_name;

	/*
	 * The name field is used for EXPR and VAR_REF fields. VAR
	 * fields contain the variable name in var.name.
	 */
	char *name;

	/*
	 * When a histogram trigger is hit, if it has any references
	 * to variables, the values of those variables are collected
	 * into a var_ref_vals array by resolve_var_refs(). The
	 * current value of each variable is read from the tracing_map
	 * using the hist field's hist_var.idx and entered into the
	 * var_ref_idx entry i.e. var_ref_vals[var_ref_idx].
	 */
	unsigned int var_ref_idx;
	bool read_once;

	unsigned int var_str_idx;
};

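/*
 * hist_field_fn_t implementations: each one resolves a hist_field to a
 * u64 value at trigger-hit time, given the tracing_map element, the ring
 * buffer event and the raw event record.
 */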
static u64 hist_field_none(struct hist_field *field,
			struct tracing_map_elt *elt,
			struct ring_buffer_event *rbe,
			void *event)
{
	return 0;
}

static u64 hist_field_counter(struct hist_field *field,
			struct tracing_map_elt *elt,
			struct ring_buffer_event *rbe,
			void *event)
{
	return 1;
}

static u64 hist_field_string(struct hist_field *hist_field,
			struct tracing_map_elt *elt,
			struct ring_buffer_event *rbe,
			void *event)
{
	char *addr = (char *)(event + hist_field->field->offset);

	return (u64)(unsigned long)addr;
}

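/*
 * Dynamic strings are recorded as a u32 __data_loc word; the low 16 bits
 * hold the offset of the string data within the event record (the high
 * 16 bits hold its length).
 */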
static u64 hist_field_dynstring(struct hist_field *hist_field,
			struct tracing_map_elt *elt,
			struct ring_buffer_event *rbe,
			void *event)
{
	u32 str_item = *(u32 *)(event + hist_field->field->offset);
	int str_loc = str_item & 0xffff;
	char *addr = (char *)(event + str_loc);

	return (u64)(unsigned long)addr;
}

static u64 hist_field_pstring(struct hist_field *hist_field,
			struct tracing_map_elt *elt,
			struct ring_buffer_event *rbe,
			void *event)
{
	char **addr = (char **)(event + hist_field->field->offset);

	return (u64)(unsigned long)*addr;
}

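/*
 * The .log2 modifier buckets a value by the base-2 log of the next power
 * of two, i.e. ilog2(roundup_pow_of_two(val)).
 */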
static u64 hist_field_log2(struct hist_field *hist_field,
			struct tracing_map_elt *elt,
			struct ring_buffer_event *rbe,
			void *event)
{
	struct hist_field *operand = hist_field->operands[0];

	u64 val = operand->fn(operand, elt, rbe, event);

	return (u64) ilog2(roundup_pow_of_two(val));
}

static u64 hist_field_plus(struct hist_field *hist_field,
			struct tracing_map_elt *elt,
			struct ring_buffer_event *rbe,
			void *event)
{
	struct hist_field *operand1 = hist_field->operands[0];
	struct hist_field *operand2 = hist_field->operands[1];

	u64 val1 = operand1->fn(operand1, elt, rbe, event);
	u64 val2 = operand2->fn(operand2, elt, rbe, event);

	return val1 + val2;
}

static u64 hist_field_minus(struct hist_field *hist_field,
			struct tracing_map_elt *elt,
			struct ring_buffer_event *rbe,
			void *event)
{
	struct hist_field *operand1 = hist_field->operands[0];
	struct hist_field *operand2 = hist_field->operands[1];

	u64 val1 = operand1->fn(operand1, elt, rbe, event);
	u64 val2 = operand2->fn(operand2, elt, rbe, event);

	return val1 - val2;
}

static u64 hist_field_unary_minus(struct hist_field *hist_field,
			struct tracing_map_elt *elt,
			struct ring_buffer_event *rbe,
			void *event)
{
	struct hist_field *operand = hist_field->operands[0];

	s64 sval = (s64)operand->fn(operand, elt, rbe, event);
	u64 val = (u64)-sval;

	return val;
}

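/*
 * Generate the fixed-width fetch helpers hist_field_s64() through
 * hist_field_u8(); select_value_fn() picks the right one based on a
 * field's size and signedness.
 */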
#define DEFINE_HIST_FIELD_FN(type) \
static u64 hist_field_##type(struct hist_field *hist_field, \
			struct tracing_map_elt *elt, \
			struct ring_buffer_event *rbe, \
			void *event) \
{ \
	type *addr = (type *)(event + hist_field->field->offset); \
	\
	return (u64)(unsigned long)*addr; \
}

DEFINE_HIST_FIELD_FN(s64);
DEFINE_HIST_FIELD_FN(u64);
DEFINE_HIST_FIELD_FN(s32);
DEFINE_HIST_FIELD_FN(u32);
DEFINE_HIST_FIELD_FN(s16);
DEFINE_HIST_FIELD_FN(u16);
DEFINE_HIST_FIELD_FN(s8);
DEFINE_HIST_FIELD_FN(u8);

#define for_each_hist_field(i, hist_data) \
	for ((i) = 0; (i) < (hist_data)->n_fields; (i)++)

#define for_each_hist_val_field(i, hist_data) \
	for ((i) = 0; (i) < (hist_data)->n_vals; (i)++)

#define for_each_hist_key_field(i, hist_data) \
	for ((i) = (hist_data)->n_vals; (i) < (hist_data)->n_fields; (i)++)

#define HIST_STACKTRACE_DEPTH 16
#define HIST_STACKTRACE_SIZE (HIST_STACKTRACE_DEPTH * sizeof(unsigned long))
#define HIST_STACKTRACE_SKIP 5

#define HITCOUNT_IDX 0
#define HIST_KEY_SIZE_MAX (MAX_FILTER_STR_VAL + HIST_STACKTRACE_SIZE)

enum hist_field_flags {
	HIST_FIELD_FL_HITCOUNT = 1 << 0,
	HIST_FIELD_FL_KEY = 1 << 1,
	HIST_FIELD_FL_STRING = 1 << 2,
	HIST_FIELD_FL_HEX = 1 << 3,
	HIST_FIELD_FL_SYM = 1 << 4,
	HIST_FIELD_FL_SYM_OFFSET = 1 << 5,
	HIST_FIELD_FL_EXECNAME = 1 << 6,
	HIST_FIELD_FL_SYSCALL = 1 << 7,
	HIST_FIELD_FL_STACKTRACE = 1 << 8,
	HIST_FIELD_FL_LOG2 = 1 << 9,
	HIST_FIELD_FL_TIMESTAMP = 1 << 10,
	HIST_FIELD_FL_TIMESTAMP_USECS = 1 << 11,
	HIST_FIELD_FL_VAR = 1 << 12,
	HIST_FIELD_FL_EXPR = 1 << 13,
	HIST_FIELD_FL_VAR_REF = 1 << 14,
	HIST_FIELD_FL_CPU = 1 << 15,
	HIST_FIELD_FL_ALIAS = 1 << 16,
};

struct var_defs {
	unsigned int n_vars;
	char *name[TRACING_MAP_VARS_MAX];
	char *expr[TRACING_MAP_VARS_MAX];
};

struct hist_trigger_attrs {
	char *keys_str;
	char *vals_str;
	char *sort_key_str;
	char *name;
	char *clock;
	bool pause;
	bool cont;
	bool clear;
	bool ts_in_usecs;
	unsigned int map_bits;

	char *assignment_str[TRACING_MAP_VARS_MAX];
	unsigned int n_assignments;

	char *action_str[HIST_ACTIONS_MAX];
	unsigned int n_actions;

	struct var_defs var_defs;
};

struct field_var {
	struct hist_field *var;
	struct hist_field *val;
};

struct field_var_hist {
	struct hist_trigger_data *hist_data;
	char *cmd;
};

struct hist_trigger_data {
	struct hist_field *fields[HIST_FIELDS_MAX];
	unsigned int n_vals;
	unsigned int n_keys;
	unsigned int n_fields;
	unsigned int n_vars;
	unsigned int n_var_str;
	unsigned int key_size;
	struct tracing_map_sort_key sort_keys[TRACING_MAP_SORT_KEYS_MAX];
	unsigned int n_sort_keys;
	struct trace_event_file *event_file;
	struct hist_trigger_attrs *attrs;
	struct tracing_map *map;
	bool enable_timestamps;
	bool remove;
	struct hist_field *var_refs[TRACING_MAP_VARS_MAX];
	unsigned int n_var_refs;

	struct action_data *actions[HIST_ACTIONS_MAX];
	unsigned int n_actions;

	struct field_var *field_vars[SYNTH_FIELDS_MAX];
	unsigned int n_field_vars;
	unsigned int n_field_var_str;
	struct field_var_hist *field_var_hists[SYNTH_FIELDS_MAX];
	unsigned int n_field_var_hists;

	struct field_var *save_vars[SYNTH_FIELDS_MAX];
	unsigned int n_save_vars;
	unsigned int n_save_var_str;
};

struct action_data;

typedef void (*action_fn_t) (struct hist_trigger_data *hist_data,
			struct tracing_map_elt *elt, void *rec,
			struct ring_buffer_event *rbe, void *key,
			struct action_data *data, u64 *var_ref_vals);

typedef bool (*check_track_val_fn_t) (u64 track_val, u64 var_val);

enum handler_id {
	HANDLER_ONMATCH = 1,
	HANDLER_ONMAX,
	HANDLER_ONCHANGE,
};

enum action_id {
	ACTION_SAVE = 1,
	ACTION_TRACE,
	ACTION_SNAPSHOT,
};

struct action_data {
	enum handler_id handler;
	enum action_id action;
	char *action_name;
	action_fn_t fn;

	unsigned int n_params;
	char *params[SYNTH_FIELDS_MAX];

	/*
	 * When a histogram trigger is hit, the values of any
	 * references to variables, including variables being passed
	 * as parameters to synthetic events, are collected into a
	 * var_ref_vals array. This var_ref_idx array is an array of
	 * indices into the var_ref_vals array, one for each synthetic
	 * event param, and is passed to the synthetic event
	 * invocation.
	 */
	unsigned int var_ref_idx[SYNTH_FIELDS_MAX];
	struct synth_event *synth_event;
	bool use_trace_keyword;
	char *synth_event_name;

	union {
		struct {
			char *event;
			char *event_system;
		} match_data;

		struct {
			/*
			 * var_str contains the $-unstripped variable
			 * name referenced by var_ref, and used when
			 * printing the action. Because var_ref
			 * creation is deferred to create_actions(),
			 * we need a per-action way to save it until
			 * then, thus var_str.
			 */
			char *var_str;

			/*
			 * var_ref refers to the variable being
			 * tracked e.g. onmax($var).
			 */
			struct hist_field *var_ref;

			/*
			 * track_var contains the 'invisible' tracking
			 * variable created to keep the current
			 * e.g. max value.
			 */
			struct hist_field *track_var;

			check_track_val_fn_t check_val;
			action_fn_t save_data;
		} track_data;
	};
};

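/*
 * track_data holds the context saved for an onmax()/onchange() handler
 * with a snapshot action: the tracked value, the key of the triggering
 * element and the owning action/hist trigger, used when the snapshot is
 * taken and later reported.
 */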
struct track_data {
	u64 track_val;
	bool updated;

	unsigned int key_len;
	void *key;
	struct tracing_map_elt elt;

	struct action_data *action_data;
	struct hist_trigger_data *hist_data;
};

struct hist_elt_data {
	char *comm;
	u64 *var_ref_vals;
	char *field_var_str[SYNTH_FIELDS_MAX];
};

struct snapshot_context {
	struct tracing_map_elt *elt;
	void *key;
};

static void track_data_free(struct track_data *track_data)
{
	struct hist_elt_data *elt_data;

	if (!track_data)
		return;

	kfree(track_data->key);

	elt_data = track_data->elt.private_data;
	if (elt_data) {
		kfree(elt_data->comm);
		kfree(elt_data);
	}

	kfree(track_data);
}

static struct track_data *track_data_alloc(unsigned int key_len,
			struct action_data *action_data,
			struct hist_trigger_data *hist_data)
{
	struct track_data *data = kzalloc(sizeof(*data), GFP_KERNEL);
	struct hist_elt_data *elt_data;

	if (!data)
		return ERR_PTR(-ENOMEM);

	data->key = kzalloc(key_len, GFP_KERNEL);
	if (!data->key) {
		track_data_free(data);
		return ERR_PTR(-ENOMEM);
	}

	data->key_len = key_len;
	data->action_data = action_data;
	data->hist_data = hist_data;

	elt_data = kzalloc(sizeof(*elt_data), GFP_KERNEL);
	if (!elt_data) {
		track_data_free(data);
		return ERR_PTR(-ENOMEM);
	}

	data->elt.private_data = elt_data;

	elt_data->comm = kzalloc(TASK_COMM_LEN, GFP_KERNEL);
	if (!elt_data->comm) {
		track_data_free(data);
		return ERR_PTR(-ENOMEM);
	}

	return data;
}

static char last_cmd[MAX_FILTER_STR_VAL];
static char last_cmd_loc[MAX_FILTER_STR_VAL];

static int errpos(char *str)
{
	return err_pos(last_cmd, str);
}

static void last_cmd_set(struct trace_event_file *file, char *str)
{
	const char *system = NULL, *name = NULL;
	struct trace_event_call *call;

	if (!str)
		return;

	strcpy(last_cmd, "hist:");
	strncat(last_cmd, str, MAX_FILTER_STR_VAL - 1 - sizeof("hist:"));

	if (file) {
		call = file->event_call;
		system = call->class->system;
		if (system) {
			name = trace_event_name(call);
			if (!name)
				system = NULL;
		}
	}

	if (system)
		snprintf(last_cmd_loc, MAX_FILTER_STR_VAL, "hist:%s:%s", system, name);
}

static void hist_err(struct trace_array *tr, u8 err_type, u8 err_pos)
{
	tracing_log_err(tr, last_cmd_loc, last_cmd, err_text,
			err_type, err_pos);
}

static void hist_err_clear(void)
{
	last_cmd[0] = '\0';
	last_cmd_loc[0] = '\0';
}

typedef void (*synth_probe_func_t) (void *__data, u64 *var_ref_vals,
			unsigned int *var_ref_idx);

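/*
 * Invoke every probe registered on a synthetic event's tracepoint,
 * mirroring what the generated trace_*() stub of a statically-defined
 * tracepoint would do.
 */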
static inline void trace_synth(struct synth_event *event, u64 *var_ref_vals,
			unsigned int *var_ref_idx)
{
	struct tracepoint *tp = event->tp;

	if (unlikely(atomic_read(&tp->key.enabled) > 0)) {
		struct tracepoint_func *probe_func_ptr;
		synth_probe_func_t probe_func;
		void *__data;

		if (!(cpu_online(raw_smp_processor_id())))
			return;

		probe_func_ptr = rcu_dereference_sched((tp)->funcs);
		if (probe_func_ptr) {
			do {
				probe_func = probe_func_ptr->func;
				__data = probe_func_ptr->data;
				probe_func(__data, var_ref_vals, var_ref_idx);
			} while ((++probe_func_ptr)->func);
		}
	}
}

static void action_trace(struct hist_trigger_data *hist_data,
			struct tracing_map_elt *elt, void *rec,
			struct ring_buffer_event *rbe, void *key,
			struct action_data *data, u64 *var_ref_vals)
{
	struct synth_event *event = data->synth_event;

	trace_synth(event, var_ref_vals, data->var_ref_idx);
}

struct hist_var_data {
	struct list_head list;
	struct hist_trigger_data *hist_data;
};

static u64 hist_field_timestamp(struct hist_field *hist_field,
			struct tracing_map_elt *elt,
			struct ring_buffer_event *rbe,
			void *event)
{
	struct hist_trigger_data *hist_data = hist_field->hist_data;
	struct trace_array *tr = hist_data->event_file->tr;

	u64 ts = ring_buffer_event_time_stamp(rbe);

	if (hist_data->attrs->ts_in_usecs && trace_clock_in_ns(tr))
		ts = ns2usecs(ts);

	return ts;
}

static u64 hist_field_cpu(struct hist_field *hist_field,
			struct tracing_map_elt *elt,
			struct ring_buffer_event *rbe,
			void *event)
{
	int cpu = smp_processor_id();

	return cpu;
}

/**
 * check_field_for_var_ref - Check if a VAR_REF field references a variable
 * @hist_field: The VAR_REF field to check
 * @var_data: The hist trigger that owns the variable
 * @var_idx: The trigger variable identifier
 *
 * Check the given VAR_REF field to see whether or not it references
 * the given variable associated with the given trigger.
 *
 * Return: The VAR_REF field if it does reference the variable, NULL if not
 */
static struct hist_field *
check_field_for_var_ref(struct hist_field *hist_field,
			struct hist_trigger_data *var_data,
			unsigned int var_idx)
{
	WARN_ON(!(hist_field && hist_field->flags & HIST_FIELD_FL_VAR_REF));

	if (hist_field && hist_field->var.idx == var_idx &&
	    hist_field->var.hist_data == var_data)
		return hist_field;

	return NULL;
}

/**
 * find_var_ref - Check if a trigger has a reference to a trigger variable
 * @hist_data: The hist trigger that might have a reference to the variable
 * @var_data: The hist trigger that owns the variable
 * @var_idx: The trigger variable identifier
 *
 * Check the list of var_refs[] on the first hist trigger to see
 * whether any of them are references to the variable on the second
 * trigger.
 *
 * Return: The VAR_REF field referencing the variable if so, NULL if not
 */
static struct hist_field *find_var_ref(struct hist_trigger_data *hist_data,
			struct hist_trigger_data *var_data,
			unsigned int var_idx)
{
	struct hist_field *hist_field;
	unsigned int i;

	for (i = 0; i < hist_data->n_var_refs; i++) {
		hist_field = hist_data->var_refs[i];
		if (check_field_for_var_ref(hist_field, var_data, var_idx))
			return hist_field;
	}

	return NULL;
}

/**
 * find_any_var_ref - Check if there is a reference to a given trigger variable
 * @hist_data: The hist trigger
 * @var_idx: The trigger variable identifier
 *
 * Check to see whether the given variable is currently referenced by
 * any other trigger.
 *
 * The trigger the variable is defined on is explicitly excluded - the
 * assumption being that a self-reference doesn't prevent a trigger
 * from being removed.
 *
 * Return: The VAR_REF field referencing the variable if so, NULL if not
 */
static struct hist_field *find_any_var_ref(struct hist_trigger_data *hist_data,
			unsigned int var_idx)
{
	struct trace_array *tr = hist_data->event_file->tr;
	struct hist_field *found = NULL;
	struct hist_var_data *var_data;

	list_for_each_entry(var_data, &tr->hist_vars, list) {
		if (var_data->hist_data == hist_data)
			continue;
		found = find_var_ref(var_data->hist_data, hist_data, var_idx);
		if (found)
			break;
	}

	return found;
}

/**
 * check_var_refs - Check if there is a reference to any of trigger's variables
 * @hist_data: The hist trigger
 *
 * A trigger can define one or more variables. If any one of them is
 * currently referenced by any other trigger, this function will
 * determine that.
 *
 * Typically used to determine whether or not a trigger can be removed
 * - if there are any references to a trigger's variables, it cannot.
 *
 * Return: True if there is a reference to any of trigger's variables
 */
static bool check_var_refs(struct hist_trigger_data *hist_data)
{
	struct hist_field *field;
	bool found = false;
	int i;

	for_each_hist_field(i, hist_data) {
		field = hist_data->fields[i];
		if (field && field->flags & HIST_FIELD_FL_VAR) {
			if (find_any_var_ref(hist_data, field->var.idx)) {
				found = true;
				break;
			}
		}
	}

	return found;
}

static struct hist_var_data *find_hist_vars(struct hist_trigger_data *hist_data)
{
	struct trace_array *tr = hist_data->event_file->tr;
	struct hist_var_data *var_data, *found = NULL;

	list_for_each_entry(var_data, &tr->hist_vars, list) {
		if (var_data->hist_data == hist_data) {
			found = var_data;
			break;
		}
	}

	return found;
}

static bool field_has_hist_vars(struct hist_field *hist_field,
			unsigned int level)
{
	int i;

	if (level > 3)
		return false;

	if (!hist_field)
		return false;

	if (hist_field->flags & HIST_FIELD_FL_VAR ||
	    hist_field->flags & HIST_FIELD_FL_VAR_REF)
		return true;

	for (i = 0; i < HIST_FIELD_OPERANDS_MAX; i++) {
		struct hist_field *operand;

		operand = hist_field->operands[i];
		if (field_has_hist_vars(operand, level + 1))
			return true;
	}

	return false;
}

static bool has_hist_vars(struct hist_trigger_data *hist_data)
{
	struct hist_field *hist_field;
	int i;

	for_each_hist_field(i, hist_data) {
		hist_field = hist_data->fields[i];
		if (field_has_hist_vars(hist_field, 0))
			return true;
	}

	return false;
}

static int save_hist_vars(struct hist_trigger_data *hist_data)
{
	struct trace_array *tr = hist_data->event_file->tr;
	struct hist_var_data *var_data;

	var_data = find_hist_vars(hist_data);
	if (var_data)
		return 0;

	if (tracing_check_open_get_tr(tr))
		return -ENODEV;

	var_data = kzalloc(sizeof(*var_data), GFP_KERNEL);
	if (!var_data) {
		trace_array_put(tr);
		return -ENOMEM;
	}

	var_data->hist_data = hist_data;
	list_add(&var_data->list, &tr->hist_vars);

	return 0;
}

static void remove_hist_vars(struct hist_trigger_data *hist_data)
{
	struct trace_array *tr = hist_data->event_file->tr;
	struct hist_var_data *var_data;

	var_data = find_hist_vars(hist_data);
	if (!var_data)
		return;

	if (WARN_ON(check_var_refs(hist_data)))
		return;

	list_del(&var_data->list);

	kfree(var_data);

	trace_array_put(tr);
}

static struct hist_field *find_var_field(struct hist_trigger_data *hist_data,
			const char *var_name)
{
	struct hist_field *hist_field, *found = NULL;
	int i;

	for_each_hist_field(i, hist_data) {
		hist_field = hist_data->fields[i];
		if (hist_field && hist_field->flags & HIST_FIELD_FL_VAR &&
		    strcmp(hist_field->var.name, var_name) == 0) {
			found = hist_field;
			break;
		}
	}

	return found;
}

static struct hist_field *find_var(struct hist_trigger_data *hist_data,
			struct trace_event_file *file,
			const char *var_name)
{
	struct hist_trigger_data *test_data;
	struct event_trigger_data *test;
	struct hist_field *hist_field;

	lockdep_assert_held(&event_mutex);

	hist_field = find_var_field(hist_data, var_name);
	if (hist_field)
		return hist_field;

	list_for_each_entry(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			test_data = test->private_data;
			hist_field = find_var_field(test_data, var_name);
			if (hist_field)
				return hist_field;
		}
	}

	return NULL;
}

static struct trace_event_file *find_var_file(struct trace_array *tr,
			char *system,
			char *event_name,
			char *var_name)
{
	struct hist_trigger_data *var_hist_data;
	struct hist_var_data *var_data;
	struct trace_event_file *file, *found = NULL;

	if (system)
		return find_event_file(tr, system, event_name);

	list_for_each_entry(var_data, &tr->hist_vars, list) {
		var_hist_data = var_data->hist_data;
		file = var_hist_data->event_file;
		if (file == found)
			continue;

		if (find_var_field(var_hist_data, var_name)) {
			if (found) {
				hist_err(tr, HIST_ERR_VAR_NOT_UNIQUE, errpos(var_name));
				return NULL;
			}

			found = file;
		}
	}

	return found;
}

static struct hist_field *find_file_var(struct trace_event_file *file,
			const char *var_name)
{
	struct hist_trigger_data *test_data;
	struct event_trigger_data *test;
	struct hist_field *hist_field;

	lockdep_assert_held(&event_mutex);

	list_for_each_entry(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			test_data = test->private_data;
			hist_field = find_var_field(test_data, var_name);
			if (hist_field)
				return hist_field;
		}
	}

	return NULL;
}

static struct hist_field *
find_match_var(struct hist_trigger_data *hist_data, char *var_name)
{
	struct trace_array *tr = hist_data->event_file->tr;
	struct hist_field *hist_field, *found = NULL;
	struct trace_event_file *file;
	unsigned int i;

	for (i = 0; i < hist_data->n_actions; i++) {
		struct action_data *data = hist_data->actions[i];

		if (data->handler == HANDLER_ONMATCH) {
			char *system = data->match_data.event_system;
			char *event_name = data->match_data.event;

			file = find_var_file(tr, system, event_name, var_name);
			if (!file)
				continue;
			hist_field = find_file_var(file, var_name);
			if (hist_field) {
				if (found) {
					hist_err(tr, HIST_ERR_VAR_NOT_UNIQUE,
						 errpos(var_name));
					return ERR_PTR(-EINVAL);
				}

				found = hist_field;
			}
		}
	}
	return found;
}

static struct hist_field *find_event_var(struct hist_trigger_data *hist_data,
			char *system,
			char *event_name,
			char *var_name)
{
	struct trace_array *tr = hist_data->event_file->tr;
	struct hist_field *hist_field = NULL;
	struct trace_event_file *file;

	if (!system || !event_name) {
		hist_field = find_match_var(hist_data, var_name);
		if (IS_ERR(hist_field))
			return NULL;
		if (hist_field)
			return hist_field;
	}

	file = find_var_file(tr, system, event_name, var_name);
	if (!file)
		return NULL;

	hist_field = find_file_var(file, var_name);

	return hist_field;
}

static u64 hist_field_var_ref(struct hist_field *hist_field,
			struct tracing_map_elt *elt,
			struct ring_buffer_event *rbe,
			void *event)
{
	struct hist_elt_data *elt_data;
	u64 var_val = 0;

	if (WARN_ON_ONCE(!elt))
		return var_val;

	elt_data = elt->private_data;
	var_val = elt_data->var_ref_vals[hist_field->var_ref_idx];

	return var_val;
}

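/*
 * Fill in the current values of a trigger's variable references: walk
 * hist_data's var_refs[], look up each referenced variable in its owning
 * trigger's tracing_map using @key, and store the value in var_ref_vals[].
 * Returns false if any referenced variable is unset or can't be resolved.
 */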
static bool resolve_var_refs(struct hist_trigger_data *hist_data, void *key,
			u64 *var_ref_vals, bool self)
{
	struct hist_trigger_data *var_data;
	struct tracing_map_elt *var_elt;
	struct hist_field *hist_field;
	unsigned int i, var_idx;
	bool resolved = true;
	u64 var_val = 0;

	for (i = 0; i < hist_data->n_var_refs; i++) {
		hist_field = hist_data->var_refs[i];
		var_idx = hist_field->var.idx;
		var_data = hist_field->var.hist_data;

		if (var_data == NULL) {
			resolved = false;
			break;
		}

		if ((self && var_data != hist_data) ||
		    (!self && var_data == hist_data))
			continue;

		var_elt = tracing_map_lookup(var_data->map, key);
		if (!var_elt) {
			resolved = false;
			break;
		}

		if (!tracing_map_var_set(var_elt, var_idx)) {
			resolved = false;
			break;
		}

		if (self || !hist_field->read_once)
			var_val = tracing_map_read_var(var_elt, var_idx);
		else
			var_val = tracing_map_read_var_once(var_elt, var_idx);

		var_ref_vals[i] = var_val;
	}

	return resolved;
}

static const char *hist_field_name(struct hist_field *field,
			unsigned int level)
{
	const char *field_name = "";

	if (level > 1)
		return field_name;

	if (field->field)
		field_name = field->field->name;
	else if (field->flags & HIST_FIELD_FL_LOG2 ||
		 field->flags & HIST_FIELD_FL_ALIAS)
		field_name = hist_field_name(field->operands[0], ++level);
	else if (field->flags & HIST_FIELD_FL_CPU)
		field_name = "common_cpu";
	else if (field->flags & HIST_FIELD_FL_EXPR ||
		 field->flags & HIST_FIELD_FL_VAR_REF) {
		if (field->system) {
			static char full_name[MAX_FILTER_STR_VAL];

			strcat(full_name, field->system);
			strcat(full_name, ".");
			strcat(full_name, field->event_name);
			strcat(full_name, ".");
			strcat(full_name, field->name);
			field_name = full_name;
		} else
			field_name = field->name;
	} else if (field->flags & HIST_FIELD_FL_TIMESTAMP)
		field_name = "common_timestamp";

	if (field_name == NULL)
		field_name = "";

	return field_name;
}

static hist_field_fn_t select_value_fn(int field_size, int field_is_signed)
{
	hist_field_fn_t fn = NULL;

	switch (field_size) {
	case 8:
		if (field_is_signed)
			fn = hist_field_s64;
		else
			fn = hist_field_u64;
		break;
	case 4:
		if (field_is_signed)
			fn = hist_field_s32;
		else
			fn = hist_field_u32;
		break;
	case 2:
		if (field_is_signed)
			fn = hist_field_s16;
		else
			fn = hist_field_u16;
		break;
	case 1:
		if (field_is_signed)
			fn = hist_field_s8;
		else
			fn = hist_field_u8;
		break;
	}

	return fn;
}

static int parse_map_size(char *str)
{
	unsigned long size, map_bits;
	int ret;

	ret = kstrtoul(str, 0, &size);
	if (ret)
		goto out;

	map_bits = ilog2(roundup_pow_of_two(size));
	if (map_bits < TRACING_MAP_BITS_MIN ||
	    map_bits > TRACING_MAP_BITS_MAX)
		ret = -EINVAL;
	else
		ret = map_bits;
 out:
	return ret;
}

static void destroy_hist_trigger_attrs(struct hist_trigger_attrs *attrs)
{
	unsigned int i;

	if (!attrs)
		return;

	for (i = 0; i < attrs->n_assignments; i++)
		kfree(attrs->assignment_str[i]);

	for (i = 0; i < attrs->n_actions; i++)
		kfree(attrs->action_str[i]);

	kfree(attrs->name);
	kfree(attrs->sort_key_str);
	kfree(attrs->keys_str);
	kfree(attrs->vals_str);
	kfree(attrs->clock);
	kfree(attrs);
}

static int parse_action(char *str, struct hist_trigger_attrs *attrs)
{
	int ret = -EINVAL;

	if (attrs->n_actions >= HIST_ACTIONS_MAX)
		return ret;

	if ((str_has_prefix(str, "onmatch(")) ||
	    (str_has_prefix(str, "onmax(")) ||
	    (str_has_prefix(str, "onchange("))) {
		attrs->action_str[attrs->n_actions] = kstrdup(str, GFP_KERNEL);
		if (!attrs->action_str[attrs->n_actions]) {
			ret = -ENOMEM;
			return ret;
		}
		attrs->n_actions++;
		ret = 0;
	}
	return ret;
}

static int parse_assignment(struct trace_array *tr,
			char *str, struct hist_trigger_attrs *attrs)
{
	int len, ret = 0;

	if ((len = str_has_prefix(str, "key=")) ||
	    (len = str_has_prefix(str, "keys="))) {
		attrs->keys_str = kstrdup(str + len, GFP_KERNEL);
		if (!attrs->keys_str) {
			ret = -ENOMEM;
			goto out;
		}
	} else if ((len = str_has_prefix(str, "val=")) ||
		   (len = str_has_prefix(str, "vals=")) ||
		   (len = str_has_prefix(str, "values="))) {
		attrs->vals_str = kstrdup(str + len, GFP_KERNEL);
		if (!attrs->vals_str) {
			ret = -ENOMEM;
			goto out;
		}
	} else if ((len = str_has_prefix(str, "sort="))) {
		attrs->sort_key_str = kstrdup(str + len, GFP_KERNEL);
		if (!attrs->sort_key_str) {
			ret = -ENOMEM;
			goto out;
		}
	} else if (str_has_prefix(str, "name=")) {
		attrs->name = kstrdup(str, GFP_KERNEL);
		if (!attrs->name) {
			ret = -ENOMEM;
			goto out;
		}
	} else if ((len = str_has_prefix(str, "clock="))) {
		str += len;

		str = strstrip(str);
		attrs->clock = kstrdup(str, GFP_KERNEL);
		if (!attrs->clock) {
			ret = -ENOMEM;
			goto out;
		}
	} else if ((len = str_has_prefix(str, "size="))) {
		int map_bits = parse_map_size(str + len);

		if (map_bits < 0) {
			ret = map_bits;
			goto out;
		}
		attrs->map_bits = map_bits;
	} else {
		char *assignment;

		if (attrs->n_assignments == TRACING_MAP_VARS_MAX) {
			hist_err(tr, HIST_ERR_TOO_MANY_VARS, errpos(str));
			ret = -EINVAL;
			goto out;
		}

		assignment = kstrdup(str, GFP_KERNEL);
		if (!assignment) {
			ret = -ENOMEM;
			goto out;
		}

		attrs->assignment_str[attrs->n_assignments++] = assignment;
	}
 out:
	return ret;
}

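/*
 * Parse the colon-separated attribute list of a hist trigger command.
 * For example (illustrative), a command such as
 *
 *   hist:keys=pid:vals=hitcount:sort=hitcount:size=2048:name=foo
 *
 * is split on ':' and each piece is handled as an assignment, a keyword
 * (pause/cont/clear) or an action (onmatch/onmax/onchange).
 */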
static struct hist_trigger_attrs *
parse_hist_trigger_attrs(struct trace_array *tr, char *trigger_str)
{
	struct hist_trigger_attrs *attrs;
	int ret = 0;

	attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
	if (!attrs)
		return ERR_PTR(-ENOMEM);

	while (trigger_str) {
		char *str = strsep(&trigger_str, ":");
		char *rhs;

		rhs = strchr(str, '=');
		if (rhs) {
			if (!strlen(++rhs)) {
				ret = -EINVAL;
				hist_err(tr, HIST_ERR_EMPTY_ASSIGNMENT, errpos(str));
				goto free;
			}
			ret = parse_assignment(tr, str, attrs);
			if (ret)
				goto free;
		} else if (strcmp(str, "pause") == 0)
			attrs->pause = true;
		else if ((strcmp(str, "cont") == 0) ||
			 (strcmp(str, "continue") == 0))
			attrs->cont = true;
		else if (strcmp(str, "clear") == 0)
			attrs->clear = true;
		else {
			ret = parse_action(str, attrs);
			if (ret)
				goto free;
		}
	}

	if (!attrs->keys_str) {
		ret = -EINVAL;
		goto free;
	}

	if (!attrs->clock) {
		attrs->clock = kstrdup("global", GFP_KERNEL);
		if (!attrs->clock) {
			ret = -ENOMEM;
			goto free;
		}
	}

	return attrs;
 free:
	destroy_hist_trigger_attrs(attrs);

	return ERR_PTR(ret);
}

static inline void save_comm(char *comm, struct task_struct *task)
{
	if (!task->pid) {
		strcpy(comm, "<idle>");
		return;
	}

	if (WARN_ON_ONCE(task->pid < 0)) {
		strcpy(comm, "<XXX>");
		return;
	}

	strncpy(comm, task->comm, TASK_COMM_LEN);
}

static void hist_elt_data_free(struct hist_elt_data *elt_data)
{
	unsigned int i;

	for (i = 0; i < SYNTH_FIELDS_MAX; i++)
		kfree(elt_data->field_var_str[i]);

	kfree(elt_data->comm);
	kfree(elt_data);
}

static void hist_trigger_elt_data_free(struct tracing_map_elt *elt)
{
	struct hist_elt_data *elt_data = elt->private_data;

	hist_elt_data_free(elt_data);
}

static int hist_trigger_elt_data_alloc(struct tracing_map_elt *elt)
{
	struct hist_trigger_data *hist_data = elt->map->private_data;
	unsigned int size = TASK_COMM_LEN;
	struct hist_elt_data *elt_data;
	struct hist_field *key_field;
	unsigned int i, n_str;

	elt_data = kzalloc(sizeof(*elt_data), GFP_KERNEL);
	if (!elt_data)
		return -ENOMEM;

	for_each_hist_key_field(i, hist_data) {
		key_field = hist_data->fields[i];

		if (key_field->flags & HIST_FIELD_FL_EXECNAME) {
			elt_data->comm = kzalloc(size, GFP_KERNEL);
			if (!elt_data->comm) {
				kfree(elt_data);
				return -ENOMEM;
			}
			break;
		}
	}

	n_str = hist_data->n_field_var_str + hist_data->n_save_var_str +
		hist_data->n_var_str;
	if (n_str > SYNTH_FIELDS_MAX) {
		hist_elt_data_free(elt_data);
		return -EINVAL;
	}

	BUILD_BUG_ON(STR_VAR_LEN_MAX & (sizeof(u64) - 1));

	size = STR_VAR_LEN_MAX;

	for (i = 0; i < n_str; i++) {
		elt_data->field_var_str[i] = kzalloc(size, GFP_KERNEL);
		if (!elt_data->field_var_str[i]) {
			hist_elt_data_free(elt_data);
			return -ENOMEM;
		}
	}

	elt->private_data = elt_data;

	return 0;
}

static void hist_trigger_elt_data_init(struct tracing_map_elt *elt)
{
	struct hist_elt_data *elt_data = elt->private_data;

	if (elt_data->comm)
		save_comm(elt_data->comm, current);
}

static const struct tracing_map_ops hist_trigger_elt_data_ops = {
	.elt_alloc = hist_trigger_elt_data_alloc,
	.elt_free = hist_trigger_elt_data_free,
	.elt_init = hist_trigger_elt_data_init,
};

static const char *get_hist_field_flags(struct hist_field *hist_field)
{
	const char *flags_str = NULL;

	if (hist_field->flags & HIST_FIELD_FL_HEX)
		flags_str = "hex";
	else if (hist_field->flags & HIST_FIELD_FL_SYM)
		flags_str = "sym";
	else if (hist_field->flags & HIST_FIELD_FL_SYM_OFFSET)
		flags_str = "sym-offset";
	else if (hist_field->flags & HIST_FIELD_FL_EXECNAME)
		flags_str = "execname";
	else if (hist_field->flags & HIST_FIELD_FL_SYSCALL)
		flags_str = "syscall";
	else if (hist_field->flags & HIST_FIELD_FL_LOG2)
		flags_str = "log2";
	else if (hist_field->flags & HIST_FIELD_FL_TIMESTAMP_USECS)
		flags_str = "usecs";

	return flags_str;
}

static void expr_field_str(struct hist_field *field, char *expr)
{
	if (field->flags & HIST_FIELD_FL_VAR_REF)
		strcat(expr, "$");

	strcat(expr, hist_field_name(field, 0));

	if (field->flags && !(field->flags & HIST_FIELD_FL_VAR_REF)) {
		const char *flags_str = get_hist_field_flags(field);

		if (flags_str) {
			strcat(expr, ".");
			strcat(expr, flags_str);
		}
	}
}

static char *expr_str(struct hist_field *field, unsigned int level)
{
	char *expr;

	if (level > 1)
		return NULL;

	expr = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL);
	if (!expr)
		return NULL;

	if (!field->operands[0]) {
		expr_field_str(field, expr);
		return expr;
	}

	if (field->operator == FIELD_OP_UNARY_MINUS) {
		char *subexpr;

		strcat(expr, "-(");
		subexpr = expr_str(field->operands[0], ++level);
		if (!subexpr) {
			kfree(expr);
			return NULL;
		}
		strcat(expr, subexpr);
		strcat(expr, ")");

		kfree(subexpr);

		return expr;
	}

	expr_field_str(field->operands[0], expr);

	switch (field->operator) {
	case FIELD_OP_MINUS:
		strcat(expr, "-");
		break;
	case FIELD_OP_PLUS:
		strcat(expr, "+");
		break;
	default:
		kfree(expr);
		return NULL;
	}

	expr_field_str(field->operands[1], expr);

	return expr;
}

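/*
 * Scan an expression string for an arithmetic operator.  Returns the
 * field_op_id of the first '+' or '-' found (a leading '-' is treated as
 * unary minus), or FIELD_OP_NONE, taking care not to mistake the
 * ".sym-offset" modifier for subtraction.
 */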
static int contains_operator(char *str)
{
	enum field_op_id field_op = FIELD_OP_NONE;
	char *op;

	op = strpbrk(str, "+-");
	if (!op)
		return FIELD_OP_NONE;

	switch (*op) {
	case '-':
		/*
		 * Unfortunately, the modifier ".sym-offset"
		 * can confuse things.
		 */
		if (op - str >= 4 && !strncmp(op - 4, ".sym-offset", 11))
			return FIELD_OP_NONE;

		if (*str == '-')
			field_op = FIELD_OP_UNARY_MINUS;
		else
			field_op = FIELD_OP_MINUS;
		break;
	case '+':
		field_op = FIELD_OP_PLUS;
		break;
	default:
		break;
	}

	return field_op;
}

static void get_hist_field(struct hist_field *hist_field)
{
	hist_field->ref++;
}

static void __destroy_hist_field(struct hist_field *hist_field)
{
	if (--hist_field->ref > 1)
		return;

	kfree(hist_field->var.name);
	kfree(hist_field->name);
	kfree(hist_field->type);

	kfree(hist_field->system);
	kfree(hist_field->event_name);

	kfree(hist_field);
}

static void destroy_hist_field(struct hist_field *hist_field,
			unsigned int level)
{
	unsigned int i;

	if (level > 3)
		return;

	if (!hist_field)
		return;

	if (hist_field->flags & HIST_FIELD_FL_VAR_REF)
		return; /* var refs will be destroyed separately */

	for (i = 0; i < HIST_FIELD_OPERANDS_MAX; i++)
		destroy_hist_field(hist_field->operands[i], level + 1);

	__destroy_hist_field(hist_field);
}

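/*
 * Allocate and initialize a hist_field, wiring up its ->fn handler, size
 * and type string according to @flags (and @field for trace event fields).
 * EXPR and ALIAS fields are returned mostly empty for the caller to
 * populate.
 */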
static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data,
			struct ftrace_event_field *field,
			unsigned long flags,
			char *var_name)
{
	struct hist_field *hist_field;

	if (field && is_function_field(field))
		return NULL;

	hist_field = kzalloc(sizeof(struct hist_field), GFP_KERNEL);
	if (!hist_field)
		return NULL;

	hist_field->ref = 1;

	hist_field->hist_data = hist_data;

	if (flags & HIST_FIELD_FL_EXPR || flags & HIST_FIELD_FL_ALIAS)
		goto out; /* caller will populate */

	if (flags & HIST_FIELD_FL_VAR_REF) {
		hist_field->fn = hist_field_var_ref;
		goto out;
	}

	if (flags & HIST_FIELD_FL_HITCOUNT) {
		hist_field->fn = hist_field_counter;
		hist_field->size = sizeof(u64);
		hist_field->type = kstrdup("u64", GFP_KERNEL);
		if (!hist_field->type)
			goto free;
		goto out;
	}

	if (flags & HIST_FIELD_FL_STACKTRACE) {
		hist_field->fn = hist_field_none;
		goto out;
	}

	if (flags & HIST_FIELD_FL_LOG2) {
		unsigned long fl = flags & ~HIST_FIELD_FL_LOG2;
		hist_field->fn = hist_field_log2;
		hist_field->operands[0] = create_hist_field(hist_data, field, fl, NULL);
		hist_field->size = hist_field->operands[0]->size;
		hist_field->type = kstrdup(hist_field->operands[0]->type, GFP_KERNEL);
		if (!hist_field->type)
			goto free;
		goto out;
	}

	if (flags & HIST_FIELD_FL_TIMESTAMP) {
		hist_field->fn = hist_field_timestamp;
		hist_field->size = sizeof(u64);
		hist_field->type = kstrdup("u64", GFP_KERNEL);
		if (!hist_field->type)
			goto free;
		goto out;
	}

	if (flags & HIST_FIELD_FL_CPU) {
		hist_field->fn = hist_field_cpu;
		hist_field->size = sizeof(int);
		hist_field->type = kstrdup("unsigned int", GFP_KERNEL);
		if (!hist_field->type)
			goto free;
		goto out;
	}

	if (WARN_ON_ONCE(!field))
		goto out;

	/* Pointers to strings are just pointers and dangerous to dereference */
	if (is_string_field(field) &&
	    (field->filter_type != FILTER_PTR_STRING)) {
		flags |= HIST_FIELD_FL_STRING;

		hist_field->size = MAX_FILTER_STR_VAL;
		hist_field->type = kstrdup(field->type, GFP_KERNEL);
		if (!hist_field->type)
			goto free;

		if (field->filter_type == FILTER_STATIC_STRING) {
			hist_field->fn = hist_field_string;
			hist_field->size = field->size;
		} else if (field->filter_type == FILTER_DYN_STRING)
			hist_field->fn = hist_field_dynstring;
		else
			hist_field->fn = hist_field_pstring;
	} else {
		hist_field->size = field->size;
		hist_field->is_signed = field->is_signed;
		hist_field->type = kstrdup(field->type, GFP_KERNEL);
		if (!hist_field->type)
			goto free;

		hist_field->fn = select_value_fn(field->size,
						 field->is_signed);
		if (!hist_field->fn) {
			destroy_hist_field(hist_field, 0);
			return NULL;
		}
	}
 out:
	hist_field->field = field;
	hist_field->flags = flags;

	if (var_name) {
		hist_field->var.name = kstrdup(var_name, GFP_KERNEL);
		if (!hist_field->var.name)
			goto free;
	}

	return hist_field;
 free:
	destroy_hist_field(hist_field, 0);
	return NULL;
}

static void destroy_hist_fields(struct hist_trigger_data *hist_data)
{
	unsigned int i;

	for (i = 0; i < HIST_FIELDS_MAX; i++) {
		if (hist_data->fields[i]) {
			destroy_hist_field(hist_data->fields[i], 0);
			hist_data->fields[i] = NULL;
		}
	}

	for (i = 0; i < hist_data->n_var_refs; i++) {
		WARN_ON(!(hist_data->var_refs[i]->flags & HIST_FIELD_FL_VAR_REF));
		__destroy_hist_field(hist_data->var_refs[i]);
		hist_data->var_refs[i] = NULL;
	}
}

static int init_var_ref(struct hist_field *ref_field,
			struct hist_field *var_field,
			char *system, char *event_name)
{
	int err = 0;

	ref_field->var.idx = var_field->var.idx;
	ref_field->var.hist_data = var_field->hist_data;
	ref_field->size = var_field->size;
	ref_field->is_signed = var_field->is_signed;
	ref_field->flags |= var_field->flags &
		(HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS);

	if (system) {
		ref_field->system = kstrdup(system, GFP_KERNEL);
		if (!ref_field->system)
			return -ENOMEM;
	}

	if (event_name) {
		ref_field->event_name = kstrdup(event_name, GFP_KERNEL);
		if (!ref_field->event_name) {
			err = -ENOMEM;
			goto free;
		}
	}

	if (var_field->var.name) {
		ref_field->name = kstrdup(var_field->var.name, GFP_KERNEL);
		if (!ref_field->name) {
			err = -ENOMEM;
			goto free;
		}
	} else if (var_field->name) {
		ref_field->name = kstrdup(var_field->name, GFP_KERNEL);
		if (!ref_field->name) {
			err = -ENOMEM;
			goto free;
		}
	}

	ref_field->type = kstrdup(var_field->type, GFP_KERNEL);
	if (!ref_field->type) {
		err = -ENOMEM;
		goto free;
	}
 out:
	return err;
 free:
	kfree(ref_field->system);
	ref_field->system = NULL;
	kfree(ref_field->event_name);
	ref_field->event_name = NULL;
	kfree(ref_field->name);
	ref_field->name = NULL;

	goto out;
}

static int find_var_ref_idx(struct hist_trigger_data *hist_data,
			struct hist_field *var_field)
{
	struct hist_field *ref_field;
	int i;

	for (i = 0; i < hist_data->n_var_refs; i++) {
		ref_field = hist_data->var_refs[i];
		if (ref_field->var.idx == var_field->var.idx &&
		    ref_field->var.hist_data == var_field->hist_data)
			return i;
	}

	return -ENOENT;
}

/**
 * create_var_ref - Create a variable reference and attach it to trigger
 * @hist_data: The trigger that will be referencing the variable
 * @var_field: The VAR field to create a reference to
 * @system: The optional system string
 * @event_name: The optional event_name string
 *
 * Given a variable hist_field, create a VAR_REF hist_field that
 * represents a reference to it.
 *
 * This function also adds the reference to the trigger that
 * now references the variable.
 *
 * Return: The VAR_REF field if successful, NULL if not
 */
static struct hist_field *create_var_ref(struct hist_trigger_data *hist_data,
			struct hist_field *var_field,
			char *system, char *event_name)
{
	unsigned long flags = HIST_FIELD_FL_VAR_REF;
	struct hist_field *ref_field;
	int i;

	/* Check if the variable already exists */
	for (i = 0; i < hist_data->n_var_refs; i++) {
		ref_field = hist_data->var_refs[i];
		if (ref_field->var.idx == var_field->var.idx &&
		    ref_field->var.hist_data == var_field->hist_data) {
			get_hist_field(ref_field);
			return ref_field;
		}
	}
	/* Sanity check to avoid out-of-bound write on 'hist_data->var_refs' */
	if (hist_data->n_var_refs >= TRACING_MAP_VARS_MAX)
		return NULL;
	ref_field = create_hist_field(var_field->hist_data, NULL, flags, NULL);
	if (ref_field) {
		if (init_var_ref(ref_field, var_field, system, event_name)) {
			destroy_hist_field(ref_field, 0);
			return NULL;
		}

		hist_data->var_refs[hist_data->n_var_refs] = ref_field;
		ref_field->var_ref_idx = hist_data->n_var_refs++;
	}

	return ref_field;
}

static bool is_var_ref(char *var_name)
{
	if (!var_name || strlen(var_name) < 2 || var_name[0] != '$')
		return false;

	return true;
}

static char *field_name_from_var(struct hist_trigger_data *hist_data,
			char *var_name)
{
	char *name, *field;
	unsigned int i;

	for (i = 0; i < hist_data->attrs->var_defs.n_vars; i++) {
		name = hist_data->attrs->var_defs.name[i];

		if (strcmp(var_name, name) == 0) {
			field = hist_data->attrs->var_defs.expr[i];
			if (contains_operator(field) || is_var_ref(field))
				continue;
			return field;
		}
	}

	return NULL;
}

static char *local_field_var_ref(struct hist_trigger_data *hist_data,
			char *system, char *event_name,
			char *var_name)
{
	struct trace_event_call *call;

	if (system && event_name) {
		call = hist_data->event_file->event_call;

		if (strcmp(system, call->class->system) != 0)
			return NULL;

		if (strcmp(event_name, trace_event_name(call)) != 0)
			return NULL;
	}

	if (!!system != !!event_name)
		return NULL;

	if (!is_var_ref(var_name))
		return NULL;

	var_name++;

	return field_name_from_var(hist_data, var_name);
}

static struct hist_field *parse_var_ref(struct hist_trigger_data *hist_data,
			char *system, char *event_name,
			char *var_name)
{
	struct hist_field *var_field = NULL, *ref_field = NULL;
	struct trace_array *tr = hist_data->event_file->tr;

	if (!is_var_ref(var_name))
		return NULL;

	var_name++;

	var_field = find_event_var(hist_data, system, event_name, var_name);
	if (var_field)
		ref_field = create_var_ref(hist_data, var_field,
					   system, event_name);

	if (!ref_field)
		hist_err(tr, HIST_ERR_VAR_NOT_FOUND, errpos(var_name));

	return ref_field;
}

1944 static struct ftrace_event_field *
1945 parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file,
1946 char *field_str, unsigned long *flags)
1947 {
1948 struct ftrace_event_field *field = NULL;
1949 char *field_name, *modifier, *str;
1950 struct trace_array *tr = file->tr;
1951
1952 modifier = str = kstrdup(field_str, GFP_KERNEL);
1953 if (!modifier)
1954 return ERR_PTR(-ENOMEM);
1955
1956 field_name = strsep(&modifier, ".");
1957 if (modifier) {
1958 if (strcmp(modifier, "hex") == 0)
1959 *flags |= HIST_FIELD_FL_HEX;
1960 else if (strcmp(modifier, "sym") == 0)
1961 *flags |= HIST_FIELD_FL_SYM;
1962 else if (strcmp(modifier, "sym-offset") == 0)
1963 *flags |= HIST_FIELD_FL_SYM_OFFSET;
1964 else if ((strcmp(modifier, "execname") == 0) &&
1965 (strcmp(field_name, "common_pid") == 0))
1966 *flags |= HIST_FIELD_FL_EXECNAME;
1967 else if (strcmp(modifier, "syscall") == 0)
1968 *flags |= HIST_FIELD_FL_SYSCALL;
1969 else if (strcmp(modifier, "log2") == 0)
1970 *flags |= HIST_FIELD_FL_LOG2;
1971 else if (strcmp(modifier, "usecs") == 0)
1972 *flags |= HIST_FIELD_FL_TIMESTAMP_USECS;
1973 else {
1974 hist_err(tr, HIST_ERR_BAD_FIELD_MODIFIER, errpos(modifier));
1975 field = ERR_PTR(-EINVAL);
1976 goto out;
1977 }
1978 }
1979
1980 if (strcmp(field_name, "common_timestamp") == 0) {
1981 *flags |= HIST_FIELD_FL_TIMESTAMP;
1982 hist_data->enable_timestamps = true;
1983 if (*flags & HIST_FIELD_FL_TIMESTAMP_USECS)
1984 hist_data->attrs->ts_in_usecs = true;
1985 } else if (strcmp(field_name, "common_cpu") == 0)
1986 *flags |= HIST_FIELD_FL_CPU;
1987 else {
1988 field = trace_find_event_field(file->event_call, field_name);
1989 if (!field || !field->size) {
1990 /*
1991 * For backward compatibility, if field_name
1992 * was "cpu", then we treat this the same as
1993 * common_cpu. This also works for "CPU".
1994 */
1995 if (field && field->filter_type == FILTER_CPU) {
1996 *flags |= HIST_FIELD_FL_CPU;
1997 } else {
1998 hist_err(tr, HIST_ERR_FIELD_NOT_FOUND,
1999 errpos(field_name));
2000 field = ERR_PTR(-EINVAL);
2001 goto out;
2002 }
2003 }
2004 }
2005 out:
2006 kfree(str);
2007
2008 return field;
2009 }
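/*
 * Examples of the modifiers accepted above, as they might appear in a
 * trigger definition:
 *
 *   keys=call_site.sym-offset        kernel symbol plus offset
 *   keys=common_pid.execname         pid annotated with the task comm
 *   keys=id.syscall                  syscall number shown by name
 *   keys=bytes_req.log2              value bucketed by power of two
 *   ts0=common_timestamp.usecs       timestamp converted to microseconds
 */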
2010
2011 static struct hist_field *create_alias(struct hist_trigger_data *hist_data,
2012 struct hist_field *var_ref,
2013 char *var_name)
2014 {
2015 struct hist_field *alias = NULL;
2016 unsigned long flags = HIST_FIELD_FL_ALIAS | HIST_FIELD_FL_VAR;
2017
2018 alias = create_hist_field(hist_data, NULL, flags, var_name);
2019 if (!alias)
2020 return NULL;
2021
2022 alias->fn = var_ref->fn;
2023 alias->operands[0] = var_ref;
2024
2025 if (init_var_ref(alias, var_ref, var_ref->system, var_ref->event_name)) {
2026 destroy_hist_field(alias, 0);
2027 return NULL;
2028 }
2029
2030 alias->var_ref_idx = var_ref->var_ref_idx;
2031
2032 return alias;
2033 }
2034
2035 static struct hist_field *parse_atom(struct hist_trigger_data *hist_data,
2036 struct trace_event_file *file, char *str,
2037 unsigned long *flags, char *var_name)
2038 {
2039 char *s, *ref_system = NULL, *ref_event = NULL, *ref_var = str;
2040 struct ftrace_event_field *field = NULL;
2041 struct hist_field *hist_field = NULL;
2042 int ret = 0;
2043
2044 s = strchr(str, '.');
2045 if (s) {
2046 s = strchr(++s, '.');
2047 if (s) {
2048 ref_system = strsep(&str, ".");
2049 if (!str) {
2050 ret = -EINVAL;
2051 goto out;
2052 }
2053 ref_event = strsep(&str, ".");
2054 if (!str) {
2055 ret = -EINVAL;
2056 goto out;
2057 }
2058 ref_var = str;
2059 }
2060 }
2061
2062 s = local_field_var_ref(hist_data, ref_system, ref_event, ref_var);
2063 if (!s) {
2064 hist_field = parse_var_ref(hist_data, ref_system,
2065 ref_event, ref_var);
2066 if (hist_field) {
2067 if (var_name) {
2068 hist_field = create_alias(hist_data, hist_field, var_name);
2069 if (!hist_field) {
2070 ret = -ENOMEM;
2071 goto out;
2072 }
2073 }
2074 return hist_field;
2075 }
2076 } else
2077 str = s;
2078
2079 field = parse_field(hist_data, file, str, flags);
2080 if (IS_ERR(field)) {
2081 ret = PTR_ERR(field);
2082 goto out;
2083 }
2084
2085 hist_field = create_hist_field(hist_data, field, *flags, var_name);
2086 if (!hist_field) {
2087 ret = -ENOMEM;
2088 goto out;
2089 }
2090
2091 return hist_field;
2092 out:
2093 return ERR_PTR(ret);
2094 }
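/*
 * An "atom" here is anything without an operator, e.g.:
 *
 *   prev_prio                        a field on this event
 *   $wakeup_lat                      a reference to a variable
 *   sched.sched_switch.$ts0          a fully qualified variable reference
 *
 * Local '$var' names that merely alias a plain field (no operators, no
 * nested references) are folded back into a direct field lookup via
 * local_field_var_ref() above.
 */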
2095
2096 static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
2097 struct trace_event_file *file,
2098 char *str, unsigned long flags,
2099 char *var_name, unsigned int level);
2100
2101 static struct hist_field *parse_unary(struct hist_trigger_data *hist_data,
2102 struct trace_event_file *file,
2103 char *str, unsigned long flags,
2104 char *var_name, unsigned int level)
2105 {
2106 struct hist_field *operand1, *expr = NULL;
2107 unsigned long operand_flags;
2108 int ret = 0;
2109 char *s;
2110
2111 /* we support only -(xxx) i.e. explicit parens required */
2112
2113 if (level > 3) {
2114 hist_err(file->tr, HIST_ERR_TOO_MANY_SUBEXPR, errpos(str));
2115 ret = -EINVAL;
2116 goto free;
2117 }
2118
2119 str++; /* skip leading '-' */
2120
2121 s = strchr(str, '(');
2122 if (s)
2123 str++;
2124 else {
2125 ret = -EINVAL;
2126 goto free;
2127 }
2128
2129 s = strrchr(str, ')');
2130 if (s)
2131 *s = '\0';
2132 else {
2133 ret = -EINVAL; /* no closing ')' */
2134 goto free;
2135 }
2136
2137 flags |= HIST_FIELD_FL_EXPR;
2138 expr = create_hist_field(hist_data, NULL, flags, var_name);
2139 if (!expr) {
2140 ret = -ENOMEM;
2141 goto free;
2142 }
2143
2144 operand_flags = 0;
2145 operand1 = parse_expr(hist_data, file, str, operand_flags, NULL, ++level);
2146 if (IS_ERR(operand1)) {
2147 ret = PTR_ERR(operand1);
2148 goto free;
2149 }
2150 if (operand1->flags & HIST_FIELD_FL_STRING) {
2151 /* String type can not be the operand of unary operator. */
2152 hist_err(file->tr, HIST_ERR_INVALID_STR_OPERAND, errpos(str));
2153 destroy_hist_field(operand1, 0);
2154 ret = -EINVAL;
2155 goto free;
2156 }
2157
2158 expr->flags |= operand1->flags &
2159 (HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS);
2160 expr->fn = hist_field_unary_minus;
2161 expr->operands[0] = operand1;
2162 expr->size = operand1->size;
2163 expr->is_signed = operand1->is_signed;
2164 expr->operator = FIELD_OP_UNARY_MINUS;
2165 expr->name = expr_str(expr, 0);
2166 expr->type = kstrdup(operand1->type, GFP_KERNEL);
2167 if (!expr->type) {
2168 ret = -ENOMEM;
2169 goto free;
2170 }
2171
2172 return expr;
2173 free:
2174 destroy_hist_field(expr, 0);
2175 return ERR_PTR(ret);
2176 }
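/*
 * E.g. a definition like 'neg_delta=-($delta)' lands here; the operand
 * must be parenthesized, so a bare '-$delta' is rejected with -EINVAL.
 */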
2177
2178 static int check_expr_operands(struct trace_array *tr,
2179 struct hist_field *operand1,
2180 struct hist_field *operand2)
2181 {
2182 unsigned long operand1_flags = operand1->flags;
2183 unsigned long operand2_flags = operand2->flags;
2184
2185 if ((operand1_flags & HIST_FIELD_FL_VAR_REF) ||
2186 (operand1_flags & HIST_FIELD_FL_ALIAS)) {
2187 struct hist_field *var;
2188
2189 var = find_var_field(operand1->var.hist_data, operand1->name);
2190 if (!var)
2191 return -EINVAL;
2192 operand1_flags = var->flags;
2193 }
2194
2195 if ((operand2_flags & HIST_FIELD_FL_VAR_REF) ||
2196 (operand2_flags & HIST_FIELD_FL_ALIAS)) {
2197 struct hist_field *var;
2198
2199 var = find_var_field(operand2->var.hist_data, operand2->name);
2200 if (!var)
2201 return -EINVAL;
2202 operand2_flags = var->flags;
2203 }
2204
2205 if ((operand1_flags & HIST_FIELD_FL_TIMESTAMP_USECS) !=
2206 (operand2_flags & HIST_FIELD_FL_TIMESTAMP_USECS)) {
2207 hist_err(tr, HIST_ERR_TIMESTAMP_MISMATCH, 0);
2208 return -EINVAL;
2209 }
2210
2211 return 0;
2212 }
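/*
 * For example, 'wakeup_lat=common_timestamp.usecs-$ts0' only passes this
 * check if 'ts0' was itself saved with the .usecs modifier; mixing a
 * microsecond operand with a nanosecond one trips TIMESTAMP_MISMATCH.
 */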
2213
2214 static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
2215 struct trace_event_file *file,
2216 char *str, unsigned long flags,
2217 char *var_name, unsigned int level)
2218 {
2219 struct hist_field *operand1 = NULL, *operand2 = NULL, *expr = NULL;
2220 unsigned long operand_flags;
2221 int field_op, ret = -EINVAL;
2222 char *sep, *operand1_str;
2223
2224 if (level > 3) {
2225 hist_err(file->tr, HIST_ERR_TOO_MANY_SUBEXPR, errpos(str));
2226 return ERR_PTR(-EINVAL);
2227 }
2228
2229 field_op = contains_operator(str);
2230
2231 if (field_op == FIELD_OP_NONE)
2232 return parse_atom(hist_data, file, str, &flags, var_name);
2233
2234 if (field_op == FIELD_OP_UNARY_MINUS)
2235 return parse_unary(hist_data, file, str, flags, var_name, ++level);
2236
2237 switch (field_op) {
2238 case FIELD_OP_MINUS:
2239 sep = "-";
2240 break;
2241 case FIELD_OP_PLUS:
2242 sep = "+";
2243 break;
2244 default:
2245 goto free;
2246 }
2247
2248 operand1_str = strsep(&str, sep);
2249 if (!operand1_str || !str)
2250 goto free;
2251
2252 operand_flags = 0;
2253 operand1 = parse_atom(hist_data, file, operand1_str,
2254 &operand_flags, NULL);
2255 if (IS_ERR(operand1)) {
2256 ret = PTR_ERR(operand1);
2257 operand1 = NULL;
2258 goto free;
2259 }
2260 if (operand1->flags & HIST_FIELD_FL_STRING) {
2261 hist_err(file->tr, HIST_ERR_INVALID_STR_OPERAND, errpos(operand1_str));
2262 ret = -EINVAL;
2263 goto free;
2264 }
2265
2266 /* rest of string could be another expression e.g. b+c in a+b+c */
2267 operand_flags = 0;
2268 operand2 = parse_expr(hist_data, file, str, operand_flags, NULL, ++level);
2269 if (IS_ERR(operand2)) {
2270 ret = PTR_ERR(operand2);
2271 operand2 = NULL;
2272 goto free;
2273 }
2274 if (operand2->flags & HIST_FIELD_FL_STRING) {
2275 hist_err(file->tr, HIST_ERR_INVALID_STR_OPERAND, errpos(str));
2276 ret = -EINVAL;
2277 goto free;
2278 }
2279
2280 ret = check_expr_operands(file->tr, operand1, operand2);
2281 if (ret)
2282 goto free;
2283
2284 flags |= HIST_FIELD_FL_EXPR;
2285
2286 flags |= operand1->flags &
2287 (HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS);
2288
2289 expr = create_hist_field(hist_data, NULL, flags, var_name);
2290 if (!expr) {
2291 ret = -ENOMEM;
2292 goto free;
2293 }
2294
2295 operand1->read_once = true;
2296 operand2->read_once = true;
2297
2298 expr->operands[0] = operand1;
2299 expr->operands[1] = operand2;
2300
2301 /* The operand sizes should be the same, so just pick one */
2302 expr->size = operand1->size;
2303 expr->is_signed = operand1->is_signed;
2304
2305 expr->operator = field_op;
2306 expr->name = expr_str(expr, 0);
2307 expr->type = kstrdup(operand1->type, GFP_KERNEL);
2308 if (!expr->type) {
2309 ret = -ENOMEM;
2310 goto free;
2311 }
2312
2313 switch (field_op) {
2314 case FIELD_OP_MINUS:
2315 expr->fn = hist_field_minus;
2316 break;
2317 case FIELD_OP_PLUS:
2318 expr->fn = hist_field_plus;
2319 break;
2320 default:
2321 ret = -EINVAL;
2322 goto free;
2323 }
2324
2325 return expr;
2326 free:
2327 destroy_hist_field(operand1, 0);
2328 destroy_hist_field(operand2, 0);
2329 destroy_hist_field(expr, 0);
2330
2331 return ERR_PTR(ret);
2332 }
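/*
 * Expressions handled above are simple sums and differences with at most
 * three levels of nesting, e.g.:
 *
 *   wakeup_lat=common_timestamp.usecs-$ts0
 *   total=$lat1+$lat2
 *
 * String-typed operands are rejected in either operand position.
 */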
2333
2334 static char *find_trigger_filter(struct hist_trigger_data *hist_data,
2335 struct trace_event_file *file)
2336 {
2337 struct event_trigger_data *test;
2338
2339 lockdep_assert_held(&event_mutex);
2340
2341 list_for_each_entry(test, &file->triggers, list) {
2342 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
2343 if (test->private_data == hist_data)
2344 return test->filter_str;
2345 }
2346 }
2347
2348 return NULL;
2349 }
2350
2351 static struct event_command trigger_hist_cmd;
2352 static int event_hist_trigger_func(struct event_command *cmd_ops,
2353 struct trace_event_file *file,
2354 char *glob, char *cmd, char *param);
2355
2356 static bool compatible_keys(struct hist_trigger_data *target_hist_data,
2357 struct hist_trigger_data *hist_data,
2358 unsigned int n_keys)
2359 {
2360 struct hist_field *target_hist_field, *hist_field;
2361 unsigned int n, i, j;
2362
2363 if (hist_data->n_fields - hist_data->n_vals != n_keys)
2364 return false;
2365
2366 i = hist_data->n_vals;
2367 j = target_hist_data->n_vals;
2368
2369 for (n = 0; n < n_keys; n++) {
2370 hist_field = hist_data->fields[i + n];
2371 target_hist_field = target_hist_data->fields[j + n];
2372
2373 if (strcmp(hist_field->type, target_hist_field->type) != 0)
2374 return false;
2375 if (hist_field->size != target_hist_field->size)
2376 return false;
2377 if (hist_field->is_signed != target_hist_field->is_signed)
2378 return false;
2379 }
2380
2381 return true;
2382 }
2383
2384 static struct hist_trigger_data *
2385 find_compatible_hist(struct hist_trigger_data *target_hist_data,
2386 struct trace_event_file *file)
2387 {
2388 struct hist_trigger_data *hist_data;
2389 struct event_trigger_data *test;
2390 unsigned int n_keys;
2391
2392 lockdep_assert_held(&event_mutex);
2393
2394 n_keys = target_hist_data->n_fields - target_hist_data->n_vals;
2395
2396 list_for_each_entry(test, &file->triggers, list) {
2397 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
2398 hist_data = test->private_data;
2399
2400 if (compatible_keys(target_hist_data, hist_data, n_keys))
2401 return hist_data;
2402 }
2403 }
2404
2405 return NULL;
2406 }
2407
2408 static struct trace_event_file *event_file(struct trace_array *tr,
2409 char *system, char *event_name)
2410 {
2411 struct trace_event_file *file;
2412
2413 file = __find_event_file(tr, system, event_name);
2414 if (!file)
2415 return ERR_PTR(-EINVAL);
2416
2417 return file;
2418 }
2419
2420 static struct hist_field *
2421 find_synthetic_field_var(struct hist_trigger_data *target_hist_data,
2422 char *system, char *event_name, char *field_name)
2423 {
2424 struct hist_field *event_var;
2425 char *synthetic_name;
2426
2427 synthetic_name = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL);
2428 if (!synthetic_name)
2429 return ERR_PTR(-ENOMEM);
2430
2431 strcpy(synthetic_name, "synthetic_");
2432 strcat(synthetic_name, field_name);
2433
2434 event_var = find_event_var(target_hist_data, system, event_name, synthetic_name);
2435
2436 kfree(synthetic_name);
2437
2438 return event_var;
2439 }
2440
2441 /**
2442 * create_field_var_hist - Automatically create a histogram and var for a field
2443 * @target_hist_data: The target hist trigger
2444 * @subsys_name: Optional subsystem name
2445 * @event_name: Optional event name
2446 * @field_name: The name of the field (and the resulting variable)
2447 *
2448 * Hist trigger actions fetch data from variables, not directly from
2449 * events. However, for convenience, users are allowed to directly
2450 * specify an event field in an action, which will be automatically
2451 * converted into a variable on their behalf.
2452  *
2453  * If a user specifies a field on an event other than the one the
2454  * histogram is currently being defined on (the target event
2455  * histogram), the only way that can be accomplished is if a new
2456  * hist trigger is created and the field variable defined on that.
2457 *
2458 * This function creates a new histogram compatible with the target
2459 * event (meaning a histogram with the same key as the target
2460 * histogram), and creates a variable for the specified field, but
2461 * with 'synthetic_' prepended to the variable name in order to avoid
2462 * collision with normal field variables.
2463 *
2464 * Return: The variable created for the field.
2465 */
2466 static struct hist_field *
2467 create_field_var_hist(struct hist_trigger_data *target_hist_data,
2468 char *subsys_name, char *event_name, char *field_name)
2469 {
2470 struct trace_array *tr = target_hist_data->event_file->tr;
2471 struct hist_field *event_var = ERR_PTR(-EINVAL);
2472 struct hist_trigger_data *hist_data;
2473 unsigned int i, n, first = true;
2474 struct field_var_hist *var_hist;
2475 struct trace_event_file *file;
2476 struct hist_field *key_field;
2477 char *saved_filter;
2478 char *cmd;
2479 int ret;
2480
2481 if (target_hist_data->n_field_var_hists >= SYNTH_FIELDS_MAX) {
2482 hist_err(tr, HIST_ERR_TOO_MANY_FIELD_VARS, errpos(field_name));
2483 return ERR_PTR(-EINVAL);
2484 }
2485
2486 file = event_file(tr, subsys_name, event_name);
2487
2488 if (IS_ERR(file)) {
2489 hist_err(tr, HIST_ERR_EVENT_FILE_NOT_FOUND, errpos(field_name));
2490 ret = PTR_ERR(file);
2491 return ERR_PTR(ret);
2492 }
2493
2494 /*
2495 * Look for a histogram compatible with target. We'll use the
2496 * found histogram specification to create a new matching
2497 * histogram with our variable on it. target_hist_data is not
2498 * yet a registered histogram so we can't use that.
2499 */
2500 hist_data = find_compatible_hist(target_hist_data, file);
2501 if (!hist_data) {
2502 hist_err(tr, HIST_ERR_HIST_NOT_FOUND, errpos(field_name));
2503 return ERR_PTR(-EINVAL);
2504 }
2505
2506 /* See if a synthetic field variable has already been created */
2507 event_var = find_synthetic_field_var(target_hist_data, subsys_name,
2508 event_name, field_name);
2509 if (!IS_ERR_OR_NULL(event_var))
2510 return event_var;
2511
2512 var_hist = kzalloc(sizeof(*var_hist), GFP_KERNEL);
2513 if (!var_hist)
2514 return ERR_PTR(-ENOMEM);
2515
2516 cmd = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL);
2517 if (!cmd) {
2518 kfree(var_hist);
2519 return ERR_PTR(-ENOMEM);
2520 }
2521
2522 /* Use the same keys as the compatible histogram */
2523 strcat(cmd, "keys=");
2524
2525 for_each_hist_key_field(i, hist_data) {
2526 key_field = hist_data->fields[i];
2527 if (!first)
2528 strcat(cmd, ",");
2529 strcat(cmd, key_field->field->name);
2530 first = false;
2531 }
2532
2533 /* Create the synthetic field variable specification */
2534 strcat(cmd, ":synthetic_");
2535 strcat(cmd, field_name);
2536 strcat(cmd, "=");
2537 strcat(cmd, field_name);
2538
2539 /* Use the same filter as the compatible histogram */
2540 saved_filter = find_trigger_filter(hist_data, file);
2541 if (saved_filter) {
2542 strcat(cmd, " if ");
2543 strcat(cmd, saved_filter);
2544 }
2545
2546 var_hist->cmd = kstrdup(cmd, GFP_KERNEL);
2547 if (!var_hist->cmd) {
2548 kfree(cmd);
2549 kfree(var_hist);
2550 return ERR_PTR(-ENOMEM);
2551 }
2552
2553 /* Save the compatible histogram information */
2554 var_hist->hist_data = hist_data;
2555
2556 /* Create the new histogram with our variable */
2557 ret = event_hist_trigger_func(&trigger_hist_cmd, file,
2558 "", "hist", cmd);
2559 if (ret) {
2560 kfree(cmd);
2561 kfree(var_hist->cmd);
2562 kfree(var_hist);
2563 hist_err(tr, HIST_ERR_HIST_CREATE_FAIL, errpos(field_name));
2564 return ERR_PTR(ret);
2565 }
2566
2567 kfree(cmd);
2568
2569 /* If we can't find the variable, something went wrong */
2570 event_var = find_synthetic_field_var(target_hist_data, subsys_name,
2571 event_name, field_name);
2572 if (IS_ERR_OR_NULL(event_var)) {
2573 kfree(var_hist->cmd);
2574 kfree(var_hist);
2575 hist_err(tr, HIST_ERR_SYNTH_VAR_NOT_FOUND, errpos(field_name));
2576 return ERR_PTR(-EINVAL);
2577 }
2578
2579 n = target_hist_data->n_field_var_hists;
2580 target_hist_data->field_var_hists[n] = var_hist;
2581 target_hist_data->n_field_var_hists++;
2582
2583 return event_var;
2584 }
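/*
 * As a rough sketch, requesting field 'prev_prio' of sched.sched_switch
 * from a histogram keyed on 'next_pid' builds and registers a command
 * string along the lines of
 *
 *   keys=next_pid:synthetic_prev_prio=prev_prio [if <existing filter>]
 *
 * on the sched_switch event, and then looks up the resulting
 * 'synthetic_prev_prio' variable.
 */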
2585
2586 static struct hist_field *
2587 find_target_event_var(struct hist_trigger_data *hist_data,
2588 char *subsys_name, char *event_name, char *var_name)
2589 {
2590 struct trace_event_file *file = hist_data->event_file;
2591 struct hist_field *hist_field = NULL;
2592
2593 if (subsys_name) {
2594 struct trace_event_call *call;
2595
2596 if (!event_name)
2597 return NULL;
2598
2599 call = file->event_call;
2600
2601 if (strcmp(subsys_name, call->class->system) != 0)
2602 return NULL;
2603
2604 if (strcmp(event_name, trace_event_name(call)) != 0)
2605 return NULL;
2606 }
2607
2608 hist_field = find_var_field(hist_data, var_name);
2609
2610 return hist_field;
2611 }
2612
2613 static inline void __update_field_vars(struct tracing_map_elt *elt,
2614 struct ring_buffer_event *rbe,
2615 void *rec,
2616 struct field_var **field_vars,
2617 unsigned int n_field_vars,
2618 unsigned int field_var_str_start)
2619 {
2620 struct hist_elt_data *elt_data = elt->private_data;
2621 unsigned int i, j, var_idx;
2622 u64 var_val;
2623
2624 for (i = 0, j = field_var_str_start; i < n_field_vars; i++) {
2625 struct field_var *field_var = field_vars[i];
2626 struct hist_field *var = field_var->var;
2627 struct hist_field *val = field_var->val;
2628
2629 var_val = val->fn(val, elt, rbe, rec);
2630 var_idx = var->var.idx;
2631
2632 if (val->flags & HIST_FIELD_FL_STRING) {
2633 char *str = elt_data->field_var_str[j++];
2634 char *val_str = (char *)(uintptr_t)var_val;
2635 unsigned int size;
2636
2637 size = min(val->size, STR_VAR_LEN_MAX);
2638 strscpy(str, val_str, size);
2639 var_val = (u64)(uintptr_t)str;
2640 }
2641 tracing_map_set_var(elt, var_idx, var_val);
2642 }
2643 }
2644
2645 static void update_field_vars(struct hist_trigger_data *hist_data,
2646 struct tracing_map_elt *elt,
2647 struct ring_buffer_event *rbe,
2648 void *rec)
2649 {
2650 __update_field_vars(elt, rbe, rec, hist_data->field_vars,
2651 hist_data->n_field_vars, 0);
2652 }
2653
2654 static void save_track_data_vars(struct hist_trigger_data *hist_data,
2655 struct tracing_map_elt *elt, void *rec,
2656 struct ring_buffer_event *rbe, void *key,
2657 struct action_data *data, u64 *var_ref_vals)
2658 {
2659 __update_field_vars(elt, rbe, rec, hist_data->save_vars,
2660 hist_data->n_save_vars, hist_data->n_field_var_str);
2661 }
2662
2663 static struct hist_field *create_var(struct hist_trigger_data *hist_data,
2664 struct trace_event_file *file,
2665 char *name, int size, const char *type)
2666 {
2667 struct hist_field *var;
2668 int idx;
2669
2670 if (find_var(hist_data, file, name) && !hist_data->remove) {
2671 var = ERR_PTR(-EINVAL);
2672 goto out;
2673 }
2674
2675 var = kzalloc(sizeof(struct hist_field), GFP_KERNEL);
2676 if (!var) {
2677 var = ERR_PTR(-ENOMEM);
2678 goto out;
2679 }
2680
2681 idx = tracing_map_add_var(hist_data->map);
2682 if (idx < 0) {
2683 kfree(var);
2684 var = ERR_PTR(-EINVAL);
2685 goto out;
2686 }
2687
2688 var->ref = 1;
2689 var->flags = HIST_FIELD_FL_VAR;
2690 var->var.idx = idx;
2691 var->var.hist_data = var->hist_data = hist_data;
2692 var->size = size;
2693 var->var.name = kstrdup(name, GFP_KERNEL);
2694 var->type = kstrdup(type, GFP_KERNEL);
2695 if (!var->var.name || !var->type) {
2696 kfree(var->var.name);
2697 kfree(var->type);
2698 kfree(var);
2699 var = ERR_PTR(-ENOMEM);
2700 }
2701 out:
2702 return var;
2703 }
2704
2705 static struct field_var *create_field_var(struct hist_trigger_data *hist_data,
2706 struct trace_event_file *file,
2707 char *field_name)
2708 {
2709 struct hist_field *val = NULL, *var = NULL;
2710 unsigned long flags = HIST_FIELD_FL_VAR;
2711 struct trace_array *tr = file->tr;
2712 struct field_var *field_var;
2713 int ret = 0;
2714
2715 if (hist_data->n_field_vars >= SYNTH_FIELDS_MAX) {
2716 hist_err(tr, HIST_ERR_TOO_MANY_FIELD_VARS, errpos(field_name));
2717 ret = -EINVAL;
2718 goto err;
2719 }
2720
2721 val = parse_atom(hist_data, file, field_name, &flags, NULL);
2722 if (IS_ERR(val)) {
2723 hist_err(tr, HIST_ERR_FIELD_VAR_PARSE_FAIL, errpos(field_name));
2724 ret = PTR_ERR(val);
2725 goto err;
2726 }
2727
2728 var = create_var(hist_data, file, field_name, val->size, val->type);
2729 if (IS_ERR(var)) {
2730 hist_err(tr, HIST_ERR_VAR_CREATE_FIND_FAIL, errpos(field_name));
2731 kfree(val);
2732 ret = PTR_ERR(var);
2733 goto err;
2734 }
2735
2736 field_var = kzalloc(sizeof(struct field_var), GFP_KERNEL);
2737 if (!field_var) {
2738 kfree(val);
2739 kfree(var);
2740 ret = -ENOMEM;
2741 goto err;
2742 }
2743
2744 field_var->var = var;
2745 field_var->val = val;
2746 out:
2747 return field_var;
2748 err:
2749 field_var = ERR_PTR(ret);
2750 goto out;
2751 }
2752
2753 /**
2754 * create_target_field_var - Automatically create a variable for a field
2755 * @target_hist_data: The target hist trigger
2756 * @subsys_name: Optional subsystem name
2757 * @event_name: Optional event name
2758 * @var_name: The name of the field (and the resulting variable)
2759 *
2760 * Hist trigger actions fetch data from variables, not directly from
2761 * events. However, for convenience, users are allowed to directly
2762 * specify an event field in an action, which will be automatically
2763 * converted into a variable on their behalf.
2764  *
2765 * This function creates a field variable with the name var_name on
2766 * the hist trigger currently being defined on the target event. If
2767 * subsys_name and event_name are specified, this function simply
2768 * verifies that they do in fact match the target event subsystem and
2769 * event name.
2770 *
2771 * Return: The variable created for the field.
2772 */
2773 static struct field_var *
2774 create_target_field_var(struct hist_trigger_data *target_hist_data,
2775 char *subsys_name, char *event_name, char *var_name)
2776 {
2777 struct trace_event_file *file = target_hist_data->event_file;
2778
2779 if (subsys_name) {
2780 struct trace_event_call *call;
2781
2782 if (!event_name)
2783 return NULL;
2784
2785 call = file->event_call;
2786
2787 if (strcmp(subsys_name, call->class->system) != 0)
2788 return NULL;
2789
2790 if (strcmp(event_name, trace_event_name(call)) != 0)
2791 return NULL;
2792 }
2793
2794 return create_field_var(target_hist_data, file, var_name);
2795 }
2796
2797 static bool check_track_val_max(u64 track_val, u64 var_val)
2798 {
2799 if (var_val <= track_val)
2800 return false;
2801
2802 return true;
2803 }
2804
2805 static bool check_track_val_changed(u64 track_val, u64 var_val)
2806 {
2807 if (var_val == track_val)
2808 return false;
2809
2810 return true;
2811 }
2812
2813 static u64 get_track_val(struct hist_trigger_data *hist_data,
2814 struct tracing_map_elt *elt,
2815 struct action_data *data)
2816 {
2817 unsigned int track_var_idx = data->track_data.track_var->var.idx;
2818 u64 track_val;
2819
2820 track_val = tracing_map_read_var(elt, track_var_idx);
2821
2822 return track_val;
2823 }
2824
2825 static void save_track_val(struct hist_trigger_data *hist_data,
2826 struct tracing_map_elt *elt,
2827 struct action_data *data, u64 var_val)
2828 {
2829 unsigned int track_var_idx = data->track_data.track_var->var.idx;
2830
2831 tracing_map_set_var(elt, track_var_idx, var_val);
2832 }
2833
2834 static void save_track_data(struct hist_trigger_data *hist_data,
2835 struct tracing_map_elt *elt, void *rec,
2836 struct ring_buffer_event *rbe, void *key,
2837 struct action_data *data, u64 *var_ref_vals)
2838 {
2839 if (data->track_data.save_data)
2840 data->track_data.save_data(hist_data, elt, rec, rbe, key, data, var_ref_vals);
2841 }
2842
2843 static bool check_track_val(struct tracing_map_elt *elt,
2844 struct action_data *data,
2845 u64 var_val)
2846 {
2847 struct hist_trigger_data *hist_data;
2848 u64 track_val;
2849
2850 hist_data = data->track_data.track_var->hist_data;
2851 track_val = get_track_val(hist_data, elt, data);
2852
2853 return data->track_data.check_val(track_val, var_val);
2854 }
2855
2856 #ifdef CONFIG_TRACER_SNAPSHOT
2857 static bool cond_snapshot_update(struct trace_array *tr, void *cond_data)
2858 {
2859 /* called with tr->max_lock held */
2860 struct track_data *track_data = tr->cond_snapshot->cond_data;
2861 struct hist_elt_data *elt_data, *track_elt_data;
2862 struct snapshot_context *context = cond_data;
2863 struct action_data *action;
2864 u64 track_val;
2865
2866 if (!track_data)
2867 return false;
2868
2869 action = track_data->action_data;
2870
2871 track_val = get_track_val(track_data->hist_data, context->elt,
2872 track_data->action_data);
2873
2874 if (!action->track_data.check_val(track_data->track_val, track_val))
2875 return false;
2876
2877 track_data->track_val = track_val;
2878 memcpy(track_data->key, context->key, track_data->key_len);
2879
2880 elt_data = context->elt->private_data;
2881 track_elt_data = track_data->elt.private_data;
2882 if (elt_data->comm)
2883 strncpy(track_elt_data->comm, elt_data->comm, TASK_COMM_LEN);
2884
2885 track_data->updated = true;
2886
2887 return true;
2888 }
2889
2890 static void save_track_data_snapshot(struct hist_trigger_data *hist_data,
2891 struct tracing_map_elt *elt, void *rec,
2892 struct ring_buffer_event *rbe, void *key,
2893 struct action_data *data,
2894 u64 *var_ref_vals)
2895 {
2896 struct trace_event_file *file = hist_data->event_file;
2897 struct snapshot_context context;
2898
2899 context.elt = elt;
2900 context.key = key;
2901
2902 tracing_snapshot_cond(file->tr, &context);
2903 }
2904
2905 static void hist_trigger_print_key(struct seq_file *m,
2906 struct hist_trigger_data *hist_data,
2907 void *key,
2908 struct tracing_map_elt *elt);
2909
2910 static struct action_data *snapshot_action(struct hist_trigger_data *hist_data)
2911 {
2912 unsigned int i;
2913
2914 if (!hist_data->n_actions)
2915 return NULL;
2916
2917 for (i = 0; i < hist_data->n_actions; i++) {
2918 struct action_data *data = hist_data->actions[i];
2919
2920 if (data->action == ACTION_SNAPSHOT)
2921 return data;
2922 }
2923
2924 return NULL;
2925 }
2926
2927 static void track_data_snapshot_print(struct seq_file *m,
2928 struct hist_trigger_data *hist_data)
2929 {
2930 struct trace_event_file *file = hist_data->event_file;
2931 struct track_data *track_data;
2932 struct action_data *action;
2933
2934 track_data = tracing_cond_snapshot_data(file->tr);
2935 if (!track_data)
2936 return;
2937
2938 if (!track_data->updated)
2939 return;
2940
2941 action = snapshot_action(hist_data);
2942 if (!action)
2943 return;
2944
2945 seq_puts(m, "\nSnapshot taken (see tracing/snapshot). Details:\n");
2946 seq_printf(m, "\ttriggering value { %s(%s) }: %10llu",
2947 action->handler == HANDLER_ONMAX ? "onmax" : "onchange",
2948 action->track_data.var_str, track_data->track_val);
2949
2950 seq_puts(m, "\ttriggered by event with key: ");
2951 hist_trigger_print_key(m, hist_data, track_data->key, &track_data->elt);
2952 seq_putc(m, '\n');
2953 }
2954 #else
2955 static bool cond_snapshot_update(struct trace_array *tr, void *cond_data)
2956 {
2957 return false;
2958 }
2959 static void save_track_data_snapshot(struct hist_trigger_data *hist_data,
2960 struct tracing_map_elt *elt, void *rec,
2961 struct ring_buffer_event *rbe, void *key,
2962 struct action_data *data,
2963 u64 *var_ref_vals) {}
2964 static void track_data_snapshot_print(struct seq_file *m,
2965 struct hist_trigger_data *hist_data) {}
2966 #endif /* CONFIG_TRACER_SNAPSHOT */
2967
2968 static void track_data_print(struct seq_file *m,
2969 struct hist_trigger_data *hist_data,
2970 struct tracing_map_elt *elt,
2971 struct action_data *data)
2972 {
2973 u64 track_val = get_track_val(hist_data, elt, data);
2974 unsigned int i, save_var_idx;
2975
2976 if (data->handler == HANDLER_ONMAX)
2977 seq_printf(m, "\n\tmax: %10llu", track_val);
2978 else if (data->handler == HANDLER_ONCHANGE)
2979 seq_printf(m, "\n\tchanged: %10llu", track_val);
2980
2981 if (data->action == ACTION_SNAPSHOT)
2982 return;
2983
2984 for (i = 0; i < hist_data->n_save_vars; i++) {
2985 struct hist_field *save_val = hist_data->save_vars[i]->val;
2986 struct hist_field *save_var = hist_data->save_vars[i]->var;
2987 u64 val;
2988
2989 save_var_idx = save_var->var.idx;
2990
2991 val = tracing_map_read_var(elt, save_var_idx);
2992
2993 if (save_val->flags & HIST_FIELD_FL_STRING) {
2994 seq_printf(m, " %s: %-32s", save_var->var.name,
2995 (char *)(uintptr_t)(val));
2996 } else
2997 seq_printf(m, " %s: %10llu", save_var->var.name, val);
2998 }
2999 }
3000
3001 static void ontrack_action(struct hist_trigger_data *hist_data,
3002 struct tracing_map_elt *elt, void *rec,
3003 struct ring_buffer_event *rbe, void *key,
3004 struct action_data *data, u64 *var_ref_vals)
3005 {
3006 u64 var_val = var_ref_vals[data->track_data.var_ref->var_ref_idx];
3007
3008 if (check_track_val(elt, data, var_val)) {
3009 save_track_val(hist_data, elt, data, var_val);
3010 save_track_data(hist_data, elt, rec, rbe, key, data, var_ref_vals);
3011 }
3012 }
3013
3014 static void action_data_destroy(struct action_data *data)
3015 {
3016 unsigned int i;
3017
3018 lockdep_assert_held(&event_mutex);
3019
3020 kfree(data->action_name);
3021
3022 for (i = 0; i < data->n_params; i++)
3023 kfree(data->params[i]);
3024
3025 if (data->synth_event)
3026 data->synth_event->ref--;
3027
3028 kfree(data->synth_event_name);
3029
3030 kfree(data);
3031 }
3032
3033 static void track_data_destroy(struct hist_trigger_data *hist_data,
3034 struct action_data *data)
3035 {
3036 struct trace_event_file *file = hist_data->event_file;
3037
3038 destroy_hist_field(data->track_data.track_var, 0);
3039
3040 if (data->action == ACTION_SNAPSHOT) {
3041 struct track_data *track_data;
3042
3043 track_data = tracing_cond_snapshot_data(file->tr);
3044 if (track_data && track_data->hist_data == hist_data) {
3045 tracing_snapshot_cond_disable(file->tr);
3046 track_data_free(track_data);
3047 }
3048 }
3049
3050 kfree(data->track_data.var_str);
3051
3052 action_data_destroy(data);
3053 }
3054
3055 static int action_create(struct hist_trigger_data *hist_data,
3056 struct action_data *data);
3057
3058 static int track_data_create(struct hist_trigger_data *hist_data,
3059 struct action_data *data)
3060 {
3061 struct hist_field *var_field, *ref_field, *track_var = NULL;
3062 struct trace_event_file *file = hist_data->event_file;
3063 struct trace_array *tr = file->tr;
3064 char *track_data_var_str;
3065 int ret = 0;
3066
3067 track_data_var_str = data->track_data.var_str;
3068 if (track_data_var_str[0] != '$') {
3069 hist_err(tr, HIST_ERR_ONX_NOT_VAR, errpos(track_data_var_str));
3070 return -EINVAL;
3071 }
3072 track_data_var_str++;
3073
3074 var_field = find_target_event_var(hist_data, NULL, NULL, track_data_var_str);
3075 if (!var_field) {
3076 hist_err(tr, HIST_ERR_ONX_VAR_NOT_FOUND, errpos(track_data_var_str));
3077 return -EINVAL;
3078 }
3079
3080 ref_field = create_var_ref(hist_data, var_field, NULL, NULL);
3081 if (!ref_field)
3082 return -ENOMEM;
3083
3084 data->track_data.var_ref = ref_field;
3085
3086 if (data->handler == HANDLER_ONMAX)
3087 track_var = create_var(hist_data, file, "__max", sizeof(u64), "u64");
3088 if (IS_ERR(track_var)) {
3089 hist_err(tr, HIST_ERR_ONX_VAR_CREATE_FAIL, 0);
3090 ret = PTR_ERR(track_var);
3091 goto out;
3092 }
3093
3094 if (data->handler == HANDLER_ONCHANGE)
3095 track_var = create_var(hist_data, file, "__change", sizeof(u64), "u64");
3096 if (IS_ERR(track_var)) {
3097 hist_err(tr, HIST_ERR_ONX_VAR_CREATE_FAIL, 0);
3098 ret = PTR_ERR(track_var);
3099 goto out;
3100 }
3101 data->track_data.track_var = track_var;
3102
3103 ret = action_create(hist_data, data);
3104 out:
3105 return ret;
3106 }
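/*
 * E.g. for 'onmax($wakeup_lat).save(next_comm,prev_pid)', var_str is
 * "$wakeup_lat"; a reference to that variable is created, and a hidden
 * "__max" (or "__change" for onchange) variable is added to this
 * histogram to hold the value being tracked.
 */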
3107
3108 static int parse_action_params(struct trace_array *tr, char *params,
3109 struct action_data *data)
3110 {
3111 char *param, *saved_param;
3112 bool first_param = true;
3113 int ret = 0;
3114
3115 while (params) {
3116 if (data->n_params >= SYNTH_FIELDS_MAX) {
3117 hist_err(tr, HIST_ERR_TOO_MANY_PARAMS, 0);
3118 ret = -EINVAL;
3119 goto out;
3120 }
3121
3122 		param = strsep(&params, ",");
3123 if (!param) {
3124 hist_err(tr, HIST_ERR_PARAM_NOT_FOUND, 0);
3125 ret = -EINVAL;
3126 goto out;
3127 }
3128
3129 param = strstrip(param);
3130 if (strlen(param) < 2) {
3131 hist_err(tr, HIST_ERR_INVALID_PARAM, errpos(param));
3132 ret = -EINVAL;
3133 goto out;
3134 }
3135
3136 saved_param = kstrdup(param, GFP_KERNEL);
3137 if (!saved_param) {
3138 ret = -ENOMEM;
3139 goto out;
3140 }
3141
3142 if (first_param && data->use_trace_keyword) {
3143 data->synth_event_name = saved_param;
3144 first_param = false;
3145 continue;
3146 }
3147 first_param = false;
3148
3149 data->params[data->n_params++] = saved_param;
3150 }
3151 out:
3152 return ret;
3153 }
3154
3155 static int action_parse(struct trace_array *tr, char *str, struct action_data *data,
3156 enum handler_id handler)
3157 {
3158 char *action_name;
3159 int ret = 0;
3160
3161 strsep(&str, ".");
3162 if (!str) {
3163 hist_err(tr, HIST_ERR_ACTION_NOT_FOUND, 0);
3164 ret = -EINVAL;
3165 goto out;
3166 }
3167
3168 action_name = strsep(&str, "(");
3169 if (!action_name || !str) {
3170 hist_err(tr, HIST_ERR_ACTION_NOT_FOUND, 0);
3171 ret = -EINVAL;
3172 goto out;
3173 }
3174
3175 if (str_has_prefix(action_name, "save")) {
3176 char *params = strsep(&str, ")");
3177
3178 if (!params) {
3179 hist_err(tr, HIST_ERR_NO_SAVE_PARAMS, 0);
3180 ret = -EINVAL;
3181 goto out;
3182 }
3183
3184 ret = parse_action_params(tr, params, data);
3185 if (ret)
3186 goto out;
3187
3188 if (handler == HANDLER_ONMAX)
3189 data->track_data.check_val = check_track_val_max;
3190 else if (handler == HANDLER_ONCHANGE)
3191 data->track_data.check_val = check_track_val_changed;
3192 else {
3193 hist_err(tr, HIST_ERR_ACTION_MISMATCH, errpos(action_name));
3194 ret = -EINVAL;
3195 goto out;
3196 }
3197
3198 data->track_data.save_data = save_track_data_vars;
3199 data->fn = ontrack_action;
3200 data->action = ACTION_SAVE;
3201 } else if (str_has_prefix(action_name, "snapshot")) {
3202 char *params = strsep(&str, ")");
3203
3204 if (!str) {
3205 hist_err(tr, HIST_ERR_NO_CLOSING_PAREN, errpos(params));
3206 ret = -EINVAL;
3207 goto out;
3208 }
3209
3210 if (handler == HANDLER_ONMAX)
3211 data->track_data.check_val = check_track_val_max;
3212 else if (handler == HANDLER_ONCHANGE)
3213 data->track_data.check_val = check_track_val_changed;
3214 else {
3215 hist_err(tr, HIST_ERR_ACTION_MISMATCH, errpos(action_name));
3216 ret = -EINVAL;
3217 goto out;
3218 }
3219
3220 data->track_data.save_data = save_track_data_snapshot;
3221 data->fn = ontrack_action;
3222 data->action = ACTION_SNAPSHOT;
3223 } else {
3224 char *params = strsep(&str, ")");
3225
3226 if (str_has_prefix(action_name, "trace"))
3227 data->use_trace_keyword = true;
3228
3229 if (params) {
3230 ret = parse_action_params(tr, params, data);
3231 if (ret)
3232 goto out;
3233 }
3234
3235 if (handler == HANDLER_ONMAX)
3236 data->track_data.check_val = check_track_val_max;
3237 else if (handler == HANDLER_ONCHANGE)
3238 data->track_data.check_val = check_track_val_changed;
3239
3240 if (handler != HANDLER_ONMATCH) {
3241 data->track_data.save_data = action_trace;
3242 data->fn = ontrack_action;
3243 } else
3244 data->fn = action_trace;
3245
3246 data->action = ACTION_TRACE;
3247 }
3248
3249 data->action_name = kstrdup(action_name, GFP_KERNEL);
3250 if (!data->action_name) {
3251 ret = -ENOMEM;
3252 goto out;
3253 }
3254
3255 data->handler = handler;
3256 out:
3257 return ret;
3258 }
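/*
 * Handler/action strings parsed above look like, for example:
 *
 *   onmax($lat).save(next_comm,prev_pid,prev_prio)
 *   onchange($var).snapshot()
 *   onmax($lat).trace(wakeup_latency,$lat,next_pid)
 *   onmatch(sched.sched_wakeup).wakeup_latency($lat,pid)
 *
 * where the last two forms generate a synthetic event.
 */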
3259
3260 static struct action_data *track_data_parse(struct hist_trigger_data *hist_data,
3261 char *str, enum handler_id handler)
3262 {
3263 struct action_data *data;
3264 int ret = -EINVAL;
3265 char *var_str;
3266
3267 data = kzalloc(sizeof(*data), GFP_KERNEL);
3268 if (!data)
3269 return ERR_PTR(-ENOMEM);
3270
3271 var_str = strsep(&str, ")");
3272 if (!var_str || !str) {
3273 ret = -EINVAL;
3274 goto free;
3275 }
3276
3277 data->track_data.var_str = kstrdup(var_str, GFP_KERNEL);
3278 if (!data->track_data.var_str) {
3279 ret = -ENOMEM;
3280 goto free;
3281 }
3282
3283 ret = action_parse(hist_data->event_file->tr, str, data, handler);
3284 if (ret)
3285 goto free;
3286 out:
3287 return data;
3288 free:
3289 track_data_destroy(hist_data, data);
3290 data = ERR_PTR(ret);
3291 goto out;
3292 }
3293
3294 static void onmatch_destroy(struct action_data *data)
3295 {
3296 kfree(data->match_data.event);
3297 kfree(data->match_data.event_system);
3298
3299 action_data_destroy(data);
3300 }
3301
3302 static void destroy_field_var(struct field_var *field_var)
3303 {
3304 if (!field_var)
3305 return;
3306
3307 destroy_hist_field(field_var->var, 0);
3308 destroy_hist_field(field_var->val, 0);
3309
3310 kfree(field_var);
3311 }
3312
3313 static void destroy_field_vars(struct hist_trigger_data *hist_data)
3314 {
3315 unsigned int i;
3316
3317 for (i = 0; i < hist_data->n_field_vars; i++)
3318 destroy_field_var(hist_data->field_vars[i]);
3319
3320 for (i = 0; i < hist_data->n_save_vars; i++)
3321 destroy_field_var(hist_data->save_vars[i]);
3322 }
3323
3324 static void save_field_var(struct hist_trigger_data *hist_data,
3325 struct field_var *field_var)
3326 {
3327 hist_data->field_vars[hist_data->n_field_vars++] = field_var;
3328
3329 if (field_var->val->flags & HIST_FIELD_FL_STRING)
3330 hist_data->n_field_var_str++;
3331 }
3332
3333
3334 static int check_synth_field(struct synth_event *event,
3335 struct hist_field *hist_field,
3336 unsigned int field_pos)
3337 {
3338 struct synth_field *field;
3339
3340 if (field_pos >= event->n_fields)
3341 return -EINVAL;
3342
3343 field = event->fields[field_pos];
3344
3345 /*
3346 * A dynamic string synth field can accept static or
3347 * dynamic. A static string synth field can only accept a
3348 * same-sized static string, which is checked for later.
3349 */
3350 if (strstr(hist_field->type, "char[") && field->is_string
3351 && field->is_dynamic)
3352 return 0;
3353
3354 if (strcmp(field->type, hist_field->type) != 0) {
3355 if (field->size != hist_field->size ||
3356 (!field->is_string && field->is_signed != hist_field->is_signed))
3357 return -EINVAL;
3358 }
3359
3360 return 0;
3361 }
3362
3363 static struct hist_field *
3364 trace_action_find_var(struct hist_trigger_data *hist_data,
3365 struct action_data *data,
3366 char *system, char *event, char *var)
3367 {
3368 struct trace_array *tr = hist_data->event_file->tr;
3369 struct hist_field *hist_field;
3370
3371 var++; /* skip '$' */
3372
3373 hist_field = find_target_event_var(hist_data, system, event, var);
3374 if (!hist_field) {
3375 if (!system && data->handler == HANDLER_ONMATCH) {
3376 system = data->match_data.event_system;
3377 event = data->match_data.event;
3378 }
3379
3380 hist_field = find_event_var(hist_data, system, event, var);
3381 }
3382
3383 if (!hist_field)
3384 hist_err(tr, HIST_ERR_PARAM_NOT_FOUND, errpos(var));
3385
3386 return hist_field;
3387 }
3388
3389 static struct hist_field *
3390 trace_action_create_field_var(struct hist_trigger_data *hist_data,
3391 struct action_data *data, char *system,
3392 char *event, char *var)
3393 {
3394 struct hist_field *hist_field = NULL;
3395 struct field_var *field_var;
3396
3397 /*
3398 	 * First try to create a field var on the target event (the one
3399 	 * currently being defined). This will create a variable for
3400 * unqualified fields on the target event, or if qualified,
3401 * target fields that have qualified names matching the target.
3402 */
3403 field_var = create_target_field_var(hist_data, system, event, var);
3404
3405 if (field_var && !IS_ERR(field_var)) {
3406 save_field_var(hist_data, field_var);
3407 hist_field = field_var->var;
3408 } else {
3409 field_var = NULL;
3410 /*
3411 		 * If no explicit system.event is specified, default to
3412 * looking for fields on the onmatch(system.event.xxx)
3413 * event.
3414 */
3415 if (!system && data->handler == HANDLER_ONMATCH) {
3416 system = data->match_data.event_system;
3417 event = data->match_data.event;
3418 }
3419
3420 if (!event)
3421 goto free;
3422 /*
3423 * At this point, we're looking at a field on another
3424 * event. Because we can't modify a hist trigger on
3425 * another event to add a variable for a field, we need
3426 * to create a new trigger on that event and create the
3427 * variable at the same time.
3428 */
3429 hist_field = create_field_var_hist(hist_data, system, event, var);
3430 if (IS_ERR(hist_field))
3431 goto free;
3432 }
3433 out:
3434 return hist_field;
3435 free:
3436 destroy_field_var(field_var);
3437 hist_field = NULL;
3438 goto out;
3439 }
3440
3441 static int trace_action_create(struct hist_trigger_data *hist_data,
3442 struct action_data *data)
3443 {
3444 struct trace_array *tr = hist_data->event_file->tr;
3445 char *event_name, *param, *system = NULL;
3446 struct hist_field *hist_field, *var_ref;
3447 unsigned int i;
3448 unsigned int field_pos = 0;
3449 struct synth_event *event;
3450 char *synth_event_name;
3451 int var_ref_idx, ret = 0;
3452
3453 lockdep_assert_held(&event_mutex);
3454
3455 /* Sanity check to avoid out-of-bound write on 'data->var_ref_idx' */
3456 if (data->n_params > SYNTH_FIELDS_MAX)
3457 return -EINVAL;
3458
3459 if (data->use_trace_keyword)
3460 synth_event_name = data->synth_event_name;
3461 else
3462 synth_event_name = data->action_name;
3463
3464 event = find_synth_event(synth_event_name);
3465 if (!event) {
3466 hist_err(tr, HIST_ERR_SYNTH_EVENT_NOT_FOUND, errpos(synth_event_name));
3467 return -EINVAL;
3468 }
3469
3470 event->ref++;
3471
3472 for (i = 0; i < data->n_params; i++) {
3473 char *p;
3474
3475 p = param = kstrdup(data->params[i], GFP_KERNEL);
3476 if (!param) {
3477 ret = -ENOMEM;
3478 goto err;
3479 }
3480
3481 		system = strsep(&param, ".");
3482 if (!param) {
3483 param = (char *)system;
3484 system = event_name = NULL;
3485 } else {
3486 			event_name = strsep(&param, ".");
3487 if (!param) {
3488 kfree(p);
3489 ret = -EINVAL;
3490 goto err;
3491 }
3492 }
3493
3494 if (param[0] == '$')
3495 hist_field = trace_action_find_var(hist_data, data,
3496 system, event_name,
3497 param);
3498 else
3499 hist_field = trace_action_create_field_var(hist_data,
3500 data,
3501 system,
3502 event_name,
3503 param);
3504
3505 if (!hist_field) {
3506 kfree(p);
3507 ret = -EINVAL;
3508 goto err;
3509 }
3510
3511 if (check_synth_field(event, hist_field, field_pos) == 0) {
3512 var_ref = create_var_ref(hist_data, hist_field,
3513 system, event_name);
3514 if (!var_ref) {
3515 kfree(p);
3516 ret = -ENOMEM;
3517 goto err;
3518 }
3519
3520 var_ref_idx = find_var_ref_idx(hist_data, var_ref);
3521 if (WARN_ON(var_ref_idx < 0)) {
3522 kfree(p);
3523 ret = var_ref_idx;
3524 goto err;
3525 }
3526
3527 data->var_ref_idx[i] = var_ref_idx;
3528
3529 field_pos++;
3530 kfree(p);
3531 continue;
3532 }
3533
3534 hist_err(tr, HIST_ERR_SYNTH_TYPE_MISMATCH, errpos(param));
3535 kfree(p);
3536 ret = -EINVAL;
3537 goto err;
3538 }
3539
3540 if (field_pos != event->n_fields) {
3541 hist_err(tr, HIST_ERR_SYNTH_COUNT_MISMATCH, errpos(event->name));
3542 ret = -EINVAL;
3543 goto err;
3544 }
3545
3546 data->synth_event = event;
3547 out:
3548 return ret;
3549 err:
3550 event->ref--;
3551
3552 goto out;
3553 }
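/*
 * For instance, with a synthetic event defined as
 *
 *   wakeup_latency u64 lat; pid_t pid
 *
 * an action like onmatch(sched.sched_wakeup).wakeup_latency($wakeup_lat,pid)
 * is resolved here: '$wakeup_lat' must already be a variable, while 'pid'
 * is turned into an auto-created field variable; each parameter is then
 * type-checked against the corresponding synthetic event field.
 */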
3554
3555 static int action_create(struct hist_trigger_data *hist_data,
3556 struct action_data *data)
3557 {
3558 struct trace_event_file *file = hist_data->event_file;
3559 struct trace_array *tr = file->tr;
3560 struct track_data *track_data;
3561 struct field_var *field_var;
3562 unsigned int i;
3563 char *param;
3564 int ret = 0;
3565
3566 if (data->action == ACTION_TRACE)
3567 return trace_action_create(hist_data, data);
3568
3569 if (data->action == ACTION_SNAPSHOT) {
3570 track_data = track_data_alloc(hist_data->key_size, data, hist_data);
3571 if (IS_ERR(track_data)) {
3572 ret = PTR_ERR(track_data);
3573 goto out;
3574 }
3575
3576 ret = tracing_snapshot_cond_enable(file->tr, track_data,
3577 cond_snapshot_update);
3578 if (ret)
3579 track_data_free(track_data);
3580
3581 goto out;
3582 }
3583
3584 if (data->action == ACTION_SAVE) {
3585 if (hist_data->n_save_vars) {
3586 ret = -EEXIST;
3587 hist_err(tr, HIST_ERR_TOO_MANY_SAVE_ACTIONS, 0);
3588 goto out;
3589 }
3590
3591 for (i = 0; i < data->n_params; i++) {
3592 param = kstrdup(data->params[i], GFP_KERNEL);
3593 if (!param) {
3594 ret = -ENOMEM;
3595 goto out;
3596 }
3597
3598 field_var = create_target_field_var(hist_data, NULL, NULL, param);
3599 if (IS_ERR(field_var)) {
3600 hist_err(tr, HIST_ERR_FIELD_VAR_CREATE_FAIL,
3601 errpos(param));
3602 ret = PTR_ERR(field_var);
3603 kfree(param);
3604 goto out;
3605 }
3606
3607 hist_data->save_vars[hist_data->n_save_vars++] = field_var;
3608 if (field_var->val->flags & HIST_FIELD_FL_STRING)
3609 hist_data->n_save_var_str++;
3610 kfree(param);
3611 }
3612 }
3613 out:
3614 return ret;
3615 }
3616
3617 static int onmatch_create(struct hist_trigger_data *hist_data,
3618 struct action_data *data)
3619 {
3620 return action_create(hist_data, data);
3621 }
3622
3623 static struct action_data *onmatch_parse(struct trace_array *tr, char *str)
3624 {
3625 char *match_event, *match_event_system;
3626 struct action_data *data;
3627 int ret = -EINVAL;
3628
3629 data = kzalloc(sizeof(*data), GFP_KERNEL);
3630 if (!data)
3631 return ERR_PTR(-ENOMEM);
3632
3633 match_event = strsep(&str, ")");
3634 if (!match_event || !str) {
3635 hist_err(tr, HIST_ERR_NO_CLOSING_PAREN, errpos(match_event));
3636 goto free;
3637 }
3638
3639 match_event_system = strsep(&match_event, ".");
3640 if (!match_event) {
3641 hist_err(tr, HIST_ERR_SUBSYS_NOT_FOUND, errpos(match_event_system));
3642 goto free;
3643 }
3644
3645 if (IS_ERR(event_file(tr, match_event_system, match_event))) {
3646 hist_err(tr, HIST_ERR_INVALID_SUBSYS_EVENT, errpos(match_event));
3647 goto free;
3648 }
3649
3650 data->match_data.event = kstrdup(match_event, GFP_KERNEL);
3651 if (!data->match_data.event) {
3652 ret = -ENOMEM;
3653 goto free;
3654 }
3655
3656 data->match_data.event_system = kstrdup(match_event_system, GFP_KERNEL);
3657 if (!data->match_data.event_system) {
3658 ret = -ENOMEM;
3659 goto free;
3660 }
3661
3662 ret = action_parse(tr, str, data, HANDLER_ONMATCH);
3663 if (ret)
3664 goto free;
3665 out:
3666 return data;
3667 free:
3668 onmatch_destroy(data);
3669 data = ERR_PTR(ret);
3670 goto out;
3671 }
3672
3673 static int create_hitcount_val(struct hist_trigger_data *hist_data)
3674 {
3675 hist_data->fields[HITCOUNT_IDX] =
3676 create_hist_field(hist_data, NULL, HIST_FIELD_FL_HITCOUNT, NULL);
3677 if (!hist_data->fields[HITCOUNT_IDX])
3678 return -ENOMEM;
3679
3680 hist_data->n_vals++;
3681 hist_data->n_fields++;
3682
3683 if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX))
3684 return -EINVAL;
3685
3686 return 0;
3687 }
3688
3689 static int __create_val_field(struct hist_trigger_data *hist_data,
3690 unsigned int val_idx,
3691 struct trace_event_file *file,
3692 char *var_name, char *field_str,
3693 unsigned long flags)
3694 {
3695 struct hist_field *hist_field;
3696 int ret = 0;
3697
3698 hist_field = parse_expr(hist_data, file, field_str, flags, var_name, 0);
3699 if (IS_ERR(hist_field)) {
3700 ret = PTR_ERR(hist_field);
3701 goto out;
3702 }
3703
3704 hist_data->fields[val_idx] = hist_field;
3705
3706 ++hist_data->n_vals;
3707 ++hist_data->n_fields;
3708
3709 if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX + TRACING_MAP_VARS_MAX))
3710 ret = -EINVAL;
3711 out:
3712 return ret;
3713 }
3714
3715 static int create_val_field(struct hist_trigger_data *hist_data,
3716 unsigned int val_idx,
3717 struct trace_event_file *file,
3718 char *field_str)
3719 {
3720 if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX))
3721 return -EINVAL;
3722
3723 return __create_val_field(hist_data, val_idx, file, NULL, field_str, 0);
3724 }
3725
3726 static int create_var_field(struct hist_trigger_data *hist_data,
3727 unsigned int val_idx,
3728 struct trace_event_file *file,
3729 char *var_name, char *expr_str)
3730 {
3731 struct trace_array *tr = hist_data->event_file->tr;
3732 unsigned long flags = 0;
3733 int ret;
3734
3735 if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX + TRACING_MAP_VARS_MAX))
3736 return -EINVAL;
3737
3738 if (find_var(hist_data, file, var_name) && !hist_data->remove) {
3739 hist_err(tr, HIST_ERR_DUPLICATE_VAR, errpos(var_name));
3740 return -EINVAL;
3741 }
3742
3743 flags |= HIST_FIELD_FL_VAR;
3744 hist_data->n_vars++;
3745 if (WARN_ON(hist_data->n_vars > TRACING_MAP_VARS_MAX))
3746 return -EINVAL;
3747
3748 ret = __create_val_field(hist_data, val_idx, file, var_name, expr_str, flags);
3749
3750 if (!ret && hist_data->fields[val_idx]->flags & HIST_FIELD_FL_STRING)
3751 hist_data->fields[val_idx]->var_str_idx = hist_data->n_var_str++;
3752
3753 return ret;
3754 }
3755
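/*
 * Parse the 'vals=' clause of a hist trigger.  The hitcount value is
 * always created implicitly as value field 0, and any explicit
 * "hitcount" in the list is skipped.  For example (syntax per
 * Documentation/trace/histogram.rst; event and field names are
 * illustrative):
 *
 *   hist:keys=call_site:vals=bytes_req,bytes_alloc
 *
 * ends up with three value fields: hitcount, bytes_req and bytes_alloc.
 */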
3756 static int create_val_fields(struct hist_trigger_data *hist_data,
3757 struct trace_event_file *file)
3758 {
3759 char *fields_str, *field_str;
3760 unsigned int i, j = 1;
3761 int ret;
3762
3763 ret = create_hitcount_val(hist_data);
3764 if (ret)
3765 goto out;
3766
3767 fields_str = hist_data->attrs->vals_str;
3768 if (!fields_str)
3769 goto out;
3770
3771 for (i = 0, j = 1; i < TRACING_MAP_VALS_MAX &&
3772 j < TRACING_MAP_VALS_MAX; i++) {
3773 field_str = strsep(&fields_str, ",");
3774 if (!field_str)
3775 break;
3776
3777 if (strcmp(field_str, "hitcount") == 0)
3778 continue;
3779
3780 ret = create_val_field(hist_data, j++, file, field_str);
3781 if (ret)
3782 goto out;
3783 }
3784
3785 if (fields_str && (strcmp(fields_str, "hitcount") != 0))
3786 ret = -EINVAL;
3787 out:
3788 return ret;
3789 }
3790
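/*
 * Create a single key field from one element of the 'keys=' clause.
 * "stacktrace" is a special key backed by a saved kernel stack trace;
 * anything else is parsed as an expression, and variable references in
 * keys are rejected.  Key sizes are rounded up to a u64 multiple so
 * compound keys pack cleanly.
 */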
3791 static int create_key_field(struct hist_trigger_data *hist_data,
3792 unsigned int key_idx,
3793 unsigned int key_offset,
3794 struct trace_event_file *file,
3795 char *field_str)
3796 {
3797 struct trace_array *tr = hist_data->event_file->tr;
3798 struct hist_field *hist_field = NULL;
3799 unsigned long flags = 0;
3800 unsigned int key_size;
3801 int ret = 0;
3802
3803 if (WARN_ON(key_idx >= HIST_FIELDS_MAX))
3804 return -EINVAL;
3805
3806 flags |= HIST_FIELD_FL_KEY;
3807
3808 if (strcmp(field_str, "stacktrace") == 0) {
3809 flags |= HIST_FIELD_FL_STACKTRACE;
3810 key_size = sizeof(unsigned long) * HIST_STACKTRACE_DEPTH;
3811 hist_field = create_hist_field(hist_data, NULL, flags, NULL);
3812 } else {
3813 hist_field = parse_expr(hist_data, file, field_str, flags,
3814 NULL, 0);
3815 if (IS_ERR(hist_field)) {
3816 ret = PTR_ERR(hist_field);
3817 goto out;
3818 }
3819
3820 if (field_has_hist_vars(hist_field, 0)) {
3821 hist_err(tr, HIST_ERR_INVALID_REF_KEY, errpos(field_str));
3822 destroy_hist_field(hist_field, 0);
3823 ret = -EINVAL;
3824 goto out;
3825 }
3826
3827 key_size = hist_field->size;
3828 }
3829
3830 hist_data->fields[key_idx] = hist_field;
3831
3832 key_size = ALIGN(key_size, sizeof(u64));
3833 hist_data->fields[key_idx]->size = key_size;
3834 hist_data->fields[key_idx]->offset = key_offset;
3835
3836 hist_data->key_size += key_size;
3837
3838 if (hist_data->key_size > HIST_KEY_SIZE_MAX) {
3839 ret = -EINVAL;
3840 goto out;
3841 }
3842
3843 hist_data->n_keys++;
3844 hist_data->n_fields++;
3845
3846 if (WARN_ON(hist_data->n_keys > TRACING_MAP_KEYS_MAX))
3847 return -EINVAL;
3848
3849 ret = key_size;
3850 out:
3851 return ret;
3852 }
3853
3854 static int create_key_fields(struct hist_trigger_data *hist_data,
3855 struct trace_event_file *file)
3856 {
3857 unsigned int i, key_offset = 0, n_vals = hist_data->n_vals;
3858 char *fields_str, *field_str;
3859 int ret = -EINVAL;
3860
3861 fields_str = hist_data->attrs->keys_str;
3862 if (!fields_str)
3863 goto out;
3864
3865 for (i = n_vals; i < n_vals + TRACING_MAP_KEYS_MAX; i++) {
3866 field_str = strsep(&fields_str, ",");
3867 if (!field_str)
3868 break;
3869 ret = create_key_field(hist_data, i, key_offset,
3870 file, field_str);
3871 if (ret < 0)
3872 goto out;
3873 key_offset += ret;
3874 }
3875 if (fields_str) {
3876 ret = -EINVAL;
3877 goto out;
3878 }
3879 ret = 0;
3880 out:
3881 return ret;
3882 }
3883
3884 static int create_var_fields(struct hist_trigger_data *hist_data,
3885 struct trace_event_file *file)
3886 {
3887 unsigned int i, j = hist_data->n_vals;
3888 int ret = 0;
3889
3890 unsigned int n_vars = hist_data->attrs->var_defs.n_vars;
3891
3892 for (i = 0; i < n_vars; i++) {
3893 char *var_name = hist_data->attrs->var_defs.name[i];
3894 char *expr = hist_data->attrs->var_defs.expr[i];
3895
3896 ret = create_var_field(hist_data, j++, file, var_name, expr);
3897 if (ret)
3898 goto out;
3899 }
3900 out:
3901 return ret;
3902 }
3903
3904 static void free_var_defs(struct hist_trigger_data *hist_data)
3905 {
3906 unsigned int i;
3907
3908 for (i = 0; i < hist_data->attrs->var_defs.n_vars; i++) {
3909 kfree(hist_data->attrs->var_defs.name[i]);
3910 kfree(hist_data->attrs->var_defs.expr[i]);
3911 }
3912
3913 hist_data->attrs->var_defs.n_vars = 0;
3914 }
3915
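/*
 * Parse the variable assignments given in the trigger, e.g. (syntax per
 * Documentation/trace/histogram.rst; the variable name is illustrative):
 *
 *   hist:keys=pid:ts0=common_timestamp.usecs
 *
 * Each "name=expr" pair is duplicated into attrs->var_defs so the
 * expressions can be created as variable fields later on.
 */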
3916 static int parse_var_defs(struct hist_trigger_data *hist_data)
3917 {
3918 struct trace_array *tr = hist_data->event_file->tr;
3919 char *s, *str, *var_name, *field_str;
3920 unsigned int i, j, n_vars = 0;
3921 int ret = 0;
3922
3923 for (i = 0; i < hist_data->attrs->n_assignments; i++) {
3924 str = hist_data->attrs->assignment_str[i];
3925 for (j = 0; j < TRACING_MAP_VARS_MAX; j++) {
3926 field_str = strsep(&str, ",");
3927 if (!field_str)
3928 break;
3929
3930 var_name = strsep(&field_str, "=");
3931 if (!var_name || !field_str) {
3932 hist_err(tr, HIST_ERR_MALFORMED_ASSIGNMENT,
3933 errpos(var_name));
3934 ret = -EINVAL;
3935 goto free;
3936 }
3937
3938 if (n_vars == TRACING_MAP_VARS_MAX) {
3939 hist_err(tr, HIST_ERR_TOO_MANY_VARS, errpos(var_name));
3940 ret = -EINVAL;
3941 goto free;
3942 }
3943
3944 s = kstrdup(var_name, GFP_KERNEL);
3945 if (!s) {
3946 ret = -ENOMEM;
3947 goto free;
3948 }
3949 hist_data->attrs->var_defs.name[n_vars] = s;
3950
3951 s = kstrdup(field_str, GFP_KERNEL);
3952 if (!s) {
3953 kfree(hist_data->attrs->var_defs.name[n_vars]);
3954 hist_data->attrs->var_defs.name[n_vars] = NULL;
3955 ret = -ENOMEM;
3956 goto free;
3957 }
3958 hist_data->attrs->var_defs.expr[n_vars++] = s;
3959
3960 hist_data->attrs->var_defs.n_vars = n_vars;
3961 }
3962 }
3963
3964 return ret;
3965 free:
3966 free_var_defs(hist_data);
3967
3968 return ret;
3969 }
3970
3971 static int create_hist_fields(struct hist_trigger_data *hist_data,
3972 struct trace_event_file *file)
3973 {
3974 int ret;
3975
3976 ret = parse_var_defs(hist_data);
3977 if (ret)
3978 goto out;
3979
3980 ret = create_val_fields(hist_data, file);
3981 if (ret)
3982 goto out;
3983
3984 ret = create_var_fields(hist_data, file);
3985 if (ret)
3986 goto out;
3987
3988 ret = create_key_fields(hist_data, file);
3989 if (ret)
3990 goto out;
3991 out:
3992 free_var_defs(hist_data);
3993
3994 return ret;
3995 }
3996
3997 static int is_descending(struct trace_array *tr, const char *str)
3998 {
3999 if (!str)
4000 return 0;
4001
4002 if (strcmp(str, "descending") == 0)
4003 return 1;
4004
4005 if (strcmp(str, "ascending") == 0)
4006 return 0;
4007
4008 hist_err(tr, HIST_ERR_INVALID_SORT_MODIFIER, errpos((char *)str));
4009
4010 return -EINVAL;
4011 }
4012
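/*
 * Parse the 'sort=' clause.  Each sort key may carry an optional
 * ".ascending" or ".descending" modifier, e.g. (illustrative, documented
 * syntax):
 *
 *   hist:keys=call_site:vals=bytes_req:sort=bytes_req.descending
 *
 * At most TRACING_MAP_SORT_KEYS_MAX keys are accepted, and hitcount is
 * always usable as a sort key even when not listed under 'vals='.
 */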
4013 static int create_sort_keys(struct hist_trigger_data *hist_data)
4014 {
4015 struct trace_array *tr = hist_data->event_file->tr;
4016 char *fields_str = hist_data->attrs->sort_key_str;
4017 struct tracing_map_sort_key *sort_key;
4018 int descending, ret = 0;
4019 unsigned int i, j, k;
4020
4021 hist_data->n_sort_keys = 1; /* we always have at least one, hitcount */
4022
4023 if (!fields_str)
4024 goto out;
4025
4026 for (i = 0; i < TRACING_MAP_SORT_KEYS_MAX; i++) {
4027 struct hist_field *hist_field;
4028 char *field_str, *field_name;
4029 const char *test_name;
4030
4031 sort_key = &hist_data->sort_keys[i];
4032
4033 field_str = strsep(&fields_str, ",");
4034 if (!field_str)
4035 break;
4036
4037 if (!*field_str) {
4038 ret = -EINVAL;
4039 hist_err(tr, HIST_ERR_EMPTY_SORT_FIELD, errpos("sort="));
4040 break;
4041 }
4042
4043 if ((i == TRACING_MAP_SORT_KEYS_MAX - 1) && fields_str) {
4044 hist_err(tr, HIST_ERR_TOO_MANY_SORT_FIELDS, errpos("sort="));
4045 ret = -EINVAL;
4046 break;
4047 }
4048
4049 field_name = strsep(&field_str, ".");
4050 if (!field_name || !*field_name) {
4051 ret = -EINVAL;
4052 hist_err(tr, HIST_ERR_EMPTY_SORT_FIELD, errpos("sort="));
4053 break;
4054 }
4055
4056 if (strcmp(field_name, "hitcount") == 0) {
4057 descending = is_descending(tr, field_str);
4058 if (descending < 0) {
4059 ret = descending;
4060 break;
4061 }
4062 sort_key->descending = descending;
4063 continue;
4064 }
4065
4066 for (j = 1, k = 1; j < hist_data->n_fields; j++) {
4067 unsigned int idx;
4068
4069 hist_field = hist_data->fields[j];
4070 if (hist_field->flags & HIST_FIELD_FL_VAR)
4071 continue;
4072
4073 idx = k++;
4074
4075 test_name = hist_field_name(hist_field, 0);
4076
4077 if (strcmp(field_name, test_name) == 0) {
4078 sort_key->field_idx = idx;
4079 descending = is_descending(tr, field_str);
4080 if (descending < 0) {
4081 ret = descending;
4082 goto out;
4083 }
4084 sort_key->descending = descending;
4085 break;
4086 }
4087 }
4088 if (j == hist_data->n_fields) {
4089 ret = -EINVAL;
4090 hist_err(tr, HIST_ERR_INVALID_SORT_FIELD, errpos(field_name));
4091 break;
4092 }
4093 }
4094
4095 hist_data->n_sort_keys = i;
4096 out:
4097 return ret;
4098 }
4099
4100 static void destroy_actions(struct hist_trigger_data *hist_data)
4101 {
4102 unsigned int i;
4103
4104 for (i = 0; i < hist_data->n_actions; i++) {
4105 struct action_data *data = hist_data->actions[i];
4106
4107 if (data->handler == HANDLER_ONMATCH)
4108 onmatch_destroy(data);
4109 else if (data->handler == HANDLER_ONMAX ||
4110 data->handler == HANDLER_ONCHANGE)
4111 track_data_destroy(hist_data, data);
4112 else
4113 kfree(data);
4114 }
4115 }
4116
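/*
 * Parse the action clauses attached to the trigger.  Three handlers are
 * recognized: onmatch(), onmax() and onchange(), e.g. (illustrative,
 * documented syntax; variable, event and field names vary):
 *
 *   onmax($wakeup_lat).save(next_comm,prev_pid,prev_comm)
 *   onmatch(sched.sched_switch).wakeup_latency($wakeup_lat,next_pid)
 *
 * Anything else is rejected with -EINVAL.
 */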
4117 static int parse_actions(struct hist_trigger_data *hist_data)
4118 {
4119 struct trace_array *tr = hist_data->event_file->tr;
4120 struct action_data *data;
4121 unsigned int i;
4122 int ret = 0;
4123 char *str;
4124 int len;
4125
4126 for (i = 0; i < hist_data->attrs->n_actions; i++) {
4127 str = hist_data->attrs->action_str[i];
4128
4129 if ((len = str_has_prefix(str, "onmatch("))) {
4130 char *action_str = str + len;
4131
4132 data = onmatch_parse(tr, action_str);
4133 if (IS_ERR(data)) {
4134 ret = PTR_ERR(data);
4135 break;
4136 }
4137 } else if ((len = str_has_prefix(str, "onmax("))) {
4138 char *action_str = str + len;
4139
4140 data = track_data_parse(hist_data, action_str,
4141 HANDLER_ONMAX);
4142 if (IS_ERR(data)) {
4143 ret = PTR_ERR(data);
4144 break;
4145 }
4146 } else if ((len = str_has_prefix(str, "onchange("))) {
4147 char *action_str = str + len;
4148
4149 data = track_data_parse(hist_data, action_str,
4150 HANDLER_ONCHANGE);
4151 if (IS_ERR(data)) {
4152 ret = PTR_ERR(data);
4153 break;
4154 }
4155 } else {
4156 ret = -EINVAL;
4157 break;
4158 }
4159
4160 hist_data->actions[hist_data->n_actions++] = data;
4161 }
4162
4163 return ret;
4164 }
4165
4166 static int create_actions(struct hist_trigger_data *hist_data)
4167 {
4168 struct action_data *data;
4169 unsigned int i;
4170 int ret = 0;
4171
4172 for (i = 0; i < hist_data->attrs->n_actions; i++) {
4173 data = hist_data->actions[i];
4174
4175 if (data->handler == HANDLER_ONMATCH) {
4176 ret = onmatch_create(hist_data, data);
4177 if (ret)
4178 break;
4179 } else if (data->handler == HANDLER_ONMAX ||
4180 data->handler == HANDLER_ONCHANGE) {
4181 ret = track_data_create(hist_data, data);
4182 if (ret)
4183 break;
4184 } else {
4185 ret = -EINVAL;
4186 break;
4187 }
4188 }
4189
4190 return ret;
4191 }
4192
4193 static void print_actions(struct seq_file *m,
4194 struct hist_trigger_data *hist_data,
4195 struct tracing_map_elt *elt)
4196 {
4197 unsigned int i;
4198
4199 for (i = 0; i < hist_data->n_actions; i++) {
4200 struct action_data *data = hist_data->actions[i];
4201
4202 if (data->action == ACTION_SNAPSHOT)
4203 continue;
4204
4205 if (data->handler == HANDLER_ONMAX ||
4206 data->handler == HANDLER_ONCHANGE)
4207 track_data_print(m, hist_data, elt, data);
4208 }
4209 }
4210
4211 static void print_action_spec(struct seq_file *m,
4212 struct hist_trigger_data *hist_data,
4213 struct action_data *data)
4214 {
4215 unsigned int i;
4216
4217 if (data->action == ACTION_SAVE) {
4218 for (i = 0; i < hist_data->n_save_vars; i++) {
4219 seq_printf(m, "%s", hist_data->save_vars[i]->var->var.name);
4220 if (i < hist_data->n_save_vars - 1)
4221 seq_puts(m, ",");
4222 }
4223 } else if (data->action == ACTION_TRACE) {
4224 if (data->use_trace_keyword)
4225 seq_printf(m, "%s", data->synth_event_name);
4226 for (i = 0; i < data->n_params; i++) {
4227 if (i || data->use_trace_keyword)
4228 seq_puts(m, ",");
4229 seq_printf(m, "%s", data->params[i]);
4230 }
4231 }
4232 }
4233
4234 static void print_track_data_spec(struct seq_file *m,
4235 struct hist_trigger_data *hist_data,
4236 struct action_data *data)
4237 {
4238 if (data->handler == HANDLER_ONMAX)
4239 seq_puts(m, ":onmax(");
4240 else if (data->handler == HANDLER_ONCHANGE)
4241 seq_puts(m, ":onchange(");
4242 seq_printf(m, "%s", data->track_data.var_str);
4243 seq_printf(m, ").%s(", data->action_name);
4244
4245 print_action_spec(m, hist_data, data);
4246
4247 seq_puts(m, ")");
4248 }
4249
4250 static void print_onmatch_spec(struct seq_file *m,
4251 struct hist_trigger_data *hist_data,
4252 struct action_data *data)
4253 {
4254 seq_printf(m, ":onmatch(%s.%s).", data->match_data.event_system,
4255 data->match_data.event);
4256
4257 seq_printf(m, "%s(", data->action_name);
4258
4259 print_action_spec(m, hist_data, data);
4260
4261 seq_puts(m, ")");
4262 }
4263
4264 static bool actions_match(struct hist_trigger_data *hist_data,
4265 struct hist_trigger_data *hist_data_test)
4266 {
4267 unsigned int i, j;
4268
4269 if (hist_data->n_actions != hist_data_test->n_actions)
4270 return false;
4271
4272 for (i = 0; i < hist_data->n_actions; i++) {
4273 struct action_data *data = hist_data->actions[i];
4274 struct action_data *data_test = hist_data_test->actions[i];
4275 char *action_name, *action_name_test;
4276
4277 if (data->handler != data_test->handler)
4278 return false;
4279 if (data->action != data_test->action)
4280 return false;
4281
4282 if (data->n_params != data_test->n_params)
4283 return false;
4284
4285 for (j = 0; j < data->n_params; j++) {
4286 if (strcmp(data->params[j], data_test->params[j]) != 0)
4287 return false;
4288 }
4289
4290 if (data->use_trace_keyword)
4291 action_name = data->synth_event_name;
4292 else
4293 action_name = data->action_name;
4294
4295 if (data_test->use_trace_keyword)
4296 action_name_test = data_test->synth_event_name;
4297 else
4298 action_name_test = data_test->action_name;
4299
4300 if (strcmp(action_name, action_name_test) != 0)
4301 return false;
4302
4303 if (data->handler == HANDLER_ONMATCH) {
4304 if (strcmp(data->match_data.event_system,
4305 data_test->match_data.event_system) != 0)
4306 return false;
4307 if (strcmp(data->match_data.event,
4308 data_test->match_data.event) != 0)
4309 return false;
4310 } else if (data->handler == HANDLER_ONMAX ||
4311 data->handler == HANDLER_ONCHANGE) {
4312 if (strcmp(data->track_data.var_str,
4313 data_test->track_data.var_str) != 0)
4314 return false;
4315 }
4316 }
4317
4318 return true;
4319 }
4320
4321
4322 static void print_actions_spec(struct seq_file *m,
4323 struct hist_trigger_data *hist_data)
4324 {
4325 unsigned int i;
4326
4327 for (i = 0; i < hist_data->n_actions; i++) {
4328 struct action_data *data = hist_data->actions[i];
4329
4330 if (data->handler == HANDLER_ONMATCH)
4331 print_onmatch_spec(m, hist_data, data);
4332 else if (data->handler == HANDLER_ONMAX ||
4333 data->handler == HANDLER_ONCHANGE)
4334 print_track_data_spec(m, hist_data, data);
4335 }
4336 }
4337
4338 static void destroy_field_var_hists(struct hist_trigger_data *hist_data)
4339 {
4340 unsigned int i;
4341
4342 for (i = 0; i < hist_data->n_field_var_hists; i++) {
4343 kfree(hist_data->field_var_hists[i]->cmd);
4344 kfree(hist_data->field_var_hists[i]);
4345 }
4346 }
4347
4348 static void destroy_hist_data(struct hist_trigger_data *hist_data)
4349 {
4350 if (!hist_data)
4351 return;
4352
4353 destroy_hist_trigger_attrs(hist_data->attrs);
4354 destroy_hist_fields(hist_data);
4355 tracing_map_destroy(hist_data->map);
4356
4357 destroy_actions(hist_data);
4358 destroy_field_vars(hist_data);
4359 destroy_field_var_hists(hist_data);
4360
4361 kfree(hist_data);
4362 }
4363
4364 static int create_tracing_map_fields(struct hist_trigger_data *hist_data)
4365 {
4366 struct tracing_map *map = hist_data->map;
4367 struct ftrace_event_field *field;
4368 struct hist_field *hist_field;
4369 int i, idx = 0;
4370
4371 for_each_hist_field(i, hist_data) {
4372 hist_field = hist_data->fields[i];
4373 if (hist_field->flags & HIST_FIELD_FL_KEY) {
4374 tracing_map_cmp_fn_t cmp_fn;
4375
4376 field = hist_field->field;
4377
4378 if (hist_field->flags & HIST_FIELD_FL_STACKTRACE)
4379 cmp_fn = tracing_map_cmp_none;
4380 else if (!field || hist_field->flags & HIST_FIELD_FL_CPU)
4381 cmp_fn = tracing_map_cmp_num(hist_field->size,
4382 hist_field->is_signed);
4383 else if (is_string_field(field))
4384 cmp_fn = tracing_map_cmp_string;
4385 else
4386 cmp_fn = tracing_map_cmp_num(field->size,
4387 field->is_signed);
4388 idx = tracing_map_add_key_field(map,
4389 hist_field->offset,
4390 cmp_fn);
4391 } else if (!(hist_field->flags & HIST_FIELD_FL_VAR))
4392 idx = tracing_map_add_sum_field(map);
4393
4394 if (idx < 0)
4395 return idx;
4396
4397 if (hist_field->flags & HIST_FIELD_FL_VAR) {
4398 idx = tracing_map_add_var(map);
4399 if (idx < 0)
4400 return idx;
4401 hist_field->var.idx = idx;
4402 hist_field->var.hist_data = hist_data;
4403 }
4404 }
4405
4406 return 0;
4407 }
4408
4409 static struct hist_trigger_data *
4410 create_hist_data(unsigned int map_bits,
4411 struct hist_trigger_attrs *attrs,
4412 struct trace_event_file *file,
4413 bool remove)
4414 {
4415 const struct tracing_map_ops *map_ops = NULL;
4416 struct hist_trigger_data *hist_data;
4417 int ret = 0;
4418
4419 hist_data = kzalloc(sizeof(*hist_data), GFP_KERNEL);
4420 if (!hist_data)
4421 return ERR_PTR(-ENOMEM);
4422
4423 hist_data->attrs = attrs;
4424 hist_data->remove = remove;
4425 hist_data->event_file = file;
4426
4427 ret = parse_actions(hist_data);
4428 if (ret)
4429 goto free;
4430
4431 ret = create_hist_fields(hist_data, file);
4432 if (ret)
4433 goto free;
4434
4435 ret = create_sort_keys(hist_data);
4436 if (ret)
4437 goto free;
4438
4439 map_ops = &hist_trigger_elt_data_ops;
4440
4441 hist_data->map = tracing_map_create(map_bits, hist_data->key_size,
4442 map_ops, hist_data);
4443 if (IS_ERR(hist_data->map)) {
4444 ret = PTR_ERR(hist_data->map);
4445 hist_data->map = NULL;
4446 goto free;
4447 }
4448
4449 ret = create_tracing_map_fields(hist_data);
4450 if (ret)
4451 goto free;
4452 out:
4453 return hist_data;
4454 free:
4455 hist_data->attrs = NULL;
4456
4457 destroy_hist_data(hist_data);
4458
4459 hist_data = ERR_PTR(ret);
4460
4461 goto out;
4462 }
4463
4464 static void hist_trigger_elt_update(struct hist_trigger_data *hist_data,
4465 struct tracing_map_elt *elt, void *rec,
4466 struct ring_buffer_event *rbe,
4467 u64 *var_ref_vals)
4468 {
4469 struct hist_elt_data *elt_data;
4470 struct hist_field *hist_field;
4471 unsigned int i, var_idx;
4472 u64 hist_val;
4473
4474 elt_data = elt->private_data;
4475 elt_data->var_ref_vals = var_ref_vals;
4476
4477 for_each_hist_val_field(i, hist_data) {
4478 hist_field = hist_data->fields[i];
4479 hist_val = hist_field->fn(hist_field, elt, rbe, rec);
4480 if (hist_field->flags & HIST_FIELD_FL_VAR) {
4481 var_idx = hist_field->var.idx;
4482
4483 if (hist_field->flags & HIST_FIELD_FL_STRING) {
4484 unsigned int str_start, var_str_idx, idx;
4485 char *str, *val_str;
4486 unsigned int size;
4487
4488 str_start = hist_data->n_field_var_str +
4489 hist_data->n_save_var_str;
4490 var_str_idx = hist_field->var_str_idx;
4491 idx = str_start + var_str_idx;
4492
4493 str = elt_data->field_var_str[idx];
4494 val_str = (char *)(uintptr_t)hist_val;
4495
4496 size = min(hist_field->size, STR_VAR_LEN_MAX);
4497 strscpy(str, val_str, size);
4498
4499 hist_val = (u64)(uintptr_t)str;
4500 }
4501 tracing_map_set_var(elt, var_idx, hist_val);
4502 continue;
4503 }
4504 tracing_map_update_sum(elt, i, hist_val);
4505 }
4506
4507 for_each_hist_key_field(i, hist_data) {
4508 hist_field = hist_data->fields[i];
4509 if (hist_field->flags & HIST_FIELD_FL_VAR) {
4510 hist_val = hist_field->fn(hist_field, elt, rbe, rec);
4511 var_idx = hist_field->var.idx;
4512 tracing_map_set_var(elt, var_idx, hist_val);
4513 }
4514 }
4515
4516 update_field_vars(hist_data, elt, rbe, rec);
4517 }
4518
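/*
 * Copy one key's bytes into the compound key buffer at the key's offset.
 * String keys are capped at the field size minus one so the (pre-zeroed)
 * compound key stays NUL-terminated; everything else is a straight
 * memcpy of the aligned value.
 */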
4519 static inline void add_to_key(char *compound_key, void *key,
4520 struct hist_field *key_field, void *rec)
4521 {
4522 size_t size = key_field->size;
4523
4524 if (key_field->flags & HIST_FIELD_FL_STRING) {
4525 struct ftrace_event_field *field;
4526
4527 field = key_field->field;
4528 if (field->filter_type == FILTER_DYN_STRING)
4529 size = *(u32 *)(rec + field->offset) >> 16;
4530 else if (field->filter_type == FILTER_STATIC_STRING)
4531 size = field->size;
4532
4533 /* ensure NULL-termination */
4534 if (size > key_field->size - 1)
4535 size = key_field->size - 1;
4536
4537 strncpy(compound_key + key_field->offset, (char *)key, size);
4538 } else
4539 memcpy(compound_key + key_field->offset, key, size);
4540 }
4541
4542 static void
4543 hist_trigger_actions(struct hist_trigger_data *hist_data,
4544 struct tracing_map_elt *elt, void *rec,
4545 struct ring_buffer_event *rbe, void *key,
4546 u64 *var_ref_vals)
4547 {
4548 struct action_data *data;
4549 unsigned int i;
4550
4551 for (i = 0; i < hist_data->n_actions; i++) {
4552 data = hist_data->actions[i];
4553 data->fn(hist_data, elt, rec, rbe, key, data, var_ref_vals);
4554 }
4555 }
4556
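/*
 * Per-event hot path: build the (possibly compound) key from the key
 * fields, resolve any variable references, insert or look up the
 * tracing_map element, update its sums and variables, and finally run
 * any actions whose references resolved.
 */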
4557 static void event_hist_trigger(struct event_trigger_data *data, void *rec,
4558 struct ring_buffer_event *rbe)
4559 {
4560 struct hist_trigger_data *hist_data = data->private_data;
4561 bool use_compound_key = (hist_data->n_keys > 1);
4562 unsigned long entries[HIST_STACKTRACE_DEPTH];
4563 u64 var_ref_vals[TRACING_MAP_VARS_MAX];
4564 char compound_key[HIST_KEY_SIZE_MAX];
4565 struct tracing_map_elt *elt = NULL;
4566 struct hist_field *key_field;
4567 u64 field_contents;
4568 void *key = NULL;
4569 unsigned int i;
4570
4571 memset(compound_key, 0, hist_data->key_size);
4572
4573 for_each_hist_key_field(i, hist_data) {
4574 key_field = hist_data->fields[i];
4575
4576 if (key_field->flags & HIST_FIELD_FL_STACKTRACE) {
4577 memset(entries, 0, HIST_STACKTRACE_SIZE);
4578 stack_trace_save(entries, HIST_STACKTRACE_DEPTH,
4579 HIST_STACKTRACE_SKIP);
4580 key = entries;
4581 } else {
4582 field_contents = key_field->fn(key_field, elt, rbe, rec);
4583 if (key_field->flags & HIST_FIELD_FL_STRING) {
4584 key = (void *)(unsigned long)field_contents;
4585 use_compound_key = true;
4586 } else
4587 key = (void *)&field_contents;
4588 }
4589
4590 if (use_compound_key)
4591 add_to_key(compound_key, key, key_field, rec);
4592 }
4593
4594 if (use_compound_key)
4595 key = compound_key;
4596
4597 if (hist_data->n_var_refs &&
4598 !resolve_var_refs(hist_data, key, var_ref_vals, false))
4599 return;
4600
4601 elt = tracing_map_insert(hist_data->map, key);
4602 if (!elt)
4603 return;
4604
4605 hist_trigger_elt_update(hist_data, elt, rec, rbe, var_ref_vals);
4606
4607 if (resolve_var_refs(hist_data, key, var_ref_vals, true))
4608 hist_trigger_actions(hist_data, elt, rec, rbe, key, var_ref_vals);
4609 }
4610
4611 static void hist_trigger_stacktrace_print(struct seq_file *m,
4612 unsigned long *stacktrace_entries,
4613 unsigned int max_entries)
4614 {
4615 char str[KSYM_SYMBOL_LEN];
4616 unsigned int spaces = 8;
4617 unsigned int i;
4618
4619 for (i = 0; i < max_entries; i++) {
4620 if (!stacktrace_entries[i])
4621 return;
4622
4623 seq_printf(m, "%*c", 1 + spaces, ' ');
4624 sprint_symbol(str, stacktrace_entries[i]);
4625 seq_printf(m, "%s\n", str);
4626 }
4627 }
4628
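/*
 * Print one key according to its display modifier (.hex, .sym,
 * .sym-offset, .execname, .syscall, .log2, stacktrace or plain decimal).
 * For instance, a trigger such as the illustrative
 *
 *   hist:keys=call_site.sym:vals=bytes_req
 *
 * resolves each call_site to a symbol name when the 'hist' file is read.
 */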
4629 static void hist_trigger_print_key(struct seq_file *m,
4630 struct hist_trigger_data *hist_data,
4631 void *key,
4632 struct tracing_map_elt *elt)
4633 {
4634 struct hist_field *key_field;
4635 char str[KSYM_SYMBOL_LEN];
4636 bool multiline = false;
4637 const char *field_name;
4638 unsigned int i;
4639 u64 uval;
4640
4641 seq_puts(m, "{ ");
4642
4643 for_each_hist_key_field(i, hist_data) {
4644 key_field = hist_data->fields[i];
4645
4646 if (i > hist_data->n_vals)
4647 seq_puts(m, ", ");
4648
4649 field_name = hist_field_name(key_field, 0);
4650
4651 if (key_field->flags & HIST_FIELD_FL_HEX) {
4652 uval = *(u64 *)(key + key_field->offset);
4653 seq_printf(m, "%s: %llx", field_name, uval);
4654 } else if (key_field->flags & HIST_FIELD_FL_SYM) {
4655 uval = *(u64 *)(key + key_field->offset);
4656 sprint_symbol_no_offset(str, uval);
4657 seq_printf(m, "%s: [%llx] %-45s", field_name,
4658 uval, str);
4659 } else if (key_field->flags & HIST_FIELD_FL_SYM_OFFSET) {
4660 uval = *(u64 *)(key + key_field->offset);
4661 sprint_symbol(str, uval);
4662 seq_printf(m, "%s: [%llx] %-55s", field_name,
4663 uval, str);
4664 } else if (key_field->flags & HIST_FIELD_FL_EXECNAME) {
4665 struct hist_elt_data *elt_data = elt->private_data;
4666 char *comm;
4667
4668 if (WARN_ON_ONCE(!elt_data))
4669 return;
4670
4671 comm = elt_data->comm;
4672
4673 uval = *(u64 *)(key + key_field->offset);
4674 seq_printf(m, "%s: %-16s[%10llu]", field_name,
4675 comm, uval);
4676 } else if (key_field->flags & HIST_FIELD_FL_SYSCALL) {
4677 const char *syscall_name;
4678
4679 uval = *(u64 *)(key + key_field->offset);
4680 syscall_name = get_syscall_name(uval);
4681 if (!syscall_name)
4682 syscall_name = "unknown_syscall";
4683
4684 seq_printf(m, "%s: %-30s[%3llu]", field_name,
4685 syscall_name, uval);
4686 } else if (key_field->flags & HIST_FIELD_FL_STACKTRACE) {
4687 seq_puts(m, "stacktrace:\n");
4688 hist_trigger_stacktrace_print(m,
4689 key + key_field->offset,
4690 HIST_STACKTRACE_DEPTH);
4691 multiline = true;
4692 } else if (key_field->flags & HIST_FIELD_FL_LOG2) {
4693 seq_printf(m, "%s: ~ 2^%-2llu", field_name,
4694 *(u64 *)(key + key_field->offset));
4695 } else if (key_field->flags & HIST_FIELD_FL_STRING) {
4696 seq_printf(m, "%s: %-50s", field_name,
4697 (char *)(key + key_field->offset));
4698 } else {
4699 uval = *(u64 *)(key + key_field->offset);
4700 seq_printf(m, "%s: %10llu", field_name, uval);
4701 }
4702 }
4703
4704 if (!multiline)
4705 seq_puts(m, " ");
4706
4707 seq_puts(m, "}");
4708 }
4709
4710 static void hist_trigger_entry_print(struct seq_file *m,
4711 struct hist_trigger_data *hist_data,
4712 void *key,
4713 struct tracing_map_elt *elt)
4714 {
4715 const char *field_name;
4716 unsigned int i;
4717
4718 hist_trigger_print_key(m, hist_data, key, elt);
4719
4720 seq_printf(m, " hitcount: %10llu",
4721 tracing_map_read_sum(elt, HITCOUNT_IDX));
4722
4723 for (i = 1; i < hist_data->n_vals; i++) {
4724 field_name = hist_field_name(hist_data->fields[i], 0);
4725
4726 if (hist_data->fields[i]->flags & HIST_FIELD_FL_VAR ||
4727 hist_data->fields[i]->flags & HIST_FIELD_FL_EXPR)
4728 continue;
4729
4730 if (hist_data->fields[i]->flags & HIST_FIELD_FL_HEX) {
4731 seq_printf(m, " %s: %10llx", field_name,
4732 tracing_map_read_sum(elt, i));
4733 } else {
4734 seq_printf(m, " %s: %10llu", field_name,
4735 tracing_map_read_sum(elt, i));
4736 }
4737 }
4738
4739 print_actions(m, hist_data, elt);
4740
4741 seq_puts(m, "\n");
4742 }
4743
4744 static int print_entries(struct seq_file *m,
4745 struct hist_trigger_data *hist_data)
4746 {
4747 struct tracing_map_sort_entry **sort_entries = NULL;
4748 struct tracing_map *map = hist_data->map;
4749 int i, n_entries;
4750
4751 n_entries = tracing_map_sort_entries(map, hist_data->sort_keys,
4752 hist_data->n_sort_keys,
4753 &sort_entries);
4754 if (n_entries < 0)
4755 return n_entries;
4756
4757 for (i = 0; i < n_entries; i++)
4758 hist_trigger_entry_print(m, hist_data,
4759 sort_entries[i]->key,
4760 sort_entries[i]->elt);
4761
4762 tracing_map_destroy_sort_entries(sort_entries, n_entries);
4763
4764 return n_entries;
4765 }
4766
4767 static void hist_trigger_show(struct seq_file *m,
4768 struct event_trigger_data *data, int n)
4769 {
4770 struct hist_trigger_data *hist_data;
4771 int n_entries;
4772
4773 if (n > 0)
4774 seq_puts(m, "\n\n");
4775
4776 seq_puts(m, "# event histogram\n#\n# trigger info: ");
4777 data->ops->print(m, data->ops, data);
4778 seq_puts(m, "#\n\n");
4779
4780 hist_data = data->private_data;
4781 n_entries = print_entries(m, hist_data);
4782 if (n_entries < 0)
4783 n_entries = 0;
4784
4785 track_data_snapshot_print(m, hist_data);
4786
4787 seq_printf(m, "\nTotals:\n Hits: %llu\n Entries: %u\n Dropped: %llu\n",
4788 (u64)atomic64_read(&hist_data->map->hits),
4789 n_entries, (u64)atomic64_read(&hist_data->map->drops));
4790 }
4791
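/*
 * seq_file handler for the per-event 'hist' file.  Reading it dumps every
 * hist trigger attached to the event, e.g. (path is illustrative and
 * depends on where tracefs is mounted):
 *
 *   cat /sys/kernel/tracing/events/kmem/kmalloc/hist
 */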
4792 static int hist_show(struct seq_file *m, void *v)
4793 {
4794 struct event_trigger_data *data;
4795 struct trace_event_file *event_file;
4796 int n = 0, ret = 0;
4797
4798 mutex_lock(&event_mutex);
4799
4800 event_file = event_file_data(m->private);
4801 if (unlikely(!event_file)) {
4802 ret = -ENODEV;
4803 goto out_unlock;
4804 }
4805
4806 list_for_each_entry(data, &event_file->triggers, list) {
4807 if (data->cmd_ops->trigger_type == ETT_EVENT_HIST)
4808 hist_trigger_show(m, data, n++);
4809 }
4810
4811 out_unlock:
4812 mutex_unlock(&event_mutex);
4813
4814 return ret;
4815 }
4816
4817 static int event_hist_open(struct inode *inode, struct file *file)
4818 {
4819 int ret;
4820
4821 ret = security_locked_down(LOCKDOWN_TRACEFS);
4822 if (ret)
4823 return ret;
4824
4825 return single_open(file, hist_show, file);
4826 }
4827
4828 const struct file_operations event_hist_fops = {
4829 .open = event_hist_open,
4830 .read = seq_read,
4831 .llseek = seq_lseek,
4832 .release = single_release,
4833 };
4834
4835 #ifdef CONFIG_HIST_TRIGGERS_DEBUG
4836 static void hist_field_debug_show_flags(struct seq_file *m,
4837 unsigned long flags)
4838 {
4839 seq_puts(m, " flags:\n");
4840
4841 if (flags & HIST_FIELD_FL_KEY)
4842 seq_puts(m, " HIST_FIELD_FL_KEY\n");
4843 else if (flags & HIST_FIELD_FL_HITCOUNT)
4844 seq_puts(m, " VAL: HIST_FIELD_FL_HITCOUNT\n");
4845 else if (flags & HIST_FIELD_FL_VAR)
4846 seq_puts(m, " HIST_FIELD_FL_VAR\n");
4847 else if (flags & HIST_FIELD_FL_VAR_REF)
4848 seq_puts(m, " HIST_FIELD_FL_VAR_REF\n");
4849 else
4850 seq_puts(m, " VAL: normal u64 value\n");
4851
4852 if (flags & HIST_FIELD_FL_ALIAS)
4853 seq_puts(m, " HIST_FIELD_FL_ALIAS\n");
4854 }
4855
4856 static int hist_field_debug_show(struct seq_file *m,
4857 struct hist_field *field, unsigned long flags)
4858 {
4859 if ((field->flags & flags) != flags) {
4860 seq_printf(m, "ERROR: bad flags - %lx\n", flags);
4861 return -EINVAL;
4862 }
4863
4864 hist_field_debug_show_flags(m, field->flags);
4865 if (field->field)
4866 seq_printf(m, " ftrace_event_field name: %s\n",
4867 field->field->name);
4868
4869 if (field->flags & HIST_FIELD_FL_VAR) {
4870 seq_printf(m, " var.name: %s\n", field->var.name);
4871 seq_printf(m, " var.idx (into tracing_map_elt.vars[]): %u\n",
4872 field->var.idx);
4873 }
4874
4875 if (field->flags & HIST_FIELD_FL_ALIAS)
4876 seq_printf(m, " var_ref_idx (into hist_data->var_refs[]): %u\n",
4877 field->var_ref_idx);
4878
4879 if (field->flags & HIST_FIELD_FL_VAR_REF) {
4880 seq_printf(m, " name: %s\n", field->name);
4881 seq_printf(m, " var.idx (into tracing_map_elt.vars[]): %u\n",
4882 field->var.idx);
4883 seq_printf(m, " var.hist_data: %p\n", field->var.hist_data);
4884 seq_printf(m, " var_ref_idx (into hist_data->var_refs[]): %u\n",
4885 field->var_ref_idx);
4886 if (field->system)
4887 seq_printf(m, " system: %s\n", field->system);
4888 if (field->event_name)
4889 seq_printf(m, " event_name: %s\n", field->event_name);
4890 }
4891
4892 seq_printf(m, " type: %s\n", field->type);
4893 seq_printf(m, " size: %u\n", field->size);
4894 seq_printf(m, " is_signed: %u\n", field->is_signed);
4895
4896 return 0;
4897 }
4898
4899 static int field_var_debug_show(struct seq_file *m,
4900 struct field_var *field_var, unsigned int i,
4901 bool save_vars)
4902 {
4903 const char *vars_name = save_vars ? "save_vars" : "field_vars";
4904 struct hist_field *field;
4905 int ret = 0;
4906
4907 seq_printf(m, "\n hist_data->%s[%d]:\n", vars_name, i);
4908
4909 field = field_var->var;
4910
4911 seq_printf(m, "\n %s[%d].var:\n", vars_name, i);
4912
4913 hist_field_debug_show_flags(m, field->flags);
4914 seq_printf(m, " var.name: %s\n", field->var.name);
4915 seq_printf(m, " var.idx (into tracing_map_elt.vars[]): %u\n",
4916 field->var.idx);
4917
4918 field = field_var->val;
4919
4920 seq_printf(m, "\n %s[%d].val:\n", vars_name, i);
4921 if (field->field)
4922 seq_printf(m, " ftrace_event_field name: %s\n",
4923 field->field->name);
4924 else {
4925 ret = -EINVAL;
4926 goto out;
4927 }
4928
4929 seq_printf(m, " type: %s\n", field->type);
4930 seq_printf(m, " size: %u\n", field->size);
4931 seq_printf(m, " is_signed: %u\n", field->is_signed);
4932 out:
4933 return ret;
4934 }
4935
4936 static int hist_action_debug_show(struct seq_file *m,
4937 struct action_data *data, int i)
4938 {
4939 int ret = 0;
4940
4941 if (data->handler == HANDLER_ONMAX ||
4942 data->handler == HANDLER_ONCHANGE) {
4943 seq_printf(m, "\n hist_data->actions[%d].track_data.var_ref:\n", i);
4944 ret = hist_field_debug_show(m, data->track_data.var_ref,
4945 HIST_FIELD_FL_VAR_REF);
4946 if (ret)
4947 goto out;
4948
4949 seq_printf(m, "\n hist_data->actions[%d].track_data.track_var:\n", i);
4950 ret = hist_field_debug_show(m, data->track_data.track_var,
4951 HIST_FIELD_FL_VAR);
4952 if (ret)
4953 goto out;
4954 }
4955
4956 if (data->handler == HANDLER_ONMATCH) {
4957 seq_printf(m, "\n hist_data->actions[%d].match_data.event_system: %s\n",
4958 i, data->match_data.event_system);
4959 seq_printf(m, " hist_data->actions[%d].match_data.event: %s\n",
4960 i, data->match_data.event);
4961 }
4962 out:
4963 return ret;
4964 }
4965
4966 static int hist_actions_debug_show(struct seq_file *m,
4967 struct hist_trigger_data *hist_data)
4968 {
4969 int i, ret = 0;
4970
4971 if (hist_data->n_actions)
4972 seq_puts(m, "\n action tracking variables (for onmax()/onchange()/onmatch()):\n");
4973
4974 for (i = 0; i < hist_data->n_actions; i++) {
4975 struct action_data *action = hist_data->actions[i];
4976
4977 ret = hist_action_debug_show(m, action, i);
4978 if (ret)
4979 goto out;
4980 }
4981
4982 if (hist_data->n_save_vars)
4983 seq_puts(m, "\n save action variables (save() params):\n");
4984
4985 for (i = 0; i < hist_data->n_save_vars; i++) {
4986 ret = field_var_debug_show(m, hist_data->save_vars[i], i, true);
4987 if (ret)
4988 goto out;
4989 }
4990 out:
4991 return ret;
4992 }
4993
4994 static void hist_trigger_debug_show(struct seq_file *m,
4995 struct event_trigger_data *data, int n)
4996 {
4997 struct hist_trigger_data *hist_data;
4998 int i, ret;
4999
5000 if (n > 0)
5001 seq_puts(m, "\n\n");
5002
5003 seq_puts(m, "# event histogram\n#\n# trigger info: ");
5004 data->ops->print(m, data->ops, data);
5005 seq_puts(m, "#\n\n");
5006
5007 hist_data = data->private_data;
5008
5009 seq_printf(m, "hist_data: %p\n\n", hist_data);
5010 seq_printf(m, " n_vals: %u\n", hist_data->n_vals);
5011 seq_printf(m, " n_keys: %u\n", hist_data->n_keys);
5012 seq_printf(m, " n_fields: %u\n", hist_data->n_fields);
5013
5014 seq_puts(m, "\n val fields:\n\n");
5015
5016 seq_puts(m, " hist_data->fields[0]:\n");
5017 ret = hist_field_debug_show(m, hist_data->fields[0],
5018 HIST_FIELD_FL_HITCOUNT);
5019 if (ret)
5020 return;
5021
5022 for (i = 1; i < hist_data->n_vals; i++) {
5023 seq_printf(m, "\n hist_data->fields[%d]:\n", i);
5024 ret = hist_field_debug_show(m, hist_data->fields[i], 0);
5025 if (ret)
5026 return;
5027 }
5028
5029 seq_puts(m, "\n key fields:\n");
5030
5031 for (i = hist_data->n_vals; i < hist_data->n_fields; i++) {
5032 seq_printf(m, "\n hist_data->fields[%d]:\n", i);
5033 ret = hist_field_debug_show(m, hist_data->fields[i],
5034 HIST_FIELD_FL_KEY);
5035 if (ret)
5036 return;
5037 }
5038
5039 if (hist_data->n_var_refs)
5040 seq_puts(m, "\n variable reference fields:\n");
5041
5042 for (i = 0; i < hist_data->n_var_refs; i++) {
5043 seq_printf(m, "\n hist_data->var_refs[%d]:\n", i);
5044 ret = hist_field_debug_show(m, hist_data->var_refs[i],
5045 HIST_FIELD_FL_VAR_REF);
5046 if (ret)
5047 return;
5048 }
5049
5050 if (hist_data->n_field_vars)
5051 seq_puts(m, "\n field variables:\n");
5052
5053 for (i = 0; i < hist_data->n_field_vars; i++) {
5054 ret = field_var_debug_show(m, hist_data->field_vars[i], i, false);
5055 if (ret)
5056 return;
5057 }
5058
5059 ret = hist_actions_debug_show(m, hist_data);
5060 if (ret)
5061 return;
5062 }
5063
5064 static int hist_debug_show(struct seq_file *m, void *v)
5065 {
5066 struct event_trigger_data *data;
5067 struct trace_event_file *event_file;
5068 int n = 0, ret = 0;
5069
5070 mutex_lock(&event_mutex);
5071
5072 event_file = event_file_data(m->private);
5073 if (unlikely(!event_file)) {
5074 ret = -ENODEV;
5075 goto out_unlock;
5076 }
5077
5078 list_for_each_entry(data, &event_file->triggers, list) {
5079 if (data->cmd_ops->trigger_type == ETT_EVENT_HIST)
5080 hist_trigger_debug_show(m, data, n++);
5081 }
5082
5083 out_unlock:
5084 mutex_unlock(&event_mutex);
5085
5086 return ret;
5087 }
5088
5089 static int event_hist_debug_open(struct inode *inode, struct file *file)
5090 {
5091 int ret;
5092
5093 ret = security_locked_down(LOCKDOWN_TRACEFS);
5094 if (ret)
5095 return ret;
5096
5097 return single_open(file, hist_debug_show, file);
5098 }
5099
5100 const struct file_operations event_hist_debug_fops = {
5101 .open = event_hist_debug_open,
5102 .read = seq_read,
5103 .llseek = seq_lseek,
5104 .release = single_release,
5105 };
5106 #endif
5107
5108 static void hist_field_print(struct seq_file *m, struct hist_field *hist_field)
5109 {
5110 const char *field_name = hist_field_name(hist_field, 0);
5111
5112 if (hist_field->var.name)
5113 seq_printf(m, "%s=", hist_field->var.name);
5114
5115 if (hist_field->flags & HIST_FIELD_FL_CPU)
5116 seq_puts(m, "common_cpu");
5117 else if (field_name) {
5118 if (hist_field->flags & HIST_FIELD_FL_VAR_REF ||
5119 hist_field->flags & HIST_FIELD_FL_ALIAS)
5120 seq_putc(m, '$');
5121 seq_printf(m, "%s", field_name);
5122 } else if (hist_field->flags & HIST_FIELD_FL_TIMESTAMP)
5123 seq_puts(m, "common_timestamp");
5124
5125 if (hist_field->flags) {
5126 if (!(hist_field->flags & HIST_FIELD_FL_VAR_REF) &&
5127 !(hist_field->flags & HIST_FIELD_FL_EXPR)) {
5128 const char *flags = get_hist_field_flags(hist_field);
5129
5130 if (flags)
5131 seq_printf(m, ".%s", flags);
5132 }
5133 }
5134 }
5135
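/*
 * Reconstruct the trigger specification (keys=, vals=, sort=, size=, any
 * actions and filter) in the same form the user wrote it; this is what
 * appears after "# trigger info:" when the 'hist' file is read.
 */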
5136 static int event_hist_trigger_print(struct seq_file *m,
5137 struct event_trigger_ops *ops,
5138 struct event_trigger_data *data)
5139 {
5140 struct hist_trigger_data *hist_data = data->private_data;
5141 struct hist_field *field;
5142 bool have_var = false;
5143 unsigned int i;
5144
5145 seq_puts(m, "hist:");
5146
5147 if (data->name)
5148 seq_printf(m, "%s:", data->name);
5149
5150 seq_puts(m, "keys=");
5151
5152 for_each_hist_key_field(i, hist_data) {
5153 field = hist_data->fields[i];
5154
5155 if (i > hist_data->n_vals)
5156 seq_puts(m, ",");
5157
5158 if (field->flags & HIST_FIELD_FL_STACKTRACE)
5159 seq_puts(m, "stacktrace");
5160 else
5161 hist_field_print(m, field);
5162 }
5163
5164 seq_puts(m, ":vals=");
5165
5166 for_each_hist_val_field(i, hist_data) {
5167 field = hist_data->fields[i];
5168 if (field->flags & HIST_FIELD_FL_VAR) {
5169 have_var = true;
5170 continue;
5171 }
5172
5173 if (i == HITCOUNT_IDX)
5174 seq_puts(m, "hitcount");
5175 else {
5176 seq_puts(m, ",");
5177 hist_field_print(m, field);
5178 }
5179 }
5180
5181 if (have_var) {
5182 unsigned int n = 0;
5183
5184 seq_puts(m, ":");
5185
5186 for_each_hist_val_field(i, hist_data) {
5187 field = hist_data->fields[i];
5188
5189 if (field->flags & HIST_FIELD_FL_VAR) {
5190 if (n++)
5191 seq_puts(m, ",");
5192 hist_field_print(m, field);
5193 }
5194 }
5195 }
5196
5197 seq_puts(m, ":sort=");
5198
5199 for (i = 0; i < hist_data->n_sort_keys; i++) {
5200 struct tracing_map_sort_key *sort_key;
5201 unsigned int idx, first_key_idx;
5202
5203 /* skip VAR vals */
5204 first_key_idx = hist_data->n_vals - hist_data->n_vars;
5205
5206 sort_key = &hist_data->sort_keys[i];
5207 idx = sort_key->field_idx;
5208
5209 if (WARN_ON(idx >= HIST_FIELDS_MAX))
5210 return -EINVAL;
5211
5212 if (i > 0)
5213 seq_puts(m, ",");
5214
5215 if (idx == HITCOUNT_IDX)
5216 seq_puts(m, "hitcount");
5217 else {
5218 if (idx >= first_key_idx)
5219 idx += hist_data->n_vars;
5220 hist_field_print(m, hist_data->fields[idx]);
5221 }
5222
5223 if (sort_key->descending)
5224 seq_puts(m, ".descending");
5225 }
5226 seq_printf(m, ":size=%u", (1 << hist_data->map->map_bits));
5227 if (hist_data->enable_timestamps)
5228 seq_printf(m, ":clock=%s", hist_data->attrs->clock);
5229
5230 print_actions_spec(m, hist_data);
5231
5232 if (data->filter_str)
5233 seq_printf(m, " if %s", data->filter_str);
5234
5235 if (data->paused)
5236 seq_puts(m, " [paused]");
5237 else
5238 seq_puts(m, " [active]");
5239
5240 seq_putc(m, '\n');
5241
5242 return 0;
5243 }
5244
5245 static int event_hist_trigger_init(struct event_trigger_ops *ops,
5246 struct event_trigger_data *data)
5247 {
5248 struct hist_trigger_data *hist_data = data->private_data;
5249
5250 if (!data->ref && hist_data->attrs->name)
5251 save_named_trigger(hist_data->attrs->name, data);
5252
5253 data->ref++;
5254
5255 return 0;
5256 }
5257
5258 static void unregister_field_var_hists(struct hist_trigger_data *hist_data)
5259 {
5260 struct trace_event_file *file;
5261 unsigned int i;
5262 char *cmd;
5263 int ret;
5264
5265 for (i = 0; i < hist_data->n_field_var_hists; i++) {
5266 file = hist_data->field_var_hists[i]->hist_data->event_file;
5267 cmd = hist_data->field_var_hists[i]->cmd;
5268 ret = event_hist_trigger_func(&trigger_hist_cmd, file,
5269 "!hist", "hist", cmd);
5270 }
5271 }
5272
5273 static void event_hist_trigger_free(struct event_trigger_ops *ops,
5274 struct event_trigger_data *data)
5275 {
5276 struct hist_trigger_data *hist_data = data->private_data;
5277
5278 if (WARN_ON_ONCE(data->ref <= 0))
5279 return;
5280
5281 data->ref--;
5282 if (!data->ref) {
5283 if (data->name)
5284 del_named_trigger(data);
5285
5286 trigger_data_free(data);
5287
5288 remove_hist_vars(hist_data);
5289
5290 unregister_field_var_hists(hist_data);
5291
5292 destroy_hist_data(hist_data);
5293 }
5294 }
5295
5296 static struct event_trigger_ops event_hist_trigger_ops = {
5297 .func = event_hist_trigger,
5298 .print = event_hist_trigger_print,
5299 .init = event_hist_trigger_init,
5300 .free = event_hist_trigger_free,
5301 };
5302
5303 static int event_hist_trigger_named_init(struct event_trigger_ops *ops,
5304 struct event_trigger_data *data)
5305 {
5306 data->ref++;
5307
5308 save_named_trigger(data->named_data->name, data);
5309
5310 event_hist_trigger_init(ops, data->named_data);
5311
5312 return 0;
5313 }
5314
5315 static void event_hist_trigger_named_free(struct event_trigger_ops *ops,
5316 struct event_trigger_data *data)
5317 {
5318 if (WARN_ON_ONCE(data->ref <= 0))
5319 return;
5320
5321 event_hist_trigger_free(ops, data->named_data);
5322
5323 data->ref--;
5324 if (!data->ref) {
5325 del_named_trigger(data);
5326 trigger_data_free(data);
5327 }
5328 }
5329
5330 static struct event_trigger_ops event_hist_trigger_named_ops = {
5331 .func = event_hist_trigger,
5332 .print = event_hist_trigger_print,
5333 .init = event_hist_trigger_named_init,
5334 .free = event_hist_trigger_named_free,
5335 };
5336
5337 static struct event_trigger_ops *event_hist_get_trigger_ops(char *cmd,
5338 char *param)
5339 {
5340 return &event_hist_trigger_ops;
5341 }
5342
5343 static void hist_clear(struct event_trigger_data *data)
5344 {
5345 struct hist_trigger_data *hist_data = data->private_data;
5346
5347 if (data->name)
5348 pause_named_trigger(data);
5349
5350 tracepoint_synchronize_unregister();
5351
5352 tracing_map_clear(hist_data->map);
5353
5354 if (data->name)
5355 unpause_named_trigger(data);
5356 }
5357
5358 static bool compatible_field(struct ftrace_event_field *field,
5359 struct ftrace_event_field *test_field)
5360 {
5361 if (field == test_field)
5362 return true;
5363 if (field == NULL || test_field == NULL)
5364 return false;
5365 if (strcmp(field->name, test_field->name) != 0)
5366 return false;
5367 if (strcmp(field->type, test_field->type) != 0)
5368 return false;
5369 if (field->size != test_field->size)
5370 return false;
5371 if (field->is_signed != test_field->is_signed)
5372 return false;
5373
5374 return true;
5375 }
5376
5377 static bool hist_trigger_match(struct event_trigger_data *data,
5378 struct event_trigger_data *data_test,
5379 struct event_trigger_data *named_data,
5380 bool ignore_filter)
5381 {
5382 struct tracing_map_sort_key *sort_key, *sort_key_test;
5383 struct hist_trigger_data *hist_data, *hist_data_test;
5384 struct hist_field *key_field, *key_field_test;
5385 unsigned int i;
5386
5387 if (named_data && (named_data != data_test) &&
5388 (named_data != data_test->named_data))
5389 return false;
5390
5391 if (!named_data && is_named_trigger(data_test))
5392 return false;
5393
5394 hist_data = data->private_data;
5395 hist_data_test = data_test->private_data;
5396
5397 if (hist_data->n_vals != hist_data_test->n_vals ||
5398 hist_data->n_fields != hist_data_test->n_fields ||
5399 hist_data->n_sort_keys != hist_data_test->n_sort_keys)
5400 return false;
5401
5402 if (!ignore_filter) {
5403 if ((data->filter_str && !data_test->filter_str) ||
5404 (!data->filter_str && data_test->filter_str))
5405 return false;
5406 }
5407
5408 for_each_hist_field(i, hist_data) {
5409 key_field = hist_data->fields[i];
5410 key_field_test = hist_data_test->fields[i];
5411
5412 if (key_field->flags != key_field_test->flags)
5413 return false;
5414 if (!compatible_field(key_field->field, key_field_test->field))
5415 return false;
5416 if (key_field->offset != key_field_test->offset)
5417 return false;
5418 if (key_field->size != key_field_test->size)
5419 return false;
5420 if (key_field->is_signed != key_field_test->is_signed)
5421 return false;
5422 if (!!key_field->var.name != !!key_field_test->var.name)
5423 return false;
5424 if (key_field->var.name &&
5425 strcmp(key_field->var.name, key_field_test->var.name) != 0)
5426 return false;
5427 }
5428
5429 for (i = 0; i < hist_data->n_sort_keys; i++) {
5430 sort_key = &hist_data->sort_keys[i];
5431 sort_key_test = &hist_data_test->sort_keys[i];
5432
5433 if (sort_key->field_idx != sort_key_test->field_idx ||
5434 sort_key->descending != sort_key_test->descending)
5435 return false;
5436 }
5437
5438 if (!ignore_filter && data->filter_str &&
5439 (strcmp(data->filter_str, data_test->filter_str) != 0))
5440 return false;
5441
5442 if (!actions_match(hist_data, hist_data_test))
5443 return false;
5444
5445 return true;
5446 }
5447
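/*
 * Register a new hist trigger on an event.  If the trigger is named
 * (e.g. the illustrative hist:name=foo:...), it must match any existing
 * trigger of the same name and will share that trigger's data.  The
 * pause, cont and clear attributes are applied to a matching existing
 * trigger rather than creating a new one.
 */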
5448 static int hist_register_trigger(char *glob, struct event_trigger_ops *ops,
5449 struct event_trigger_data *data,
5450 struct trace_event_file *file)
5451 {
5452 struct hist_trigger_data *hist_data = data->private_data;
5453 struct event_trigger_data *test, *named_data = NULL;
5454 struct trace_array *tr = file->tr;
5455 int ret = 0;
5456
5457 if (hist_data->attrs->name) {
5458 named_data = find_named_trigger(hist_data->attrs->name);
5459 if (named_data) {
5460 if (!hist_trigger_match(data, named_data, named_data,
5461 true)) {
5462 hist_err(tr, HIST_ERR_NAMED_MISMATCH, errpos(hist_data->attrs->name));
5463 ret = -EINVAL;
5464 goto out;
5465 }
5466 }
5467 }
5468
5469 if (hist_data->attrs->name && !named_data)
5470 goto new;
5471
5472 lockdep_assert_held(&event_mutex);
5473
5474 list_for_each_entry(test, &file->triggers, list) {
5475 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
5476 if (!hist_trigger_match(data, test, named_data, false))
5477 continue;
5478 if (hist_data->attrs->pause)
5479 test->paused = true;
5480 else if (hist_data->attrs->cont)
5481 test->paused = false;
5482 else if (hist_data->attrs->clear)
5483 hist_clear(test);
5484 else {
5485 hist_err(tr, HIST_ERR_TRIGGER_EEXIST, 0);
5486 ret = -EEXIST;
5487 }
5488 goto out;
5489 }
5490 }
5491 new:
5492 if (hist_data->attrs->cont || hist_data->attrs->clear) {
5493 hist_err(tr, HIST_ERR_TRIGGER_ENOENT_CLEAR, 0);
5494 ret = -ENOENT;
5495 goto out;
5496 }
5497
5498 if (hist_data->attrs->pause)
5499 data->paused = true;
5500
5501 if (named_data) {
5502 data->private_data = named_data->private_data;
5503 set_named_trigger_data(data, named_data);
5504 data->ops = &event_hist_trigger_named_ops;
5505 }
5506
5507 if (data->ops->init) {
5508 ret = data->ops->init(data->ops, data);
5509 if (ret < 0)
5510 goto out;
5511 }
5512
5513 if (hist_data->enable_timestamps) {
5514 char *clock = hist_data->attrs->clock;
5515
5516 ret = tracing_set_clock(file->tr, hist_data->attrs->clock);
5517 if (ret) {
5518 hist_err(tr, HIST_ERR_SET_CLOCK_FAIL, errpos(clock));
5519 goto out;
5520 }
5521
5522 tracing_set_time_stamp_abs(file->tr, true);
5523 }
5524
5525 if (named_data)
5526 destroy_hist_data(hist_data);
5527
5528 ret++;
5529 out:
5530 return ret;
5531 }
5532
5533 static int hist_trigger_enable(struct event_trigger_data *data,
5534 struct trace_event_file *file)
5535 {
5536 int ret = 0;
5537
5538 list_add_tail_rcu(&data->list, &file->triggers);
5539
5540 update_cond_flag(file);
5541
5542 if (trace_event_trigger_enable_disable(file, 1) < 0) {
5543 list_del_rcu(&data->list);
5544 update_cond_flag(file);
5545 ret--;
5546 }
5547
5548 return ret;
5549 }
5550
5551 static bool have_hist_trigger_match(struct event_trigger_data *data,
5552 struct trace_event_file *file)
5553 {
5554 struct hist_trigger_data *hist_data = data->private_data;
5555 struct event_trigger_data *test, *named_data = NULL;
5556 bool match = false;
5557
5558 lockdep_assert_held(&event_mutex);
5559
5560 if (hist_data->attrs->name)
5561 named_data = find_named_trigger(hist_data->attrs->name);
5562
5563 list_for_each_entry(test, &file->triggers, list) {
5564 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
5565 if (hist_trigger_match(data, test, named_data, false)) {
5566 match = true;
5567 break;
5568 }
5569 }
5570 }
5571
5572 return match;
5573 }
5574
5575 static bool hist_trigger_check_refs(struct event_trigger_data *data,
5576 struct trace_event_file *file)
5577 {
5578 struct hist_trigger_data *hist_data = data->private_data;
5579 struct event_trigger_data *test, *named_data = NULL;
5580
5581 lockdep_assert_held(&event_mutex);
5582
5583 if (hist_data->attrs->name)
5584 named_data = find_named_trigger(hist_data->attrs->name);
5585
5586 list_for_each_entry(test, &file->triggers, list) {
5587 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
5588 if (!hist_trigger_match(data, test, named_data, false))
5589 continue;
5590 hist_data = test->private_data;
5591 if (check_var_refs(hist_data))
5592 return true;
5593 break;
5594 }
5595 }
5596
5597 return false;
5598 }
5599
5600 static void hist_unregister_trigger(char *glob, struct event_trigger_ops *ops,
5601 struct event_trigger_data *data,
5602 struct trace_event_file *file)
5603 {
5604 struct hist_trigger_data *hist_data = data->private_data;
5605 struct event_trigger_data *test, *named_data = NULL;
5606 bool unregistered = false;
5607
5608 lockdep_assert_held(&event_mutex);
5609
5610 if (hist_data->attrs->name)
5611 named_data = find_named_trigger(hist_data->attrs->name);
5612
5613 list_for_each_entry(test, &file->triggers, list) {
5614 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
5615 if (!hist_trigger_match(data, test, named_data, false))
5616 continue;
5617 unregistered = true;
5618 list_del_rcu(&test->list);
5619 trace_event_trigger_enable_disable(file, 0);
5620 update_cond_flag(file);
5621 break;
5622 }
5623 }
5624
5625 if (unregistered && test->ops->free)
5626 test->ops->free(test->ops, test);
5627
5628 if (hist_data->enable_timestamps) {
5629 if (!hist_data->remove || unregistered)
5630 tracing_set_time_stamp_abs(file->tr, false);
5631 }
5632 }
5633
5634 static bool hist_file_check_refs(struct trace_event_file *file)
5635 {
5636 struct hist_trigger_data *hist_data;
5637 struct event_trigger_data *test;
5638
5639 lockdep_assert_held(&event_mutex);
5640
5641 list_for_each_entry(test, &file->triggers, list) {
5642 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
5643 hist_data = test->private_data;
5644 if (check_var_refs(hist_data))
5645 return true;
5646 }
5647 }
5648
5649 return false;
5650 }
5651
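/*
 * Remove every hist trigger on @file (used when the trigger file is
 * truncated), unless one of them defines variables that are still
 * referenced.  Synthetic-event references taken at registration time
 * are dropped here as well.
 */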
static void hist_unreg_all(struct trace_event_file *file)
{
	struct event_trigger_data *test, *n;
	struct hist_trigger_data *hist_data;
	struct synth_event *se;
	const char *se_name;

	lockdep_assert_held(&event_mutex);

	if (hist_file_check_refs(file))
		return;

	list_for_each_entry_safe(test, n, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			hist_data = test->private_data;
			list_del_rcu(&test->list);
			trace_event_trigger_enable_disable(file, 0);

			se_name = trace_event_name(file->event_call);
			se = find_synth_event(se_name);
			if (se)
				se->ref--;

			update_cond_flag(file);
			if (hist_data->enable_timestamps)
				tracing_set_time_stamp_abs(file->tr, false);
			if (test->ops->free)
				test->ops->free(test->ops, test);
		}
	}
}

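/*
 * Parse a 'hist' trigger command written to an event's 'trigger' file
 * and register (or, with a leading '!', unregister) the corresponding
 * hist trigger.  An illustrative command (the field names are only an
 * example and depend on the event):
 *
 *   hist:keys=pid:vals=hitcount:sort=hitcount.descending if bytes_req > 128
 *
 * Everything up to an ' if ' surrounded by whitespace is the trigger
 * proper; the remainder is handed to cmd_ops->set_filter() as an event
 * filter.
 */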
static int event_hist_trigger_func(struct event_command *cmd_ops,
				   struct trace_event_file *file,
				   char *glob, char *cmd, char *param)
{
	unsigned int hist_trigger_bits = TRACING_MAP_BITS_DEFAULT;
	struct event_trigger_data *trigger_data;
	struct hist_trigger_attrs *attrs;
	struct event_trigger_ops *trigger_ops;
	struct hist_trigger_data *hist_data;
	struct synth_event *se;
	const char *se_name;
	bool remove = false;
	char *trigger, *p;
	int ret = 0;

	lockdep_assert_held(&event_mutex);

	if (glob && strlen(glob)) {
		hist_err_clear();
		last_cmd_set(file, param);
	}

	if (!param)
		return -EINVAL;

	if (glob[0] == '!')
		remove = true;

	/*
	 * separate the trigger from the filter (k:v [if filter])
	 * allowing for whitespace in the trigger
	 */
	p = trigger = param;
	do {
		p = strstr(p, "if");
		if (!p)
			break;
		if (p == param)
			return -EINVAL;
		if (*(p - 1) != ' ' && *(p - 1) != '\t') {
			p++;
			continue;
		}
		if (p >= param + strlen(param) - (sizeof("if") - 1) - 1)
			return -EINVAL;
		if (*(p + sizeof("if") - 1) != ' ' && *(p + sizeof("if") - 1) != '\t') {
			p++;
			continue;
		}
		break;
	} while (p);

	if (!p)
		param = NULL;
	else {
		*(p - 1) = '\0';
		param = strstrip(p);
		trigger = strstrip(trigger);
	}

	attrs = parse_hist_trigger_attrs(file->tr, trigger);
	if (IS_ERR(attrs))
		return PTR_ERR(attrs);

	if (attrs->map_bits)
		hist_trigger_bits = attrs->map_bits;

	hist_data = create_hist_data(hist_trigger_bits, attrs, file, remove);
	if (IS_ERR(hist_data)) {
		destroy_hist_trigger_attrs(attrs);
		return PTR_ERR(hist_data);
	}

	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);

	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
	if (!trigger_data) {
		ret = -ENOMEM;
		goto out_free;
	}

	trigger_data->count = -1;
	trigger_data->ops = trigger_ops;
	trigger_data->cmd_ops = cmd_ops;

	INIT_LIST_HEAD(&trigger_data->list);
	RCU_INIT_POINTER(trigger_data->filter, NULL);

	trigger_data->private_data = hist_data;

	/* if param is non-empty, it's supposed to be a filter */
	if (param && cmd_ops->set_filter) {
		ret = cmd_ops->set_filter(param, trigger_data, file);
		if (ret < 0)
			goto out_free;
	}

	if (remove) {
		if (!have_hist_trigger_match(trigger_data, file))
			goto out_free;

		if (hist_trigger_check_refs(trigger_data, file)) {
			ret = -EBUSY;
			goto out_free;
		}

		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
		se_name = trace_event_name(file->event_call);
		se = find_synth_event(se_name);
		if (se)
			se->ref--;
		ret = 0;
		goto out_free;
	}

	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
	/*
	 * The above returns on success the # of triggers registered,
	 * but if it didn't register any it returns zero.  Consider no
	 * triggers registered a failure too.
	 */
	if (!ret) {
		if (!(attrs->pause || attrs->cont || attrs->clear))
			ret = -ENOENT;
		goto out_free;
	} else if (ret < 0)
		goto out_free;

	if (get_named_trigger_data(trigger_data))
		goto enable;

	if (has_hist_vars(hist_data))
		save_hist_vars(hist_data);

	ret = create_actions(hist_data);
	if (ret)
		goto out_unreg;

	ret = tracing_map_init(hist_data->map);
	if (ret)
		goto out_unreg;
 enable:
	ret = hist_trigger_enable(trigger_data, file);
	if (ret)
		goto out_unreg;

	se_name = trace_event_name(file->event_call);
	se = find_synth_event(se_name);
	if (se)
		se->ref++;
	/* Just return zero, not the number of registered triggers */
	ret = 0;
 out:
	if (ret == 0 && glob[0])
		hist_err_clear();

	return ret;
 out_unreg:
	cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
 out_free:
	if (cmd_ops->set_filter)
		cmd_ops->set_filter(NULL, trigger_data, NULL);

	remove_hist_vars(hist_data);

	kfree(trigger_data);

	destroy_hist_data(hist_data);
	goto out;
}

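/*
 * The event_command implementing the 'hist' trigger.  Once registered
 * at boot, it makes commands such as the (illustrative) one below
 * available on every event's 'trigger' file:
 *
 *   # echo 'hist:keys=common_pid' > events/sched/sched_switch/trigger
 */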
static struct event_command trigger_hist_cmd = {
	.name = "hist",
	.trigger_type = ETT_EVENT_HIST,
	.flags = EVENT_CMD_FL_NEEDS_REC,
	.func = event_hist_trigger_func,
	.reg = hist_register_trigger,
	.unreg = hist_unregister_trigger,
	.unreg_all = hist_unreg_all,
	.get_trigger_ops = event_hist_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

__init int register_trigger_hist_cmd(void)
{
	int ret;

	ret = register_event_command(&trigger_hist_cmd);
	WARN_ON(ret < 0);

	return ret;
}

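/*
 * Trigger callbacks for the 'enable_hist'/'disable_hist' commands: they
 * don't create histograms themselves, they pause or unpause the hist
 * triggers already attached to the target event.  An illustrative use
 * (paths relative to the tracefs events directory):
 *
 *   # echo 'enable_hist:sched:sched_switch' > \
 *           events/sched/sched_wakeup/trigger
 */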
static void
hist_enable_trigger(struct event_trigger_data *data, void *rec,
		    struct ring_buffer_event *event)
{
	struct enable_trigger_data *enable_data = data->private_data;
	struct event_trigger_data *test;

	list_for_each_entry_rcu(test, &enable_data->file->triggers, list,
				lockdep_is_held(&event_mutex)) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			if (enable_data->enable)
				test->paused = false;
			else
				test->paused = true;
		}
	}
}

static void
hist_enable_count_trigger(struct event_trigger_data *data, void *rec,
			  struct ring_buffer_event *event)
{
	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	hist_enable_trigger(data, rec, event);
}

static struct event_trigger_ops hist_enable_trigger_ops = {
	.func = hist_enable_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};

static struct event_trigger_ops hist_enable_count_trigger_ops = {
	.func = hist_enable_count_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};

static struct event_trigger_ops hist_disable_trigger_ops = {
	.func = hist_enable_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};

static struct event_trigger_ops hist_disable_count_trigger_ops = {
	.func = hist_enable_count_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};

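/*
 * Pick the trigger ops for an enable_hist/disable_hist command: the
 * command name selects enable vs. disable, and the presence of a count
 * param (e.g. 'enable_hist:sys:event:N') selects the counted variant.
 */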
static struct event_trigger_ops *
hist_enable_get_trigger_ops(char *cmd, char *param)
{
	struct event_trigger_ops *ops;
	bool enable;

	enable = (strcmp(cmd, ENABLE_HIST_STR) == 0);

	if (enable)
		ops = param ? &hist_enable_count_trigger_ops :
			&hist_enable_trigger_ops;
	else
		ops = param ? &hist_disable_count_trigger_ops :
			&hist_disable_trigger_ops;

	return ops;
}

static void hist_enable_unreg_all(struct trace_event_file *file)
{
	struct event_trigger_data *test, *n;

	list_for_each_entry_safe(test, n, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_HIST_ENABLE) {
			list_del_rcu(&test->list);
			update_cond_flag(file);
			trace_event_trigger_enable_disable(file, 0);
			if (test->ops->free)
				test->ops->free(test->ops, test);
		}
	}
}

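/*
 * event_command definitions for ENABLE_HIST_STR and DISABLE_HIST_STR;
 * both share the ETT_HIST_ENABLE trigger type and the generic
 * event-enable registration helpers, differing only in name.
 */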
static struct event_command trigger_hist_enable_cmd = {
	.name = ENABLE_HIST_STR,
	.trigger_type = ETT_HIST_ENABLE,
	.func = event_enable_trigger_func,
	.reg = event_enable_register_trigger,
	.unreg = event_enable_unregister_trigger,
	.unreg_all = hist_enable_unreg_all,
	.get_trigger_ops = hist_enable_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

static struct event_command trigger_hist_disable_cmd = {
	.name = DISABLE_HIST_STR,
	.trigger_type = ETT_HIST_ENABLE,
	.func = event_enable_trigger_func,
	.reg = event_enable_register_trigger,
	.unreg = event_enable_unregister_trigger,
	.unreg_all = hist_enable_unreg_all,
	.get_trigger_ops = hist_enable_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

static __init void unregister_trigger_hist_enable_disable_cmds(void)
{
	unregister_event_command(&trigger_hist_enable_cmd);
	unregister_event_command(&trigger_hist_disable_cmd);
}

__init int register_trigger_hist_enable_disable_cmds(void)
{
	int ret;

	ret = register_event_command(&trigger_hist_enable_cmd);
	if (WARN_ON(ret < 0))
		return ret;
	ret = register_event_command(&trigger_hist_disable_cmd);
	if (WARN_ON(ret < 0))
		unregister_trigger_hist_enable_disable_cmds();

	return ret;
}