1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * trace_events_hist - trace event hist triggers
4 *
5 * Copyright (C) 2015 Tom Zanussi <tom.zanussi@linux.intel.com>
6 */
7
8 #include <linux/module.h>
9 #include <linux/kallsyms.h>
10 #include <linux/security.h>
11 #include <linux/mutex.h>
12 #include <linux/slab.h>
13 #include <linux/stacktrace.h>
14 #include <linux/rculist.h>
15 #include <linux/tracefs.h>
16
17 /* for gfp flag names */
18 #include <linux/trace_events.h>
19 #include <trace/events/mmflags.h>
20
21 #include "tracing_map.h"
22 #include "trace_synth.h"
23
24 #define ERRORS \
25 C(NONE, "No error"), \
26 C(DUPLICATE_VAR, "Variable already defined"), \
27 C(VAR_NOT_UNIQUE, "Variable name not unique, need to use fully qualified name (subsys.event.var) for variable"), \
28 C(TOO_MANY_VARS, "Too many variables defined"), \
29 C(MALFORMED_ASSIGNMENT, "Malformed assignment"), \
30 C(NAMED_MISMATCH, "Named hist trigger doesn't match existing named trigger (includes variables)"), \
31 C(TRIGGER_EEXIST, "Hist trigger already exists"), \
32 C(TRIGGER_ENOENT_CLEAR, "Can't clear or continue a nonexistent hist trigger"), \
33 C(SET_CLOCK_FAIL, "Couldn't set trace_clock"), \
34 C(BAD_FIELD_MODIFIER, "Invalid field modifier"), \
35 C(TOO_MANY_SUBEXPR, "Too many subexpressions (3 max)"), \
36 C(TIMESTAMP_MISMATCH, "Timestamp units in expression don't match"), \
37 C(TOO_MANY_FIELD_VARS, "Too many field variables defined"), \
38 C(EVENT_FILE_NOT_FOUND, "Event file not found"), \
39 C(HIST_NOT_FOUND, "Matching event histogram not found"), \
40 C(HIST_CREATE_FAIL, "Couldn't create histogram for field"), \
41 C(SYNTH_VAR_NOT_FOUND, "Couldn't find synthetic variable"), \
42 C(SYNTH_EVENT_NOT_FOUND,"Couldn't find synthetic event"), \
43 C(SYNTH_TYPE_MISMATCH, "Param type doesn't match synthetic event field type"), \
44 C(SYNTH_COUNT_MISMATCH, "Param count doesn't match synthetic event field count"), \
45 C(FIELD_VAR_PARSE_FAIL, "Couldn't parse field variable"), \
46 C(VAR_CREATE_FIND_FAIL, "Couldn't create or find variable"), \
47 C(ONX_NOT_VAR, "For onmax(x) or onchange(x), x must be a variable"), \
48 C(ONX_VAR_NOT_FOUND, "Couldn't find onmax or onchange variable"), \
49 C(ONX_VAR_CREATE_FAIL, "Couldn't create onmax or onchange variable"), \
50 C(FIELD_VAR_CREATE_FAIL,"Couldn't create field variable"), \
51 C(TOO_MANY_PARAMS, "Too many action params"), \
52 C(PARAM_NOT_FOUND, "Couldn't find param"), \
53 C(INVALID_PARAM, "Invalid action param"), \
54 C(ACTION_NOT_FOUND, "No action found"), \
55 C(NO_SAVE_PARAMS, "No params found for save()"), \
56 C(TOO_MANY_SAVE_ACTIONS,"Can't have more than one save() action per hist"), \
57 C(ACTION_MISMATCH, "Handler doesn't support action"), \
58 C(NO_CLOSING_PAREN, "No closing paren found"), \
59 C(SUBSYS_NOT_FOUND, "Missing subsystem"), \
60 C(INVALID_SUBSYS_EVENT, "Invalid subsystem or event name"), \
61 C(INVALID_REF_KEY, "Using variable references in keys not supported"), \
62 C(VAR_NOT_FOUND, "Couldn't find variable"), \
63 C(FIELD_NOT_FOUND, "Couldn't find field"), \
64 C(EMPTY_ASSIGNMENT, "Empty assignment"), \
65 C(INVALID_SORT_MODIFIER,"Invalid sort modifier"), \
66 C(EMPTY_SORT_FIELD, "Empty sort field"), \
67 C(TOO_MANY_SORT_FIELDS, "Too many sort fields (Max = 2)"), \
68 C(INVALID_SORT_FIELD, "Sort field must be a key or a val"), \
69 C(INVALID_STR_OPERAND, "String type can not be an operand in expression"),
70
71 #undef C
72 #define C(a, b) HIST_ERR_##a
73
74 enum { ERRORS };
75
76 #undef C
77 #define C(a, b) b
78
79 static const char *err_text[] = { ERRORS };
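/*
 * Illustrative expansion of the ERRORS X-macro above: with C(a, b)
 * defined as HIST_ERR_##a the table yields the enum constants
 * (HIST_ERR_NONE, HIST_ERR_DUPLICATE_VAR, ...); with C(a, b) defined as
 * b it yields the matching strings, so err_text[HIST_ERR_DUPLICATE_VAR]
 * is "Variable already defined".
 */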
80
81 struct hist_field;
82
83 typedef u64 (*hist_field_fn_t) (struct hist_field *field,
84 struct tracing_map_elt *elt,
85 struct ring_buffer_event *rbe,
86 void *event);
87
88 #define HIST_FIELD_OPERANDS_MAX 2
89 #define HIST_FIELDS_MAX (TRACING_MAP_FIELDS_MAX + TRACING_MAP_VARS_MAX)
90 #define HIST_ACTIONS_MAX 8
91
92 enum field_op_id {
93 FIELD_OP_NONE,
94 FIELD_OP_PLUS,
95 FIELD_OP_MINUS,
96 FIELD_OP_UNARY_MINUS,
97 };
98
99 /*
100 * A hist_var (histogram variable) contains variable information for
101 * hist_fields having the HIST_FIELD_FL_VAR or HIST_FIELD_FL_VAR_REF
102 * flag set. A hist_var has a variable name e.g. ts0, and is
103 * associated with a given histogram trigger, as specified by
104 * hist_data. The hist_var idx is the unique index assigned to the
105 * variable by the hist trigger's tracing_map. The idx is what is
106 * used to set a variable's value and, by a variable reference, to
107 * retrieve it.
108 */
109 struct hist_var {
110 char *name;
111 struct hist_trigger_data *hist_data;
112 unsigned int idx;
113 };
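/*
 * Illustrative usage (event and tracefs path assumed): a trigger such as
 *
 *   # echo 'hist:keys=pid:ts0=common_timestamp.usecs' >> \
 *         events/sched/sched_waking/trigger
 *
 * defines a variable named "ts0"; its hist_var records that name, the
 * owning hist_trigger_data and the tracing_map idx used to store the
 * value and to read it back through a variable reference.
 */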
114
115 struct hist_field {
116 struct ftrace_event_field *field;
117 unsigned long flags;
118 hist_field_fn_t fn;
119 unsigned int ref;
120 unsigned int size;
121 unsigned int offset;
122 unsigned int is_signed;
123 const char *type;
124 struct hist_field *operands[HIST_FIELD_OPERANDS_MAX];
125 struct hist_trigger_data *hist_data;
126
127 /*
128 * Variable fields contain variable-specific info in var.
129 */
130 struct hist_var var;
131 enum field_op_id operator;
132 char *system;
133 char *event_name;
134
135 /*
136 * The name field is used for EXPR and VAR_REF fields. VAR
137 * fields contain the variable name in var.name.
138 */
139 char *name;
140
141 /*
142 * When a histogram trigger is hit, if it has any references
143 * to variables, the values of those variables are collected
144 * into a var_ref_vals array by resolve_var_refs(). The
145 * current value of each variable is read from the tracing_map
146 * using the hist field's hist_var.idx and entered into the
147 * var_ref_idx entry i.e. var_ref_vals[var_ref_idx].
148 */
149 unsigned int var_ref_idx;
150 bool read_once;
151
152 unsigned int var_str_idx;
153 };
154
155 static u64 hist_field_none(struct hist_field *field,
156 struct tracing_map_elt *elt,
157 struct ring_buffer_event *rbe,
158 void *event)
159 {
160 return 0;
161 }
162
163 static u64 hist_field_counter(struct hist_field *field,
164 struct tracing_map_elt *elt,
165 struct ring_buffer_event *rbe,
166 void *event)
167 {
168 return 1;
169 }
170
171 static u64 hist_field_string(struct hist_field *hist_field,
172 struct tracing_map_elt *elt,
173 struct ring_buffer_event *rbe,
174 void *event)
175 {
176 char *addr = (char *)(event + hist_field->field->offset);
177
178 return (u64)(unsigned long)addr;
179 }
180
181 static u64 hist_field_dynstring(struct hist_field *hist_field,
182 struct tracing_map_elt *elt,
183 struct ring_buffer_event *rbe,
184 void *event)
185 {
186 u32 str_item = *(u32 *)(event + hist_field->field->offset);
187 int str_loc = str_item & 0xffff; /* __data_loc: offset in low 16 bits, length in high 16 bits */
188 char *addr = (char *)(event + str_loc);
189
190 return (u64)(unsigned long)addr;
191 }
192
193 static u64 hist_field_pstring(struct hist_field *hist_field,
194 struct tracing_map_elt *elt,
195 struct ring_buffer_event *rbe,
196 void *event)
197 {
198 char **addr = (char **)(event + hist_field->field->offset);
199
200 return (u64)(unsigned long)*addr;
201 }
202
203 static u64 hist_field_log2(struct hist_field *hist_field,
204 struct tracing_map_elt *elt,
205 struct ring_buffer_event *rbe,
206 void *event)
207 {
208 struct hist_field *operand = hist_field->operands[0];
209
210 u64 val = operand->fn(operand, elt, rbe, event);
211
212 return (u64) ilog2(roundup_pow_of_two(val));
213 }
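/*
 * Illustrative example: the .log2 modifier buckets a value by its
 * power-of-two ceiling, e.g. val == 1500 gives
 * ilog2(roundup_pow_of_two(1500)) == ilog2(2048) == 11.
 */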
214
215 static u64 hist_field_plus(struct hist_field *hist_field,
216 struct tracing_map_elt *elt,
217 struct ring_buffer_event *rbe,
218 void *event)
219 {
220 struct hist_field *operand1 = hist_field->operands[0];
221 struct hist_field *operand2 = hist_field->operands[1];
222
223 u64 val1 = operand1->fn(operand1, elt, rbe, event);
224 u64 val2 = operand2->fn(operand2, elt, rbe, event);
225
226 return val1 + val2;
227 }
228
229 static u64 hist_field_minus(struct hist_field *hist_field,
230 struct tracing_map_elt *elt,
231 struct ring_buffer_event *rbe,
232 void *event)
233 {
234 struct hist_field *operand1 = hist_field->operands[0];
235 struct hist_field *operand2 = hist_field->operands[1];
236
237 u64 val1 = operand1->fn(operand1, elt, rbe, event);
238 u64 val2 = operand2->fn(operand2, elt, rbe, event);
239
240 return val1 - val2;
241 }
242
243 static u64 hist_field_unary_minus(struct hist_field *hist_field,
244 struct tracing_map_elt *elt,
245 struct ring_buffer_event *rbe,
246 void *event)
247 {
248 struct hist_field *operand = hist_field->operands[0];
249
250 s64 sval = (s64)operand->fn(operand, elt, rbe, event);
251 u64 val = (u64)-sval;
252
253 return val;
254 }
255
256 #define DEFINE_HIST_FIELD_FN(type) \
257 static u64 hist_field_##type(struct hist_field *hist_field, \
258 struct tracing_map_elt *elt, \
259 struct ring_buffer_event *rbe, \
260 void *event) \
261 { \
262 type *addr = (type *)(event + hist_field->field->offset); \
263 \
264 return (u64)(unsigned long)*addr; \
265 }
266
267 DEFINE_HIST_FIELD_FN(s64);
268 DEFINE_HIST_FIELD_FN(u64);
269 DEFINE_HIST_FIELD_FN(s32);
270 DEFINE_HIST_FIELD_FN(u32);
271 DEFINE_HIST_FIELD_FN(s16);
272 DEFINE_HIST_FIELD_FN(u16);
273 DEFINE_HIST_FIELD_FN(s8);
274 DEFINE_HIST_FIELD_FN(u8);
275
276 #define for_each_hist_field(i, hist_data) \
277 for ((i) = 0; (i) < (hist_data)->n_fields; (i)++)
278
279 #define for_each_hist_val_field(i, hist_data) \
280 for ((i) = 0; (i) < (hist_data)->n_vals; (i)++)
281
282 #define for_each_hist_key_field(i, hist_data) \
283 for ((i) = (hist_data)->n_vals; (i) < (hist_data)->n_fields; (i)++)
284
285 #define HIST_STACKTRACE_DEPTH 16
286 #define HIST_STACKTRACE_SIZE (HIST_STACKTRACE_DEPTH * sizeof(unsigned long))
287 #define HIST_STACKTRACE_SKIP 5
288
289 #define HITCOUNT_IDX 0
290 #define HIST_KEY_SIZE_MAX (MAX_FILTER_STR_VAL + HIST_STACKTRACE_SIZE)
291
292 enum hist_field_flags {
293 HIST_FIELD_FL_HITCOUNT = 1 << 0,
294 HIST_FIELD_FL_KEY = 1 << 1,
295 HIST_FIELD_FL_STRING = 1 << 2,
296 HIST_FIELD_FL_HEX = 1 << 3,
297 HIST_FIELD_FL_SYM = 1 << 4,
298 HIST_FIELD_FL_SYM_OFFSET = 1 << 5,
299 HIST_FIELD_FL_EXECNAME = 1 << 6,
300 HIST_FIELD_FL_SYSCALL = 1 << 7,
301 HIST_FIELD_FL_STACKTRACE = 1 << 8,
302 HIST_FIELD_FL_LOG2 = 1 << 9,
303 HIST_FIELD_FL_TIMESTAMP = 1 << 10,
304 HIST_FIELD_FL_TIMESTAMP_USECS = 1 << 11,
305 HIST_FIELD_FL_VAR = 1 << 12,
306 HIST_FIELD_FL_EXPR = 1 << 13,
307 HIST_FIELD_FL_VAR_REF = 1 << 14,
308 HIST_FIELD_FL_CPU = 1 << 15,
309 HIST_FIELD_FL_ALIAS = 1 << 16,
310 };
311
312 struct var_defs {
313 unsigned int n_vars;
314 char *name[TRACING_MAP_VARS_MAX];
315 char *expr[TRACING_MAP_VARS_MAX];
316 };
317
318 struct hist_trigger_attrs {
319 char *keys_str;
320 char *vals_str;
321 char *sort_key_str;
322 char *name;
323 char *clock;
324 bool pause;
325 bool cont;
326 bool clear;
327 bool ts_in_usecs;
328 unsigned int map_bits;
329
330 char *assignment_str[TRACING_MAP_VARS_MAX];
331 unsigned int n_assignments;
332
333 char *action_str[HIST_ACTIONS_MAX];
334 unsigned int n_actions;
335
336 struct var_defs var_defs;
337 };
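/*
 * Illustrative example (hypothetical trigger string): for
 * 'hist:keys=pid:vals=runtime:sort=runtime:size=2048', parsing fills
 * keys_str = "pid", vals_str = "runtime", sort_key_str = "runtime" and
 * map_bits = 11; the bare keywords "pause", "cont"/"continue" and
 * "clear" set the corresponding bools instead.
 */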
338
339 struct field_var {
340 struct hist_field *var;
341 struct hist_field *val;
342 };
343
344 struct field_var_hist {
345 struct hist_trigger_data *hist_data;
346 char *cmd;
347 };
348
349 struct hist_trigger_data {
350 struct hist_field *fields[HIST_FIELDS_MAX];
351 unsigned int n_vals;
352 unsigned int n_keys;
353 unsigned int n_fields;
354 unsigned int n_vars;
355 unsigned int n_var_str;
356 unsigned int key_size;
357 struct tracing_map_sort_key sort_keys[TRACING_MAP_SORT_KEYS_MAX];
358 unsigned int n_sort_keys;
359 struct trace_event_file *event_file;
360 struct hist_trigger_attrs *attrs;
361 struct tracing_map *map;
362 bool enable_timestamps;
363 bool remove;
364 struct hist_field *var_refs[TRACING_MAP_VARS_MAX];
365 unsigned int n_var_refs;
366
367 struct action_data *actions[HIST_ACTIONS_MAX];
368 unsigned int n_actions;
369
370 struct field_var *field_vars[SYNTH_FIELDS_MAX];
371 unsigned int n_field_vars;
372 unsigned int n_field_var_str;
373 struct field_var_hist *field_var_hists[SYNTH_FIELDS_MAX];
374 unsigned int n_field_var_hists;
375
376 struct field_var *save_vars[SYNTH_FIELDS_MAX];
377 unsigned int n_save_vars;
378 unsigned int n_save_var_str;
379 };
380
381 struct action_data;
382
383 typedef void (*action_fn_t) (struct hist_trigger_data *hist_data,
384 struct tracing_map_elt *elt, void *rec,
385 struct ring_buffer_event *rbe, void *key,
386 struct action_data *data, u64 *var_ref_vals);
387
388 typedef bool (*check_track_val_fn_t) (u64 track_val, u64 var_val);
389
390 enum handler_id {
391 HANDLER_ONMATCH = 1,
392 HANDLER_ONMAX,
393 HANDLER_ONCHANGE,
394 };
395
396 enum action_id {
397 ACTION_SAVE = 1,
398 ACTION_TRACE,
399 ACTION_SNAPSHOT,
400 };
401
402 struct action_data {
403 enum handler_id handler;
404 enum action_id action;
405 char *action_name;
406 action_fn_t fn;
407
408 unsigned int n_params;
409 char *params[SYNTH_FIELDS_MAX];
410
411 /*
412 * When a histogram trigger is hit, the values of any
413 * references to variables, including variables being passed
414 * as parameters to synthetic events, are collected into a
415 * var_ref_vals array. This var_ref_idx array is an array of
416 * indices into the var_ref_vals array, one for each synthetic
417 * event param, and is passed to the synthetic event
418 * invocation.
419 */
420 unsigned int var_ref_idx[SYNTH_FIELDS_MAX];
421 struct synth_event *synth_event;
422 bool use_trace_keyword;
423 char *synth_event_name;
424
425 union {
426 struct {
427 char *event;
428 char *event_system;
429 } match_data;
430
431 struct {
432 /*
433 * var_str contains the $-unstripped variable
434 * name referenced by var_ref, and used when
435 * printing the action. Because var_ref
436 * creation is deferred to create_actions(),
437 * we need a per-action way to save it until
438 * then, thus var_str.
439 */
440 char *var_str;
441
442 /*
443 * var_ref refers to the variable being
444 * tracked e.g. onmax($var).
445 */
446 struct hist_field *var_ref;
447
448 /*
449 * track_var contains the 'invisible' tracking
450 * variable created to keep the current
451 * e.g. max value.
452 */
453 struct hist_field *track_var;
454
455 check_track_val_fn_t check_val;
456 action_fn_t save_data;
457 } track_data;
458 };
459 };
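/*
 * Illustrative example (hypothetical action strings): for
 * 'onmax($wakeup_lat).save(comm,prio)', handler is HANDLER_ONMAX, action
 * is ACTION_SAVE, track_data.var_str holds "$wakeup_lat" and params[]
 * holds "comm" and "prio"; for 'onmatch(...).trace(synth_ev,$a,$b)',
 * synth_event and var_ref_idx[] describe the synthetic event invocation.
 */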
460
461 struct track_data {
462 u64 track_val;
463 bool updated;
464
465 unsigned int key_len;
466 void *key;
467 struct tracing_map_elt elt;
468
469 struct action_data *action_data;
470 struct hist_trigger_data *hist_data;
471 };
472
473 struct hist_elt_data {
474 char *comm;
475 u64 *var_ref_vals;
476 char *field_var_str[SYNTH_FIELDS_MAX];
477 };
478
479 struct snapshot_context {
480 struct tracing_map_elt *elt;
481 void *key;
482 };
483
484 static void track_data_free(struct track_data *track_data)
485 {
486 struct hist_elt_data *elt_data;
487
488 if (!track_data)
489 return;
490
491 kfree(track_data->key);
492
493 elt_data = track_data->elt.private_data;
494 if (elt_data) {
495 kfree(elt_data->comm);
496 kfree(elt_data);
497 }
498
499 kfree(track_data);
500 }
501
502 static struct track_data *track_data_alloc(unsigned int key_len,
503 struct action_data *action_data,
504 struct hist_trigger_data *hist_data)
505 {
506 struct track_data *data = kzalloc(sizeof(*data), GFP_KERNEL);
507 struct hist_elt_data *elt_data;
508
509 if (!data)
510 return ERR_PTR(-ENOMEM);
511
512 data->key = kzalloc(key_len, GFP_KERNEL);
513 if (!data->key) {
514 track_data_free(data);
515 return ERR_PTR(-ENOMEM);
516 }
517
518 data->key_len = key_len;
519 data->action_data = action_data;
520 data->hist_data = hist_data;
521
522 elt_data = kzalloc(sizeof(*elt_data), GFP_KERNEL);
523 if (!elt_data) {
524 track_data_free(data);
525 return ERR_PTR(-ENOMEM);
526 }
527
528 data->elt.private_data = elt_data;
529
530 elt_data->comm = kzalloc(TASK_COMM_LEN, GFP_KERNEL);
531 if (!elt_data->comm) {
532 track_data_free(data);
533 return ERR_PTR(-ENOMEM);
534 }
535
536 return data;
537 }
538
539 static char last_cmd[MAX_FILTER_STR_VAL];
540 static char last_cmd_loc[MAX_FILTER_STR_VAL];
541
542 static int errpos(char *str)
543 {
544 return err_pos(last_cmd, str);
545 }
546
547 static void last_cmd_set(struct trace_event_file *file, char *str)
548 {
549 const char *system = NULL, *name = NULL;
550 struct trace_event_call *call;
551
552 if (!str)
553 return;
554
555 strcpy(last_cmd, "hist:");
556 strncat(last_cmd, str, MAX_FILTER_STR_VAL - 1 - sizeof("hist:"));
557
558 if (file) {
559 call = file->event_call;
560 system = call->class->system;
561 if (system) {
562 name = trace_event_name(call);
563 if (!name)
564 system = NULL;
565 }
566 }
567
568 if (system)
569 snprintf(last_cmd_loc, MAX_FILTER_STR_VAL, "hist:%s:%s", system, name);
570 }
571
572 static void hist_err(struct trace_array *tr, u8 err_type, u8 err_pos)
573 {
574 tracing_log_err(tr, last_cmd_loc, last_cmd, err_text,
575 err_type, err_pos);
576 }
577
578 static void hist_err_clear(void)
579 {
580 last_cmd[0] = '\0';
581 last_cmd_loc[0] = '\0';
582 }
583
584 typedef void (*synth_probe_func_t) (void *__data, u64 *var_ref_vals,
585 unsigned int *var_ref_idx);
586
587 static inline void trace_synth(struct synth_event *event, u64 *var_ref_vals,
588 unsigned int *var_ref_idx)
589 {
590 struct tracepoint *tp = event->tp;
591
592 if (unlikely(atomic_read(&tp->key.enabled) > 0)) {
593 struct tracepoint_func *probe_func_ptr;
594 synth_probe_func_t probe_func;
595 void *__data;
596
597 if (!(cpu_online(raw_smp_processor_id())))
598 return;
599
600 probe_func_ptr = rcu_dereference_sched((tp)->funcs);
601 if (probe_func_ptr) {
602 do {
603 probe_func = probe_func_ptr->func;
604 __data = probe_func_ptr->data;
605 probe_func(__data, var_ref_vals, var_ref_idx);
606 } while ((++probe_func_ptr)->func);
607 }
608 }
609 }
610
611 static void action_trace(struct hist_trigger_data *hist_data,
612 struct tracing_map_elt *elt, void *rec,
613 struct ring_buffer_event *rbe, void *key,
614 struct action_data *data, u64 *var_ref_vals)
615 {
616 struct synth_event *event = data->synth_event;
617
618 trace_synth(event, var_ref_vals, data->var_ref_idx);
619 }
620
621 struct hist_var_data {
622 struct list_head list;
623 struct hist_trigger_data *hist_data;
624 };
625
626 static u64 hist_field_timestamp(struct hist_field *hist_field,
627 struct tracing_map_elt *elt,
628 struct ring_buffer_event *rbe,
629 void *event)
630 {
631 struct hist_trigger_data *hist_data = hist_field->hist_data;
632 struct trace_array *tr = hist_data->event_file->tr;
633
634 u64 ts = ring_buffer_event_time_stamp(rbe);
635
636 if (hist_data->attrs->ts_in_usecs && trace_clock_in_ns(tr))
637 ts = ns2usecs(ts);
638
639 return ts;
640 }
641
642 static u64 hist_field_cpu(struct hist_field *hist_field,
643 struct tracing_map_elt *elt,
644 struct ring_buffer_event *rbe,
645 void *event)
646 {
647 int cpu = smp_processor_id();
648
649 return cpu;
650 }
651
652 /**
653 * check_field_for_var_ref - Check if a VAR_REF field references a variable
654 * @hist_field: The VAR_REF field to check
655 * @var_data: The hist trigger that owns the variable
656 * @var_idx: The trigger variable identifier
657 *
658 * Check the given VAR_REF field to see whether or not it references
659 * the given variable associated with the given trigger.
660 *
661 * Return: The VAR_REF field if it does reference the variable, NULL if not
662 */
663 static struct hist_field *
664 check_field_for_var_ref(struct hist_field *hist_field,
665 struct hist_trigger_data *var_data,
666 unsigned int var_idx)
667 {
668 WARN_ON(!(hist_field && hist_field->flags & HIST_FIELD_FL_VAR_REF));
669
670 if (hist_field && hist_field->var.idx == var_idx &&
671 hist_field->var.hist_data == var_data)
672 return hist_field;
673
674 return NULL;
675 }
676
677 /**
678 * find_var_ref - Check if a trigger has a reference to a trigger variable
679 * @hist_data: The hist trigger that might have a reference to the variable
680 * @var_data: The hist trigger that owns the variable
681 * @var_idx: The trigger variable identifier
682 *
683 * Check the list of var_refs[] on the first hist trigger to see
684 * whether any of them are references to the variable on the second
685 * trigger.
686 *
687 * Return: The VAR_REF field referencing the variable if so, NULL if not
688 */
689 static struct hist_field *find_var_ref(struct hist_trigger_data *hist_data,
690 struct hist_trigger_data *var_data,
691 unsigned int var_idx)
692 {
693 struct hist_field *hist_field;
694 unsigned int i;
695
696 for (i = 0; i < hist_data->n_var_refs; i++) {
697 hist_field = hist_data->var_refs[i];
698 if (check_field_for_var_ref(hist_field, var_data, var_idx))
699 return hist_field;
700 }
701
702 return NULL;
703 }
704
705 /**
706 * find_any_var_ref - Check if there is a reference to a given trigger variable
707 * @hist_data: The hist trigger
708 * @var_idx: The trigger variable identifier
709 *
710 * Check to see whether the given variable is currently referenced by
711 * any other trigger.
712 *
713 * The trigger the variable is defined on is explicitly excluded - the
714 * assumption being that a self-reference doesn't prevent a trigger
715 * from being removed.
716 *
717 * Return: The VAR_REF field referencing the variable if so, NULL if not
718 */
719 static struct hist_field *find_any_var_ref(struct hist_trigger_data *hist_data,
720 unsigned int var_idx)
721 {
722 struct trace_array *tr = hist_data->event_file->tr;
723 struct hist_field *found = NULL;
724 struct hist_var_data *var_data;
725
726 list_for_each_entry(var_data, &tr->hist_vars, list) {
727 if (var_data->hist_data == hist_data)
728 continue;
729 found = find_var_ref(var_data->hist_data, hist_data, var_idx);
730 if (found)
731 break;
732 }
733
734 return found;
735 }
736
737 /**
738 * check_var_refs - Check if there is a reference to any of trigger's variables
739 * @hist_data: The hist trigger
740 *
741 * A trigger can define one or more variables. If any one of them is
742 * currently referenced by any other trigger, this function will
743 * determine that.
744 *
745 * Typically used to determine whether or not a trigger can be removed
746 * - if there are any references to a trigger's variables, it cannot.
747 *
748 * Return: True if there is a reference to any of trigger's variables
749 */
750 static bool check_var_refs(struct hist_trigger_data *hist_data)
751 {
752 struct hist_field *field;
753 bool found = false;
754 int i;
755
756 for_each_hist_field(i, hist_data) {
757 field = hist_data->fields[i];
758 if (field && field->flags & HIST_FIELD_FL_VAR) {
759 if (find_any_var_ref(hist_data, field->var.idx)) {
760 found = true;
761 break;
762 }
763 }
764 }
765
766 return found;
767 }
768
769 static struct hist_var_data *find_hist_vars(struct hist_trigger_data *hist_data)
770 {
771 struct trace_array *tr = hist_data->event_file->tr;
772 struct hist_var_data *var_data, *found = NULL;
773
774 list_for_each_entry(var_data, &tr->hist_vars, list) {
775 if (var_data->hist_data == hist_data) {
776 found = var_data;
777 break;
778 }
779 }
780
781 return found;
782 }
783
784 static bool field_has_hist_vars(struct hist_field *hist_field,
785 unsigned int level)
786 {
787 int i;
788
789 if (level > 3)
790 return false;
791
792 if (!hist_field)
793 return false;
794
795 if (hist_field->flags & HIST_FIELD_FL_VAR ||
796 hist_field->flags & HIST_FIELD_FL_VAR_REF)
797 return true;
798
799 for (i = 0; i < HIST_FIELD_OPERANDS_MAX; i++) {
800 struct hist_field *operand;
801
802 operand = hist_field->operands[i];
803 if (field_has_hist_vars(operand, level + 1))
804 return true;
805 }
806
807 return false;
808 }
809
810 static bool has_hist_vars(struct hist_trigger_data *hist_data)
811 {
812 struct hist_field *hist_field;
813 int i;
814
815 for_each_hist_field(i, hist_data) {
816 hist_field = hist_data->fields[i];
817 if (field_has_hist_vars(hist_field, 0))
818 return true;
819 }
820
821 return false;
822 }
823
824 static int save_hist_vars(struct hist_trigger_data *hist_data)
825 {
826 struct trace_array *tr = hist_data->event_file->tr;
827 struct hist_var_data *var_data;
828
829 var_data = find_hist_vars(hist_data);
830 if (var_data)
831 return 0;
832
833 if (tracing_check_open_get_tr(tr))
834 return -ENODEV;
835
836 var_data = kzalloc(sizeof(*var_data), GFP_KERNEL);
837 if (!var_data) {
838 trace_array_put(tr);
839 return -ENOMEM;
840 }
841
842 var_data->hist_data = hist_data;
843 list_add(&var_data->list, &tr->hist_vars);
844
845 return 0;
846 }
847
848 static void remove_hist_vars(struct hist_trigger_data *hist_data)
849 {
850 struct trace_array *tr = hist_data->event_file->tr;
851 struct hist_var_data *var_data;
852
853 var_data = find_hist_vars(hist_data);
854 if (!var_data)
855 return;
856
857 if (WARN_ON(check_var_refs(hist_data)))
858 return;
859
860 list_del(&var_data->list);
861
862 kfree(var_data);
863
864 trace_array_put(tr);
865 }
866
867 static struct hist_field *find_var_field(struct hist_trigger_data *hist_data,
868 const char *var_name)
869 {
870 struct hist_field *hist_field, *found = NULL;
871 int i;
872
873 for_each_hist_field(i, hist_data) {
874 hist_field = hist_data->fields[i];
875 if (hist_field && hist_field->flags & HIST_FIELD_FL_VAR &&
876 strcmp(hist_field->var.name, var_name) == 0) {
877 found = hist_field;
878 break;
879 }
880 }
881
882 return found;
883 }
884
885 static struct hist_field *find_var(struct hist_trigger_data *hist_data,
886 struct trace_event_file *file,
887 const char *var_name)
888 {
889 struct hist_trigger_data *test_data;
890 struct event_trigger_data *test;
891 struct hist_field *hist_field;
892
893 lockdep_assert_held(&event_mutex);
894
895 hist_field = find_var_field(hist_data, var_name);
896 if (hist_field)
897 return hist_field;
898
899 list_for_each_entry(test, &file->triggers, list) {
900 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
901 test_data = test->private_data;
902 hist_field = find_var_field(test_data, var_name);
903 if (hist_field)
904 return hist_field;
905 }
906 }
907
908 return NULL;
909 }
910
911 static struct trace_event_file *find_var_file(struct trace_array *tr,
912 char *system,
913 char *event_name,
914 char *var_name)
915 {
916 struct hist_trigger_data *var_hist_data;
917 struct hist_var_data *var_data;
918 struct trace_event_file *file, *found = NULL;
919
920 if (system)
921 return find_event_file(tr, system, event_name);
922
923 list_for_each_entry(var_data, &tr->hist_vars, list) {
924 var_hist_data = var_data->hist_data;
925 file = var_hist_data->event_file;
926 if (file == found)
927 continue;
928
929 if (find_var_field(var_hist_data, var_name)) {
930 if (found) {
931 hist_err(tr, HIST_ERR_VAR_NOT_UNIQUE, errpos(var_name));
932 return NULL;
933 }
934
935 found = file;
936 }
937 }
938
939 return found;
940 }
941
942 static struct hist_field *find_file_var(struct trace_event_file *file,
943 const char *var_name)
944 {
945 struct hist_trigger_data *test_data;
946 struct event_trigger_data *test;
947 struct hist_field *hist_field;
948
949 lockdep_assert_held(&event_mutex);
950
951 list_for_each_entry(test, &file->triggers, list) {
952 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
953 test_data = test->private_data;
954 hist_field = find_var_field(test_data, var_name);
955 if (hist_field)
956 return hist_field;
957 }
958 }
959
960 return NULL;
961 }
962
963 static struct hist_field *
964 find_match_var(struct hist_trigger_data *hist_data, char *var_name)
965 {
966 struct trace_array *tr = hist_data->event_file->tr;
967 struct hist_field *hist_field, *found = NULL;
968 struct trace_event_file *file;
969 unsigned int i;
970
971 for (i = 0; i < hist_data->n_actions; i++) {
972 struct action_data *data = hist_data->actions[i];
973
974 if (data->handler == HANDLER_ONMATCH) {
975 char *system = data->match_data.event_system;
976 char *event_name = data->match_data.event;
977
978 file = find_var_file(tr, system, event_name, var_name);
979 if (!file)
980 continue;
981 hist_field = find_file_var(file, var_name);
982 if (hist_field) {
983 if (found) {
984 hist_err(tr, HIST_ERR_VAR_NOT_UNIQUE,
985 errpos(var_name));
986 return ERR_PTR(-EINVAL);
987 }
988
989 found = hist_field;
990 }
991 }
992 }
993 return found;
994 }
995
996 static struct hist_field *find_event_var(struct hist_trigger_data *hist_data,
997 char *system,
998 char *event_name,
999 char *var_name)
1000 {
1001 struct trace_array *tr = hist_data->event_file->tr;
1002 struct hist_field *hist_field = NULL;
1003 struct trace_event_file *file;
1004
1005 if (!system || !event_name) {
1006 hist_field = find_match_var(hist_data, var_name);
1007 if (IS_ERR(hist_field))
1008 return NULL;
1009 if (hist_field)
1010 return hist_field;
1011 }
1012
1013 file = find_var_file(tr, system, event_name, var_name);
1014 if (!file)
1015 return NULL;
1016
1017 hist_field = find_file_var(file, var_name);
1018
1019 return hist_field;
1020 }
1021
1022 static u64 hist_field_var_ref(struct hist_field *hist_field,
1023 struct tracing_map_elt *elt,
1024 struct ring_buffer_event *rbe,
1025 void *event)
1026 {
1027 struct hist_elt_data *elt_data;
1028 u64 var_val = 0;
1029
1030 if (WARN_ON_ONCE(!elt))
1031 return var_val;
1032
1033 elt_data = elt->private_data;
1034 var_val = elt_data->var_ref_vals[hist_field->var_ref_idx];
1035
1036 return var_val;
1037 }
1038
1039 static bool resolve_var_refs(struct hist_trigger_data *hist_data, void *key,
1040 u64 *var_ref_vals, bool self)
1041 {
1042 struct hist_trigger_data *var_data;
1043 struct tracing_map_elt *var_elt;
1044 struct hist_field *hist_field;
1045 unsigned int i, var_idx;
1046 bool resolved = true;
1047 u64 var_val = 0;
1048
1049 for (i = 0; i < hist_data->n_var_refs; i++) {
1050 hist_field = hist_data->var_refs[i];
1051 var_idx = hist_field->var.idx;
1052 var_data = hist_field->var.hist_data;
1053
1054 if (var_data == NULL) {
1055 resolved = false;
1056 break;
1057 }
1058
1059 if ((self && var_data != hist_data) ||
1060 (!self && var_data == hist_data))
1061 continue;
1062
1063 var_elt = tracing_map_lookup(var_data->map, key);
1064 if (!var_elt) {
1065 resolved = false;
1066 break;
1067 }
1068
1069 if (!tracing_map_var_set(var_elt, var_idx)) {
1070 resolved = false;
1071 break;
1072 }
1073
1074 if (self || !hist_field->read_once)
1075 var_val = tracing_map_read_var(var_elt, var_idx);
1076 else
1077 var_val = tracing_map_read_var_once(var_elt, var_idx);
1078
1079 var_ref_vals[i] = var_val;
1080 }
1081
1082 return resolved;
1083 }
1084
1085 static const char *hist_field_name(struct hist_field *field,
1086 unsigned int level)
1087 {
1088 const char *field_name = "";
1089
1090 if (WARN_ON_ONCE(!field))
1091 return field_name;
1092
1093 if (level > 1)
1094 return field_name;
1095
1096 if (field->field)
1097 field_name = field->field->name;
1098 else if (field->flags & HIST_FIELD_FL_LOG2 ||
1099 field->flags & HIST_FIELD_FL_ALIAS)
1100 field_name = hist_field_name(field->operands[0], ++level);
1101 else if (field->flags & HIST_FIELD_FL_CPU)
1102 field_name = "common_cpu";
1103 else if (field->flags & HIST_FIELD_FL_EXPR ||
1104 field->flags & HIST_FIELD_FL_VAR_REF) {
1105 if (field->system) {
1106 static char full_name[MAX_FILTER_STR_VAL];
1107
1108 strcat(full_name, field->system);
1109 strcat(full_name, ".");
1110 strcat(full_name, field->event_name);
1111 strcat(full_name, ".");
1112 strcat(full_name, field->name);
1113 field_name = full_name;
1114 } else
1115 field_name = field->name;
1116 } else if (field->flags & HIST_FIELD_FL_TIMESTAMP)
1117 field_name = "common_timestamp";
1118
1119 if (field_name == NULL)
1120 field_name = "";
1121
1122 return field_name;
1123 }
1124
1125 static hist_field_fn_t select_value_fn(int field_size, int field_is_signed)
1126 {
1127 hist_field_fn_t fn = NULL;
1128
1129 switch (field_size) {
1130 case 8:
1131 if (field_is_signed)
1132 fn = hist_field_s64;
1133 else
1134 fn = hist_field_u64;
1135 break;
1136 case 4:
1137 if (field_is_signed)
1138 fn = hist_field_s32;
1139 else
1140 fn = hist_field_u32;
1141 break;
1142 case 2:
1143 if (field_is_signed)
1144 fn = hist_field_s16;
1145 else
1146 fn = hist_field_u16;
1147 break;
1148 case 1:
1149 if (field_is_signed)
1150 fn = hist_field_s8;
1151 else
1152 fn = hist_field_u8;
1153 break;
1154 }
1155
1156 return fn;
1157 }
1158
1159 static int parse_map_size(char *str)
1160 {
1161 unsigned long size, map_bits;
1162 int ret;
1163
1164 ret = kstrtoul(str, 0, &size);
1165 if (ret)
1166 goto out;
1167
1168 map_bits = ilog2(roundup_pow_of_two(size));
1169 if (map_bits < TRACING_MAP_BITS_MIN ||
1170 map_bits > TRACING_MAP_BITS_MAX)
1171 ret = -EINVAL;
1172 else
1173 ret = map_bits;
1174 out:
1175 return ret;
1176 }
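/*
 * Illustrative example: 'size=2050' rounds up to 4096 and returns
 * map_bits == 12; sizes whose ilog2 falls outside
 * [TRACING_MAP_BITS_MIN, TRACING_MAP_BITS_MAX] return -EINVAL.
 */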
1177
1178 static void destroy_hist_trigger_attrs(struct hist_trigger_attrs *attrs)
1179 {
1180 unsigned int i;
1181
1182 if (!attrs)
1183 return;
1184
1185 for (i = 0; i < attrs->n_assignments; i++)
1186 kfree(attrs->assignment_str[i]);
1187
1188 for (i = 0; i < attrs->n_actions; i++)
1189 kfree(attrs->action_str[i]);
1190
1191 kfree(attrs->name);
1192 kfree(attrs->sort_key_str);
1193 kfree(attrs->keys_str);
1194 kfree(attrs->vals_str);
1195 kfree(attrs->clock);
1196 kfree(attrs);
1197 }
1198
1199 static int parse_action(char *str, struct hist_trigger_attrs *attrs)
1200 {
1201 int ret = -EINVAL;
1202
1203 if (attrs->n_actions >= HIST_ACTIONS_MAX)
1204 return ret;
1205
1206 if ((str_has_prefix(str, "onmatch(")) ||
1207 (str_has_prefix(str, "onmax(")) ||
1208 (str_has_prefix(str, "onchange("))) {
1209 attrs->action_str[attrs->n_actions] = kstrdup(str, GFP_KERNEL);
1210 if (!attrs->action_str[attrs->n_actions]) {
1211 ret = -ENOMEM;
1212 return ret;
1213 }
1214 attrs->n_actions++;
1215 ret = 0;
1216 }
1217 return ret;
1218 }
1219
1220 static int parse_assignment(struct trace_array *tr,
1221 char *str, struct hist_trigger_attrs *attrs)
1222 {
1223 int len, ret = 0;
1224
1225 if ((len = str_has_prefix(str, "key=")) ||
1226 (len = str_has_prefix(str, "keys="))) {
1227 attrs->keys_str = kstrdup(str + len, GFP_KERNEL);
1228 if (!attrs->keys_str) {
1229 ret = -ENOMEM;
1230 goto out;
1231 }
1232 } else if ((len = str_has_prefix(str, "val=")) ||
1233 (len = str_has_prefix(str, "vals=")) ||
1234 (len = str_has_prefix(str, "values="))) {
1235 attrs->vals_str = kstrdup(str + len, GFP_KERNEL);
1236 if (!attrs->vals_str) {
1237 ret = -ENOMEM;
1238 goto out;
1239 }
1240 } else if ((len = str_has_prefix(str, "sort="))) {
1241 attrs->sort_key_str = kstrdup(str + len, GFP_KERNEL);
1242 if (!attrs->sort_key_str) {
1243 ret = -ENOMEM;
1244 goto out;
1245 }
1246 } else if (str_has_prefix(str, "name=")) {
1247 attrs->name = kstrdup(str, GFP_KERNEL);
1248 if (!attrs->name) {
1249 ret = -ENOMEM;
1250 goto out;
1251 }
1252 } else if ((len = str_has_prefix(str, "clock="))) {
1253 str += len;
1254
1255 str = strstrip(str);
1256 attrs->clock = kstrdup(str, GFP_KERNEL);
1257 if (!attrs->clock) {
1258 ret = -ENOMEM;
1259 goto out;
1260 }
1261 } else if ((len = str_has_prefix(str, "size="))) {
1262 int map_bits = parse_map_size(str + len);
1263
1264 if (map_bits < 0) {
1265 ret = map_bits;
1266 goto out;
1267 }
1268 attrs->map_bits = map_bits;
1269 } else {
1270 char *assignment;
1271
1272 if (attrs->n_assignments == TRACING_MAP_VARS_MAX) {
1273 hist_err(tr, HIST_ERR_TOO_MANY_VARS, errpos(str));
1274 ret = -EINVAL;
1275 goto out;
1276 }
1277
1278 assignment = kstrdup(str, GFP_KERNEL);
1279 if (!assignment) {
1280 ret = -ENOMEM;
1281 goto out;
1282 }
1283
1284 attrs->assignment_str[attrs->n_assignments++] = assignment;
1285 }
1286 out:
1287 return ret;
1288 }
1289
1290 static struct hist_trigger_attrs *
1291 parse_hist_trigger_attrs(struct trace_array *tr, char *trigger_str)
1292 {
1293 struct hist_trigger_attrs *attrs;
1294 int ret = 0;
1295
1296 attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
1297 if (!attrs)
1298 return ERR_PTR(-ENOMEM);
1299
1300 while (trigger_str) {
1301 char *str = strsep(&trigger_str, ":");
1302 char *rhs;
1303
1304 rhs = strchr(str, '=');
1305 if (rhs) {
1306 if (!strlen(++rhs)) {
1307 ret = -EINVAL;
1308 hist_err(tr, HIST_ERR_EMPTY_ASSIGNMENT, errpos(str));
1309 goto free;
1310 }
1311 ret = parse_assignment(tr, str, attrs);
1312 if (ret)
1313 goto free;
1314 } else if (strcmp(str, "pause") == 0)
1315 attrs->pause = true;
1316 else if ((strcmp(str, "cont") == 0) ||
1317 (strcmp(str, "continue") == 0))
1318 attrs->cont = true;
1319 else if (strcmp(str, "clear") == 0)
1320 attrs->clear = true;
1321 else {
1322 ret = parse_action(str, attrs);
1323 if (ret)
1324 goto free;
1325 }
1326 }
1327
1328 if (!attrs->keys_str) {
1329 ret = -EINVAL;
1330 goto free;
1331 }
1332
1333 if (!attrs->clock) {
1334 attrs->clock = kstrdup("global", GFP_KERNEL);
1335 if (!attrs->clock) {
1336 ret = -ENOMEM;
1337 goto free;
1338 }
1339 }
1340
1341 return attrs;
1342 free:
1343 destroy_hist_trigger_attrs(attrs);
1344
1345 return ERR_PTR(ret);
1346 }
1347
1348 static inline void save_comm(char *comm, struct task_struct *task)
1349 {
1350 if (!task->pid) {
1351 strcpy(comm, "<idle>");
1352 return;
1353 }
1354
1355 if (WARN_ON_ONCE(task->pid < 0)) {
1356 strcpy(comm, "<XXX>");
1357 return;
1358 }
1359
1360 strncpy(comm, task->comm, TASK_COMM_LEN);
1361 }
1362
1363 static void hist_elt_data_free(struct hist_elt_data *elt_data)
1364 {
1365 unsigned int i;
1366
1367 for (i = 0; i < SYNTH_FIELDS_MAX; i++)
1368 kfree(elt_data->field_var_str[i]);
1369
1370 kfree(elt_data->comm);
1371 kfree(elt_data);
1372 }
1373
1374 static void hist_trigger_elt_data_free(struct tracing_map_elt *elt)
1375 {
1376 struct hist_elt_data *elt_data = elt->private_data;
1377
1378 hist_elt_data_free(elt_data);
1379 }
1380
1381 static int hist_trigger_elt_data_alloc(struct tracing_map_elt *elt)
1382 {
1383 struct hist_trigger_data *hist_data = elt->map->private_data;
1384 unsigned int size = TASK_COMM_LEN;
1385 struct hist_elt_data *elt_data;
1386 struct hist_field *key_field;
1387 unsigned int i, n_str;
1388
1389 elt_data = kzalloc(sizeof(*elt_data), GFP_KERNEL);
1390 if (!elt_data)
1391 return -ENOMEM;
1392
1393 for_each_hist_key_field(i, hist_data) {
1394 key_field = hist_data->fields[i];
1395
1396 if (key_field->flags & HIST_FIELD_FL_EXECNAME) {
1397 elt_data->comm = kzalloc(size, GFP_KERNEL);
1398 if (!elt_data->comm) {
1399 kfree(elt_data);
1400 return -ENOMEM;
1401 }
1402 break;
1403 }
1404 }
1405
1406 n_str = hist_data->n_field_var_str + hist_data->n_save_var_str +
1407 hist_data->n_var_str;
1408 if (n_str > SYNTH_FIELDS_MAX) {
1409 hist_elt_data_free(elt_data);
1410 return -EINVAL;
1411 }
1412
1413 BUILD_BUG_ON(STR_VAR_LEN_MAX & (sizeof(u64) - 1));
1414
1415 size = STR_VAR_LEN_MAX;
1416
1417 for (i = 0; i < n_str; i++) {
1418 elt_data->field_var_str[i] = kzalloc(size, GFP_KERNEL);
1419 if (!elt_data->field_var_str[i]) {
1420 hist_elt_data_free(elt_data);
1421 return -ENOMEM;
1422 }
1423 }
1424
1425 elt->private_data = elt_data;
1426
1427 return 0;
1428 }
1429
1430 static void hist_trigger_elt_data_init(struct tracing_map_elt *elt)
1431 {
1432 struct hist_elt_data *elt_data = elt->private_data;
1433
1434 if (elt_data->comm)
1435 save_comm(elt_data->comm, current);
1436 }
1437
1438 static const struct tracing_map_ops hist_trigger_elt_data_ops = {
1439 .elt_alloc = hist_trigger_elt_data_alloc,
1440 .elt_free = hist_trigger_elt_data_free,
1441 .elt_init = hist_trigger_elt_data_init,
1442 };
1443
1444 static const char *get_hist_field_flags(struct hist_field *hist_field)
1445 {
1446 const char *flags_str = NULL;
1447
1448 if (hist_field->flags & HIST_FIELD_FL_HEX)
1449 flags_str = "hex";
1450 else if (hist_field->flags & HIST_FIELD_FL_SYM)
1451 flags_str = "sym";
1452 else if (hist_field->flags & HIST_FIELD_FL_SYM_OFFSET)
1453 flags_str = "sym-offset";
1454 else if (hist_field->flags & HIST_FIELD_FL_EXECNAME)
1455 flags_str = "execname";
1456 else if (hist_field->flags & HIST_FIELD_FL_SYSCALL)
1457 flags_str = "syscall";
1458 else if (hist_field->flags & HIST_FIELD_FL_LOG2)
1459 flags_str = "log2";
1460 else if (hist_field->flags & HIST_FIELD_FL_TIMESTAMP_USECS)
1461 flags_str = "usecs";
1462
1463 return flags_str;
1464 }
1465
1466 static void expr_field_str(struct hist_field *field, char *expr)
1467 {
1468 if (field->flags & HIST_FIELD_FL_VAR_REF)
1469 strcat(expr, "$");
1470
1471 strcat(expr, hist_field_name(field, 0));
1472
1473 if (field->flags && !(field->flags & HIST_FIELD_FL_VAR_REF)) {
1474 const char *flags_str = get_hist_field_flags(field);
1475
1476 if (flags_str) {
1477 strcat(expr, ".");
1478 strcat(expr, flags_str);
1479 }
1480 }
1481 }
1482
1483 static char *expr_str(struct hist_field *field, unsigned int level)
1484 {
1485 char *expr;
1486
1487 if (level > 1)
1488 return NULL;
1489
1490 expr = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL);
1491 if (!expr)
1492 return NULL;
1493
1494 if (!field->operands[0]) {
1495 expr_field_str(field, expr);
1496 return expr;
1497 }
1498
1499 if (field->operator == FIELD_OP_UNARY_MINUS) {
1500 char *subexpr;
1501
1502 strcat(expr, "-(");
1503 subexpr = expr_str(field->operands[0], ++level);
1504 if (!subexpr) {
1505 kfree(expr);
1506 return NULL;
1507 }
1508 strcat(expr, subexpr);
1509 strcat(expr, ")");
1510
1511 kfree(subexpr);
1512
1513 return expr;
1514 }
1515
1516 expr_field_str(field->operands[0], expr);
1517
1518 switch (field->operator) {
1519 case FIELD_OP_MINUS:
1520 strcat(expr, "-");
1521 break;
1522 case FIELD_OP_PLUS:
1523 strcat(expr, "+");
1524 break;
1525 default:
1526 kfree(expr);
1527 return NULL;
1528 }
1529
1530 expr_field_str(field->operands[1], expr);
1531
1532 return expr;
1533 }
1534
1535 static int contains_operator(char *str)
1536 {
1537 enum field_op_id field_op = FIELD_OP_NONE;
1538 char *op;
1539
1540 op = strpbrk(str, "+-");
1541 if (!op)
1542 return FIELD_OP_NONE;
1543
1544 switch (*op) {
1545 case '-':
1546 /*
1547 * Unfortunately, the modifier ".sym-offset"
1548 * can confuse things.
1549 */
1550 if (op - str >= 4 && !strncmp(op - 4, ".sym-offset", 11))
1551 return FIELD_OP_NONE;
1552
1553 if (*str == '-')
1554 field_op = FIELD_OP_UNARY_MINUS;
1555 else
1556 field_op = FIELD_OP_MINUS;
1557 break;
1558 case '+':
1559 field_op = FIELD_OP_PLUS;
1560 break;
1561 default:
1562 break;
1563 }
1564
1565 return field_op;
1566 }
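/*
 * Illustrative examples: "field1-field2" yields FIELD_OP_MINUS, "-$lat"
 * yields FIELD_OP_UNARY_MINUS, and "ip.sym-offset" yields FIELD_OP_NONE
 * because the '-' belongs to the modifier; only the first '+' or '-'
 * found is considered.
 */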
1567
1568 static void get_hist_field(struct hist_field *hist_field)
1569 {
1570 hist_field->ref++;
1571 }
1572
1573 static void __destroy_hist_field(struct hist_field *hist_field)
1574 {
1575 if (--hist_field->ref > 1)
1576 return;
1577
1578 kfree(hist_field->var.name);
1579 kfree(hist_field->name);
1580 kfree(hist_field->type);
1581
1582 kfree(hist_field->system);
1583 kfree(hist_field->event_name);
1584
1585 kfree(hist_field);
1586 }
1587
1588 static void destroy_hist_field(struct hist_field *hist_field,
1589 unsigned int level)
1590 {
1591 unsigned int i;
1592
1593 if (level > 3)
1594 return;
1595
1596 if (!hist_field)
1597 return;
1598
1599 if (hist_field->flags & HIST_FIELD_FL_VAR_REF)
1600 return; /* var refs will be destroyed separately */
1601
1602 for (i = 0; i < HIST_FIELD_OPERANDS_MAX; i++)
1603 destroy_hist_field(hist_field->operands[i], level + 1);
1604
1605 __destroy_hist_field(hist_field);
1606 }
1607
1608 static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data,
1609 struct ftrace_event_field *field,
1610 unsigned long flags,
1611 char *var_name)
1612 {
1613 struct hist_field *hist_field;
1614
1615 if (field && is_function_field(field))
1616 return NULL;
1617
1618 hist_field = kzalloc(sizeof(struct hist_field), GFP_KERNEL);
1619 if (!hist_field)
1620 return NULL;
1621
1622 hist_field->ref = 1;
1623
1624 hist_field->hist_data = hist_data;
1625
1626 if (flags & HIST_FIELD_FL_EXPR || flags & HIST_FIELD_FL_ALIAS)
1627 goto out; /* caller will populate */
1628
1629 if (flags & HIST_FIELD_FL_VAR_REF) {
1630 hist_field->fn = hist_field_var_ref;
1631 goto out;
1632 }
1633
1634 if (flags & HIST_FIELD_FL_HITCOUNT) {
1635 hist_field->fn = hist_field_counter;
1636 hist_field->size = sizeof(u64);
1637 hist_field->type = kstrdup("u64", GFP_KERNEL);
1638 if (!hist_field->type)
1639 goto free;
1640 goto out;
1641 }
1642
1643 if (flags & HIST_FIELD_FL_STACKTRACE) {
1644 hist_field->fn = hist_field_none;
1645 goto out;
1646 }
1647
1648 if (flags & HIST_FIELD_FL_LOG2) {
1649 unsigned long fl = flags & ~HIST_FIELD_FL_LOG2;
1650 hist_field->fn = hist_field_log2;
1651 hist_field->operands[0] = create_hist_field(hist_data, field, fl, NULL);
1652 if (!hist_field->operands[0])
1653 goto free;
1654 hist_field->size = hist_field->operands[0]->size;
1655 hist_field->type = kstrdup(hist_field->operands[0]->type, GFP_KERNEL);
1656 if (!hist_field->type)
1657 goto free;
1658 goto out;
1659 }
1660
1661 if (flags & HIST_FIELD_FL_TIMESTAMP) {
1662 hist_field->fn = hist_field_timestamp;
1663 hist_field->size = sizeof(u64);
1664 hist_field->type = kstrdup("u64", GFP_KERNEL);
1665 if (!hist_field->type)
1666 goto free;
1667 goto out;
1668 }
1669
1670 if (flags & HIST_FIELD_FL_CPU) {
1671 hist_field->fn = hist_field_cpu;
1672 hist_field->size = sizeof(int);
1673 hist_field->type = kstrdup("unsigned int", GFP_KERNEL);
1674 if (!hist_field->type)
1675 goto free;
1676 goto out;
1677 }
1678
1679 if (WARN_ON_ONCE(!field))
1680 goto out;
1681
1682 /* Pointers to strings are just pointers and dangerous to dereference */
1683 if (is_string_field(field) &&
1684 (field->filter_type != FILTER_PTR_STRING)) {
1685 flags |= HIST_FIELD_FL_STRING;
1686
1687 hist_field->size = MAX_FILTER_STR_VAL;
1688 hist_field->type = kstrdup(field->type, GFP_KERNEL);
1689 if (!hist_field->type)
1690 goto free;
1691
1692 if (field->filter_type == FILTER_STATIC_STRING) {
1693 hist_field->fn = hist_field_string;
1694 hist_field->size = field->size;
1695 } else if (field->filter_type == FILTER_DYN_STRING)
1696 hist_field->fn = hist_field_dynstring;
1697 else
1698 hist_field->fn = hist_field_pstring;
1699 } else {
1700 hist_field->size = field->size;
1701 hist_field->is_signed = field->is_signed;
1702 hist_field->type = kstrdup(field->type, GFP_KERNEL);
1703 if (!hist_field->type)
1704 goto free;
1705
1706 hist_field->fn = select_value_fn(field->size,
1707 field->is_signed);
1708 if (!hist_field->fn) {
1709 destroy_hist_field(hist_field, 0);
1710 return NULL;
1711 }
1712 }
1713 out:
1714 hist_field->field = field;
1715 hist_field->flags = flags;
1716
1717 if (var_name) {
1718 hist_field->var.name = kstrdup(var_name, GFP_KERNEL);
1719 if (!hist_field->var.name)
1720 goto free;
1721 }
1722
1723 return hist_field;
1724 free:
1725 destroy_hist_field(hist_field, 0);
1726 return NULL;
1727 }
1728
1729 static void destroy_hist_fields(struct hist_trigger_data *hist_data)
1730 {
1731 unsigned int i;
1732
1733 for (i = 0; i < HIST_FIELDS_MAX; i++) {
1734 if (hist_data->fields[i]) {
1735 destroy_hist_field(hist_data->fields[i], 0);
1736 hist_data->fields[i] = NULL;
1737 }
1738 }
1739
1740 for (i = 0; i < hist_data->n_var_refs; i++) {
1741 WARN_ON(!(hist_data->var_refs[i]->flags & HIST_FIELD_FL_VAR_REF));
1742 __destroy_hist_field(hist_data->var_refs[i]);
1743 hist_data->var_refs[i] = NULL;
1744 }
1745 }
1746
1747 static int init_var_ref(struct hist_field *ref_field,
1748 struct hist_field *var_field,
1749 char *system, char *event_name)
1750 {
1751 int err = 0;
1752
1753 ref_field->var.idx = var_field->var.idx;
1754 ref_field->var.hist_data = var_field->hist_data;
1755 ref_field->size = var_field->size;
1756 ref_field->is_signed = var_field->is_signed;
1757 ref_field->flags |= var_field->flags &
1758 (HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS);
1759
1760 if (system) {
1761 ref_field->system = kstrdup(system, GFP_KERNEL);
1762 if (!ref_field->system)
1763 return -ENOMEM;
1764 }
1765
1766 if (event_name) {
1767 ref_field->event_name = kstrdup(event_name, GFP_KERNEL);
1768 if (!ref_field->event_name) {
1769 err = -ENOMEM;
1770 goto free;
1771 }
1772 }
1773
1774 if (var_field->var.name) {
1775 ref_field->name = kstrdup(var_field->var.name, GFP_KERNEL);
1776 if (!ref_field->name) {
1777 err = -ENOMEM;
1778 goto free;
1779 }
1780 } else if (var_field->name) {
1781 ref_field->name = kstrdup(var_field->name, GFP_KERNEL);
1782 if (!ref_field->name) {
1783 err = -ENOMEM;
1784 goto free;
1785 }
1786 }
1787
1788 ref_field->type = kstrdup(var_field->type, GFP_KERNEL);
1789 if (!ref_field->type) {
1790 err = -ENOMEM;
1791 goto free;
1792 }
1793 out:
1794 return err;
1795 free:
1796 kfree(ref_field->system);
1797 ref_field->system = NULL;
1798 kfree(ref_field->event_name);
1799 ref_field->event_name = NULL;
1800 kfree(ref_field->name);
1801 ref_field->name = NULL;
1802
1803 goto out;
1804 }
1805
1806 static int find_var_ref_idx(struct hist_trigger_data *hist_data,
1807 struct hist_field *var_field)
1808 {
1809 struct hist_field *ref_field;
1810 int i;
1811
1812 for (i = 0; i < hist_data->n_var_refs; i++) {
1813 ref_field = hist_data->var_refs[i];
1814 if (ref_field->var.idx == var_field->var.idx &&
1815 ref_field->var.hist_data == var_field->hist_data)
1816 return i;
1817 }
1818
1819 return -ENOENT;
1820 }
1821
1822 /**
1823 * create_var_ref - Create a variable reference and attach it to trigger
1824 * @hist_data: The trigger that will be referencing the variable
1825 * @var_field: The VAR field to create a reference to
1826 * @system: The optional system string
1827 * @event_name: The optional event_name string
1828 *
1829 * Given a variable hist_field, create a VAR_REF hist_field that
1830 * represents a reference to it.
1831 *
1832 * This function also adds the reference to the trigger that
1833 * now references the variable.
1834 *
1835 * Return: The VAR_REF field if successful, NULL if not
1836 */
1837 static struct hist_field *create_var_ref(struct hist_trigger_data *hist_data,
1838 struct hist_field *var_field,
1839 char *system, char *event_name)
1840 {
1841 unsigned long flags = HIST_FIELD_FL_VAR_REF;
1842 struct hist_field *ref_field;
1843 int i;
1844
1845 /* Check if the variable already exists */
1846 for (i = 0; i < hist_data->n_var_refs; i++) {
1847 ref_field = hist_data->var_refs[i];
1848 if (ref_field->var.idx == var_field->var.idx &&
1849 ref_field->var.hist_data == var_field->hist_data) {
1850 get_hist_field(ref_field);
1851 return ref_field;
1852 }
1853 }
1854 /* Sanity check to avoid out-of-bound write on 'hist_data->var_refs' */
1855 if (hist_data->n_var_refs >= TRACING_MAP_VARS_MAX)
1856 return NULL;
1857 ref_field = create_hist_field(var_field->hist_data, NULL, flags, NULL);
1858 if (ref_field) {
1859 if (init_var_ref(ref_field, var_field, system, event_name)) {
1860 destroy_hist_field(ref_field, 0);
1861 return NULL;
1862 }
1863
1864 hist_data->var_refs[hist_data->n_var_refs] = ref_field;
1865 ref_field->var_ref_idx = hist_data->n_var_refs++;
1866 }
1867
1868 return ref_field;
1869 }
1870
1871 static bool is_var_ref(char *var_name)
1872 {
1873 if (!var_name || strlen(var_name) < 2 || var_name[0] != '$')
1874 return false;
1875
1876 return true;
1877 }
1878
1879 static char *field_name_from_var(struct hist_trigger_data *hist_data,
1880 char *var_name)
1881 {
1882 char *name, *field;
1883 unsigned int i;
1884
1885 for (i = 0; i < hist_data->attrs->var_defs.n_vars; i++) {
1886 name = hist_data->attrs->var_defs.name[i];
1887
1888 if (strcmp(var_name, name) == 0) {
1889 field = hist_data->attrs->var_defs.expr[i];
1890 if (contains_operator(field) || is_var_ref(field))
1891 continue;
1892 return field;
1893 }
1894 }
1895
1896 return NULL;
1897 }
1898
1899 static char *local_field_var_ref(struct hist_trigger_data *hist_data,
1900 char *system, char *event_name,
1901 char *var_name)
1902 {
1903 struct trace_event_call *call;
1904
1905 if (system && event_name) {
1906 call = hist_data->event_file->event_call;
1907
1908 if (strcmp(system, call->class->system) != 0)
1909 return NULL;
1910
1911 if (strcmp(event_name, trace_event_name(call)) != 0)
1912 return NULL;
1913 }
1914
1915 if (!!system != !!event_name)
1916 return NULL;
1917
1918 if (!is_var_ref(var_name))
1919 return NULL;
1920
1921 var_name++;
1922
1923 return field_name_from_var(hist_data, var_name);
1924 }
1925
1926 static struct hist_field *parse_var_ref(struct hist_trigger_data *hist_data,
1927 char *system, char *event_name,
1928 char *var_name)
1929 {
1930 struct hist_field *var_field = NULL, *ref_field = NULL;
1931 struct trace_array *tr = hist_data->event_file->tr;
1932
1933 if (!is_var_ref(var_name))
1934 return NULL;
1935
1936 var_name++;
1937
1938 var_field = find_event_var(hist_data, system, event_name, var_name);
1939 if (var_field)
1940 ref_field = create_var_ref(hist_data, var_field,
1941 system, event_name);
1942
1943 if (!ref_field)
1944 hist_err(tr, HIST_ERR_VAR_NOT_FOUND, errpos(var_name));
1945
1946 return ref_field;
1947 }
1948
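/*
 * Resolve a 'field.modifier' string to the event's ftrace_event_field,
 * setting the corresponding HIST_FIELD_FL_* modifier flags. The
 * common_timestamp and common_cpu pseudo-fields are handled here as
 * well. Returns an ERR_PTR on a bad modifier or unknown field.
 */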
1949 static struct ftrace_event_field *
1950 parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file,
1951 char *field_str, unsigned long *flags)
1952 {
1953 struct ftrace_event_field *field = NULL;
1954 char *field_name, *modifier, *str;
1955 struct trace_array *tr = file->tr;
1956
1957 modifier = str = kstrdup(field_str, GFP_KERNEL);
1958 if (!modifier)
1959 return ERR_PTR(-ENOMEM);
1960
1961 field_name = strsep(&modifier, ".");
1962 if (modifier) {
1963 if (strcmp(modifier, "hex") == 0)
1964 *flags |= HIST_FIELD_FL_HEX;
1965 else if (strcmp(modifier, "sym") == 0)
1966 *flags |= HIST_FIELD_FL_SYM;
1967 else if (strcmp(modifier, "sym-offset") == 0)
1968 *flags |= HIST_FIELD_FL_SYM_OFFSET;
1969 else if ((strcmp(modifier, "execname") == 0) &&
1970 (strcmp(field_name, "common_pid") == 0))
1971 *flags |= HIST_FIELD_FL_EXECNAME;
1972 else if (strcmp(modifier, "syscall") == 0)
1973 *flags |= HIST_FIELD_FL_SYSCALL;
1974 else if (strcmp(modifier, "log2") == 0)
1975 *flags |= HIST_FIELD_FL_LOG2;
1976 else if (strcmp(modifier, "usecs") == 0)
1977 *flags |= HIST_FIELD_FL_TIMESTAMP_USECS;
1978 else {
1979 hist_err(tr, HIST_ERR_BAD_FIELD_MODIFIER, errpos(modifier));
1980 field = ERR_PTR(-EINVAL);
1981 goto out;
1982 }
1983 }
1984
1985 if (strcmp(field_name, "common_timestamp") == 0) {
1986 *flags |= HIST_FIELD_FL_TIMESTAMP;
1987 hist_data->enable_timestamps = true;
1988 if (*flags & HIST_FIELD_FL_TIMESTAMP_USECS)
1989 hist_data->attrs->ts_in_usecs = true;
1990 } else if (strcmp(field_name, "common_cpu") == 0)
1991 *flags |= HIST_FIELD_FL_CPU;
1992 else {
1993 field = trace_find_event_field(file->event_call, field_name);
1994 if (!field || !field->size) {
1995 /*
1996 * For backward compatibility, if field_name
1997 * was "cpu", then we treat this the same as
1998 * common_cpu. This also works for "CPU".
1999 */
2000 if (field && field->filter_type == FILTER_CPU) {
2001 *flags |= HIST_FIELD_FL_CPU;
2002 } else {
2003 hist_err(tr, HIST_ERR_FIELD_NOT_FOUND,
2004 errpos(field_name));
2005 field = ERR_PTR(-EINVAL);
2006 goto out;
2007 }
2008 }
2009 }
2010 out:
2011 kfree(str);
2012
2013 return field;
2014 }
2015
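/*
 * Wrap an existing variable reference in an ALIAS hist_field so the
 * referenced value can also be accessed under the local name var_name.
 */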
2016 static struct hist_field *create_alias(struct hist_trigger_data *hist_data,
2017 struct hist_field *var_ref,
2018 char *var_name)
2019 {
2020 struct hist_field *alias = NULL;
2021 unsigned long flags = HIST_FIELD_FL_ALIAS | HIST_FIELD_FL_VAR;
2022
2023 alias = create_hist_field(hist_data, NULL, flags, var_name);
2024 if (!alias)
2025 return NULL;
2026
2027 alias->fn = var_ref->fn;
2028 alias->operands[0] = var_ref;
2029
2030 if (init_var_ref(alias, var_ref, var_ref->system, var_ref->event_name)) {
2031 destroy_hist_field(alias, 0);
2032 return NULL;
2033 }
2034
2035 alias->var_ref_idx = var_ref->var_ref_idx;
2036
2037 return alias;
2038 }
2039
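/*
 * Parse a single operand of an expression: either a (possibly
 * system.event-qualified) '$variable' reference or an event field
 * with optional modifiers. Returns a hist_field or an ERR_PTR.
 */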
2040 static struct hist_field *parse_atom(struct hist_trigger_data *hist_data,
2041 struct trace_event_file *file, char *str,
2042 unsigned long *flags, char *var_name)
2043 {
2044 char *s, *ref_system = NULL, *ref_event = NULL, *ref_var = str;
2045 struct ftrace_event_field *field = NULL;
2046 struct hist_field *hist_field = NULL;
2047 int ret = 0;
2048
2049 s = strchr(str, '.');
2050 if (s) {
2051 s = strchr(++s, '.');
2052 if (s) {
2053 ref_system = strsep(&str, ".");
2054 if (!str) {
2055 ret = -EINVAL;
2056 goto out;
2057 }
2058 ref_event = strsep(&str, ".");
2059 if (!str) {
2060 ret = -EINVAL;
2061 goto out;
2062 }
2063 ref_var = str;
2064 }
2065 }
2066
2067 s = local_field_var_ref(hist_data, ref_system, ref_event, ref_var);
2068 if (!s) {
2069 hist_field = parse_var_ref(hist_data, ref_system,
2070 ref_event, ref_var);
2071 if (hist_field) {
2072 if (var_name) {
2073 hist_field = create_alias(hist_data, hist_field, var_name);
2074 if (!hist_field) {
2075 ret = -ENOMEM;
2076 goto out;
2077 }
2078 }
2079 return hist_field;
2080 }
2081 } else
2082 str = s;
2083
2084 field = parse_field(hist_data, file, str, flags);
2085 if (IS_ERR(field)) {
2086 ret = PTR_ERR(field);
2087 goto out;
2088 }
2089
2090 hist_field = create_hist_field(hist_data, field, *flags, var_name);
2091 if (!hist_field) {
2092 ret = -ENOMEM;
2093 goto out;
2094 }
2095
2096 return hist_field;
2097 out:
2098 return ERR_PTR(ret);
2099 }
2100
2101 static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
2102 struct trace_event_file *file,
2103 char *str, unsigned long flags,
2104 char *var_name, unsigned int level);
2105
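/*
 * Parse a unary minus subexpression. The negated operand must itself
 * parse as an expression and can't be a string field.
 */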
2106 static struct hist_field *parse_unary(struct hist_trigger_data *hist_data,
2107 struct trace_event_file *file,
2108 char *str, unsigned long flags,
2109 char *var_name, unsigned int level)
2110 {
2111 struct hist_field *operand1, *expr = NULL;
2112 unsigned long operand_flags;
2113 int ret = 0;
2114 char *s;
2115
2116 /* we only support -(xxx), i.e. explicit parens are required */
2117
2118 if (level > 3) {
2119 hist_err(file->tr, HIST_ERR_TOO_MANY_SUBEXPR, errpos(str));
2120 ret = -EINVAL;
2121 goto free;
2122 }
2123
2124 str++; /* skip leading '-' */
2125
2126 s = strchr(str, '(');
2127 if (s)
2128 str++;
2129 else {
2130 ret = -EINVAL;
2131 goto free;
2132 }
2133
2134 s = strrchr(str, ')');
2135 if (s)
2136 *s = '\0';
2137 else {
2138 ret = -EINVAL; /* no closing ')' */
2139 goto free;
2140 }
2141
2142 flags |= HIST_FIELD_FL_EXPR;
2143 expr = create_hist_field(hist_data, NULL, flags, var_name);
2144 if (!expr) {
2145 ret = -ENOMEM;
2146 goto free;
2147 }
2148
2149 operand_flags = 0;
2150 operand1 = parse_expr(hist_data, file, str, operand_flags, NULL, ++level);
2151 if (IS_ERR(operand1)) {
2152 ret = PTR_ERR(operand1);
2153 goto free;
2154 }
2155 if (operand1->flags & HIST_FIELD_FL_STRING) {
2156 /* A string type cannot be the operand of a unary operator. */
2157 hist_err(file->tr, HIST_ERR_INVALID_STR_OPERAND, errpos(str));
2158 destroy_hist_field(operand1, 0);
2159 ret = -EINVAL;
2160 goto free;
2161 }
2162
2163 expr->flags |= operand1->flags &
2164 (HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS);
2165 expr->fn = hist_field_unary_minus;
2166 expr->operands[0] = operand1;
2167 expr->size = operand1->size;
2168 expr->is_signed = operand1->is_signed;
2169 expr->operator = FIELD_OP_UNARY_MINUS;
2170 expr->name = expr_str(expr, 0);
2171 expr->type = kstrdup(operand1->type, GFP_KERNEL);
2172 if (!expr->type) {
2173 ret = -ENOMEM;
2174 goto free;
2175 }
2176
2177 return expr;
2178 free:
2179 destroy_hist_field(expr, 0);
2180 return ERR_PTR(ret);
2181 }
2182
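/*
 * Expressions mixing usec-converted and raw timestamps are rejected;
 * variable references and aliases are first resolved to the variables
 * they name so the check is done on the defining fields' flags.
 */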
2183 static int check_expr_operands(struct trace_array *tr,
2184 struct hist_field *operand1,
2185 struct hist_field *operand2)
2186 {
2187 unsigned long operand1_flags = operand1->flags;
2188 unsigned long operand2_flags = operand2->flags;
2189
2190 if ((operand1_flags & HIST_FIELD_FL_VAR_REF) ||
2191 (operand1_flags & HIST_FIELD_FL_ALIAS)) {
2192 struct hist_field *var;
2193
2194 var = find_var_field(operand1->var.hist_data, operand1->name);
2195 if (!var)
2196 return -EINVAL;
2197 operand1_flags = var->flags;
2198 }
2199
2200 if ((operand2_flags & HIST_FIELD_FL_VAR_REF) ||
2201 (operand2_flags & HIST_FIELD_FL_ALIAS)) {
2202 struct hist_field *var;
2203
2204 var = find_var_field(operand2->var.hist_data, operand2->name);
2205 if (!var)
2206 return -EINVAL;
2207 operand2_flags = var->flags;
2208 }
2209
2210 if ((operand1_flags & HIST_FIELD_FL_TIMESTAMP_USECS) !=
2211 (operand2_flags & HIST_FIELD_FL_TIMESTAMP_USECS)) {
2212 hist_err(tr, HIST_ERR_TIMESTAMP_MISMATCH, 0);
2213 return -EINVAL;
2214 }
2215
2216 return 0;
2217 }
2218
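/*
 * Recursive descent parser for histogram expressions: an expression
 * is either a single atom, a unary minus subexpression, or
 * 'operand1 +/- operand2', where operand2 may itself be an
 * expression. Nesting is limited to three levels of subexpressions.
 */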
2219 static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
2220 struct trace_event_file *file,
2221 char *str, unsigned long flags,
2222 char *var_name, unsigned int level)
2223 {
2224 struct hist_field *operand1 = NULL, *operand2 = NULL, *expr = NULL;
2225 unsigned long operand_flags;
2226 int field_op, ret = -EINVAL;
2227 char *sep, *operand1_str;
2228
2229 if (level > 3) {
2230 hist_err(file->tr, HIST_ERR_TOO_MANY_SUBEXPR, errpos(str));
2231 return ERR_PTR(-EINVAL);
2232 }
2233
2234 field_op = contains_operator(str);
2235
2236 if (field_op == FIELD_OP_NONE)
2237 return parse_atom(hist_data, file, str, &flags, var_name);
2238
2239 if (field_op == FIELD_OP_UNARY_MINUS)
2240 return parse_unary(hist_data, file, str, flags, var_name, ++level);
2241
2242 switch (field_op) {
2243 case FIELD_OP_MINUS:
2244 sep = "-";
2245 break;
2246 case FIELD_OP_PLUS:
2247 sep = "+";
2248 break;
2249 default:
2250 goto free;
2251 }
2252
2253 operand1_str = strsep(&str, sep);
2254 if (!operand1_str || !str)
2255 goto free;
2256
2257 operand_flags = 0;
2258 operand1 = parse_atom(hist_data, file, operand1_str,
2259 &operand_flags, NULL);
2260 if (IS_ERR(operand1)) {
2261 ret = PTR_ERR(operand1);
2262 operand1 = NULL;
2263 goto free;
2264 }
2265 if (operand1->flags & HIST_FIELD_FL_STRING) {
2266 hist_err(file->tr, HIST_ERR_INVALID_STR_OPERAND, errpos(operand1_str));
2267 ret = -EINVAL;
2268 goto free;
2269 }
2270
2271 /* rest of string could be another expression e.g. b+c in a+b+c */
2272 operand_flags = 0;
2273 operand2 = parse_expr(hist_data, file, str, operand_flags, NULL, ++level);
2274 if (IS_ERR(operand2)) {
2275 ret = PTR_ERR(operand2);
2276 operand2 = NULL;
2277 goto free;
2278 }
2279 if (operand2->flags & HIST_FIELD_FL_STRING) {
2280 hist_err(file->tr, HIST_ERR_INVALID_STR_OPERAND, errpos(str));
2281 ret = -EINVAL;
2282 goto free;
2283 }
2284
2285 ret = check_expr_operands(file->tr, operand1, operand2);
2286 if (ret)
2287 goto free;
2288
2289 flags |= HIST_FIELD_FL_EXPR;
2290
2291 flags |= operand1->flags &
2292 (HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS);
2293
2294 expr = create_hist_field(hist_data, NULL, flags, var_name);
2295 if (!expr) {
2296 ret = -ENOMEM;
2297 goto free;
2298 }
2299
2300 operand1->read_once = true;
2301 operand2->read_once = true;
2302
2303 expr->operands[0] = operand1;
2304 expr->operands[1] = operand2;
2305
2306 /* The operand sizes should be the same, so just pick one */
2307 expr->size = operand1->size;
2308 expr->is_signed = operand1->is_signed;
2309
2310 expr->operator = field_op;
2311 expr->name = expr_str(expr, 0);
2312 expr->type = kstrdup(operand1->type, GFP_KERNEL);
2313 if (!expr->type) {
2314 ret = -ENOMEM;
2315 goto free;
2316 }
2317
2318 switch (field_op) {
2319 case FIELD_OP_MINUS:
2320 expr->fn = hist_field_minus;
2321 break;
2322 case FIELD_OP_PLUS:
2323 expr->fn = hist_field_plus;
2324 break;
2325 default:
2326 ret = -EINVAL;
2327 goto free;
2328 }
2329
2330 return expr;
2331 free:
2332 destroy_hist_field(operand1, 0);
2333 destroy_hist_field(operand2, 0);
2334 destroy_hist_field(expr, 0);
2335
2336 return ERR_PTR(ret);
2337 }
2338
2339 static char *find_trigger_filter(struct hist_trigger_data *hist_data,
2340 struct trace_event_file *file)
2341 {
2342 struct event_trigger_data *test;
2343
2344 lockdep_assert_held(&event_mutex);
2345
2346 list_for_each_entry(test, &file->triggers, list) {
2347 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
2348 if (test->private_data == hist_data)
2349 return test->filter_str;
2350 }
2351 }
2352
2353 return NULL;
2354 }
2355
2356 static struct event_command trigger_hist_cmd;
2357 static int event_hist_trigger_func(struct event_command *cmd_ops,
2358 struct trace_event_file *file,
2359 char *glob, char *cmd, char *param);
2360
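/*
 * Two hist triggers have compatible keys if they have the same number
 * of key fields and each corresponding key matches in type, size and
 * signedness.
 */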
2361 static bool compatible_keys(struct hist_trigger_data *target_hist_data,
2362 struct hist_trigger_data *hist_data,
2363 unsigned int n_keys)
2364 {
2365 struct hist_field *target_hist_field, *hist_field;
2366 unsigned int n, i, j;
2367
2368 if (hist_data->n_fields - hist_data->n_vals != n_keys)
2369 return false;
2370
2371 i = hist_data->n_vals;
2372 j = target_hist_data->n_vals;
2373
2374 for (n = 0; n < n_keys; n++) {
2375 hist_field = hist_data->fields[i + n];
2376 target_hist_field = target_hist_data->fields[j + n];
2377
2378 if (strcmp(hist_field->type, target_hist_field->type) != 0)
2379 return false;
2380 if (hist_field->size != target_hist_field->size)
2381 return false;
2382 if (hist_field->is_signed != target_hist_field->is_signed)
2383 return false;
2384 }
2385
2386 return true;
2387 }
2388
2389 static struct hist_trigger_data *
2390 find_compatible_hist(struct hist_trigger_data *target_hist_data,
2391 struct trace_event_file *file)
2392 {
2393 struct hist_trigger_data *hist_data;
2394 struct event_trigger_data *test;
2395 unsigned int n_keys;
2396
2397 lockdep_assert_held(&event_mutex);
2398
2399 n_keys = target_hist_data->n_fields - target_hist_data->n_vals;
2400
2401 list_for_each_entry(test, &file->triggers, list) {
2402 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
2403 hist_data = test->private_data;
2404
2405 if (compatible_keys(target_hist_data, hist_data, n_keys))
2406 return hist_data;
2407 }
2408 }
2409
2410 return NULL;
2411 }
2412
2413 static struct trace_event_file *event_file(struct trace_array *tr,
2414 char *system, char *event_name)
2415 {
2416 struct trace_event_file *file;
2417
2418 file = __find_event_file(tr, system, event_name);
2419 if (!file)
2420 return ERR_PTR(-EINVAL);
2421
2422 return file;
2423 }
2424
2425 static struct hist_field *
2426 find_synthetic_field_var(struct hist_trigger_data *target_hist_data,
2427 char *system, char *event_name, char *field_name)
2428 {
2429 struct hist_field *event_var;
2430 char *synthetic_name;
2431
2432 synthetic_name = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL);
2433 if (!synthetic_name)
2434 return ERR_PTR(-ENOMEM);
2435
2436 strcpy(synthetic_name, "synthetic_");
2437 strcat(synthetic_name, field_name);
2438
2439 event_var = find_event_var(target_hist_data, system, event_name, synthetic_name);
2440
2441 kfree(synthetic_name);
2442
2443 return event_var;
2444 }
2445
2446 /**
2447 * create_field_var_hist - Automatically create a histogram and var for a field
2448 * @target_hist_data: The target hist trigger
2449 * @subsys_name: Optional subsystem name
2450 * @event_name: Optional event name
2451 * @field_name: The name of the field (and the resulting variable)
2452 *
2453 * Hist trigger actions fetch data from variables, not directly from
2454 * events. However, for convenience, users are allowed to directly
2455 * specify an event field in an action, which will be automatically
2456 * converted into a variable on their behalf.
2457 *
2458 * If a user specifies a field on an event other than the event the
2459 * histogram is being defined on (the target event histogram), the
2460 * only way that can be accomplished is if a new hist trigger is
2461 * created on that event and the field variable defined on it.
2462 *
2463 * This function creates a new histogram compatible with the target
2464 * event (meaning a histogram with the same key as the target
2465 * histogram), and creates a variable for the specified field, but
2466 * with 'synthetic_' prepended to the variable name in order to avoid
2467 * collision with normal field variables.
2468 *
2469 * Return: The variable created for the field.
2470 */
2471 static struct hist_field *
2472 create_field_var_hist(struct hist_trigger_data *target_hist_data,
2473 char *subsys_name, char *event_name, char *field_name)
2474 {
2475 struct trace_array *tr = target_hist_data->event_file->tr;
2476 struct hist_field *event_var = ERR_PTR(-EINVAL);
2477 struct hist_trigger_data *hist_data;
2478 unsigned int i, n, first = true;
2479 struct field_var_hist *var_hist;
2480 struct trace_event_file *file;
2481 struct hist_field *key_field;
2482 char *saved_filter;
2483 char *cmd;
2484 int ret;
2485
2486 if (target_hist_data->n_field_var_hists >= SYNTH_FIELDS_MAX) {
2487 hist_err(tr, HIST_ERR_TOO_MANY_FIELD_VARS, errpos(field_name));
2488 return ERR_PTR(-EINVAL);
2489 }
2490
2491 file = event_file(tr, subsys_name, event_name);
2492
2493 if (IS_ERR(file)) {
2494 hist_err(tr, HIST_ERR_EVENT_FILE_NOT_FOUND, errpos(field_name));
2495 ret = PTR_ERR(file);
2496 return ERR_PTR(ret);
2497 }
2498
2499 /*
2500 * Look for a histogram compatible with target. We'll use the
2501 * found histogram specification to create a new matching
2502 * histogram with our variable on it. target_hist_data is not
2503 * yet a registered histogram so we can't use that.
2504 */
2505 hist_data = find_compatible_hist(target_hist_data, file);
2506 if (!hist_data) {
2507 hist_err(tr, HIST_ERR_HIST_NOT_FOUND, errpos(field_name));
2508 return ERR_PTR(-EINVAL);
2509 }
2510
2511 /* See if a synthetic field variable has already been created */
2512 event_var = find_synthetic_field_var(target_hist_data, subsys_name,
2513 event_name, field_name);
2514 if (!IS_ERR_OR_NULL(event_var))
2515 return event_var;
2516
2517 var_hist = kzalloc(sizeof(*var_hist), GFP_KERNEL);
2518 if (!var_hist)
2519 return ERR_PTR(-ENOMEM);
2520
2521 cmd = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL);
2522 if (!cmd) {
2523 kfree(var_hist);
2524 return ERR_PTR(-ENOMEM);
2525 }
2526
2527 /* Use the same keys as the compatible histogram */
2528 strcat(cmd, "keys=");
2529
2530 for_each_hist_key_field(i, hist_data) {
2531 key_field = hist_data->fields[i];
2532 if (!first)
2533 strcat(cmd, ",");
2534 strcat(cmd, key_field->field->name);
2535 first = false;
2536 }
2537
2538 /* Create the synthetic field variable specification */
2539 strcat(cmd, ":synthetic_");
2540 strcat(cmd, field_name);
2541 strcat(cmd, "=");
2542 strcat(cmd, field_name);
2543
2544 /* Use the same filter as the compatible histogram */
2545 saved_filter = find_trigger_filter(hist_data, file);
2546 if (saved_filter) {
2547 strcat(cmd, " if ");
2548 strcat(cmd, saved_filter);
2549 }
2550
2551 var_hist->cmd = kstrdup(cmd, GFP_KERNEL);
2552 if (!var_hist->cmd) {
2553 kfree(cmd);
2554 kfree(var_hist);
2555 return ERR_PTR(-ENOMEM);
2556 }
2557
2558 /* Save the compatible histogram information */
2559 var_hist->hist_data = hist_data;
2560
2561 /* Create the new histogram with our variable */
2562 ret = event_hist_trigger_func(&trigger_hist_cmd, file,
2563 "", "hist", cmd);
2564 if (ret) {
2565 kfree(cmd);
2566 kfree(var_hist->cmd);
2567 kfree(var_hist);
2568 hist_err(tr, HIST_ERR_HIST_CREATE_FAIL, errpos(field_name));
2569 return ERR_PTR(ret);
2570 }
2571
2572 kfree(cmd);
2573
2574 /* If we can't find the variable, something went wrong */
2575 event_var = find_synthetic_field_var(target_hist_data, subsys_name,
2576 event_name, field_name);
2577 if (IS_ERR_OR_NULL(event_var)) {
2578 kfree(var_hist->cmd);
2579 kfree(var_hist);
2580 hist_err(tr, HIST_ERR_SYNTH_VAR_NOT_FOUND, errpos(field_name));
2581 return ERR_PTR(-EINVAL);
2582 }
2583
2584 n = target_hist_data->n_field_var_hists;
2585 target_hist_data->field_var_hists[n] = var_hist;
2586 target_hist_data->n_field_var_hists++;
2587
2588 return event_var;
2589 }
2590
2591 static struct hist_field *
2592 find_target_event_var(struct hist_trigger_data *hist_data,
2593 char *subsys_name, char *event_name, char *var_name)
2594 {
2595 struct trace_event_file *file = hist_data->event_file;
2596 struct hist_field *hist_field = NULL;
2597
2598 if (subsys_name) {
2599 struct trace_event_call *call;
2600
2601 if (!event_name)
2602 return NULL;
2603
2604 call = file->event_call;
2605
2606 if (strcmp(subsys_name, call->class->system) != 0)
2607 return NULL;
2608
2609 if (strcmp(event_name, trace_event_name(call)) != 0)
2610 return NULL;
2611 }
2612
2613 hist_field = find_var_field(hist_data, var_name);
2614
2615 return hist_field;
2616 }
2617
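/*
 * Evaluate each field variable against the current record and store
 * the result in the tracing_map element. String values are copied
 * into the element's per-variable string storage first, and the
 * variable is set to point at that copy.
 */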
2618 static inline void __update_field_vars(struct tracing_map_elt *elt,
2619 struct ring_buffer_event *rbe,
2620 void *rec,
2621 struct field_var **field_vars,
2622 unsigned int n_field_vars,
2623 unsigned int field_var_str_start)
2624 {
2625 struct hist_elt_data *elt_data = elt->private_data;
2626 unsigned int i, j, var_idx;
2627 u64 var_val;
2628
2629 for (i = 0, j = field_var_str_start; i < n_field_vars; i++) {
2630 struct field_var *field_var = field_vars[i];
2631 struct hist_field *var = field_var->var;
2632 struct hist_field *val = field_var->val;
2633
2634 var_val = val->fn(val, elt, rbe, rec);
2635 var_idx = var->var.idx;
2636
2637 if (val->flags & HIST_FIELD_FL_STRING) {
2638 char *str = elt_data->field_var_str[j++];
2639 char *val_str = (char *)(uintptr_t)var_val;
2640 unsigned int size;
2641
2642 size = min(val->size, STR_VAR_LEN_MAX);
2643 strscpy(str, val_str, size);
2644 var_val = (u64)(uintptr_t)str;
2645 }
2646 tracing_map_set_var(elt, var_idx, var_val);
2647 }
2648 }
2649
2650 static void update_field_vars(struct hist_trigger_data *hist_data,
2651 struct tracing_map_elt *elt,
2652 struct ring_buffer_event *rbe,
2653 void *rec)
2654 {
2655 __update_field_vars(elt, rbe, rec, hist_data->field_vars,
2656 hist_data->n_field_vars, 0);
2657 }
2658
2659 static void save_track_data_vars(struct hist_trigger_data *hist_data,
2660 struct tracing_map_elt *elt, void *rec,
2661 struct ring_buffer_event *rbe, void *key,
2662 struct action_data *data, u64 *var_ref_vals)
2663 {
2664 __update_field_vars(elt, rbe, rec, hist_data->save_vars,
2665 hist_data->n_save_vars, hist_data->n_field_var_str);
2666 }
2667
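/*
 * Allocate a named histogram variable and reserve a tracing_map
 * variable slot for it. Fails if a variable with the same name
 * already exists on this trigger (unless the trigger is being
 * removed).
 */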
2668 static struct hist_field *create_var(struct hist_trigger_data *hist_data,
2669 struct trace_event_file *file,
2670 char *name, int size, const char *type)
2671 {
2672 struct hist_field *var;
2673 int idx;
2674
2675 if (find_var(hist_data, file, name) && !hist_data->remove) {
2676 var = ERR_PTR(-EINVAL);
2677 goto out;
2678 }
2679
2680 var = kzalloc(sizeof(struct hist_field), GFP_KERNEL);
2681 if (!var) {
2682 var = ERR_PTR(-ENOMEM);
2683 goto out;
2684 }
2685
2686 idx = tracing_map_add_var(hist_data->map);
2687 if (idx < 0) {
2688 kfree(var);
2689 var = ERR_PTR(-EINVAL);
2690 goto out;
2691 }
2692
2693 var->ref = 1;
2694 var->flags = HIST_FIELD_FL_VAR;
2695 var->var.idx = idx;
2696 var->var.hist_data = var->hist_data = hist_data;
2697 var->size = size;
2698 var->var.name = kstrdup(name, GFP_KERNEL);
2699 var->type = kstrdup(type, GFP_KERNEL);
2700 if (!var->var.name || !var->type) {
2701 kfree(var->var.name);
2702 kfree(var->type);
2703 kfree(var);
2704 var = ERR_PTR(-ENOMEM);
2705 }
2706 out:
2707 return var;
2708 }
2709
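/*
 * Create a field variable: a variable named after an event field,
 * paired with the val hist_field used to read that field from the
 * record at event time.
 */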
2710 static struct field_var *create_field_var(struct hist_trigger_data *hist_data,
2711 struct trace_event_file *file,
2712 char *field_name)
2713 {
2714 struct hist_field *val = NULL, *var = NULL;
2715 unsigned long flags = HIST_FIELD_FL_VAR;
2716 struct trace_array *tr = file->tr;
2717 struct field_var *field_var;
2718 int ret = 0;
2719
2720 if (hist_data->n_field_vars >= SYNTH_FIELDS_MAX) {
2721 hist_err(tr, HIST_ERR_TOO_MANY_FIELD_VARS, errpos(field_name));
2722 ret = -EINVAL;
2723 goto err;
2724 }
2725
2726 val = parse_atom(hist_data, file, field_name, &flags, NULL);
2727 if (IS_ERR(val)) {
2728 hist_err(tr, HIST_ERR_FIELD_VAR_PARSE_FAIL, errpos(field_name));
2729 ret = PTR_ERR(val);
2730 goto err;
2731 }
2732
2733 var = create_var(hist_data, file, field_name, val->size, val->type);
2734 if (IS_ERR(var)) {
2735 hist_err(tr, HIST_ERR_VAR_CREATE_FIND_FAIL, errpos(field_name));
2736 kfree(val);
2737 ret = PTR_ERR(var);
2738 goto err;
2739 }
2740
2741 field_var = kzalloc(sizeof(struct field_var), GFP_KERNEL);
2742 if (!field_var) {
2743 kfree(val);
2744 kfree(var);
2745 ret = -ENOMEM;
2746 goto err;
2747 }
2748
2749 field_var->var = var;
2750 field_var->val = val;
2751 out:
2752 return field_var;
2753 err:
2754 field_var = ERR_PTR(ret);
2755 goto out;
2756 }
2757
2758 /**
2759 * create_target_field_var - Automatically create a variable for a field
2760 * @target_hist_data: The target hist trigger
2761 * @subsys_name: Optional subsystem name
2762 * @event_name: Optional event name
2763 * @var_name: The name of the field (and the resulting variable)
2764 *
2765 * Hist trigger actions fetch data from variables, not directly from
2766 * events. However, for convenience, users are allowed to directly
2767 * specify an event field in an action, which will be automatically
2768 * converted into a variable on their behalf.
2769 *
2770 * This function creates a field variable with the name var_name on
2771 * the hist trigger currently being defined on the target event. If
2772 * subsys_name and event_name are specified, this function simply
2773 * verifies that they do in fact match the target event subsystem and
2774 * event name.
2775 *
2776 * Return: The variable created for the field.
2777 */
2778 static struct field_var *
2779 create_target_field_var(struct hist_trigger_data *target_hist_data,
2780 char *subsys_name, char *event_name, char *var_name)
2781 {
2782 struct trace_event_file *file = target_hist_data->event_file;
2783
2784 if (subsys_name) {
2785 struct trace_event_call *call;
2786
2787 if (!event_name)
2788 return NULL;
2789
2790 call = file->event_call;
2791
2792 if (strcmp(subsys_name, call->class->system) != 0)
2793 return NULL;
2794
2795 if (strcmp(event_name, trace_event_name(call)) != 0)
2796 return NULL;
2797 }
2798
2799 return create_field_var(target_hist_data, file, var_name);
2800 }
2801
2802 static bool check_track_val_max(u64 track_val, u64 var_val)
2803 {
2804 if (var_val <= track_val)
2805 return false;
2806
2807 return true;
2808 }
2809
2810 static bool check_track_val_changed(u64 track_val, u64 var_val)
2811 {
2812 if (var_val == track_val)
2813 return false;
2814
2815 return true;
2816 }
2817
2818 static u64 get_track_val(struct hist_trigger_data *hist_data,
2819 struct tracing_map_elt *elt,
2820 struct action_data *data)
2821 {
2822 unsigned int track_var_idx = data->track_data.track_var->var.idx;
2823 u64 track_val;
2824
2825 track_val = tracing_map_read_var(elt, track_var_idx);
2826
2827 return track_val;
2828 }
2829
2830 static void save_track_val(struct hist_trigger_data *hist_data,
2831 struct tracing_map_elt *elt,
2832 struct action_data *data, u64 var_val)
2833 {
2834 unsigned int track_var_idx = data->track_data.track_var->var.idx;
2835
2836 tracing_map_set_var(elt, track_var_idx, var_val);
2837 }
2838
2839 static void save_track_data(struct hist_trigger_data *hist_data,
2840 struct tracing_map_elt *elt, void *rec,
2841 struct ring_buffer_event *rbe, void *key,
2842 struct action_data *data, u64 *var_ref_vals)
2843 {
2844 if (data->track_data.save_data)
2845 data->track_data.save_data(hist_data, elt, rec, rbe, key, data, var_ref_vals);
2846 }
2847
2848 static bool check_track_val(struct tracing_map_elt *elt,
2849 struct action_data *data,
2850 u64 var_val)
2851 {
2852 struct hist_trigger_data *hist_data;
2853 u64 track_val;
2854
2855 hist_data = data->track_data.track_var->hist_data;
2856 track_val = get_track_val(hist_data, elt, data);
2857
2858 return data->track_data.check_val(track_val, var_val);
2859 }
2860
2861 #ifdef CONFIG_TRACER_SNAPSHOT
2862 static bool cond_snapshot_update(struct trace_array *tr, void *cond_data)
2863 {
2864 /* called with tr->max_lock held */
2865 struct track_data *track_data = tr->cond_snapshot->cond_data;
2866 struct hist_elt_data *elt_data, *track_elt_data;
2867 struct snapshot_context *context = cond_data;
2868 struct action_data *action;
2869 u64 track_val;
2870
2871 if (!track_data)
2872 return false;
2873
2874 action = track_data->action_data;
2875
2876 track_val = get_track_val(track_data->hist_data, context->elt,
2877 track_data->action_data);
2878
2879 if (!action->track_data.check_val(track_data->track_val, track_val))
2880 return false;
2881
2882 track_data->track_val = track_val;
2883 memcpy(track_data->key, context->key, track_data->key_len);
2884
2885 elt_data = context->elt->private_data;
2886 track_elt_data = track_data->elt.private_data;
2887 if (elt_data->comm)
2888 strncpy(track_elt_data->comm, elt_data->comm, TASK_COMM_LEN);
2889
2890 track_data->updated = true;
2891
2892 return true;
2893 }
2894
2895 static void save_track_data_snapshot(struct hist_trigger_data *hist_data,
2896 struct tracing_map_elt *elt, void *rec,
2897 struct ring_buffer_event *rbe, void *key,
2898 struct action_data *data,
2899 u64 *var_ref_vals)
2900 {
2901 struct trace_event_file *file = hist_data->event_file;
2902 struct snapshot_context context;
2903
2904 context.elt = elt;
2905 context.key = key;
2906
2907 tracing_snapshot_cond(file->tr, &context);
2908 }
2909
2910 static void hist_trigger_print_key(struct seq_file *m,
2911 struct hist_trigger_data *hist_data,
2912 void *key,
2913 struct tracing_map_elt *elt);
2914
2915 static struct action_data *snapshot_action(struct hist_trigger_data *hist_data)
2916 {
2917 unsigned int i;
2918
2919 if (!hist_data->n_actions)
2920 return NULL;
2921
2922 for (i = 0; i < hist_data->n_actions; i++) {
2923 struct action_data *data = hist_data->actions[i];
2924
2925 if (data->action == ACTION_SNAPSHOT)
2926 return data;
2927 }
2928
2929 return NULL;
2930 }
2931
2932 static void track_data_snapshot_print(struct seq_file *m,
2933 struct hist_trigger_data *hist_data)
2934 {
2935 struct trace_event_file *file = hist_data->event_file;
2936 struct track_data *track_data;
2937 struct action_data *action;
2938
2939 track_data = tracing_cond_snapshot_data(file->tr);
2940 if (!track_data)
2941 return;
2942
2943 if (!track_data->updated)
2944 return;
2945
2946 action = snapshot_action(hist_data);
2947 if (!action)
2948 return;
2949
2950 seq_puts(m, "\nSnapshot taken (see tracing/snapshot). Details:\n");
2951 seq_printf(m, "\ttriggering value { %s(%s) }: %10llu",
2952 action->handler == HANDLER_ONMAX ? "onmax" : "onchange",
2953 action->track_data.var_str, track_data->track_val);
2954
2955 seq_puts(m, "\ttriggered by event with key: ");
2956 hist_trigger_print_key(m, hist_data, track_data->key, &track_data->elt);
2957 seq_putc(m, '\n');
2958 }
2959 #else
2960 static bool cond_snapshot_update(struct trace_array *tr, void *cond_data)
2961 {
2962 return false;
2963 }
2964 static void save_track_data_snapshot(struct hist_trigger_data *hist_data,
2965 struct tracing_map_elt *elt, void *rec,
2966 struct ring_buffer_event *rbe, void *key,
2967 struct action_data *data,
2968 u64 *var_ref_vals) {}
2969 static void track_data_snapshot_print(struct seq_file *m,
2970 struct hist_trigger_data *hist_data) {}
2971 #endif /* CONFIG_TRACER_SNAPSHOT */
2972
2973 static void track_data_print(struct seq_file *m,
2974 struct hist_trigger_data *hist_data,
2975 struct tracing_map_elt *elt,
2976 struct action_data *data)
2977 {
2978 u64 track_val = get_track_val(hist_data, elt, data);
2979 unsigned int i, save_var_idx;
2980
2981 if (data->handler == HANDLER_ONMAX)
2982 seq_printf(m, "\n\tmax: %10llu", track_val);
2983 else if (data->handler == HANDLER_ONCHANGE)
2984 seq_printf(m, "\n\tchanged: %10llu", track_val);
2985
2986 if (data->action == ACTION_SNAPSHOT)
2987 return;
2988
2989 for (i = 0; i < hist_data->n_save_vars; i++) {
2990 struct hist_field *save_val = hist_data->save_vars[i]->val;
2991 struct hist_field *save_var = hist_data->save_vars[i]->var;
2992 u64 val;
2993
2994 save_var_idx = save_var->var.idx;
2995
2996 val = tracing_map_read_var(elt, save_var_idx);
2997
2998 if (save_val->flags & HIST_FIELD_FL_STRING) {
2999 seq_printf(m, " %s: %-32s", save_var->var.name,
3000 (char *)(uintptr_t)(val));
3001 } else
3002 seq_printf(m, " %s: %10llu", save_var->var.name, val);
3003 }
3004 }
3005
3006 static void ontrack_action(struct hist_trigger_data *hist_data,
3007 struct tracing_map_elt *elt, void *rec,
3008 struct ring_buffer_event *rbe, void *key,
3009 struct action_data *data, u64 *var_ref_vals)
3010 {
3011 u64 var_val = var_ref_vals[data->track_data.var_ref->var_ref_idx];
3012
3013 if (check_track_val(elt, data, var_val)) {
3014 save_track_val(hist_data, elt, data, var_val);
3015 save_track_data(hist_data, elt, rec, rbe, key, data, var_ref_vals);
3016 }
3017 }
3018
3019 static void action_data_destroy(struct action_data *data)
3020 {
3021 unsigned int i;
3022
3023 lockdep_assert_held(&event_mutex);
3024
3025 kfree(data->action_name);
3026
3027 for (i = 0; i < data->n_params; i++)
3028 kfree(data->params[i]);
3029
3030 if (data->synth_event)
3031 data->synth_event->ref--;
3032
3033 kfree(data->synth_event_name);
3034
3035 kfree(data);
3036 }
3037
3038 static void track_data_destroy(struct hist_trigger_data *hist_data,
3039 struct action_data *data)
3040 {
3041 struct trace_event_file *file = hist_data->event_file;
3042
3043 destroy_hist_field(data->track_data.track_var, 0);
3044
3045 if (data->action == ACTION_SNAPSHOT) {
3046 struct track_data *track_data;
3047
3048 track_data = tracing_cond_snapshot_data(file->tr);
3049 if (track_data && track_data->hist_data == hist_data) {
3050 tracing_snapshot_cond_disable(file->tr);
3051 track_data_free(track_data);
3052 }
3053 }
3054
3055 kfree(data->track_data.var_str);
3056
3057 action_data_destroy(data);
3058 }
3059
3060 static int action_create(struct hist_trigger_data *hist_data,
3061 struct action_data *data);
3062
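/*
 * Set up an onmax()/onchange() handler: resolve the tracked '$var',
 * create a reference to it, create an internal __max or __change
 * variable to hold the tracked value, then create the action itself.
 */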
3063 static int track_data_create(struct hist_trigger_data *hist_data,
3064 struct action_data *data)
3065 {
3066 struct hist_field *var_field, *ref_field, *track_var = NULL;
3067 struct trace_event_file *file = hist_data->event_file;
3068 struct trace_array *tr = file->tr;
3069 char *track_data_var_str;
3070 int ret = 0;
3071
3072 track_data_var_str = data->track_data.var_str;
3073 if (track_data_var_str[0] != '$') {
3074 hist_err(tr, HIST_ERR_ONX_NOT_VAR, errpos(track_data_var_str));
3075 return -EINVAL;
3076 }
3077 track_data_var_str++;
3078
3079 var_field = find_target_event_var(hist_data, NULL, NULL, track_data_var_str);
3080 if (!var_field) {
3081 hist_err(tr, HIST_ERR_ONX_VAR_NOT_FOUND, errpos(track_data_var_str));
3082 return -EINVAL;
3083 }
3084
3085 ref_field = create_var_ref(hist_data, var_field, NULL, NULL);
3086 if (!ref_field)
3087 return -ENOMEM;
3088
3089 data->track_data.var_ref = ref_field;
3090
3091 if (data->handler == HANDLER_ONMAX)
3092 track_var = create_var(hist_data, file, "__max", sizeof(u64), "u64");
3093 if (IS_ERR(track_var)) {
3094 hist_err(tr, HIST_ERR_ONX_VAR_CREATE_FAIL, 0);
3095 ret = PTR_ERR(track_var);
3096 goto out;
3097 }
3098
3099 if (data->handler == HANDLER_ONCHANGE)
3100 track_var = create_var(hist_data, file, "__change", sizeof(u64), "u64");
3101 if (IS_ERR(track_var)) {
3102 hist_err(tr, HIST_ERR_ONX_VAR_CREATE_FAIL, 0);
3103 ret = PTR_ERR(track_var);
3104 goto out;
3105 }
3106 data->track_data.track_var = track_var;
3107
3108 ret = action_create(hist_data, data);
3109 out:
3110 return ret;
3111 }
3112
3113 static int parse_action_params(struct trace_array *tr, char *params,
3114 struct action_data *data)
3115 {
3116 char *param, *saved_param;
3117 bool first_param = true;
3118 int ret = 0;
3119
3120 while (params) {
3121 if (data->n_params >= SYNTH_FIELDS_MAX) {
3122 hist_err(tr, HIST_ERR_TOO_MANY_PARAMS, 0);
3123 ret = -EINVAL;
3124 goto out;
3125 }
3126
3127 param = strsep(&params, ",");
3128 if (!param) {
3129 hist_err(tr, HIST_ERR_PARAM_NOT_FOUND, 0);
3130 ret = -EINVAL;
3131 goto out;
3132 }
3133
3134 param = strstrip(param);
3135 if (strlen(param) < 2) {
3136 hist_err(tr, HIST_ERR_INVALID_PARAM, errpos(param));
3137 ret = -EINVAL;
3138 goto out;
3139 }
3140
3141 saved_param = kstrdup(param, GFP_KERNEL);
3142 if (!saved_param) {
3143 ret = -ENOMEM;
3144 goto out;
3145 }
3146
3147 if (first_param && data->use_trace_keyword) {
3148 data->synth_event_name = saved_param;
3149 first_param = false;
3150 continue;
3151 }
3152 first_param = false;
3153
3154 data->params[data->n_params++] = saved_param;
3155 }
3156 out:
3157 return ret;
3158 }
3159
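/*
 * Parse the action following an onmax/onchange/onmatch handler:
 * save(field,...), snapshot(), or a synthetic event invocation
 * (optionally via the trace(...) keyword), filling in the check,
 * save and action callbacks accordingly.
 */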
3160 static int action_parse(struct trace_array *tr, char *str, struct action_data *data,
3161 enum handler_id handler)
3162 {
3163 char *action_name;
3164 int ret = 0;
3165
3166 strsep(&str, ".");
3167 if (!str) {
3168 hist_err(tr, HIST_ERR_ACTION_NOT_FOUND, 0);
3169 ret = -EINVAL;
3170 goto out;
3171 }
3172
3173 action_name = strsep(&str, "(");
3174 if (!action_name || !str) {
3175 hist_err(tr, HIST_ERR_ACTION_NOT_FOUND, 0);
3176 ret = -EINVAL;
3177 goto out;
3178 }
3179
3180 if (str_has_prefix(action_name, "save")) {
3181 char *params = strsep(&str, ")");
3182
3183 if (!params) {
3184 hist_err(tr, HIST_ERR_NO_SAVE_PARAMS, 0);
3185 ret = -EINVAL;
3186 goto out;
3187 }
3188
3189 ret = parse_action_params(tr, params, data);
3190 if (ret)
3191 goto out;
3192
3193 if (handler == HANDLER_ONMAX)
3194 data->track_data.check_val = check_track_val_max;
3195 else if (handler == HANDLER_ONCHANGE)
3196 data->track_data.check_val = check_track_val_changed;
3197 else {
3198 hist_err(tr, HIST_ERR_ACTION_MISMATCH, errpos(action_name));
3199 ret = -EINVAL;
3200 goto out;
3201 }
3202
3203 data->track_data.save_data = save_track_data_vars;
3204 data->fn = ontrack_action;
3205 data->action = ACTION_SAVE;
3206 } else if (str_has_prefix(action_name, "snapshot")) {
3207 char *params = strsep(&str, ")");
3208
3209 if (!str) {
3210 hist_err(tr, HIST_ERR_NO_CLOSING_PAREN, errpos(params));
3211 ret = -EINVAL;
3212 goto out;
3213 }
3214
3215 if (handler == HANDLER_ONMAX)
3216 data->track_data.check_val = check_track_val_max;
3217 else if (handler == HANDLER_ONCHANGE)
3218 data->track_data.check_val = check_track_val_changed;
3219 else {
3220 hist_err(tr, HIST_ERR_ACTION_MISMATCH, errpos(action_name));
3221 ret = -EINVAL;
3222 goto out;
3223 }
3224
3225 data->track_data.save_data = save_track_data_snapshot;
3226 data->fn = ontrack_action;
3227 data->action = ACTION_SNAPSHOT;
3228 } else {
3229 char *params = strsep(&str, ")");
3230
3231 if (str_has_prefix(action_name, "trace"))
3232 data->use_trace_keyword = true;
3233
3234 if (params) {
3235 ret = parse_action_params(tr, params, data);
3236 if (ret)
3237 goto out;
3238 }
3239
3240 if (handler == HANDLER_ONMAX)
3241 data->track_data.check_val = check_track_val_max;
3242 else if (handler == HANDLER_ONCHANGE)
3243 data->track_data.check_val = check_track_val_changed;
3244
3245 if (handler != HANDLER_ONMATCH) {
3246 data->track_data.save_data = action_trace;
3247 data->fn = ontrack_action;
3248 } else
3249 data->fn = action_trace;
3250
3251 data->action = ACTION_TRACE;
3252 }
3253
3254 data->action_name = kstrdup(action_name, GFP_KERNEL);
3255 if (!data->action_name) {
3256 ret = -ENOMEM;
3257 goto out;
3258 }
3259
3260 data->handler = handler;
3261 out:
3262 return ret;
3263 }
3264
3265 static struct action_data *track_data_parse(struct hist_trigger_data *hist_data,
3266 char *str, enum handler_id handler)
3267 {
3268 struct action_data *data;
3269 int ret = -EINVAL;
3270 char *var_str;
3271
3272 data = kzalloc(sizeof(*data), GFP_KERNEL);
3273 if (!data)
3274 return ERR_PTR(-ENOMEM);
3275
3276 var_str = strsep(&str, ")");
3277 if (!var_str || !str) {
3278 ret = -EINVAL;
3279 goto free;
3280 }
3281
3282 data->track_data.var_str = kstrdup(var_str, GFP_KERNEL);
3283 if (!data->track_data.var_str) {
3284 ret = -ENOMEM;
3285 goto free;
3286 }
3287
3288 ret = action_parse(hist_data->event_file->tr, str, data, handler);
3289 if (ret)
3290 goto free;
3291 out:
3292 return data;
3293 free:
3294 track_data_destroy(hist_data, data);
3295 data = ERR_PTR(ret);
3296 goto out;
3297 }
3298
3299 static void onmatch_destroy(struct action_data *data)
3300 {
3301 kfree(data->match_data.event);
3302 kfree(data->match_data.event_system);
3303
3304 action_data_destroy(data);
3305 }
3306
3307 static void destroy_field_var(struct field_var *field_var)
3308 {
3309 if (!field_var)
3310 return;
3311
3312 destroy_hist_field(field_var->var, 0);
3313 destroy_hist_field(field_var->val, 0);
3314
3315 kfree(field_var);
3316 }
3317
3318 static void destroy_field_vars(struct hist_trigger_data *hist_data)
3319 {
3320 unsigned int i;
3321
3322 for (i = 0; i < hist_data->n_field_vars; i++)
3323 destroy_field_var(hist_data->field_vars[i]);
3324
3325 for (i = 0; i < hist_data->n_save_vars; i++)
3326 destroy_field_var(hist_data->save_vars[i]);
3327 }
3328
3329 static void save_field_var(struct hist_trigger_data *hist_data,
3330 struct field_var *field_var)
3331 {
3332 hist_data->field_vars[hist_data->n_field_vars++] = field_var;
3333
3334 if (field_var->val->flags & HIST_FIELD_FL_STRING)
3335 hist_data->n_field_var_str++;
3336 }
3337
3338
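/*
 * Verify that the hist_field supplied as parameter field_pos is
 * compatible with the corresponding synthetic event field: a dynamic
 * string field accepts any string, otherwise the types must agree, or
 * failing that have equal size and (for non-strings) matching
 * signedness.
 */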
3339 static int check_synth_field(struct synth_event *event,
3340 struct hist_field *hist_field,
3341 unsigned int field_pos)
3342 {
3343 struct synth_field *field;
3344
3345 if (field_pos >= event->n_fields)
3346 return -EINVAL;
3347
3348 field = event->fields[field_pos];
3349
3350 /*
3351 * A dynamic string synth field can accept a static or
3352 * dynamic string. A static string synth field can only accept a
3353 * same-sized static string, which is checked for later.
3354 */
3355 if (strstr(hist_field->type, "char[") && field->is_string
3356 && field->is_dynamic)
3357 return 0;
3358
3359 if (strcmp(field->type, hist_field->type) != 0) {
3360 if (field->size != hist_field->size ||
3361 (!field->is_string && field->is_signed != hist_field->is_signed))
3362 return -EINVAL;
3363 }
3364
3365 return 0;
3366 }
3367
3368 static struct hist_field *
3369 trace_action_find_var(struct hist_trigger_data *hist_data,
3370 struct action_data *data,
3371 char *system, char *event, char *var)
3372 {
3373 struct trace_array *tr = hist_data->event_file->tr;
3374 struct hist_field *hist_field;
3375
3376 var++; /* skip '$' */
3377
3378 hist_field = find_target_event_var(hist_data, system, event, var);
3379 if (!hist_field) {
3380 if (!system && data->handler == HANDLER_ONMATCH) {
3381 system = data->match_data.event_system;
3382 event = data->match_data.event;
3383 }
3384
3385 hist_field = find_event_var(hist_data, system, event, var);
3386 }
3387
3388 if (!hist_field)
3389 hist_err(tr, HIST_ERR_PARAM_NOT_FOUND, errpos(var));
3390
3391 return hist_field;
3392 }
3393
3394 static struct hist_field *
3395 trace_action_create_field_var(struct hist_trigger_data *hist_data,
3396 struct action_data *data, char *system,
3397 char *event, char *var)
3398 {
3399 struct hist_field *hist_field = NULL;
3400 struct field_var *field_var;
3401
3402 /*
3403 * First try to create a field var on the target event (the event
3404 * currently being defined). This will create a variable for
3405 * unqualified fields on the target event, or if qualified,
3406 * target fields that have qualified names matching the target.
3407 */
3408 field_var = create_target_field_var(hist_data, system, event, var);
3409
3410 if (field_var && !IS_ERR(field_var)) {
3411 save_field_var(hist_data, field_var);
3412 hist_field = field_var->var;
3413 } else {
3414 field_var = NULL;
3415 /*
3416 * If no explicit system.event is specified, default to
3417 * looking for fields on the onmatch(system.event.xxx)
3418 * event.
3419 */
3420 if (!system && data->handler == HANDLER_ONMATCH) {
3421 system = data->match_data.event_system;
3422 event = data->match_data.event;
3423 }
3424
3425 if (!event)
3426 goto free;
3427 /*
3428 * At this point, we're looking at a field on another
3429 * event. Because we can't modify a hist trigger on
3430 * another event to add a variable for a field, we need
3431 * to create a new trigger on that event and create the
3432 * variable at the same time.
3433 */
3434 hist_field = create_field_var_hist(hist_data, system, event, var);
3435 if (IS_ERR(hist_field))
3436 goto free;
3437 }
3438 out:
3439 return hist_field;
3440 free:
3441 destroy_field_var(field_var);
3442 hist_field = NULL;
3443 goto out;
3444 }
3445
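/*
 * Bind a synthetic event to this action: look up the event, then
 * resolve each action parameter to a variable reference (creating
 * field variables as needed) and record the reference index used to
 * supply that synthetic event field at trace time.
 */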
3446 static int trace_action_create(struct hist_trigger_data *hist_data,
3447 struct action_data *data)
3448 {
3449 struct trace_array *tr = hist_data->event_file->tr;
3450 char *event_name, *param, *system = NULL;
3451 struct hist_field *hist_field, *var_ref;
3452 unsigned int i;
3453 unsigned int field_pos = 0;
3454 struct synth_event *event;
3455 char *synth_event_name;
3456 int var_ref_idx, ret = 0;
3457
3458 lockdep_assert_held(&event_mutex);
3459
3460 /* Sanity check to avoid out-of-bound write on 'data->var_ref_idx' */
3461 if (data->n_params > SYNTH_FIELDS_MAX)
3462 return -EINVAL;
3463
3464 if (data->use_trace_keyword)
3465 synth_event_name = data->synth_event_name;
3466 else
3467 synth_event_name = data->action_name;
3468
3469 event = find_synth_event(synth_event_name);
3470 if (!event) {
3471 hist_err(tr, HIST_ERR_SYNTH_EVENT_NOT_FOUND, errpos(synth_event_name));
3472 return -EINVAL;
3473 }
3474
3475 event->ref++;
3476
3477 for (i = 0; i < data->n_params; i++) {
3478 char *p;
3479
3480 p = param = kstrdup(data->params[i], GFP_KERNEL);
3481 if (!param) {
3482 ret = -ENOMEM;
3483 goto err;
3484 }
3485
3486 system = strsep(&param, ".");
3487 if (!param) {
3488 param = (char *)system;
3489 system = event_name = NULL;
3490 } else {
3491 event_name = strsep(&param, ".");
3492 if (!param) {
3493 kfree(p);
3494 ret = -EINVAL;
3495 goto err;
3496 }
3497 }
3498
3499 if (param[0] == '$')
3500 hist_field = trace_action_find_var(hist_data, data,
3501 system, event_name,
3502 param);
3503 else
3504 hist_field = trace_action_create_field_var(hist_data,
3505 data,
3506 system,
3507 event_name,
3508 param);
3509
3510 if (!hist_field) {
3511 kfree(p);
3512 ret = -EINVAL;
3513 goto err;
3514 }
3515
3516 if (check_synth_field(event, hist_field, field_pos) == 0) {
3517 var_ref = create_var_ref(hist_data, hist_field,
3518 system, event_name);
3519 if (!var_ref) {
3520 kfree(p);
3521 ret = -ENOMEM;
3522 goto err;
3523 }
3524
3525 var_ref_idx = find_var_ref_idx(hist_data, var_ref);
3526 if (WARN_ON(var_ref_idx < 0)) {
3527 kfree(p);
3528 ret = var_ref_idx;
3529 goto err;
3530 }
3531
3532 data->var_ref_idx[i] = var_ref_idx;
3533
3534 field_pos++;
3535 kfree(p);
3536 continue;
3537 }
3538
3539 hist_err(tr, HIST_ERR_SYNTH_TYPE_MISMATCH, errpos(param));
3540 kfree(p);
3541 ret = -EINVAL;
3542 goto err;
3543 }
3544
3545 if (field_pos != event->n_fields) {
3546 hist_err(tr, HIST_ERR_SYNTH_COUNT_MISMATCH, errpos(event->name));
3547 ret = -EINVAL;
3548 goto err;
3549 }
3550
3551 data->synth_event = event;
3552 out:
3553 return ret;
3554 err:
3555 event->ref--;
3556
3557 goto out;
3558 }
3559
3560 static int action_create(struct hist_trigger_data *hist_data,
3561 struct action_data *data)
3562 {
3563 struct trace_event_file *file = hist_data->event_file;
3564 struct trace_array *tr = file->tr;
3565 struct track_data *track_data;
3566 struct field_var *field_var;
3567 unsigned int i;
3568 char *param;
3569 int ret = 0;
3570
3571 if (data->action == ACTION_TRACE)
3572 return trace_action_create(hist_data, data);
3573
3574 if (data->action == ACTION_SNAPSHOT) {
3575 track_data = track_data_alloc(hist_data->key_size, data, hist_data);
3576 if (IS_ERR(track_data)) {
3577 ret = PTR_ERR(track_data);
3578 goto out;
3579 }
3580
3581 ret = tracing_snapshot_cond_enable(file->tr, track_data,
3582 cond_snapshot_update);
3583 if (ret)
3584 track_data_free(track_data);
3585
3586 goto out;
3587 }
3588
3589 if (data->action == ACTION_SAVE) {
3590 if (hist_data->n_save_vars) {
3591 ret = -EEXIST;
3592 hist_err(tr, HIST_ERR_TOO_MANY_SAVE_ACTIONS, 0);
3593 goto out;
3594 }
3595
3596 for (i = 0; i < data->n_params; i++) {
3597 param = kstrdup(data->params[i], GFP_KERNEL);
3598 if (!param) {
3599 ret = -ENOMEM;
3600 goto out;
3601 }
3602
3603 field_var = create_target_field_var(hist_data, NULL, NULL, param);
3604 if (IS_ERR(field_var)) {
3605 hist_err(tr, HIST_ERR_FIELD_VAR_CREATE_FAIL,
3606 errpos(param));
3607 ret = PTR_ERR(field_var);
3608 kfree(param);
3609 goto out;
3610 }
3611
3612 hist_data->save_vars[hist_data->n_save_vars++] = field_var;
3613 if (field_var->val->flags & HIST_FIELD_FL_STRING)
3614 hist_data->n_save_var_str++;
3615 kfree(param);
3616 }
3617 }
3618 out:
3619 return ret;
3620 }
3621
3622 static int onmatch_create(struct hist_trigger_data *hist_data,
3623 struct action_data *data)
3624 {
3625 return action_create(hist_data, data);
3626 }
3627
3628 static struct action_data *onmatch_parse(struct trace_array *tr, char *str)
3629 {
3630 char *match_event, *match_event_system;
3631 struct action_data *data;
3632 int ret = -EINVAL;
3633
3634 data = kzalloc(sizeof(*data), GFP_KERNEL);
3635 if (!data)
3636 return ERR_PTR(-ENOMEM);
3637
3638 match_event = strsep(&str, ")");
3639 if (!match_event || !str) {
3640 hist_err(tr, HIST_ERR_NO_CLOSING_PAREN, errpos(match_event));
3641 goto free;
3642 }
3643
3644 match_event_system = strsep(&match_event, ".");
3645 if (!match_event) {
3646 hist_err(tr, HIST_ERR_SUBSYS_NOT_FOUND, errpos(match_event_system));
3647 goto free;
3648 }
3649
3650 if (IS_ERR(event_file(tr, match_event_system, match_event))) {
3651 hist_err(tr, HIST_ERR_INVALID_SUBSYS_EVENT, errpos(match_event));
3652 goto free;
3653 }
3654
3655 data->match_data.event = kstrdup(match_event, GFP_KERNEL);
3656 if (!data->match_data.event) {
3657 ret = -ENOMEM;
3658 goto free;
3659 }
3660
3661 data->match_data.event_system = kstrdup(match_event_system, GFP_KERNEL);
3662 if (!data->match_data.event_system) {
3663 ret = -ENOMEM;
3664 goto free;
3665 }
3666
3667 ret = action_parse(tr, str, data, HANDLER_ONMATCH);
3668 if (ret)
3669 goto free;
3670 out:
3671 return data;
3672 free:
3673 onmatch_destroy(data);
3674 data = ERR_PTR(ret);
3675 goto out;
3676 }
3677
3678 static int create_hitcount_val(struct hist_trigger_data *hist_data)
3679 {
3680 hist_data->fields[HITCOUNT_IDX] =
3681 create_hist_field(hist_data, NULL, HIST_FIELD_FL_HITCOUNT, NULL);
3682 if (!hist_data->fields[HITCOUNT_IDX])
3683 return -ENOMEM;
3684
3685 hist_data->n_vals++;
3686 hist_data->n_fields++;
3687
3688 if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX))
3689 return -EINVAL;
3690
3691 return 0;
3692 }
3693
3694 static int __create_val_field(struct hist_trigger_data *hist_data,
3695 unsigned int val_idx,
3696 struct trace_event_file *file,
3697 char *var_name, char *field_str,
3698 unsigned long flags)
3699 {
3700 struct hist_field *hist_field;
3701 int ret = 0;
3702
3703 hist_field = parse_expr(hist_data, file, field_str, flags, var_name, 0);
3704 if (IS_ERR(hist_field)) {
3705 ret = PTR_ERR(hist_field);
3706 goto out;
3707 }
3708
3709 hist_data->fields[val_idx] = hist_field;
3710
3711 ++hist_data->n_vals;
3712 ++hist_data->n_fields;
3713
3714 if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX + TRACING_MAP_VARS_MAX))
3715 ret = -EINVAL;
3716 out:
3717 return ret;
3718 }
3719
3720 static int create_val_field(struct hist_trigger_data *hist_data,
3721 unsigned int val_idx,
3722 struct trace_event_file *file,
3723 char *field_str)
3724 {
3725 if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX))
3726 return -EINVAL;
3727
3728 return __create_val_field(hist_data, val_idx, file, NULL, field_str, 0);
3729 }
3730
3731 static int create_var_field(struct hist_trigger_data *hist_data,
3732 unsigned int val_idx,
3733 struct trace_event_file *file,
3734 char *var_name, char *expr_str)
3735 {
3736 struct trace_array *tr = hist_data->event_file->tr;
3737 unsigned long flags = 0;
3738 int ret;
3739
3740 if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX + TRACING_MAP_VARS_MAX))
3741 return -EINVAL;
3742
3743 if (find_var(hist_data, file, var_name) && !hist_data->remove) {
3744 hist_err(tr, HIST_ERR_DUPLICATE_VAR, errpos(var_name));
3745 return -EINVAL;
3746 }
3747
3748 flags |= HIST_FIELD_FL_VAR;
3749 hist_data->n_vars++;
3750 if (WARN_ON(hist_data->n_vars > TRACING_MAP_VARS_MAX))
3751 return -EINVAL;
3752
3753 ret = __create_val_field(hist_data, val_idx, file, var_name, expr_str, flags);
3754
3755 if (!ret && hist_data->fields[val_idx]->flags & HIST_FIELD_FL_STRING)
3756 hist_data->fields[val_idx]->var_str_idx = hist_data->n_var_str++;
3757
3758 return ret;
3759 }
3760
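/*
 * Parse the comma-separated 'vals=' list, e.g. 'vals=hitcount,len'.
 * 'hitcount' is always present implicitly, so explicit mentions of it
 * are simply skipped.
 */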
3761 static int create_val_fields(struct hist_trigger_data *hist_data,
3762 struct trace_event_file *file)
3763 {
3764 char *fields_str, *field_str;
3765 unsigned int i, j = 1;
3766 int ret;
3767
3768 ret = create_hitcount_val(hist_data);
3769 if (ret)
3770 goto out;
3771
3772 fields_str = hist_data->attrs->vals_str;
3773 if (!fields_str)
3774 goto out;
3775
3776 for (i = 0, j = 1; i < TRACING_MAP_VALS_MAX &&
3777 j < TRACING_MAP_VALS_MAX; i++) {
3778 field_str = strsep(&fields_str, ",");
3779 if (!field_str)
3780 break;
3781
3782 if (strcmp(field_str, "hitcount") == 0)
3783 continue;
3784
3785 ret = create_val_field(hist_data, j++, file, field_str);
3786 if (ret)
3787 goto out;
3788 }
3789
3790 if (fields_str && (strcmp(fields_str, "hitcount") != 0))
3791 ret = -EINVAL;
3792 out:
3793 return ret;
3794 }
3795
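/*
 * Create a single key field - either the special 'stacktrace' key or a
 * parsed field/expression.  Returns the u64-aligned key size on success
 * so the caller can accumulate compound-key offsets.
 */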
3796 static int create_key_field(struct hist_trigger_data *hist_data,
3797 unsigned int key_idx,
3798 unsigned int key_offset,
3799 struct trace_event_file *file,
3800 char *field_str)
3801 {
3802 struct trace_array *tr = hist_data->event_file->tr;
3803 struct hist_field *hist_field = NULL;
3804 unsigned long flags = 0;
3805 unsigned int key_size;
3806 int ret = 0;
3807
3808 if (WARN_ON(key_idx >= HIST_FIELDS_MAX))
3809 return -EINVAL;
3810
3811 flags |= HIST_FIELD_FL_KEY;
3812
3813 if (strcmp(field_str, "stacktrace") == 0) {
3814 flags |= HIST_FIELD_FL_STACKTRACE;
3815 key_size = sizeof(unsigned long) * HIST_STACKTRACE_DEPTH;
3816 hist_field = create_hist_field(hist_data, NULL, flags, NULL);
3817 } else {
3818 hist_field = parse_expr(hist_data, file, field_str, flags,
3819 NULL, 0);
3820 if (IS_ERR(hist_field)) {
3821 ret = PTR_ERR(hist_field);
3822 goto out;
3823 }
3824
3825 if (field_has_hist_vars(hist_field, 0)) {
3826 hist_err(tr, HIST_ERR_INVALID_REF_KEY, errpos(field_str));
3827 destroy_hist_field(hist_field, 0);
3828 ret = -EINVAL;
3829 goto out;
3830 }
3831
3832 key_size = hist_field->size;
3833 }
3834
3835 hist_data->fields[key_idx] = hist_field;
3836
3837 key_size = ALIGN(key_size, sizeof(u64));
3838 hist_data->fields[key_idx]->size = key_size;
3839 hist_data->fields[key_idx]->offset = key_offset;
3840
3841 hist_data->key_size += key_size;
3842
3843 if (hist_data->key_size > HIST_KEY_SIZE_MAX) {
3844 ret = -EINVAL;
3845 goto out;
3846 }
3847
3848 hist_data->n_keys++;
3849 hist_data->n_fields++;
3850
3851 if (WARN_ON(hist_data->n_keys > TRACING_MAP_KEYS_MAX))
3852 return -EINVAL;
3853
3854 ret = key_size;
3855 out:
3856 return ret;
3857 }
3858
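/*
 * Parse the comma-separated 'keys=' list, e.g. 'keys=pid,prio' or
 * 'keys=stacktrace'.  Key fields are placed after the value fields in
 * hist_data->fields[], and each key's offset into the compound key is
 * derived from the sizes returned by create_key_field().
 */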
3859 static int create_key_fields(struct hist_trigger_data *hist_data,
3860 struct trace_event_file *file)
3861 {
3862 unsigned int i, key_offset = 0, n_vals = hist_data->n_vals;
3863 char *fields_str, *field_str;
3864 int ret = -EINVAL;
3865
3866 fields_str = hist_data->attrs->keys_str;
3867 if (!fields_str)
3868 goto out;
3869
3870 for (i = n_vals; i < n_vals + TRACING_MAP_KEYS_MAX; i++) {
3871 field_str = strsep(&fields_str, ",");
3872 if (!field_str)
3873 break;
3874 ret = create_key_field(hist_data, i, key_offset,
3875 file, field_str);
3876 if (ret < 0)
3877 goto out;
3878 key_offset += ret;
3879 }
3880 if (fields_str) {
3881 ret = -EINVAL;
3882 goto out;
3883 }
3884 ret = 0;
3885 out:
3886 return ret;
3887 }
3888
3889 static int create_var_fields(struct hist_trigger_data *hist_data,
3890 struct trace_event_file *file)
3891 {
3892 unsigned int i, j = hist_data->n_vals;
3893 int ret = 0;
3894
3895 unsigned int n_vars = hist_data->attrs->var_defs.n_vars;
3896
3897 for (i = 0; i < n_vars; i++) {
3898 char *var_name = hist_data->attrs->var_defs.name[i];
3899 char *expr = hist_data->attrs->var_defs.expr[i];
3900
3901 ret = create_var_field(hist_data, j++, file, var_name, expr);
3902 if (ret)
3903 goto out;
3904 }
3905 out:
3906 return ret;
3907 }
3908
3909 static void free_var_defs(struct hist_trigger_data *hist_data)
3910 {
3911 unsigned int i;
3912
3913 for (i = 0; i < hist_data->attrs->var_defs.n_vars; i++) {
3914 kfree(hist_data->attrs->var_defs.name[i]);
3915 kfree(hist_data->attrs->var_defs.expr[i]);
3916 }
3917
3918 hist_data->attrs->var_defs.n_vars = 0;
3919 }
3920
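/*
 * Split each 'name=expr' assignment (e.g. 'ts0=common_timestamp.usecs')
 * into a name/expression pair saved in attrs->var_defs for later
 * variable creation.  Multiple assignments may be comma-separated
 * within a single assignment string.
 */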
3921 static int parse_var_defs(struct hist_trigger_data *hist_data)
3922 {
3923 struct trace_array *tr = hist_data->event_file->tr;
3924 char *s, *str, *var_name, *field_str;
3925 unsigned int i, j, n_vars = 0;
3926 int ret = 0;
3927
3928 for (i = 0; i < hist_data->attrs->n_assignments; i++) {
3929 str = hist_data->attrs->assignment_str[i];
3930 for (j = 0; j < TRACING_MAP_VARS_MAX; j++) {
3931 field_str = strsep(&str, ",");
3932 if (!field_str)
3933 break;
3934
3935 var_name = strsep(&field_str, "=");
3936 if (!var_name || !field_str) {
3937 hist_err(tr, HIST_ERR_MALFORMED_ASSIGNMENT,
3938 errpos(var_name));
3939 ret = -EINVAL;
3940 goto free;
3941 }
3942
3943 if (n_vars == TRACING_MAP_VARS_MAX) {
3944 hist_err(tr, HIST_ERR_TOO_MANY_VARS, errpos(var_name));
3945 ret = -EINVAL;
3946 goto free;
3947 }
3948
3949 s = kstrdup(var_name, GFP_KERNEL);
3950 if (!s) {
3951 ret = -ENOMEM;
3952 goto free;
3953 }
3954 hist_data->attrs->var_defs.name[n_vars] = s;
3955
3956 s = kstrdup(field_str, GFP_KERNEL);
3957 if (!s) {
3958 kfree(hist_data->attrs->var_defs.name[n_vars]);
3959 hist_data->attrs->var_defs.name[n_vars] = NULL;
3960 ret = -ENOMEM;
3961 goto free;
3962 }
3963 hist_data->attrs->var_defs.expr[n_vars++] = s;
3964
3965 hist_data->attrs->var_defs.n_vars = n_vars;
3966 }
3967 }
3968
3969 return ret;
3970 free:
3971 free_var_defs(hist_data);
3972
3973 return ret;
3974 }
3975
3976 static int create_hist_fields(struct hist_trigger_data *hist_data,
3977 struct trace_event_file *file)
3978 {
3979 int ret;
3980
3981 ret = parse_var_defs(hist_data);
3982 if (ret)
3983 goto out;
3984
3985 ret = create_val_fields(hist_data, file);
3986 if (ret)
3987 goto out;
3988
3989 ret = create_var_fields(hist_data, file);
3990 if (ret)
3991 goto out;
3992
3993 ret = create_key_fields(hist_data, file);
3994 if (ret)
3995 goto out;
3996 out:
3997 free_var_defs(hist_data);
3998
3999 return ret;
4000 }
4001
4002 static int is_descending(struct trace_array *tr, const char *str)
4003 {
4004 if (!str)
4005 return 0;
4006
4007 if (strcmp(str, "descending") == 0)
4008 return 1;
4009
4010 if (strcmp(str, "ascending") == 0)
4011 return 0;
4012
4013 hist_err(tr, HIST_ERR_INVALID_SORT_MODIFIER, errpos((char *)str));
4014
4015 return -EINVAL;
4016 }
4017
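/*
 * Parse the 'sort=' specification, e.g. 'sort=hitcount.descending'.
 * Each named sort field is mapped onto a value-field index (variables
 * are skipped); hitcount is the implicit default sort key, and at most
 * TRACING_MAP_SORT_KEYS_MAX keys are accepted.
 */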
4018 static int create_sort_keys(struct hist_trigger_data *hist_data)
4019 {
4020 struct trace_array *tr = hist_data->event_file->tr;
4021 char *fields_str = hist_data->attrs->sort_key_str;
4022 struct tracing_map_sort_key *sort_key;
4023 int descending, ret = 0;
4024 unsigned int i, j, k;
4025
4026 hist_data->n_sort_keys = 1; /* we always have at least one, hitcount */
4027
4028 if (!fields_str)
4029 goto out;
4030
4031 for (i = 0; i < TRACING_MAP_SORT_KEYS_MAX; i++) {
4032 struct hist_field *hist_field;
4033 char *field_str, *field_name;
4034 const char *test_name;
4035
4036 sort_key = &hist_data->sort_keys[i];
4037
4038 field_str = strsep(&fields_str, ",");
4039 if (!field_str)
4040 break;
4041
4042 if (!*field_str) {
4043 ret = -EINVAL;
4044 hist_err(tr, HIST_ERR_EMPTY_SORT_FIELD, errpos("sort="));
4045 break;
4046 }
4047
4048 if ((i == TRACING_MAP_SORT_KEYS_MAX - 1) && fields_str) {
4049 hist_err(tr, HIST_ERR_TOO_MANY_SORT_FIELDS, errpos("sort="));
4050 ret = -EINVAL;
4051 break;
4052 }
4053
4054 field_name = strsep(&field_str, ".");
4055 if (!field_name || !*field_name) {
4056 ret = -EINVAL;
4057 hist_err(tr, HIST_ERR_EMPTY_SORT_FIELD, errpos("sort="));
4058 break;
4059 }
4060
4061 if (strcmp(field_name, "hitcount") == 0) {
4062 descending = is_descending(tr, field_str);
4063 if (descending < 0) {
4064 ret = descending;
4065 break;
4066 }
4067 sort_key->descending = descending;
4068 continue;
4069 }
4070
4071 for (j = 1, k = 1; j < hist_data->n_fields; j++) {
4072 unsigned int idx;
4073
4074 hist_field = hist_data->fields[j];
4075 if (hist_field->flags & HIST_FIELD_FL_VAR)
4076 continue;
4077
4078 idx = k++;
4079
4080 test_name = hist_field_name(hist_field, 0);
4081
4082 if (strcmp(field_name, test_name) == 0) {
4083 sort_key->field_idx = idx;
4084 descending = is_descending(tr, field_str);
4085 if (descending < 0) {
4086 ret = descending;
4087 goto out;
4088 }
4089 sort_key->descending = descending;
4090 break;
4091 }
4092 }
4093 if (j == hist_data->n_fields) {
4094 ret = -EINVAL;
4095 hist_err(tr, HIST_ERR_INVALID_SORT_FIELD, errpos(field_name));
4096 break;
4097 }
4098 }
4099
4100 hist_data->n_sort_keys = i;
4101 out:
4102 return ret;
4103 }
4104
4105 static void destroy_actions(struct hist_trigger_data *hist_data)
4106 {
4107 unsigned int i;
4108
4109 for (i = 0; i < hist_data->n_actions; i++) {
4110 struct action_data *data = hist_data->actions[i];
4111
4112 if (data->handler == HANDLER_ONMATCH)
4113 onmatch_destroy(data);
4114 else if (data->handler == HANDLER_ONMAX ||
4115 data->handler == HANDLER_ONCHANGE)
4116 track_data_destroy(hist_data, data);
4117 else
4118 kfree(data);
4119 }
4120 }
4121
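/*
 * Parse each action string into an action_data, dispatching on the
 * handler prefix: onmatch(subsys.event), onmax(var) or onchange(var),
 * e.g. 'onmax($wakeup_lat).save(next_comm,prev_pid)'.
 */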
4122 static int parse_actions(struct hist_trigger_data *hist_data)
4123 {
4124 struct trace_array *tr = hist_data->event_file->tr;
4125 struct action_data *data;
4126 unsigned int i;
4127 int ret = 0;
4128 char *str;
4129 int len;
4130
4131 for (i = 0; i < hist_data->attrs->n_actions; i++) {
4132 str = hist_data->attrs->action_str[i];
4133
4134 if ((len = str_has_prefix(str, "onmatch("))) {
4135 char *action_str = str + len;
4136
4137 data = onmatch_parse(tr, action_str);
4138 if (IS_ERR(data)) {
4139 ret = PTR_ERR(data);
4140 break;
4141 }
4142 } else if ((len = str_has_prefix(str, "onmax("))) {
4143 char *action_str = str + len;
4144
4145 data = track_data_parse(hist_data, action_str,
4146 HANDLER_ONMAX);
4147 if (IS_ERR(data)) {
4148 ret = PTR_ERR(data);
4149 break;
4150 }
4151 } else if ((len = str_has_prefix(str, "onchange("))) {
4152 char *action_str = str + len;
4153
4154 data = track_data_parse(hist_data, action_str,
4155 HANDLER_ONCHANGE);
4156 if (IS_ERR(data)) {
4157 ret = PTR_ERR(data);
4158 break;
4159 }
4160 } else {
4161 ret = -EINVAL;
4162 break;
4163 }
4164
4165 hist_data->actions[hist_data->n_actions++] = data;
4166 }
4167
4168 return ret;
4169 }
4170
4171 static int create_actions(struct hist_trigger_data *hist_data)
4172 {
4173 struct action_data *data;
4174 unsigned int i;
4175 int ret = 0;
4176
4177 for (i = 0; i < hist_data->attrs->n_actions; i++) {
4178 data = hist_data->actions[i];
4179
4180 if (data->handler == HANDLER_ONMATCH) {
4181 ret = onmatch_create(hist_data, data);
4182 if (ret)
4183 break;
4184 } else if (data->handler == HANDLER_ONMAX ||
4185 data->handler == HANDLER_ONCHANGE) {
4186 ret = track_data_create(hist_data, data);
4187 if (ret)
4188 break;
4189 } else {
4190 ret = -EINVAL;
4191 break;
4192 }
4193 }
4194
4195 return ret;
4196 }
4197
4198 static void print_actions(struct seq_file *m,
4199 struct hist_trigger_data *hist_data,
4200 struct tracing_map_elt *elt)
4201 {
4202 unsigned int i;
4203
4204 for (i = 0; i < hist_data->n_actions; i++) {
4205 struct action_data *data = hist_data->actions[i];
4206
4207 if (data->action == ACTION_SNAPSHOT)
4208 continue;
4209
4210 if (data->handler == HANDLER_ONMAX ||
4211 data->handler == HANDLER_ONCHANGE)
4212 track_data_print(m, hist_data, elt, data);
4213 }
4214 }
4215
4216 static void print_action_spec(struct seq_file *m,
4217 struct hist_trigger_data *hist_data,
4218 struct action_data *data)
4219 {
4220 unsigned int i;
4221
4222 if (data->action == ACTION_SAVE) {
4223 for (i = 0; i < hist_data->n_save_vars; i++) {
4224 seq_printf(m, "%s", hist_data->save_vars[i]->var->var.name);
4225 if (i < hist_data->n_save_vars - 1)
4226 seq_puts(m, ",");
4227 }
4228 } else if (data->action == ACTION_TRACE) {
4229 if (data->use_trace_keyword)
4230 seq_printf(m, "%s", data->synth_event_name);
4231 for (i = 0; i < data->n_params; i++) {
4232 if (i || data->use_trace_keyword)
4233 seq_puts(m, ",");
4234 seq_printf(m, "%s", data->params[i]);
4235 }
4236 }
4237 }
4238
4239 static void print_track_data_spec(struct seq_file *m,
4240 struct hist_trigger_data *hist_data,
4241 struct action_data *data)
4242 {
4243 if (data->handler == HANDLER_ONMAX)
4244 seq_puts(m, ":onmax(");
4245 else if (data->handler == HANDLER_ONCHANGE)
4246 seq_puts(m, ":onchange(");
4247 seq_printf(m, "%s", data->track_data.var_str);
4248 seq_printf(m, ").%s(", data->action_name);
4249
4250 print_action_spec(m, hist_data, data);
4251
4252 seq_puts(m, ")");
4253 }
4254
4255 static void print_onmatch_spec(struct seq_file *m,
4256 struct hist_trigger_data *hist_data,
4257 struct action_data *data)
4258 {
4259 seq_printf(m, ":onmatch(%s.%s).", data->match_data.event_system,
4260 data->match_data.event);
4261
4262 seq_printf(m, "%s(", data->action_name);
4263
4264 print_action_spec(m, hist_data, data);
4265
4266 seq_puts(m, ")");
4267 }
4268
4269 static bool actions_match(struct hist_trigger_data *hist_data,
4270 struct hist_trigger_data *hist_data_test)
4271 {
4272 unsigned int i, j;
4273
4274 if (hist_data->n_actions != hist_data_test->n_actions)
4275 return false;
4276
4277 for (i = 0; i < hist_data->n_actions; i++) {
4278 struct action_data *data = hist_data->actions[i];
4279 struct action_data *data_test = hist_data_test->actions[i];
4280 char *action_name, *action_name_test;
4281
4282 if (data->handler != data_test->handler)
4283 return false;
4284 if (data->action != data_test->action)
4285 return false;
4286
4287 if (data->n_params != data_test->n_params)
4288 return false;
4289
4290 for (j = 0; j < data->n_params; j++) {
4291 if (strcmp(data->params[j], data_test->params[j]) != 0)
4292 return false;
4293 }
4294
4295 if (data->use_trace_keyword)
4296 action_name = data->synth_event_name;
4297 else
4298 action_name = data->action_name;
4299
4300 if (data_test->use_trace_keyword)
4301 action_name_test = data_test->synth_event_name;
4302 else
4303 action_name_test = data_test->action_name;
4304
4305 if (strcmp(action_name, action_name_test) != 0)
4306 return false;
4307
4308 if (data->handler == HANDLER_ONMATCH) {
4309 if (strcmp(data->match_data.event_system,
4310 data_test->match_data.event_system) != 0)
4311 return false;
4312 if (strcmp(data->match_data.event,
4313 data_test->match_data.event) != 0)
4314 return false;
4315 } else if (data->handler == HANDLER_ONMAX ||
4316 data->handler == HANDLER_ONCHANGE) {
4317 if (strcmp(data->track_data.var_str,
4318 data_test->track_data.var_str) != 0)
4319 return false;
4320 }
4321 }
4322
4323 return true;
4324 }
4325
4326
4327 static void print_actions_spec(struct seq_file *m,
4328 struct hist_trigger_data *hist_data)
4329 {
4330 unsigned int i;
4331
4332 for (i = 0; i < hist_data->n_actions; i++) {
4333 struct action_data *data = hist_data->actions[i];
4334
4335 if (data->handler == HANDLER_ONMATCH)
4336 print_onmatch_spec(m, hist_data, data);
4337 else if (data->handler == HANDLER_ONMAX ||
4338 data->handler == HANDLER_ONCHANGE)
4339 print_track_data_spec(m, hist_data, data);
4340 }
4341 }
4342
4343 static void destroy_field_var_hists(struct hist_trigger_data *hist_data)
4344 {
4345 unsigned int i;
4346
4347 for (i = 0; i < hist_data->n_field_var_hists; i++) {
4348 kfree(hist_data->field_var_hists[i]->cmd);
4349 kfree(hist_data->field_var_hists[i]);
4350 }
4351 }
4352
4353 static void destroy_hist_data(struct hist_trigger_data *hist_data)
4354 {
4355 if (!hist_data)
4356 return;
4357
4358 destroy_hist_trigger_attrs(hist_data->attrs);
4359 destroy_hist_fields(hist_data);
4360 tracing_map_destroy(hist_data->map);
4361
4362 destroy_actions(hist_data);
4363 destroy_field_vars(hist_data);
4364 destroy_field_var_hists(hist_data);
4365
4366 kfree(hist_data);
4367 }
4368
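/*
 * Register the parsed hist_fields with the tracing_map: keys get an
 * appropriate compare function (none for stacktrace keys, string or
 * numeric compare otherwise), plain values become sum fields, and
 * variables are given per-element variable slots.
 */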
4369 static int create_tracing_map_fields(struct hist_trigger_data *hist_data)
4370 {
4371 struct tracing_map *map = hist_data->map;
4372 struct ftrace_event_field *field;
4373 struct hist_field *hist_field;
4374 int i, idx = 0;
4375
4376 for_each_hist_field(i, hist_data) {
4377 hist_field = hist_data->fields[i];
4378 if (hist_field->flags & HIST_FIELD_FL_KEY) {
4379 tracing_map_cmp_fn_t cmp_fn;
4380
4381 field = hist_field->field;
4382
4383 if (hist_field->flags & HIST_FIELD_FL_STACKTRACE)
4384 cmp_fn = tracing_map_cmp_none;
4385 else if (!field || hist_field->flags & HIST_FIELD_FL_CPU)
4386 cmp_fn = tracing_map_cmp_num(hist_field->size,
4387 hist_field->is_signed);
4388 else if (is_string_field(field))
4389 cmp_fn = tracing_map_cmp_string;
4390 else
4391 cmp_fn = tracing_map_cmp_num(field->size,
4392 field->is_signed);
4393 idx = tracing_map_add_key_field(map,
4394 hist_field->offset,
4395 cmp_fn);
4396 } else if (!(hist_field->flags & HIST_FIELD_FL_VAR))
4397 idx = tracing_map_add_sum_field(map);
4398
4399 if (idx < 0)
4400 return idx;
4401
4402 if (hist_field->flags & HIST_FIELD_FL_VAR) {
4403 idx = tracing_map_add_var(map);
4404 if (idx < 0)
4405 return idx;
4406 hist_field->var.idx = idx;
4407 hist_field->var.hist_data = hist_data;
4408 }
4409 }
4410
4411 return 0;
4412 }
4413
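/*
 * Allocate and fully construct a hist_trigger_data from the parsed
 * attrs: parse actions and fields, set up sort keys, then create the
 * backing tracing_map and register its fields.  On failure, everything
 * allocated so far is torn down and an ERR_PTR is returned.
 */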
4414 static struct hist_trigger_data *
4415 create_hist_data(unsigned int map_bits,
4416 struct hist_trigger_attrs *attrs,
4417 struct trace_event_file *file,
4418 bool remove)
4419 {
4420 const struct tracing_map_ops *map_ops = NULL;
4421 struct hist_trigger_data *hist_data;
4422 int ret = 0;
4423
4424 hist_data = kzalloc(sizeof(*hist_data), GFP_KERNEL);
4425 if (!hist_data)
4426 return ERR_PTR(-ENOMEM);
4427
4428 hist_data->attrs = attrs;
4429 hist_data->remove = remove;
4430 hist_data->event_file = file;
4431
4432 ret = parse_actions(hist_data);
4433 if (ret)
4434 goto free;
4435
4436 ret = create_hist_fields(hist_data, file);
4437 if (ret)
4438 goto free;
4439
4440 ret = create_sort_keys(hist_data);
4441 if (ret)
4442 goto free;
4443
4444 map_ops = &hist_trigger_elt_data_ops;
4445
4446 hist_data->map = tracing_map_create(map_bits, hist_data->key_size,
4447 map_ops, hist_data);
4448 if (IS_ERR(hist_data->map)) {
4449 ret = PTR_ERR(hist_data->map);
4450 hist_data->map = NULL;
4451 goto free;
4452 }
4453
4454 ret = create_tracing_map_fields(hist_data);
4455 if (ret)
4456 goto free;
4457 out:
4458 return hist_data;
4459 free:
4460 hist_data->attrs = NULL;
4461
4462 destroy_hist_data(hist_data);
4463
4464 hist_data = ERR_PTR(ret);
4465
4466 goto out;
4467 }
4468
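/*
 * Update a map element for one event hit: evaluate each value field,
 * accumulating sums and setting per-element variables (string variable
 * values are first copied into per-element storage), then set any
 * variables attached to key fields and update the field variables.
 */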
4469 static void hist_trigger_elt_update(struct hist_trigger_data *hist_data,
4470 struct tracing_map_elt *elt, void *rec,
4471 struct ring_buffer_event *rbe,
4472 u64 *var_ref_vals)
4473 {
4474 struct hist_elt_data *elt_data;
4475 struct hist_field *hist_field;
4476 unsigned int i, var_idx;
4477 u64 hist_val;
4478
4479 elt_data = elt->private_data;
4480 elt_data->var_ref_vals = var_ref_vals;
4481
4482 for_each_hist_val_field(i, hist_data) {
4483 hist_field = hist_data->fields[i];
4484 hist_val = hist_field->fn(hist_field, elt, rbe, rec);
4485 if (hist_field->flags & HIST_FIELD_FL_VAR) {
4486 var_idx = hist_field->var.idx;
4487
4488 if (hist_field->flags & HIST_FIELD_FL_STRING) {
4489 unsigned int str_start, var_str_idx, idx;
4490 char *str, *val_str;
4491 unsigned int size;
4492
4493 str_start = hist_data->n_field_var_str +
4494 hist_data->n_save_var_str;
4495 var_str_idx = hist_field->var_str_idx;
4496 idx = str_start + var_str_idx;
4497
4498 str = elt_data->field_var_str[idx];
4499 val_str = (char *)(uintptr_t)hist_val;
4500
4501 size = min(hist_field->size, STR_VAR_LEN_MAX);
4502 strscpy(str, val_str, size);
4503
4504 hist_val = (u64)(uintptr_t)str;
4505 }
4506 tracing_map_set_var(elt, var_idx, hist_val);
4507 continue;
4508 }
4509 tracing_map_update_sum(elt, i, hist_val);
4510 }
4511
4512 for_each_hist_key_field(i, hist_data) {
4513 hist_field = hist_data->fields[i];
4514 if (hist_field->flags & HIST_FIELD_FL_VAR) {
4515 hist_val = hist_field->fn(hist_field, elt, rbe, rec);
4516 var_idx = hist_field->var.idx;
4517 tracing_map_set_var(elt, var_idx, hist_val);
4518 }
4519 }
4520
4521 update_field_vars(hist_data, elt, rbe, rec);
4522 }
4523
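/*
 * Copy one key field's value into the compound key buffer.  String keys
 * are bounded by the dynamic/static string length and truncated so the
 * result stays NUL-terminated; everything else is a straight memcpy.
 */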
4524 static inline void add_to_key(char *compound_key, void *key,
4525 struct hist_field *key_field, void *rec)
4526 {
4527 size_t size = key_field->size;
4528
4529 if (key_field->flags & HIST_FIELD_FL_STRING) {
4530 struct ftrace_event_field *field;
4531
4532 field = key_field->field;
4533 if (field->filter_type == FILTER_DYN_STRING)
4534 size = *(u32 *)(rec + field->offset) >> 16;
4535 else if (field->filter_type == FILTER_STATIC_STRING)
4536 size = field->size;
4537
4538 /* ensure NULL-termination */
4539 if (size > key_field->size - 1)
4540 size = key_field->size - 1;
4541
4542 strncpy(compound_key + key_field->offset, (char *)key, size);
4543 } else
4544 memcpy(compound_key + key_field->offset, key, size);
4545 }
4546
4547 static void
4548 hist_trigger_actions(struct hist_trigger_data *hist_data,
4549 struct tracing_map_elt *elt, void *rec,
4550 struct ring_buffer_event *rbe, void *key,
4551 u64 *var_ref_vals)
4552 {
4553 struct action_data *data;
4554 unsigned int i;
4555
4556 for (i = 0; i < hist_data->n_actions; i++) {
4557 data = hist_data->actions[i];
4558 data->fn(hist_data, elt, rec, rbe, key, data, var_ref_vals);
4559 }
4560 }
4561
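/*
 * Per-event trigger callback: assemble the (possibly compound) key from
 * the key fields (or a saved stacktrace), resolve any variable
 * references, insert or look up the map element, update it, and finally
 * run the attached actions if all their variable references resolve.
 */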
4562 static void event_hist_trigger(struct event_trigger_data *data, void *rec,
4563 struct ring_buffer_event *rbe)
4564 {
4565 struct hist_trigger_data *hist_data = data->private_data;
4566 bool use_compound_key = (hist_data->n_keys > 1);
4567 unsigned long entries[HIST_STACKTRACE_DEPTH];
4568 u64 var_ref_vals[TRACING_MAP_VARS_MAX];
4569 char compound_key[HIST_KEY_SIZE_MAX];
4570 struct tracing_map_elt *elt = NULL;
4571 struct hist_field *key_field;
4572 u64 field_contents;
4573 void *key = NULL;
4574 unsigned int i;
4575
4576 memset(compound_key, 0, hist_data->key_size);
4577
4578 for_each_hist_key_field(i, hist_data) {
4579 key_field = hist_data->fields[i];
4580
4581 if (key_field->flags & HIST_FIELD_FL_STACKTRACE) {
4582 memset(entries, 0, HIST_STACKTRACE_SIZE);
4583 stack_trace_save(entries, HIST_STACKTRACE_DEPTH,
4584 HIST_STACKTRACE_SKIP);
4585 key = entries;
4586 } else {
4587 field_contents = key_field->fn(key_field, elt, rbe, rec);
4588 if (key_field->flags & HIST_FIELD_FL_STRING) {
4589 key = (void *)(unsigned long)field_contents;
4590 use_compound_key = true;
4591 } else
4592 key = (void *)&field_contents;
4593 }
4594
4595 if (use_compound_key)
4596 add_to_key(compound_key, key, key_field, rec);
4597 }
4598
4599 if (use_compound_key)
4600 key = compound_key;
4601
4602 if (hist_data->n_var_refs &&
4603 !resolve_var_refs(hist_data, key, var_ref_vals, false))
4604 return;
4605
4606 elt = tracing_map_insert(hist_data->map, key);
4607 if (!elt)
4608 return;
4609
4610 hist_trigger_elt_update(hist_data, elt, rec, rbe, var_ref_vals);
4611
4612 if (resolve_var_refs(hist_data, key, var_ref_vals, true))
4613 hist_trigger_actions(hist_data, elt, rec, rbe, key, var_ref_vals);
4614 }
4615
4616 static void hist_trigger_stacktrace_print(struct seq_file *m,
4617 unsigned long *stacktrace_entries,
4618 unsigned int max_entries)
4619 {
4620 char str[KSYM_SYMBOL_LEN];
4621 unsigned int spaces = 8;
4622 unsigned int i;
4623
4624 for (i = 0; i < max_entries; i++) {
4625 if (!stacktrace_entries[i])
4626 return;
4627
4628 seq_printf(m, "%*c", 1 + spaces, ' ');
4629 sprint_symbol(str, stacktrace_entries[i]);
4630 seq_printf(m, "%s\n", str);
4631 }
4632 }
4633
4634 static void hist_trigger_print_key(struct seq_file *m,
4635 struct hist_trigger_data *hist_data,
4636 void *key,
4637 struct tracing_map_elt *elt)
4638 {
4639 struct hist_field *key_field;
4640 char str[KSYM_SYMBOL_LEN];
4641 bool multiline = false;
4642 const char *field_name;
4643 unsigned int i;
4644 u64 uval;
4645
4646 seq_puts(m, "{ ");
4647
4648 for_each_hist_key_field(i, hist_data) {
4649 key_field = hist_data->fields[i];
4650
4651 if (i > hist_data->n_vals)
4652 seq_puts(m, ", ");
4653
4654 field_name = hist_field_name(key_field, 0);
4655
4656 if (key_field->flags & HIST_FIELD_FL_HEX) {
4657 uval = *(u64 *)(key + key_field->offset);
4658 seq_printf(m, "%s: %llx", field_name, uval);
4659 } else if (key_field->flags & HIST_FIELD_FL_SYM) {
4660 uval = *(u64 *)(key + key_field->offset);
4661 sprint_symbol_no_offset(str, uval);
4662 seq_printf(m, "%s: [%llx] %-45s", field_name,
4663 uval, str);
4664 } else if (key_field->flags & HIST_FIELD_FL_SYM_OFFSET) {
4665 uval = *(u64 *)(key + key_field->offset);
4666 sprint_symbol(str, uval);
4667 seq_printf(m, "%s: [%llx] %-55s", field_name,
4668 uval, str);
4669 } else if (key_field->flags & HIST_FIELD_FL_EXECNAME) {
4670 struct hist_elt_data *elt_data = elt->private_data;
4671 char *comm;
4672
4673 if (WARN_ON_ONCE(!elt_data))
4674 return;
4675
4676 comm = elt_data->comm;
4677
4678 uval = *(u64 *)(key + key_field->offset);
4679 seq_printf(m, "%s: %-16s[%10llu]", field_name,
4680 comm, uval);
4681 } else if (key_field->flags & HIST_FIELD_FL_SYSCALL) {
4682 const char *syscall_name;
4683
4684 uval = *(u64 *)(key + key_field->offset);
4685 syscall_name = get_syscall_name(uval);
4686 if (!syscall_name)
4687 syscall_name = "unknown_syscall";
4688
4689 seq_printf(m, "%s: %-30s[%3llu]", field_name,
4690 syscall_name, uval);
4691 } else if (key_field->flags & HIST_FIELD_FL_STACKTRACE) {
4692 seq_puts(m, "stacktrace:\n");
4693 hist_trigger_stacktrace_print(m,
4694 key + key_field->offset,
4695 HIST_STACKTRACE_DEPTH);
4696 multiline = true;
4697 } else if (key_field->flags & HIST_FIELD_FL_LOG2) {
4698 seq_printf(m, "%s: ~ 2^%-2llu", field_name,
4699 *(u64 *)(key + key_field->offset));
4700 } else if (key_field->flags & HIST_FIELD_FL_STRING) {
4701 seq_printf(m, "%s: %-50s", field_name,
4702 (char *)(key + key_field->offset));
4703 } else {
4704 uval = *(u64 *)(key + key_field->offset);
4705 seq_printf(m, "%s: %10llu", field_name, uval);
4706 }
4707 }
4708
4709 if (!multiline)
4710 seq_puts(m, " ");
4711
4712 seq_puts(m, "}");
4713 }
4714
4715 static void hist_trigger_entry_print(struct seq_file *m,
4716 struct hist_trigger_data *hist_data,
4717 void *key,
4718 struct tracing_map_elt *elt)
4719 {
4720 const char *field_name;
4721 unsigned int i;
4722
4723 hist_trigger_print_key(m, hist_data, key, elt);
4724
4725 seq_printf(m, " hitcount: %10llu",
4726 tracing_map_read_sum(elt, HITCOUNT_IDX));
4727
4728 for (i = 1; i < hist_data->n_vals; i++) {
4729 field_name = hist_field_name(hist_data->fields[i], 0);
4730
4731 if (hist_data->fields[i]->flags & HIST_FIELD_FL_VAR ||
4732 hist_data->fields[i]->flags & HIST_FIELD_FL_EXPR)
4733 continue;
4734
4735 if (hist_data->fields[i]->flags & HIST_FIELD_FL_HEX) {
4736 seq_printf(m, " %s: %10llx", field_name,
4737 tracing_map_read_sum(elt, i));
4738 } else {
4739 seq_printf(m, " %s: %10llu", field_name,
4740 tracing_map_read_sum(elt, i));
4741 }
4742 }
4743
4744 print_actions(m, hist_data, elt);
4745
4746 seq_puts(m, "\n");
4747 }
4748
4749 static int print_entries(struct seq_file *m,
4750 struct hist_trigger_data *hist_data)
4751 {
4752 struct tracing_map_sort_entry **sort_entries = NULL;
4753 struct tracing_map *map = hist_data->map;
4754 int i, n_entries;
4755
4756 n_entries = tracing_map_sort_entries(map, hist_data->sort_keys,
4757 hist_data->n_sort_keys,
4758 &sort_entries);
4759 if (n_entries < 0)
4760 return n_entries;
4761
4762 for (i = 0; i < n_entries; i++)
4763 hist_trigger_entry_print(m, hist_data,
4764 sort_entries[i]->key,
4765 sort_entries[i]->elt);
4766
4767 tracing_map_destroy_sort_entries(sort_entries, n_entries);
4768
4769 return n_entries;
4770 }
4771
4772 static void hist_trigger_show(struct seq_file *m,
4773 struct event_trigger_data *data, int n)
4774 {
4775 struct hist_trigger_data *hist_data;
4776 int n_entries;
4777
4778 if (n > 0)
4779 seq_puts(m, "\n\n");
4780
4781 seq_puts(m, "# event histogram\n#\n# trigger info: ");
4782 data->ops->print(m, data->ops, data);
4783 seq_puts(m, "#\n\n");
4784
4785 hist_data = data->private_data;
4786 n_entries = print_entries(m, hist_data);
4787 if (n_entries < 0)
4788 n_entries = 0;
4789
4790 track_data_snapshot_print(m, hist_data);
4791
4792 seq_printf(m, "\nTotals:\n Hits: %llu\n Entries: %u\n Dropped: %llu\n",
4793 (u64)atomic64_read(&hist_data->map->hits),
4794 n_entries, (u64)atomic64_read(&hist_data->map->drops));
4795 }
4796
4797 static int hist_show(struct seq_file *m, void *v)
4798 {
4799 struct event_trigger_data *data;
4800 struct trace_event_file *event_file;
4801 int n = 0, ret = 0;
4802
4803 mutex_lock(&event_mutex);
4804
4805 event_file = event_file_data(m->private);
4806 if (unlikely(!event_file)) {
4807 ret = -ENODEV;
4808 goto out_unlock;
4809 }
4810
4811 list_for_each_entry(data, &event_file->triggers, list) {
4812 if (data->cmd_ops->trigger_type == ETT_EVENT_HIST)
4813 hist_trigger_show(m, data, n++);
4814 }
4815
4816 out_unlock:
4817 mutex_unlock(&event_mutex);
4818
4819 return ret;
4820 }
4821
4822 static int event_hist_open(struct inode *inode, struct file *file)
4823 {
4824 int ret;
4825
4826 ret = security_locked_down(LOCKDOWN_TRACEFS);
4827 if (ret)
4828 return ret;
4829
4830 return single_open(file, hist_show, file);
4831 }
4832
4833 const struct file_operations event_hist_fops = {
4834 .open = event_hist_open,
4835 .read = seq_read,
4836 .llseek = seq_lseek,
4837 .release = single_release,
4838 };
4839
4840 #ifdef CONFIG_HIST_TRIGGERS_DEBUG
4841 static void hist_field_debug_show_flags(struct seq_file *m,
4842 unsigned long flags)
4843 {
4844 seq_puts(m, " flags:\n");
4845
4846 if (flags & HIST_FIELD_FL_KEY)
4847 seq_puts(m, " HIST_FIELD_FL_KEY\n");
4848 else if (flags & HIST_FIELD_FL_HITCOUNT)
4849 seq_puts(m, " VAL: HIST_FIELD_FL_HITCOUNT\n");
4850 else if (flags & HIST_FIELD_FL_VAR)
4851 seq_puts(m, " HIST_FIELD_FL_VAR\n");
4852 else if (flags & HIST_FIELD_FL_VAR_REF)
4853 seq_puts(m, " HIST_FIELD_FL_VAR_REF\n");
4854 else
4855 seq_puts(m, " VAL: normal u64 value\n");
4856
4857 if (flags & HIST_FIELD_FL_ALIAS)
4858 seq_puts(m, " HIST_FIELD_FL_ALIAS\n");
4859 }
4860
4861 static int hist_field_debug_show(struct seq_file *m,
4862 struct hist_field *field, unsigned long flags)
4863 {
4864 if ((field->flags & flags) != flags) {
4865 seq_printf(m, "ERROR: bad flags - %lx\n", flags);
4866 return -EINVAL;
4867 }
4868
4869 hist_field_debug_show_flags(m, field->flags);
4870 if (field->field)
4871 seq_printf(m, " ftrace_event_field name: %s\n",
4872 field->field->name);
4873
4874 if (field->flags & HIST_FIELD_FL_VAR) {
4875 seq_printf(m, " var.name: %s\n", field->var.name);
4876 seq_printf(m, " var.idx (into tracing_map_elt.vars[]): %u\n",
4877 field->var.idx);
4878 }
4879
4880 if (field->flags & HIST_FIELD_FL_ALIAS)
4881 seq_printf(m, " var_ref_idx (into hist_data->var_refs[]): %u\n",
4882 field->var_ref_idx);
4883
4884 if (field->flags & HIST_FIELD_FL_VAR_REF) {
4885 seq_printf(m, " name: %s\n", field->name);
4886 seq_printf(m, " var.idx (into tracing_map_elt.vars[]): %u\n",
4887 field->var.idx);
4888 seq_printf(m, " var.hist_data: %p\n", field->var.hist_data);
4889 seq_printf(m, " var_ref_idx (into hist_data->var_refs[]): %u\n",
4890 field->var_ref_idx);
4891 if (field->system)
4892 seq_printf(m, " system: %s\n", field->system);
4893 if (field->event_name)
4894 seq_printf(m, " event_name: %s\n", field->event_name);
4895 }
4896
4897 seq_printf(m, " type: %s\n", field->type);
4898 seq_printf(m, " size: %u\n", field->size);
4899 seq_printf(m, " is_signed: %u\n", field->is_signed);
4900
4901 return 0;
4902 }
4903
4904 static int field_var_debug_show(struct seq_file *m,
4905 struct field_var *field_var, unsigned int i,
4906 bool save_vars)
4907 {
4908 const char *vars_name = save_vars ? "save_vars" : "field_vars";
4909 struct hist_field *field;
4910 int ret = 0;
4911
4912 seq_printf(m, "\n hist_data->%s[%d]:\n", vars_name, i);
4913
4914 field = field_var->var;
4915
4916 seq_printf(m, "\n %s[%d].var:\n", vars_name, i);
4917
4918 hist_field_debug_show_flags(m, field->flags);
4919 seq_printf(m, " var.name: %s\n", field->var.name);
4920 seq_printf(m, " var.idx (into tracing_map_elt.vars[]): %u\n",
4921 field->var.idx);
4922
4923 field = field_var->val;
4924
4925 seq_printf(m, "\n %s[%d].val:\n", vars_name, i);
4926 if (field->field)
4927 seq_printf(m, " ftrace_event_field name: %s\n",
4928 field->field->name);
4929 else {
4930 ret = -EINVAL;
4931 goto out;
4932 }
4933
4934 seq_printf(m, " type: %s\n", field->type);
4935 seq_printf(m, " size: %u\n", field->size);
4936 seq_printf(m, " is_signed: %u\n", field->is_signed);
4937 out:
4938 return ret;
4939 }
4940
4941 static int hist_action_debug_show(struct seq_file *m,
4942 struct action_data *data, int i)
4943 {
4944 int ret = 0;
4945
4946 if (data->handler == HANDLER_ONMAX ||
4947 data->handler == HANDLER_ONCHANGE) {
4948 seq_printf(m, "\n hist_data->actions[%d].track_data.var_ref:\n", i);
4949 ret = hist_field_debug_show(m, data->track_data.var_ref,
4950 HIST_FIELD_FL_VAR_REF);
4951 if (ret)
4952 goto out;
4953
4954 seq_printf(m, "\n hist_data->actions[%d].track_data.track_var:\n", i);
4955 ret = hist_field_debug_show(m, data->track_data.track_var,
4956 HIST_FIELD_FL_VAR);
4957 if (ret)
4958 goto out;
4959 }
4960
4961 if (data->handler == HANDLER_ONMATCH) {
4962 seq_printf(m, "\n hist_data->actions[%d].match_data.event_system: %s\n",
4963 i, data->match_data.event_system);
4964 seq_printf(m, " hist_data->actions[%d].match_data.event: %s\n",
4965 i, data->match_data.event);
4966 }
4967 out:
4968 return ret;
4969 }
4970
4971 static int hist_actions_debug_show(struct seq_file *m,
4972 struct hist_trigger_data *hist_data)
4973 {
4974 int i, ret = 0;
4975
4976 if (hist_data->n_actions)
4977 seq_puts(m, "\n action tracking variables (for onmax()/onchange()/onmatch()):\n");
4978
4979 for (i = 0; i < hist_data->n_actions; i++) {
4980 struct action_data *action = hist_data->actions[i];
4981
4982 ret = hist_action_debug_show(m, action, i);
4983 if (ret)
4984 goto out;
4985 }
4986
4987 if (hist_data->n_save_vars)
4988 seq_puts(m, "\n save action variables (save() params):\n");
4989
4990 for (i = 0; i < hist_data->n_save_vars; i++) {
4991 ret = field_var_debug_show(m, hist_data->save_vars[i], i, true);
4992 if (ret)
4993 goto out;
4994 }
4995 out:
4996 return ret;
4997 }
4998
4999 static void hist_trigger_debug_show(struct seq_file *m,
5000 struct event_trigger_data *data, int n)
5001 {
5002 struct hist_trigger_data *hist_data;
5003 int i, ret;
5004
5005 if (n > 0)
5006 seq_puts(m, "\n\n");
5007
5008 seq_puts(m, "# event histogram\n#\n# trigger info: ");
5009 data->ops->print(m, data->ops, data);
5010 seq_puts(m, "#\n\n");
5011
5012 hist_data = data->private_data;
5013
5014 seq_printf(m, "hist_data: %p\n\n", hist_data);
5015 seq_printf(m, " n_vals: %u\n", hist_data->n_vals);
5016 seq_printf(m, " n_keys: %u\n", hist_data->n_keys);
5017 seq_printf(m, " n_fields: %u\n", hist_data->n_fields);
5018
5019 seq_puts(m, "\n val fields:\n\n");
5020
5021 seq_puts(m, " hist_data->fields[0]:\n");
5022 ret = hist_field_debug_show(m, hist_data->fields[0],
5023 HIST_FIELD_FL_HITCOUNT);
5024 if (ret)
5025 return;
5026
5027 for (i = 1; i < hist_data->n_vals; i++) {
5028 seq_printf(m, "\n hist_data->fields[%d]:\n", i);
5029 ret = hist_field_debug_show(m, hist_data->fields[i], 0);
5030 if (ret)
5031 return;
5032 }
5033
5034 seq_puts(m, "\n key fields:\n");
5035
5036 for (i = hist_data->n_vals; i < hist_data->n_fields; i++) {
5037 seq_printf(m, "\n hist_data->fields[%d]:\n", i);
5038 ret = hist_field_debug_show(m, hist_data->fields[i],
5039 HIST_FIELD_FL_KEY);
5040 if (ret)
5041 return;
5042 }
5043
5044 if (hist_data->n_var_refs)
5045 seq_puts(m, "\n variable reference fields:\n");
5046
5047 for (i = 0; i < hist_data->n_var_refs; i++) {
5048 seq_printf(m, "\n hist_data->var_refs[%d]:\n", i);
5049 ret = hist_field_debug_show(m, hist_data->var_refs[i],
5050 HIST_FIELD_FL_VAR_REF);
5051 if (ret)
5052 return;
5053 }
5054
5055 if (hist_data->n_field_vars)
5056 seq_puts(m, "\n field variables:\n");
5057
5058 for (i = 0; i < hist_data->n_field_vars; i++) {
5059 ret = field_var_debug_show(m, hist_data->field_vars[i], i, false);
5060 if (ret)
5061 return;
5062 }
5063
5064 ret = hist_actions_debug_show(m, hist_data);
5065 if (ret)
5066 return;
5067 }
5068
5069 static int hist_debug_show(struct seq_file *m, void *v)
5070 {
5071 struct event_trigger_data *data;
5072 struct trace_event_file *event_file;
5073 int n = 0, ret = 0;
5074
5075 mutex_lock(&event_mutex);
5076
5077 event_file = event_file_data(m->private);
5078 if (unlikely(!event_file)) {
5079 ret = -ENODEV;
5080 goto out_unlock;
5081 }
5082
5083 list_for_each_entry(data, &event_file->triggers, list) {
5084 if (data->cmd_ops->trigger_type == ETT_EVENT_HIST)
5085 hist_trigger_debug_show(m, data, n++);
5086 }
5087
5088 out_unlock:
5089 mutex_unlock(&event_mutex);
5090
5091 return ret;
5092 }
5093
5094 static int event_hist_debug_open(struct inode *inode, struct file *file)
5095 {
5096 int ret;
5097
5098 ret = security_locked_down(LOCKDOWN_TRACEFS);
5099 if (ret)
5100 return ret;
5101
5102 return single_open(file, hist_debug_show, file);
5103 }
5104
5105 const struct file_operations event_hist_debug_fops = {
5106 .open = event_hist_debug_open,
5107 .read = seq_read,
5108 .llseek = seq_lseek,
5109 .release = single_release,
5110 };
5111 #endif
5112
5113 static void hist_field_print(struct seq_file *m, struct hist_field *hist_field)
5114 {
5115 const char *field_name = hist_field_name(hist_field, 0);
5116
5117 if (hist_field->var.name)
5118 seq_printf(m, "%s=", hist_field->var.name);
5119
5120 if (hist_field->flags & HIST_FIELD_FL_CPU)
5121 seq_puts(m, "common_cpu");
5122 else if (field_name) {
5123 if (hist_field->flags & HIST_FIELD_FL_VAR_REF ||
5124 hist_field->flags & HIST_FIELD_FL_ALIAS)
5125 seq_putc(m, '$');
5126 seq_printf(m, "%s", field_name);
5127 } else if (hist_field->flags & HIST_FIELD_FL_TIMESTAMP)
5128 seq_puts(m, "common_timestamp");
5129
5130 if (hist_field->flags) {
5131 if (!(hist_field->flags & HIST_FIELD_FL_VAR_REF) &&
5132 !(hist_field->flags & HIST_FIELD_FL_EXPR)) {
5133 const char *flags = get_hist_field_flags(hist_field);
5134
5135 if (flags)
5136 seq_printf(m, ".%s", flags);
5137 }
5138 }
5139 }
5140
5141 static int event_hist_trigger_print(struct seq_file *m,
5142 struct event_trigger_ops *ops,
5143 struct event_trigger_data *data)
5144 {
5145 struct hist_trigger_data *hist_data = data->private_data;
5146 struct hist_field *field;
5147 bool have_var = false;
5148 unsigned int i;
5149
5150 seq_puts(m, "hist:");
5151
5152 if (data->name)
5153 seq_printf(m, "%s:", data->name);
5154
5155 seq_puts(m, "keys=");
5156
5157 for_each_hist_key_field(i, hist_data) {
5158 field = hist_data->fields[i];
5159
5160 if (i > hist_data->n_vals)
5161 seq_puts(m, ",");
5162
5163 if (field->flags & HIST_FIELD_FL_STACKTRACE)
5164 seq_puts(m, "stacktrace");
5165 else
5166 hist_field_print(m, field);
5167 }
5168
5169 seq_puts(m, ":vals=");
5170
5171 for_each_hist_val_field(i, hist_data) {
5172 field = hist_data->fields[i];
5173 if (field->flags & HIST_FIELD_FL_VAR) {
5174 have_var = true;
5175 continue;
5176 }
5177
5178 if (i == HITCOUNT_IDX)
5179 seq_puts(m, "hitcount");
5180 else {
5181 seq_puts(m, ",");
5182 hist_field_print(m, field);
5183 }
5184 }
5185
5186 if (have_var) {
5187 unsigned int n = 0;
5188
5189 seq_puts(m, ":");
5190
5191 for_each_hist_val_field(i, hist_data) {
5192 field = hist_data->fields[i];
5193
5194 if (field->flags & HIST_FIELD_FL_VAR) {
5195 if (n++)
5196 seq_puts(m, ",");
5197 hist_field_print(m, field);
5198 }
5199 }
5200 }
5201
5202 seq_puts(m, ":sort=");
5203
5204 for (i = 0; i < hist_data->n_sort_keys; i++) {
5205 struct tracing_map_sort_key *sort_key;
5206 unsigned int idx, first_key_idx;
5207
5208 /* skip VAR vals */
5209 first_key_idx = hist_data->n_vals - hist_data->n_vars;
5210
5211 sort_key = &hist_data->sort_keys[i];
5212 idx = sort_key->field_idx;
5213
5214 if (WARN_ON(idx >= HIST_FIELDS_MAX))
5215 return -EINVAL;
5216
5217 if (i > 0)
5218 seq_puts(m, ",");
5219
5220 if (idx == HITCOUNT_IDX)
5221 seq_puts(m, "hitcount");
5222 else {
5223 if (idx >= first_key_idx)
5224 idx += hist_data->n_vars;
5225 hist_field_print(m, hist_data->fields[idx]);
5226 }
5227
5228 if (sort_key->descending)
5229 seq_puts(m, ".descending");
5230 }
5231 seq_printf(m, ":size=%u", (1 << hist_data->map->map_bits));
5232 if (hist_data->enable_timestamps)
5233 seq_printf(m, ":clock=%s", hist_data->attrs->clock);
5234
5235 print_actions_spec(m, hist_data);
5236
5237 if (data->filter_str)
5238 seq_printf(m, " if %s", data->filter_str);
5239
5240 if (data->paused)
5241 seq_puts(m, " [paused]");
5242 else
5243 seq_puts(m, " [active]");
5244
5245 seq_putc(m, '\n');
5246
5247 return 0;
5248 }
5249
5250 static int event_hist_trigger_init(struct event_trigger_ops *ops,
5251 struct event_trigger_data *data)
5252 {
5253 struct hist_trigger_data *hist_data = data->private_data;
5254
5255 if (!data->ref && hist_data->attrs->name)
5256 save_named_trigger(hist_data->attrs->name, data);
5257
5258 data->ref++;
5259
5260 return 0;
5261 }
5262
5263 static void unregister_field_var_hists(struct hist_trigger_data *hist_data)
5264 {
5265 struct trace_event_file *file;
5266 unsigned int i;
5267 char *cmd;
5268 int ret;
5269
5270 for (i = 0; i < hist_data->n_field_var_hists; i++) {
5271 file = hist_data->field_var_hists[i]->hist_data->event_file;
5272 cmd = hist_data->field_var_hists[i]->cmd;
5273 ret = event_hist_trigger_func(&trigger_hist_cmd, file,
5274 "!hist", "hist", cmd);
5275 }
5276 }
5277
5278 static void event_hist_trigger_free(struct event_trigger_ops *ops,
5279 struct event_trigger_data *data)
5280 {
5281 struct hist_trigger_data *hist_data = data->private_data;
5282
5283 if (WARN_ON_ONCE(data->ref <= 0))
5284 return;
5285
5286 data->ref--;
5287 if (!data->ref) {
5288 if (data->name)
5289 del_named_trigger(data);
5290
5291 trigger_data_free(data);
5292
5293 remove_hist_vars(hist_data);
5294
5295 unregister_field_var_hists(hist_data);
5296
5297 destroy_hist_data(hist_data);
5298 }
5299 }
5300
5301 static struct event_trigger_ops event_hist_trigger_ops = {
5302 .func = event_hist_trigger,
5303 .print = event_hist_trigger_print,
5304 .init = event_hist_trigger_init,
5305 .free = event_hist_trigger_free,
5306 };
5307
5308 static int event_hist_trigger_named_init(struct event_trigger_ops *ops,
5309 struct event_trigger_data *data)
5310 {
5311 data->ref++;
5312
5313 save_named_trigger(data->named_data->name, data);
5314
5315 event_hist_trigger_init(ops, data->named_data);
5316
5317 return 0;
5318 }
5319
5320 static void event_hist_trigger_named_free(struct event_trigger_ops *ops,
5321 struct event_trigger_data *data)
5322 {
5323 if (WARN_ON_ONCE(data->ref <= 0))
5324 return;
5325
5326 event_hist_trigger_free(ops, data->named_data);
5327
5328 data->ref--;
5329 if (!data->ref) {
5330 del_named_trigger(data);
5331 trigger_data_free(data);
5332 }
5333 }
5334
5335 static struct event_trigger_ops event_hist_trigger_named_ops = {
5336 .func = event_hist_trigger,
5337 .print = event_hist_trigger_print,
5338 .init = event_hist_trigger_named_init,
5339 .free = event_hist_trigger_named_free,
5340 };
5341
5342 static struct event_trigger_ops *event_hist_get_trigger_ops(char *cmd,
5343 char *param)
5344 {
5345 return &event_hist_trigger_ops;
5346 }
5347
5348 static void hist_clear(struct event_trigger_data *data)
5349 {
5350 struct hist_trigger_data *hist_data = data->private_data;
5351
5352 if (data->name)
5353 pause_named_trigger(data);
5354
5355 tracepoint_synchronize_unregister();
5356
5357 tracing_map_clear(hist_data->map);
5358
5359 if (data->name)
5360 unpause_named_trigger(data);
5361 }
5362
5363 static bool compatible_field(struct ftrace_event_field *field,
5364 struct ftrace_event_field *test_field)
5365 {
5366 if (field == test_field)
5367 return true;
5368 if (field == NULL || test_field == NULL)
5369 return false;
5370 if (strcmp(field->name, test_field->name) != 0)
5371 return false;
5372 if (strcmp(field->type, test_field->type) != 0)
5373 return false;
5374 if (field->size != test_field->size)
5375 return false;
5376 if (field->is_signed != test_field->is_signed)
5377 return false;
5378
5379 return true;
5380 }
5381
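/*
 * Decide whether two hist triggers describe the same histogram: same
 * fields, sort keys, variable names, (optionally) filter string and
 * actions.  Used both to reject duplicate registrations and to pair
 * pause/continue/clear/removal requests with an existing trigger.
 */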
5382 static bool hist_trigger_match(struct event_trigger_data *data,
5383 struct event_trigger_data *data_test,
5384 struct event_trigger_data *named_data,
5385 bool ignore_filter)
5386 {
5387 struct tracing_map_sort_key *sort_key, *sort_key_test;
5388 struct hist_trigger_data *hist_data, *hist_data_test;
5389 struct hist_field *key_field, *key_field_test;
5390 unsigned int i;
5391
5392 if (named_data && (named_data != data_test) &&
5393 (named_data != data_test->named_data))
5394 return false;
5395
5396 if (!named_data && is_named_trigger(data_test))
5397 return false;
5398
5399 hist_data = data->private_data;
5400 hist_data_test = data_test->private_data;
5401
5402 if (hist_data->n_vals != hist_data_test->n_vals ||
5403 hist_data->n_fields != hist_data_test->n_fields ||
5404 hist_data->n_sort_keys != hist_data_test->n_sort_keys)
5405 return false;
5406
5407 if (!ignore_filter) {
5408 if ((data->filter_str && !data_test->filter_str) ||
5409 (!data->filter_str && data_test->filter_str))
5410 return false;
5411 }
5412
5413 for_each_hist_field(i, hist_data) {
5414 key_field = hist_data->fields[i];
5415 key_field_test = hist_data_test->fields[i];
5416
5417 if (key_field->flags != key_field_test->flags)
5418 return false;
5419 if (!compatible_field(key_field->field, key_field_test->field))
5420 return false;
5421 if (key_field->offset != key_field_test->offset)
5422 return false;
5423 if (key_field->size != key_field_test->size)
5424 return false;
5425 if (key_field->is_signed != key_field_test->is_signed)
5426 return false;
5427 if (!!key_field->var.name != !!key_field_test->var.name)
5428 return false;
5429 if (key_field->var.name &&
5430 strcmp(key_field->var.name, key_field_test->var.name) != 0)
5431 return false;
5432 }
5433
5434 for (i = 0; i < hist_data->n_sort_keys; i++) {
5435 sort_key = &hist_data->sort_keys[i];
5436 sort_key_test = &hist_data_test->sort_keys[i];
5437
5438 if (sort_key->field_idx != sort_key_test->field_idx ||
5439 sort_key->descending != sort_key_test->descending)
5440 return false;
5441 }
5442
5443 if (!ignore_filter && data->filter_str &&
5444 (strcmp(data->filter_str, data_test->filter_str) != 0))
5445 return false;
5446
5447 if (!actions_match(hist_data, hist_data_test))
5448 return false;
5449
5450 return true;
5451 }
5452
5453 static int hist_register_trigger(char *glob, struct event_trigger_ops *ops,
5454 struct event_trigger_data *data,
5455 struct trace_event_file *file)
5456 {
5457 struct hist_trigger_data *hist_data = data->private_data;
5458 struct event_trigger_data *test, *named_data = NULL;
5459 struct trace_array *tr = file->tr;
5460 int ret = 0;
5461
5462 if (hist_data->attrs->name) {
5463 named_data = find_named_trigger(hist_data->attrs->name);
5464 if (named_data) {
5465 if (!hist_trigger_match(data, named_data, named_data,
5466 true)) {
5467 hist_err(tr, HIST_ERR_NAMED_MISMATCH, errpos(hist_data->attrs->name));
5468 ret = -EINVAL;
5469 goto out;
5470 }
5471 }
5472 }
5473
5474 if (hist_data->attrs->name && !named_data)
5475 goto new;
5476
5477 lockdep_assert_held(&event_mutex);
5478
5479 list_for_each_entry(test, &file->triggers, list) {
5480 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
5481 if (!hist_trigger_match(data, test, named_data, false))
5482 continue;
5483 if (hist_data->attrs->pause)
5484 test->paused = true;
5485 else if (hist_data->attrs->cont)
5486 test->paused = false;
5487 else if (hist_data->attrs->clear)
5488 hist_clear(test);
5489 else {
5490 hist_err(tr, HIST_ERR_TRIGGER_EEXIST, 0);
5491 ret = -EEXIST;
5492 }
5493 goto out;
5494 }
5495 }
5496 new:
5497 if (hist_data->attrs->cont || hist_data->attrs->clear) {
5498 hist_err(tr, HIST_ERR_TRIGGER_ENOENT_CLEAR, 0);
5499 ret = -ENOENT;
5500 goto out;
5501 }
5502
5503 if (hist_data->attrs->pause)
5504 data->paused = true;
5505
5506 if (named_data) {
5507 data->private_data = named_data->private_data;
5508 set_named_trigger_data(data, named_data);
5509 data->ops = &event_hist_trigger_named_ops;
5510 }
5511
5512 if (data->ops->init) {
5513 ret = data->ops->init(data->ops, data);
5514 if (ret < 0)
5515 goto out;
5516 }
5517
5518 if (hist_data->enable_timestamps) {
5519 char *clock = hist_data->attrs->clock;
5520
5521 ret = tracing_set_clock(file->tr, hist_data->attrs->clock);
5522 if (ret) {
5523 hist_err(tr, HIST_ERR_SET_CLOCK_FAIL, errpos(clock));
5524 goto out;
5525 }
5526
5527 tracing_set_time_stamp_abs(file->tr, true);
5528 }
5529
5530 if (named_data)
5531 destroy_hist_data(hist_data);
5532
5533 ret++;
5534 out:
5535 return ret;
5536 }
5537
5538 static int hist_trigger_enable(struct event_trigger_data *data,
5539 struct trace_event_file *file)
5540 {
5541 int ret = 0;
5542
5543 list_add_tail_rcu(&data->list, &file->triggers);
5544
5545 update_cond_flag(file);
5546
5547 if (trace_event_trigger_enable_disable(file, 1) < 0) {
5548 list_del_rcu(&data->list);
5549 update_cond_flag(file);
5550 ret--;
5551 }
5552
5553 return ret;
5554 }
5555
5556 static bool have_hist_trigger_match(struct event_trigger_data *data,
5557 struct trace_event_file *file)
5558 {
5559 struct hist_trigger_data *hist_data = data->private_data;
5560 struct event_trigger_data *test, *named_data = NULL;
5561 bool match = false;
5562
5563 lockdep_assert_held(&event_mutex);
5564
5565 if (hist_data->attrs->name)
5566 named_data = find_named_trigger(hist_data->attrs->name);
5567
5568 list_for_each_entry(test, &file->triggers, list) {
5569 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
5570 if (hist_trigger_match(data, test, named_data, false)) {
5571 match = true;
5572 break;
5573 }
5574 }
5575 }
5576
5577 return match;
5578 }
5579
5580 static bool hist_trigger_check_refs(struct event_trigger_data *data,
5581 struct trace_event_file *file)
5582 {
5583 struct hist_trigger_data *hist_data = data->private_data;
5584 struct event_trigger_data *test, *named_data = NULL;
5585
5586 lockdep_assert_held(&event_mutex);
5587
5588 if (hist_data->attrs->name)
5589 named_data = find_named_trigger(hist_data->attrs->name);
5590
5591 list_for_each_entry(test, &file->triggers, list) {
5592 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
5593 if (!hist_trigger_match(data, test, named_data, false))
5594 continue;
5595 hist_data = test->private_data;
5596 if (check_var_refs(hist_data))
5597 return true;
5598 break;
5599 }
5600 }
5601
5602 return false;
5603 }
5604
5605 static void hist_unregister_trigger(char *glob, struct event_trigger_ops *ops,
5606 struct event_trigger_data *data,
5607 struct trace_event_file *file)
5608 {
5609 struct hist_trigger_data *hist_data = data->private_data;
5610 struct event_trigger_data *test, *named_data = NULL;
5611 bool unregistered = false;
5612
5613 lockdep_assert_held(&event_mutex);
5614
5615 if (hist_data->attrs->name)
5616 named_data = find_named_trigger(hist_data->attrs->name);
5617
5618 list_for_each_entry(test, &file->triggers, list) {
5619 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
5620 if (!hist_trigger_match(data, test, named_data, false))
5621 continue;
5622 unregistered = true;
5623 list_del_rcu(&test->list);
5624 trace_event_trigger_enable_disable(file, 0);
5625 update_cond_flag(file);
5626 break;
5627 }
5628 }
5629
5630 if (unregistered && test->ops->free)
5631 test->ops->free(test->ops, test);
5632
5633 if (hist_data->enable_timestamps) {
5634 if (!hist_data->remove || unregistered)
5635 tracing_set_time_stamp_abs(file->tr, false);
5636 }
5637 }
5638
5639 static bool hist_file_check_refs(struct trace_event_file *file)
5640 {
5641 struct hist_trigger_data *hist_data;
5642 struct event_trigger_data *test;
5643
5644 lockdep_assert_held(&event_mutex);
5645
5646 list_for_each_entry(test, &file->triggers, list) {
5647 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
5648 hist_data = test->private_data;
5649 if (check_var_refs(hist_data))
5650 return true;
5651 }
5652 }
5653
5654 return false;
5655 }
5656
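/*
 * Remove all hist triggers from @file, unless any of them define
 * variables that are still referenced elsewhere.  Each removed
 * trigger is disabled and freed, and any reference it held on a
 * synthetic event is dropped.
 */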
static void hist_unreg_all(struct trace_event_file *file)
{
	struct event_trigger_data *test, *n;
	struct hist_trigger_data *hist_data;
	struct synth_event *se;
	const char *se_name;

	lockdep_assert_held(&event_mutex);

	if (hist_file_check_refs(file))
		return;

	list_for_each_entry_safe(test, n, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			hist_data = test->private_data;
			list_del_rcu(&test->list);
			trace_event_trigger_enable_disable(file, 0);

			se_name = trace_event_name(file->event_call);
			se = find_synth_event(se_name);
			if (se)
				se->ref--;

			update_cond_flag(file);
			if (hist_data->enable_timestamps)
				tracing_set_time_stamp_abs(file->tr, false);
			if (test->ops->free)
				test->ops->free(test->ops, test);
		}
	}
}

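/*
 * Parse and apply a 'hist' trigger command written to an event's
 * 'trigger' file: split the trigger spec from an optional 'if'
 * filter, parse the attributes, create the hist_trigger_data and
 * register (or, for a '!'-prefixed command, unregister) the trigger,
 * then set up actions and variables, initialize the tracing map, and
 * enable the trigger.
 *
 * A typical command looks something like the following, run from the
 * tracefs mount point (see Documentation/trace/histogram.rst for the
 * full syntax):
 *
 *   echo 'hist:keys=common_pid' >> events/sched/sched_wakeup/trigger
 */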
static int event_hist_trigger_func(struct event_command *cmd_ops,
				   struct trace_event_file *file,
				   char *glob, char *cmd, char *param)
{
	unsigned int hist_trigger_bits = TRACING_MAP_BITS_DEFAULT;
	struct event_trigger_data *trigger_data;
	struct hist_trigger_attrs *attrs;
	struct event_trigger_ops *trigger_ops;
	struct hist_trigger_data *hist_data;
	struct synth_event *se;
	const char *se_name;
	bool remove = false;
	char *trigger, *p;
	int ret = 0;

	lockdep_assert_held(&event_mutex);

	if (glob && strlen(glob)) {
		hist_err_clear();
		last_cmd_set(file, param);
	}

	if (!param)
		return -EINVAL;

	if (glob[0] == '!')
		remove = true;

	/*
	 * separate the trigger from the filter (k:v [if filter])
	 * allowing for whitespace in the trigger
	 */
	p = trigger = param;
	do {
		p = strstr(p, "if");
		if (!p)
			break;
		if (p == param)
			return -EINVAL;
		if (*(p - 1) != ' ' && *(p - 1) != '\t') {
			p++;
			continue;
		}
		if (p >= param + strlen(param) - (sizeof("if") - 1) - 1)
			return -EINVAL;
		if (*(p + sizeof("if") - 1) != ' ' && *(p + sizeof("if") - 1) != '\t') {
			p++;
			continue;
		}
		break;
	} while (p);

	if (!p)
		param = NULL;
	else {
		*(p - 1) = '\0';
		param = strstrip(p);
		trigger = strstrip(trigger);
	}

	attrs = parse_hist_trigger_attrs(file->tr, trigger);
	if (IS_ERR(attrs))
		return PTR_ERR(attrs);

	if (attrs->map_bits)
		hist_trigger_bits = attrs->map_bits;

	hist_data = create_hist_data(hist_trigger_bits, attrs, file, remove);
	if (IS_ERR(hist_data)) {
		destroy_hist_trigger_attrs(attrs);
		return PTR_ERR(hist_data);
	}

	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);

	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
	if (!trigger_data) {
		ret = -ENOMEM;
		goto out_free;
	}

	trigger_data->count = -1;
	trigger_data->ops = trigger_ops;
	trigger_data->cmd_ops = cmd_ops;

	INIT_LIST_HEAD(&trigger_data->list);
	RCU_INIT_POINTER(trigger_data->filter, NULL);

	trigger_data->private_data = hist_data;

	/* if param is non-empty, it's supposed to be a filter */
	if (param && cmd_ops->set_filter) {
		ret = cmd_ops->set_filter(param, trigger_data, file);
		if (ret < 0)
			goto out_free;
	}

	if (remove) {
		if (!have_hist_trigger_match(trigger_data, file))
			goto out_free;

		if (hist_trigger_check_refs(trigger_data, file)) {
			ret = -EBUSY;
			goto out_free;
		}

		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
		se_name = trace_event_name(file->event_call);
		se = find_synth_event(se_name);
		if (se)
			se->ref--;
		ret = 0;
		goto out_free;
	}

	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
	/*
	 * The above returns on success the # of triggers registered,
	 * but if it didn't register any it returns zero. Consider no
	 * triggers registered a failure too.
	 */
	if (!ret) {
		if (!(attrs->pause || attrs->cont || attrs->clear))
			ret = -ENOENT;
		goto out_free;
	} else if (ret < 0)
		goto out_free;

	if (get_named_trigger_data(trigger_data))
		goto enable;

	ret = create_actions(hist_data);
	if (ret)
		goto out_unreg;

	if (has_hist_vars(hist_data) || hist_data->n_var_refs) {
		ret = save_hist_vars(hist_data);
		if (ret)
			goto out_unreg;
	}

	ret = tracing_map_init(hist_data->map);
	if (ret)
		goto out_unreg;
 enable:
	ret = hist_trigger_enable(trigger_data, file);
	if (ret)
		goto out_unreg;

	se_name = trace_event_name(file->event_call);
	se = find_synth_event(se_name);
	if (se)
		se->ref++;
	/* Just return zero, not the number of registered triggers */
	ret = 0;
 out:
	if (ret == 0 && glob[0])
		hist_err_clear();

	return ret;
 out_unreg:
	cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
 out_free:
	if (cmd_ops->set_filter)
		cmd_ops->set_filter(NULL, trigger_data, NULL);

	remove_hist_vars(hist_data);

	kfree(trigger_data);

	destroy_hist_data(hist_data);
	goto out;
}

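/* The event_command defining the 'hist' trigger */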
static struct event_command trigger_hist_cmd = {
	.name			= "hist",
	.trigger_type		= ETT_EVENT_HIST,
	.flags			= EVENT_CMD_FL_NEEDS_REC,
	.func			= event_hist_trigger_func,
	.reg			= hist_register_trigger,
	.unreg			= hist_unregister_trigger,
	.unreg_all		= hist_unreg_all,
	.get_trigger_ops	= event_hist_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};

__init int register_trigger_hist_cmd(void)
{
	int ret;

	ret = register_event_command(&trigger_hist_cmd);
	WARN_ON(ret < 0);

	return ret;
}

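/*
 * Pause or unpause all hist triggers on the target event file,
 * depending on whether this is an enable_hist or a disable_hist
 * trigger.
 */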
static void
hist_enable_trigger(struct event_trigger_data *data, void *rec,
		    struct ring_buffer_event *event)
{
	struct enable_trigger_data *enable_data = data->private_data;
	struct event_trigger_data *test;

	list_for_each_entry_rcu(test, &enable_data->file->triggers, list,
				lockdep_is_held(&event_mutex)) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			if (enable_data->enable)
				test->paused = false;
			else
				test->paused = true;
		}
	}
}

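/*
 * Counted variant of hist_enable_trigger(): acts only while the
 * remaining count is nonzero, decrementing it on each invocation
 * (a count of -1 means unlimited).
 */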
static void
hist_enable_count_trigger(struct event_trigger_data *data, void *rec,
			  struct ring_buffer_event *event)
{
	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	hist_enable_trigger(data, rec, event);
}

static struct event_trigger_ops hist_enable_trigger_ops = {
	.func			= hist_enable_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};

static struct event_trigger_ops hist_enable_count_trigger_ops = {
	.func			= hist_enable_count_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};

static struct event_trigger_ops hist_disable_trigger_ops = {
	.func			= hist_enable_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};

static struct event_trigger_ops hist_disable_count_trigger_ops = {
	.func			= hist_enable_count_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};

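/*
 * Select the trigger ops based on whether the command is enable_hist
 * or disable_hist, and whether a count parameter was supplied.
 */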
static struct event_trigger_ops *
hist_enable_get_trigger_ops(char *cmd, char *param)
{
	struct event_trigger_ops *ops;
	bool enable;

	enable = (strcmp(cmd, ENABLE_HIST_STR) == 0);

	if (enable)
		ops = param ? &hist_enable_count_trigger_ops :
			&hist_enable_trigger_ops;
	else
		ops = param ? &hist_disable_count_trigger_ops :
			&hist_disable_trigger_ops;

	return ops;
}

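/*
 * Remove all enable_hist/disable_hist triggers from @file, disabling
 * the trigger on the event and freeing each one.
 */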
static void hist_enable_unreg_all(struct trace_event_file *file)
{
	struct event_trigger_data *test, *n;

	list_for_each_entry_safe(test, n, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_HIST_ENABLE) {
			list_del_rcu(&test->list);
			update_cond_flag(file);
			trace_event_trigger_enable_disable(file, 0);
			if (test->ops->free)
				test->ops->free(test->ops, test);
		}
	}
}

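/*
 * The enable_hist and disable_hist commands unpause/pause the hist
 * triggers of another event when this event is hit, e.g. (syntax per
 * Documentation/trace/histogram.rst), run from the tracefs mount
 * point:
 *
 *   echo 'enable_hist:sched:sched_switch' >> events/sched/sched_wakeup/trigger
 */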
static struct event_command trigger_hist_enable_cmd = {
	.name			= ENABLE_HIST_STR,
	.trigger_type		= ETT_HIST_ENABLE,
	.func			= event_enable_trigger_func,
	.reg			= event_enable_register_trigger,
	.unreg			= event_enable_unregister_trigger,
	.unreg_all		= hist_enable_unreg_all,
	.get_trigger_ops	= hist_enable_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};

static struct event_command trigger_hist_disable_cmd = {
	.name			= DISABLE_HIST_STR,
	.trigger_type		= ETT_HIST_ENABLE,
	.func			= event_enable_trigger_func,
	.reg			= event_enable_register_trigger,
	.unreg			= event_enable_unregister_trigger,
	.unreg_all		= hist_enable_unreg_all,
	.get_trigger_ops	= hist_enable_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};

static __init void unregister_trigger_hist_enable_disable_cmds(void)
{
	unregister_event_command(&trigger_hist_enable_cmd);
	unregister_event_command(&trigger_hist_disable_cmd);
}

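/*
 * Register the enable_hist and disable_hist commands at boot; if the
 * second registration fails, the commands are unregistered again.
 */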
__init int register_trigger_hist_enable_disable_cmds(void)
{
	int ret;

	ret = register_event_command(&trigger_hist_enable_cmd);
	if (WARN_ON(ret < 0))
		return ret;
	ret = register_event_command(&trigger_hist_disable_cmd);
	if (WARN_ON(ret < 0))
		unregister_trigger_hist_enable_disable_cmds();

	return ret;
}