1 /*
2 * trace_events_hist - trace event hist triggers
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * Copyright (C) 2015 Tom Zanussi <tom.zanussi@linux.intel.com>
15 */
16
17 #include <linux/module.h>
18 #include <linux/kallsyms.h>
19 #include <linux/mutex.h>
20 #include <linux/slab.h>
21 #include <linux/stacktrace.h>
22 #include <linux/rculist.h>
23
24 #include "tracing_map.h"
25 #include "trace.h"
26
struct hist_field;

/*
 * Accessor signature: extract one field's value from a raw trace event
 * record and return it widened to u64.
 */
typedef u64 (*hist_field_fn_t) (struct hist_field *field, void *event);

/* Runtime state for one key or value field of a hist trigger. */
struct hist_field {
	struct ftrace_event_field *field; /* underlying event field; NULL for hitcount/stacktrace */
	unsigned long flags;	/* HIST_FIELD_FL_* modifier bits */
	hist_field_fn_t fn;	/* value-extraction callback */
	unsigned int size;	/* key size in bytes, u64-aligned (keys only) */
	unsigned int offset;	/* offset into the compound key (keys only) */
};
38
hist_field_none(struct hist_field * field,void * event)39 static u64 hist_field_none(struct hist_field *field, void *event)
40 {
41 return 0;
42 }
43
hist_field_counter(struct hist_field * field,void * event)44 static u64 hist_field_counter(struct hist_field *field, void *event)
45 {
46 return 1;
47 }
48
hist_field_string(struct hist_field * hist_field,void * event)49 static u64 hist_field_string(struct hist_field *hist_field, void *event)
50 {
51 char *addr = (char *)(event + hist_field->field->offset);
52
53 return (u64)(unsigned long)addr;
54 }
55
hist_field_dynstring(struct hist_field * hist_field,void * event)56 static u64 hist_field_dynstring(struct hist_field *hist_field, void *event)
57 {
58 u32 str_item = *(u32 *)(event + hist_field->field->offset);
59 int str_loc = str_item & 0xffff;
60 char *addr = (char *)(event + str_loc);
61
62 return (u64)(unsigned long)addr;
63 }
64
hist_field_pstring(struct hist_field * hist_field,void * event)65 static u64 hist_field_pstring(struct hist_field *hist_field, void *event)
66 {
67 char **addr = (char **)(event + hist_field->field->offset);
68
69 return (u64)(unsigned long)*addr;
70 }
71
/*
 * Bucket a numeric field by power of two: returns ceil(log2(val)).
 * NOTE(review): roundup_pow_of_two() is undefined for val == 0 --
 * presumably traced values are expected non-zero; confirm for new users.
 */
static u64 hist_field_log2(struct hist_field *hist_field, void *event)
{
	u64 val = *(u64 *)(event + hist_field->field->offset);

	return (u64) ilog2(roundup_pow_of_two(val));
}
78
/*
 * Generate the fixed-width integer accessors (hist_field_u8 ..
 * hist_field_s64): each loads the field at its record offset and
 * widens the result to u64.
 */
#define DEFINE_HIST_FIELD_FN(type) \
static u64 hist_field_##type(struct hist_field *hist_field, void *event)\
{ \
	type *addr = (type *)(event + hist_field->field->offset); \
 \
	return (u64)(unsigned long)*addr; \
}

DEFINE_HIST_FIELD_FN(s64);
DEFINE_HIST_FIELD_FN(u64);
DEFINE_HIST_FIELD_FN(s32);
DEFINE_HIST_FIELD_FN(u32);
DEFINE_HIST_FIELD_FN(s16);
DEFINE_HIST_FIELD_FN(u16);
DEFINE_HIST_FIELD_FN(s8);
DEFINE_HIST_FIELD_FN(u8);
95
/*
 * fields[] layout: value fields occupy [0, n_vals), key fields
 * occupy [n_vals, n_fields).
 */
#define for_each_hist_field(i, hist_data) \
	for ((i) = 0; (i) < (hist_data)->n_fields; (i)++)

#define for_each_hist_val_field(i, hist_data) \
	for ((i) = 0; (i) < (hist_data)->n_vals; (i)++)

#define for_each_hist_key_field(i, hist_data) \
	for ((i) = (hist_data)->n_vals; (i) < (hist_data)->n_fields; (i)++)

#define HIST_STACKTRACE_DEPTH 16
#define HIST_STACKTRACE_SIZE (HIST_STACKTRACE_DEPTH * sizeof(unsigned long))
#define HIST_STACKTRACE_SKIP 5

/* Index of the always-present hitcount value field. */
#define HITCOUNT_IDX 0
#define HIST_KEY_SIZE_MAX (MAX_FILTER_STR_VAL + HIST_STACKTRACE_SIZE)
111
/* Field modifier flags, set while parsing the trigger command. */
enum hist_field_flags {
	HIST_FIELD_FL_HITCOUNT = 1,	/* implicit per-event counter */
	HIST_FIELD_FL_KEY = 2,		/* field is part of the key */
	HIST_FIELD_FL_STRING = 4,	/* extracted value is a string address */
	HIST_FIELD_FL_HEX = 8,		/* print in hex */
	HIST_FIELD_FL_SYM = 16,		/* print as symbol name */
	HIST_FIELD_FL_SYM_OFFSET = 32,	/* print as symbol+offset */
	HIST_FIELD_FL_EXECNAME = 64,	/* print saved comm (common_pid only) */
	HIST_FIELD_FL_SYSCALL = 128,	/* print as syscall name */
	HIST_FIELD_FL_STACKTRACE = 256,	/* key is a kernel stacktrace */
	HIST_FIELD_FL_LOG2 = 512,	/* bucket value by power of two */
};
124
/* Parsed attributes of a "hist:..." trigger command string. */
struct hist_trigger_attrs {
	char *keys_str;		/* "keys=..." / "key=..." (mandatory) */
	char *vals_str;		/* "vals=..." / "val=..." / "values=..." */
	char *sort_key_str;	/* "sort=..." */
	char *name;		/* "name=..." */
	bool pause;		/* "pause" flag */
	bool cont;		/* "cont" / "continue" flag */
	bool clear;		/* "clear" flag */
	unsigned int map_bits;	/* log2 of requested map size (from "size=") */
};
135
/* Runtime state for one hist trigger instance. */
struct hist_trigger_data {
	struct hist_field *fields[TRACING_MAP_FIELDS_MAX]; /* vals, then keys */
	unsigned int n_vals;	/* # value fields, incl. the implicit hitcount */
	unsigned int n_keys;	/* # key fields */
	unsigned int n_fields;	/* n_vals + n_keys */
	unsigned int key_size;	/* total compound key size in bytes */
	struct tracing_map_sort_key sort_keys[TRACING_MAP_SORT_KEYS_MAX];
	unsigned int n_sort_keys;
	struct trace_event_file *event_file;
	struct hist_trigger_attrs *attrs;	/* owned once creation succeeds */
	struct tracing_map *map;	/* backing key -> sums map */
};
148
select_value_fn(int field_size,int field_is_signed)149 static hist_field_fn_t select_value_fn(int field_size, int field_is_signed)
150 {
151 hist_field_fn_t fn = NULL;
152
153 switch (field_size) {
154 case 8:
155 if (field_is_signed)
156 fn = hist_field_s64;
157 else
158 fn = hist_field_u64;
159 break;
160 case 4:
161 if (field_is_signed)
162 fn = hist_field_s32;
163 else
164 fn = hist_field_u32;
165 break;
166 case 2:
167 if (field_is_signed)
168 fn = hist_field_s16;
169 else
170 fn = hist_field_u16;
171 break;
172 case 1:
173 if (field_is_signed)
174 fn = hist_field_s8;
175 else
176 fn = hist_field_u8;
177 break;
178 }
179
180 return fn;
181 }
182
parse_map_size(char * str)183 static int parse_map_size(char *str)
184 {
185 unsigned long size, map_bits;
186 int ret;
187
188 strsep(&str, "=");
189 if (!str) {
190 ret = -EINVAL;
191 goto out;
192 }
193
194 ret = kstrtoul(str, 0, &size);
195 if (ret)
196 goto out;
197
198 map_bits = ilog2(roundup_pow_of_two(size));
199 if (map_bits < TRACING_MAP_BITS_MIN ||
200 map_bits > TRACING_MAP_BITS_MAX)
201 ret = -EINVAL;
202 else
203 ret = map_bits;
204 out:
205 return ret;
206 }
207
destroy_hist_trigger_attrs(struct hist_trigger_attrs * attrs)208 static void destroy_hist_trigger_attrs(struct hist_trigger_attrs *attrs)
209 {
210 if (!attrs)
211 return;
212
213 kfree(attrs->name);
214 kfree(attrs->sort_key_str);
215 kfree(attrs->keys_str);
216 kfree(attrs->vals_str);
217 kfree(attrs);
218 }
219
parse_hist_trigger_attrs(char * trigger_str)220 static struct hist_trigger_attrs *parse_hist_trigger_attrs(char *trigger_str)
221 {
222 struct hist_trigger_attrs *attrs;
223 int ret = 0;
224
225 attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
226 if (!attrs)
227 return ERR_PTR(-ENOMEM);
228
229 while (trigger_str) {
230 char *str = strsep(&trigger_str, ":");
231
232 if ((strncmp(str, "key=", strlen("key=")) == 0) ||
233 (strncmp(str, "keys=", strlen("keys=")) == 0))
234 attrs->keys_str = kstrdup(str, GFP_KERNEL);
235 else if ((strncmp(str, "val=", strlen("val=")) == 0) ||
236 (strncmp(str, "vals=", strlen("vals=")) == 0) ||
237 (strncmp(str, "values=", strlen("values=")) == 0))
238 attrs->vals_str = kstrdup(str, GFP_KERNEL);
239 else if (strncmp(str, "sort=", strlen("sort=")) == 0)
240 attrs->sort_key_str = kstrdup(str, GFP_KERNEL);
241 else if (strncmp(str, "name=", strlen("name=")) == 0)
242 attrs->name = kstrdup(str, GFP_KERNEL);
243 else if (strcmp(str, "pause") == 0)
244 attrs->pause = true;
245 else if ((strcmp(str, "cont") == 0) ||
246 (strcmp(str, "continue") == 0))
247 attrs->cont = true;
248 else if (strcmp(str, "clear") == 0)
249 attrs->clear = true;
250 else if (strncmp(str, "size=", strlen("size=")) == 0) {
251 int map_bits = parse_map_size(str);
252
253 if (map_bits < 0) {
254 ret = map_bits;
255 goto free;
256 }
257 attrs->map_bits = map_bits;
258 } else {
259 ret = -EINVAL;
260 goto free;
261 }
262 }
263
264 if (!attrs->keys_str) {
265 ret = -EINVAL;
266 goto free;
267 }
268
269 return attrs;
270 free:
271 destroy_hist_trigger_attrs(attrs);
272
273 return ERR_PTR(ret);
274 }
275
save_comm(char * comm,struct task_struct * task)276 static inline void save_comm(char *comm, struct task_struct *task)
277 {
278 if (!task->pid) {
279 strcpy(comm, "<idle>");
280 return;
281 }
282
283 if (WARN_ON_ONCE(task->pid < 0)) {
284 strcpy(comm, "<XXX>");
285 return;
286 }
287
288 memcpy(comm, task->comm, TASK_COMM_LEN);
289 }
290
hist_trigger_elt_comm_free(struct tracing_map_elt * elt)291 static void hist_trigger_elt_comm_free(struct tracing_map_elt *elt)
292 {
293 kfree((char *)elt->private_data);
294 }
295
hist_trigger_elt_comm_alloc(struct tracing_map_elt * elt)296 static int hist_trigger_elt_comm_alloc(struct tracing_map_elt *elt)
297 {
298 struct hist_trigger_data *hist_data = elt->map->private_data;
299 struct hist_field *key_field;
300 unsigned int i;
301
302 for_each_hist_key_field(i, hist_data) {
303 key_field = hist_data->fields[i];
304
305 if (key_field->flags & HIST_FIELD_FL_EXECNAME) {
306 unsigned int size = TASK_COMM_LEN + 1;
307
308 elt->private_data = kzalloc(size, GFP_KERNEL);
309 if (!elt->private_data)
310 return -ENOMEM;
311 break;
312 }
313 }
314
315 return 0;
316 }
317
hist_trigger_elt_comm_copy(struct tracing_map_elt * to,struct tracing_map_elt * from)318 static void hist_trigger_elt_comm_copy(struct tracing_map_elt *to,
319 struct tracing_map_elt *from)
320 {
321 char *comm_from = from->private_data;
322 char *comm_to = to->private_data;
323
324 if (comm_from)
325 memcpy(comm_to, comm_from, TASK_COMM_LEN + 1);
326 }
327
hist_trigger_elt_comm_init(struct tracing_map_elt * elt)328 static void hist_trigger_elt_comm_init(struct tracing_map_elt *elt)
329 {
330 char *comm = elt->private_data;
331
332 if (comm)
333 save_comm(comm, current);
334 }
335
/* Element callbacks installed when an .execname key needs per-entry comms. */
static const struct tracing_map_ops hist_trigger_elt_comm_ops = {
	.elt_alloc = hist_trigger_elt_comm_alloc,
	.elt_copy = hist_trigger_elt_comm_copy,
	.elt_free = hist_trigger_elt_comm_free,
	.elt_init = hist_trigger_elt_comm_init,
};
342
/* Free a single hist_field (NULL-safe, since kfree(NULL) is a no-op). */
static void destroy_hist_field(struct hist_field *hist_field)
{
	kfree(hist_field);
}
347
/*
 * Allocate and initialize a hist_field for @field, choosing the value
 * extraction fn from @flags and the field's type.
 *
 * @field may be NULL for the synthetic hitcount and stacktrace fields.
 * Returns NULL on allocation failure, for function-type fields, or
 * when no accessor matches the field's size.
 */
static struct hist_field *create_hist_field(struct ftrace_event_field *field,
					    unsigned long flags)
{
	struct hist_field *hist_field;

	/* function fields can't be read out of the binary record */
	if (field && is_function_field(field))
		return NULL;

	hist_field = kzalloc(sizeof(struct hist_field), GFP_KERNEL);
	if (!hist_field)
		return NULL;

	if (flags & HIST_FIELD_FL_HITCOUNT) {
		hist_field->fn = hist_field_counter;
		goto out;
	}

	if (flags & HIST_FIELD_FL_STACKTRACE) {
		/* stacktrace keys are captured at event time, not read here */
		hist_field->fn = hist_field_none;
		goto out;
	}

	if (flags & HIST_FIELD_FL_LOG2) {
		hist_field->fn = hist_field_log2;
		goto out;
	}

	if (WARN_ON_ONCE(!field))
		goto out;

	if (is_string_field(field)) {
		flags |= HIST_FIELD_FL_STRING;

		/* pick the accessor matching the string's storage form */
		if (field->filter_type == FILTER_STATIC_STRING)
			hist_field->fn = hist_field_string;
		else if (field->filter_type == FILTER_DYN_STRING)
			hist_field->fn = hist_field_dynstring;
		else
			hist_field->fn = hist_field_pstring;
	} else {
		hist_field->fn = select_value_fn(field->size,
						 field->is_signed);
		if (!hist_field->fn) {
			destroy_hist_field(hist_field);
			return NULL;
		}
	}
 out:
	hist_field->field = field;
	hist_field->flags = flags;

	return hist_field;
}
401
destroy_hist_fields(struct hist_trigger_data * hist_data)402 static void destroy_hist_fields(struct hist_trigger_data *hist_data)
403 {
404 unsigned int i;
405
406 for (i = 0; i < TRACING_MAP_FIELDS_MAX; i++) {
407 if (hist_data->fields[i]) {
408 destroy_hist_field(hist_data->fields[i]);
409 hist_data->fields[i] = NULL;
410 }
411 }
412 }
413
create_hitcount_val(struct hist_trigger_data * hist_data)414 static int create_hitcount_val(struct hist_trigger_data *hist_data)
415 {
416 hist_data->fields[HITCOUNT_IDX] =
417 create_hist_field(NULL, HIST_FIELD_FL_HITCOUNT);
418 if (!hist_data->fields[HITCOUNT_IDX])
419 return -ENOMEM;
420
421 hist_data->n_vals++;
422
423 if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX))
424 return -EINVAL;
425
426 return 0;
427 }
428
/*
 * Parse one "vals=" entry ("<field>" or "<field>.hex") and install it
 * at value slot @val_idx.  Returns 0 on success, negative errno on
 * an unknown field, bad modifier, or allocation failure.
 */
static int create_val_field(struct hist_trigger_data *hist_data,
			    unsigned int val_idx,
			    struct trace_event_file *file,
			    char *field_str)
{
	struct ftrace_event_field *field = NULL;
	unsigned long flags = 0;
	char *field_name;
	int ret = 0;

	if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX))
		return -EINVAL;

	/* split an optional ".hex" modifier off the field name */
	field_name = strsep(&field_str, ".");
	if (field_str) {
		if (strcmp(field_str, "hex") == 0)
			flags |= HIST_FIELD_FL_HEX;
		else {
			ret = -EINVAL;
			goto out;
		}
	}

	field = trace_find_event_field(file->event_call, field_name);
	if (!field || !field->size) {
		ret = -EINVAL;
		goto out;
	}

	hist_data->fields[val_idx] = create_hist_field(field, flags);
	if (!hist_data->fields[val_idx]) {
		ret = -ENOMEM;
		goto out;
	}

	++hist_data->n_vals;

	if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX))
		ret = -EINVAL;
 out:
	return ret;
}
471
/*
 * Create the implicit hitcount plus any user-requested value fields
 * from the comma-separated "vals=" attribute.  An explicit "hitcount"
 * entry is skipped since slot 0 already has it.
 */
static int create_val_fields(struct hist_trigger_data *hist_data,
			     struct trace_event_file *file)
{
	char *fields_str, *field_str;
	unsigned int i, j;
	int ret;

	ret = create_hitcount_val(hist_data);
	if (ret)
		goto out;

	/* "vals=" is optional; hitcount alone is a valid histogram */
	fields_str = hist_data->attrs->vals_str;
	if (!fields_str)
		goto out;

	/* strip the leading "vals=" (or "val="/"values=") prefix */
	strsep(&fields_str, "=");
	if (!fields_str)
		goto out;

	/* j starts at 1: slot 0 is the implicit hitcount */
	for (i = 0, j = 1; i < TRACING_MAP_VALS_MAX &&
		     j < TRACING_MAP_VALS_MAX; i++) {
		field_str = strsep(&fields_str, ",");
		if (!field_str)
			break;
		if (strcmp(field_str, "hitcount") == 0)
			continue;
		ret = create_val_field(hist_data, j++, file, field_str);
		if (ret)
			goto out;
	}
	/* leftover input means too many values (a trailing "hitcount" is ok) */
	if (fields_str && (strcmp(fields_str, "hitcount") != 0))
		ret = -EINVAL;
 out:
	return ret;
}
507
/*
 * Parse one "keys=" entry and install it at field slot @key_idx with
 * compound-key offset @key_offset.  "stacktrace" makes a kernel-stack
 * key; otherwise "<field>[.modifier]" keys on an event field.
 *
 * Returns the key's u64-aligned size on success (so the caller can
 * advance the compound-key offset), negative errno on failure.
 */
static int create_key_field(struct hist_trigger_data *hist_data,
			    unsigned int key_idx,
			    unsigned int key_offset,
			    struct trace_event_file *file,
			    char *field_str)
{
	struct ftrace_event_field *field = NULL;
	unsigned long flags = 0;
	unsigned int key_size;
	int ret = 0;

	if (WARN_ON(key_idx >= TRACING_MAP_FIELDS_MAX))
		return -EINVAL;

	flags |= HIST_FIELD_FL_KEY;

	if (strcmp(field_str, "stacktrace") == 0) {
		flags |= HIST_FIELD_FL_STACKTRACE;
		key_size = sizeof(unsigned long) * HIST_STACKTRACE_DEPTH;
	} else {
		/* split an optional ".modifier" off the field name */
		char *field_name = strsep(&field_str, ".");

		if (field_str) {
			if (strcmp(field_str, "hex") == 0)
				flags |= HIST_FIELD_FL_HEX;
			else if (strcmp(field_str, "sym") == 0)
				flags |= HIST_FIELD_FL_SYM;
			else if (strcmp(field_str, "sym-offset") == 0)
				flags |= HIST_FIELD_FL_SYM_OFFSET;
			else if ((strcmp(field_str, "execname") == 0) &&
				 (strcmp(field_name, "common_pid") == 0))
				/* .execname is only valid on common_pid */
				flags |= HIST_FIELD_FL_EXECNAME;
			else if (strcmp(field_str, "syscall") == 0)
				flags |= HIST_FIELD_FL_SYSCALL;
			else if (strcmp(field_str, "log2") == 0)
				flags |= HIST_FIELD_FL_LOG2;
			else {
				ret = -EINVAL;
				goto out;
			}
		}

		field = trace_find_event_field(file->event_call, field_name);
		if (!field || !field->size) {
			ret = -EINVAL;
			goto out;
		}

		/* string keys get the max size so any value fits */
		if (is_string_field(field))
			key_size = MAX_FILTER_STR_VAL;
		else
			key_size = field->size;
	}

	hist_data->fields[key_idx] = create_hist_field(field, flags);
	if (!hist_data->fields[key_idx]) {
		ret = -ENOMEM;
		goto out;
	}

	key_size = ALIGN(key_size, sizeof(u64));
	hist_data->fields[key_idx]->size = key_size;
	hist_data->fields[key_idx]->offset = key_offset;
	hist_data->key_size += key_size;
	if (hist_data->key_size > HIST_KEY_SIZE_MAX) {
		ret = -EINVAL;
		goto out;
	}

	hist_data->n_keys++;

	if (WARN_ON(hist_data->n_keys > TRACING_MAP_KEYS_MAX))
		return -EINVAL;

	ret = key_size;
 out:
	return ret;
}
586
/*
 * Create all key fields from the comma-separated "keys=" attribute,
 * packing them back-to-back into the compound key.
 */
static int create_key_fields(struct hist_trigger_data *hist_data,
			     struct trace_event_file *file)
{
	unsigned int i, key_offset = 0, n_vals = hist_data->n_vals;
	char *fields_str, *field_str;
	int ret = -EINVAL;

	fields_str = hist_data->attrs->keys_str;
	if (!fields_str)
		goto out;

	/* strip the leading "keys=" (or "key=") prefix */
	strsep(&fields_str, "=");
	if (!fields_str)
		goto out;

	/* key fields live after the value fields in fields[] */
	for (i = n_vals; i < n_vals + TRACING_MAP_KEYS_MAX; i++) {
		field_str = strsep(&fields_str, ",");
		if (!field_str)
			break;
		ret = create_key_field(hist_data, i, key_offset,
				       file, field_str);
		if (ret < 0)
			goto out;
		key_offset += ret;	/* create_key_field() returned the size */
	}
	/* leftover input means more keys than TRACING_MAP_KEYS_MAX */
	if (fields_str) {
		ret = -EINVAL;
		goto out;
	}
	ret = 0;
 out:
	return ret;
}
620
create_hist_fields(struct hist_trigger_data * hist_data,struct trace_event_file * file)621 static int create_hist_fields(struct hist_trigger_data *hist_data,
622 struct trace_event_file *file)
623 {
624 int ret;
625
626 ret = create_val_fields(hist_data, file);
627 if (ret)
628 goto out;
629
630 ret = create_key_fields(hist_data, file);
631 if (ret)
632 goto out;
633
634 hist_data->n_fields = hist_data->n_vals + hist_data->n_keys;
635 out:
636 return ret;
637 }
638
/*
 * Interpret a sort-direction modifier: 1 for "descending", 0 for
 * "ascending" or no modifier at all, -EINVAL for anything else.
 */
static int is_descending(const char *str)
{
	if (!str)
		return 0;

	if (!strcmp(str, "descending"))
		return 1;

	return strcmp(str, "ascending") == 0 ? 0 : -EINVAL;
}
652
/*
 * Parse the "sort=" attribute ("<field>[.descending|.ascending],...")
 * into hist_data->sort_keys[].  Without the attribute, the single
 * default sort key (hitcount, ascending, left zeroed) is used.
 */
static int create_sort_keys(struct hist_trigger_data *hist_data)
{
	char *fields_str = hist_data->attrs->sort_key_str;
	struct ftrace_event_field *field = NULL;
	struct tracing_map_sort_key *sort_key;
	int descending, ret = 0;
	unsigned int i, j;

	hist_data->n_sort_keys = 1; /* we always have at least one, hitcount */

	if (!fields_str)
		goto out;

	/* strip the leading "sort=" prefix */
	strsep(&fields_str, "=");
	if (!fields_str) {
		ret = -EINVAL;
		goto out;
	}

	for (i = 0; i < TRACING_MAP_SORT_KEYS_MAX; i++) {
		char *field_str, *field_name;

		sort_key = &hist_data->sort_keys[i];

		field_str = strsep(&fields_str, ",");
		if (!field_str) {
			/* an empty "sort=" list is an error */
			if (i == 0)
				ret = -EINVAL;
			break;
		}

		/* input left after the last slot: too many sort keys */
		if ((i == TRACING_MAP_SORT_KEYS_MAX - 1) && fields_str) {
			ret = -EINVAL;
			break;
		}

		/* split an optional direction modifier off the name */
		field_name = strsep(&field_str, ".");
		if (!field_name) {
			ret = -EINVAL;
			break;
		}

		/* "hitcount" sorts on field_idx 0, already the default */
		if (strcmp(field_name, "hitcount") == 0) {
			descending = is_descending(field_str);
			if (descending < 0) {
				ret = descending;
				break;
			}
			sort_key->descending = descending;
			continue;
		}

		/* match the name against the trigger's non-hitcount fields */
		for (j = 1; j < hist_data->n_fields; j++) {
			field = hist_data->fields[j]->field;
			if (field && (strcmp(field_name, field->name) == 0)) {
				sort_key->field_idx = j;
				descending = is_descending(field_str);
				if (descending < 0) {
					ret = descending;
					goto out;
				}
				sort_key->descending = descending;
				break;
			}
		}
		if (j == hist_data->n_fields) {
			/* no such field */
			ret = -EINVAL;
			break;
		}
	}
	hist_data->n_sort_keys = i;
 out:
	return ret;
}
727
/*
 * Tear down a hist trigger: attrs, fields, map, then the struct
 * itself.  Callers that don't own attrs NULL them out beforehand
 * (see create_hist_data()'s error path).
 */
static void destroy_hist_data(struct hist_trigger_data *hist_data)
{
	destroy_hist_trigger_attrs(hist_data->attrs);
	destroy_hist_fields(hist_data);
	tracing_map_destroy(hist_data->map);
	kfree(hist_data);
}
735
/*
 * Register each hist_field with the tracing_map: key fields get a
 * compare function matched to their type, value fields become sums.
 */
static int create_tracing_map_fields(struct hist_trigger_data *hist_data)
{
	struct tracing_map *map = hist_data->map;
	struct ftrace_event_field *field;
	struct hist_field *hist_field;
	int i, idx;

	for_each_hist_field(i, hist_data) {
		hist_field = hist_data->fields[i];
		if (hist_field->flags & HIST_FIELD_FL_KEY) {
			tracing_map_cmp_fn_t cmp_fn;

			field = hist_field->field;

			/*
			 * NOTE(review): stacktrace keys use cmp_none --
			 * presumably the map falls back to a raw key
			 * compare; confirm in tracing_map.c.
			 */
			if (hist_field->flags & HIST_FIELD_FL_STACKTRACE)
				cmp_fn = tracing_map_cmp_none;
			else if (is_string_field(field))
				cmp_fn = tracing_map_cmp_string;
			else
				cmp_fn = tracing_map_cmp_num(field->size,
							     field->is_signed);
			idx = tracing_map_add_key_field(map,
							hist_field->offset,
							cmp_fn);

		} else
			idx = tracing_map_add_sum_field(map);

		if (idx < 0)
			return idx;
	}

	return 0;
}
770
need_tracing_map_ops(struct hist_trigger_data * hist_data)771 static bool need_tracing_map_ops(struct hist_trigger_data *hist_data)
772 {
773 struct hist_field *key_field;
774 unsigned int i;
775
776 for_each_hist_key_field(i, hist_data) {
777 key_field = hist_data->fields[i];
778
779 if (key_field->flags & HIST_FIELD_FL_EXECNAME)
780 return true;
781 }
782
783 return false;
784 }
785
/*
 * Allocate and fully set up a hist trigger: parse field and sort-key
 * descriptions from @attrs and create/initialize the backing
 * tracing_map.
 *
 * On success the returned hist_trigger_data owns @attrs; on failure
 * an ERR_PTR() is returned and @attrs remains the caller's to free
 * (attrs is NULLed before the internal teardown for that reason).
 */
static struct hist_trigger_data *
create_hist_data(unsigned int map_bits,
		 struct hist_trigger_attrs *attrs,
		 struct trace_event_file *file)
{
	const struct tracing_map_ops *map_ops = NULL;
	struct hist_trigger_data *hist_data;
	int ret = 0;

	hist_data = kzalloc(sizeof(*hist_data), GFP_KERNEL);
	if (!hist_data)
		return ERR_PTR(-ENOMEM);

	hist_data->attrs = attrs;

	ret = create_hist_fields(hist_data, file);
	if (ret)
		goto free;

	ret = create_sort_keys(hist_data);
	if (ret)
		goto free;

	/* .execname keys need per-element comm bookkeeping */
	if (need_tracing_map_ops(hist_data))
		map_ops = &hist_trigger_elt_comm_ops;

	hist_data->map = tracing_map_create(map_bits, hist_data->key_size,
					    map_ops, hist_data);
	if (IS_ERR(hist_data->map)) {
		ret = PTR_ERR(hist_data->map);
		hist_data->map = NULL;
		goto free;
	}

	ret = create_tracing_map_fields(hist_data);
	if (ret)
		goto free;

	ret = tracing_map_init(hist_data->map);
	if (ret)
		goto free;

	hist_data->event_file = file;
 out:
	return hist_data;
 free:
	/* caller still owns attrs on failure; don't free them here */
	hist_data->attrs = NULL;

	destroy_hist_data(hist_data);

	hist_data = ERR_PTR(ret);

	goto out;
}
840
hist_trigger_elt_update(struct hist_trigger_data * hist_data,struct tracing_map_elt * elt,void * rec)841 static void hist_trigger_elt_update(struct hist_trigger_data *hist_data,
842 struct tracing_map_elt *elt,
843 void *rec)
844 {
845 struct hist_field *hist_field;
846 unsigned int i;
847 u64 hist_val;
848
849 for_each_hist_val_field(i, hist_data) {
850 hist_field = hist_data->fields[i];
851 hist_val = hist_field->fn(hist_field, rec);
852 tracing_map_update_sum(elt, i, hist_val);
853 }
854 }
855
/*
 * Copy one key field's bytes into the compound key buffer at the
 * field's assigned offset.  String keys are length-limited and rely
 * on the caller having zeroed @compound_key for NUL-termination
 * (strncpy alone does not terminate at the clamp).
 */
static inline void add_to_key(char *compound_key, void *key,
			      struct hist_field *key_field, void *rec)
{
	size_t size = key_field->size;

	if (key_field->flags & HIST_FIELD_FL_STRING) {
		struct ftrace_event_field *field;

		field = key_field->field;
		if (field->filter_type == FILTER_DYN_STRING)
			/* dynamic strings keep their length in the high u16 */
			size = *(u32 *)(rec + field->offset) >> 16;
		else if (field->filter_type == FILTER_PTR_STRING)
			size = strlen(key);
		else if (field->filter_type == FILTER_STATIC_STRING)
			size = field->size;

		/* ensure NULL-termination */
		if (size > key_field->size - 1)
			size = key_field->size - 1;

		strncpy(compound_key + key_field->offset, (char *)key, size);
	} else
		memcpy(compound_key + key_field->offset, key, size);
}
880
/*
 * Per-event trigger callback: build the (possibly compound) key from
 * the raw record @rec, insert/look up the map element, and update its
 * value sums.
 */
static void event_hist_trigger(struct event_trigger_data *data, void *rec)
{
	struct hist_trigger_data *hist_data = data->private_data;
	bool use_compound_key = (hist_data->n_keys > 1);
	unsigned long entries[HIST_STACKTRACE_DEPTH];
	char compound_key[HIST_KEY_SIZE_MAX];
	struct stack_trace stacktrace;
	struct hist_field *key_field;
	struct tracing_map_elt *elt;
	u64 field_contents;
	void *key = NULL;
	unsigned int i;

	memset(compound_key, 0, hist_data->key_size);

	for_each_hist_key_field(i, hist_data) {
		key_field = hist_data->fields[i];

		if (key_field->flags & HIST_FIELD_FL_STACKTRACE) {
			/* capture the current kernel stack as this key */
			stacktrace.max_entries = HIST_STACKTRACE_DEPTH;
			stacktrace.entries = entries;
			stacktrace.nr_entries = 0;
			stacktrace.skip = HIST_STACKTRACE_SKIP;

			memset(stacktrace.entries, 0, HIST_STACKTRACE_SIZE);
			save_stack_trace(&stacktrace);

			key = entries;
		} else {
			field_contents = key_field->fn(key_field, rec);
			if (key_field->flags & HIST_FIELD_FL_STRING) {
				key = (void *)(unsigned long)field_contents;
				/* strings always go through the compound buffer */
				use_compound_key = true;
			} else
				key = (void *)&field_contents;
		}

		if (use_compound_key)
			add_to_key(compound_key, key, key_field, rec);
	}

	if (use_compound_key)
		key = compound_key;

	elt = tracing_map_insert(hist_data->map, key);
	if (elt)
		hist_trigger_elt_update(hist_data, elt, rec);
}
929
hist_trigger_stacktrace_print(struct seq_file * m,unsigned long * stacktrace_entries,unsigned int max_entries)930 static void hist_trigger_stacktrace_print(struct seq_file *m,
931 unsigned long *stacktrace_entries,
932 unsigned int max_entries)
933 {
934 char str[KSYM_SYMBOL_LEN];
935 unsigned int spaces = 8;
936 unsigned int i;
937
938 for (i = 0; i < max_entries; i++) {
939 if (stacktrace_entries[i] == ULONG_MAX)
940 return;
941
942 seq_printf(m, "%*c", 1 + spaces, ' ');
943 sprint_symbol(str, stacktrace_entries[i]);
944 seq_printf(m, "%s\n", str);
945 }
946 }
947
/*
 * Print one map entry as "{ key fields } hitcount: N  val: N ...",
 * rendering each key field according to its modifier flags.
 */
static void
hist_trigger_entry_print(struct seq_file *m,
			 struct hist_trigger_data *hist_data, void *key,
			 struct tracing_map_elt *elt)
{
	struct hist_field *key_field;
	char str[KSYM_SYMBOL_LEN];
	bool multiline = false;
	unsigned int i;
	u64 uval;

	seq_puts(m, "{ ");

	for_each_hist_key_field(i, hist_data) {
		key_field = hist_data->fields[i];

		/* comma-separate every key after the first */
		if (i > hist_data->n_vals)
			seq_puts(m, ", ");

		if (key_field->flags & HIST_FIELD_FL_HEX) {
			uval = *(u64 *)(key + key_field->offset);
			seq_printf(m, "%s: %llx",
				   key_field->field->name, uval);
		} else if (key_field->flags & HIST_FIELD_FL_SYM) {
			uval = *(u64 *)(key + key_field->offset);
			sprint_symbol_no_offset(str, uval);
			seq_printf(m, "%s: [%llx] %-45s",
				   key_field->field->name, uval, str);
		} else if (key_field->flags & HIST_FIELD_FL_SYM_OFFSET) {
			uval = *(u64 *)(key + key_field->offset);
			sprint_symbol(str, uval);
			seq_printf(m, "%s: [%llx] %-55s",
				   key_field->field->name, uval, str);
		} else if (key_field->flags & HIST_FIELD_FL_EXECNAME) {
			/* comm saved at element init by the comm elt ops */
			char *comm = elt->private_data;

			uval = *(u64 *)(key + key_field->offset);
			seq_printf(m, "%s: %-16s[%10llu]",
				   key_field->field->name, comm, uval);
		} else if (key_field->flags & HIST_FIELD_FL_SYSCALL) {
			const char *syscall_name;

			uval = *(u64 *)(key + key_field->offset);
			syscall_name = get_syscall_name(uval);
			if (!syscall_name)
				syscall_name = "unknown_syscall";

			seq_printf(m, "%s: %-30s[%3llu]",
				   key_field->field->name, syscall_name, uval);
		} else if (key_field->flags & HIST_FIELD_FL_STACKTRACE) {
			seq_puts(m, "stacktrace:\n");
			hist_trigger_stacktrace_print(m,
						      key + key_field->offset,
						      HIST_STACKTRACE_DEPTH);
			multiline = true;
		} else if (key_field->flags & HIST_FIELD_FL_LOG2) {
			seq_printf(m, "%s: ~ 2^%-2llu", key_field->field->name,
				   *(u64 *)(key + key_field->offset));
		} else if (key_field->flags & HIST_FIELD_FL_STRING) {
			seq_printf(m, "%s: %-50s", key_field->field->name,
				   (char *)(key + key_field->offset));
		} else {
			uval = *(u64 *)(key + key_field->offset);
			seq_printf(m, "%s: %10llu", key_field->field->name,
				   uval);
		}
	}

	if (!multiline)
		seq_puts(m, " ");

	seq_puts(m, "}");

	seq_printf(m, " hitcount: %10llu",
		   tracing_map_read_sum(elt, HITCOUNT_IDX));

	/* then each user-requested value sum, honoring .hex */
	for (i = 1; i < hist_data->n_vals; i++) {
		if (hist_data->fields[i]->flags & HIST_FIELD_FL_HEX) {
			seq_printf(m, " %s: %10llx",
				   hist_data->fields[i]->field->name,
				   tracing_map_read_sum(elt, i));
		} else {
			seq_printf(m, " %s: %10llu",
				   hist_data->fields[i]->field->name,
				   tracing_map_read_sum(elt, i));
		}
	}

	seq_puts(m, "\n");
}
1038
print_entries(struct seq_file * m,struct hist_trigger_data * hist_data)1039 static int print_entries(struct seq_file *m,
1040 struct hist_trigger_data *hist_data)
1041 {
1042 struct tracing_map_sort_entry **sort_entries = NULL;
1043 struct tracing_map *map = hist_data->map;
1044 int i, n_entries;
1045
1046 n_entries = tracing_map_sort_entries(map, hist_data->sort_keys,
1047 hist_data->n_sort_keys,
1048 &sort_entries);
1049 if (n_entries < 0)
1050 return n_entries;
1051
1052 for (i = 0; i < n_entries; i++)
1053 hist_trigger_entry_print(m, hist_data,
1054 sort_entries[i]->key,
1055 sort_entries[i]->elt);
1056
1057 tracing_map_destroy_sort_entries(sort_entries, n_entries);
1058
1059 return n_entries;
1060 }
1061
hist_trigger_show(struct seq_file * m,struct event_trigger_data * data,int n)1062 static void hist_trigger_show(struct seq_file *m,
1063 struct event_trigger_data *data, int n)
1064 {
1065 struct hist_trigger_data *hist_data;
1066 int n_entries, ret = 0;
1067
1068 if (n > 0)
1069 seq_puts(m, "\n\n");
1070
1071 seq_puts(m, "# event histogram\n#\n# trigger info: ");
1072 data->ops->print(m, data->ops, data);
1073 seq_puts(m, "#\n\n");
1074
1075 hist_data = data->private_data;
1076 n_entries = print_entries(m, hist_data);
1077 if (n_entries < 0) {
1078 ret = n_entries;
1079 n_entries = 0;
1080 }
1081
1082 seq_printf(m, "\nTotals:\n Hits: %llu\n Entries: %u\n Dropped: %llu\n",
1083 (u64)atomic64_read(&hist_data->map->hits),
1084 n_entries, (u64)atomic64_read(&hist_data->map->drops));
1085 }
1086
hist_show(struct seq_file * m,void * v)1087 static int hist_show(struct seq_file *m, void *v)
1088 {
1089 struct event_trigger_data *data;
1090 struct trace_event_file *event_file;
1091 int n = 0, ret = 0;
1092
1093 mutex_lock(&event_mutex);
1094
1095 event_file = event_file_data(m->private);
1096 if (unlikely(!event_file)) {
1097 ret = -ENODEV;
1098 goto out_unlock;
1099 }
1100
1101 list_for_each_entry_rcu(data, &event_file->triggers, list) {
1102 if (data->cmd_ops->trigger_type == ETT_EVENT_HIST)
1103 hist_trigger_show(m, data, n++);
1104 }
1105
1106 out_unlock:
1107 mutex_unlock(&event_mutex);
1108
1109 return ret;
1110 }
1111
/* open() for the per-event "hist" file; @file doubles as seq private data. */
static int event_hist_open(struct inode *inode, struct file *file)
{
	return single_open(file, hist_show, file);
}
1116
/* file_operations for the per-event "hist" file. */
const struct file_operations event_hist_fops = {
	.open = event_hist_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
1123
get_hist_field_flags(struct hist_field * hist_field)1124 static const char *get_hist_field_flags(struct hist_field *hist_field)
1125 {
1126 const char *flags_str = NULL;
1127
1128 if (hist_field->flags & HIST_FIELD_FL_HEX)
1129 flags_str = "hex";
1130 else if (hist_field->flags & HIST_FIELD_FL_SYM)
1131 flags_str = "sym";
1132 else if (hist_field->flags & HIST_FIELD_FL_SYM_OFFSET)
1133 flags_str = "sym-offset";
1134 else if (hist_field->flags & HIST_FIELD_FL_EXECNAME)
1135 flags_str = "execname";
1136 else if (hist_field->flags & HIST_FIELD_FL_SYSCALL)
1137 flags_str = "syscall";
1138 else if (hist_field->flags & HIST_FIELD_FL_LOG2)
1139 flags_str = "log2";
1140
1141 return flags_str;
1142 }
1143
hist_field_print(struct seq_file * m,struct hist_field * hist_field)1144 static void hist_field_print(struct seq_file *m, struct hist_field *hist_field)
1145 {
1146 seq_printf(m, "%s", hist_field->field->name);
1147 if (hist_field->flags) {
1148 const char *flags_str = get_hist_field_flags(hist_field);
1149
1150 if (flags_str)
1151 seq_printf(m, ".%s", flags_str);
1152 }
1153 }
1154
event_hist_trigger_print(struct seq_file * m,struct event_trigger_ops * ops,struct event_trigger_data * data)1155 static int event_hist_trigger_print(struct seq_file *m,
1156 struct event_trigger_ops *ops,
1157 struct event_trigger_data *data)
1158 {
1159 struct hist_trigger_data *hist_data = data->private_data;
1160 struct hist_field *key_field;
1161 unsigned int i;
1162
1163 seq_puts(m, "hist:");
1164
1165 if (data->name)
1166 seq_printf(m, "%s:", data->name);
1167
1168 seq_puts(m, "keys=");
1169
1170 for_each_hist_key_field(i, hist_data) {
1171 key_field = hist_data->fields[i];
1172
1173 if (i > hist_data->n_vals)
1174 seq_puts(m, ",");
1175
1176 if (key_field->flags & HIST_FIELD_FL_STACKTRACE)
1177 seq_puts(m, "stacktrace");
1178 else
1179 hist_field_print(m, key_field);
1180 }
1181
1182 seq_puts(m, ":vals=");
1183
1184 for_each_hist_val_field(i, hist_data) {
1185 if (i == HITCOUNT_IDX)
1186 seq_puts(m, "hitcount");
1187 else {
1188 seq_puts(m, ",");
1189 hist_field_print(m, hist_data->fields[i]);
1190 }
1191 }
1192
1193 seq_puts(m, ":sort=");
1194
1195 for (i = 0; i < hist_data->n_sort_keys; i++) {
1196 struct tracing_map_sort_key *sort_key;
1197
1198 sort_key = &hist_data->sort_keys[i];
1199
1200 if (i > 0)
1201 seq_puts(m, ",");
1202
1203 if (sort_key->field_idx == HITCOUNT_IDX)
1204 seq_puts(m, "hitcount");
1205 else {
1206 unsigned int idx = sort_key->field_idx;
1207
1208 if (WARN_ON(idx >= TRACING_MAP_FIELDS_MAX))
1209 return -EINVAL;
1210
1211 hist_field_print(m, hist_data->fields[idx]);
1212 }
1213
1214 if (sort_key->descending)
1215 seq_puts(m, ".descending");
1216 }
1217
1218 seq_printf(m, ":size=%u", (1 << hist_data->map->map_bits));
1219
1220 if (data->filter_str)
1221 seq_printf(m, " if %s", data->filter_str);
1222
1223 if (data->paused)
1224 seq_puts(m, " [paused]");
1225 else
1226 seq_puts(m, " [active]");
1227
1228 seq_putc(m, '\n');
1229
1230 return 0;
1231 }
1232
event_hist_trigger_init(struct event_trigger_ops * ops,struct event_trigger_data * data)1233 static int event_hist_trigger_init(struct event_trigger_ops *ops,
1234 struct event_trigger_data *data)
1235 {
1236 struct hist_trigger_data *hist_data = data->private_data;
1237
1238 if (!data->ref && hist_data->attrs->name)
1239 save_named_trigger(hist_data->attrs->name, data);
1240
1241 data->ref++;
1242
1243 return 0;
1244 }
1245
event_hist_trigger_free(struct event_trigger_ops * ops,struct event_trigger_data * data)1246 static void event_hist_trigger_free(struct event_trigger_ops *ops,
1247 struct event_trigger_data *data)
1248 {
1249 struct hist_trigger_data *hist_data = data->private_data;
1250
1251 if (WARN_ON_ONCE(data->ref <= 0))
1252 return;
1253
1254 data->ref--;
1255 if (!data->ref) {
1256 if (data->name)
1257 del_named_trigger(data);
1258 trigger_data_free(data);
1259 destroy_hist_data(hist_data);
1260 }
1261 }
1262
/* ops for an ordinary (unnamed, or first-of-name) hist trigger */
static struct event_trigger_ops event_hist_trigger_ops = {
	.func			= event_hist_trigger,
	.print			= event_hist_trigger_print,
	.init			= event_hist_trigger_init,
	.free			= event_hist_trigger_free,
};
1269
event_hist_trigger_named_init(struct event_trigger_ops * ops,struct event_trigger_data * data)1270 static int event_hist_trigger_named_init(struct event_trigger_ops *ops,
1271 struct event_trigger_data *data)
1272 {
1273 data->ref++;
1274
1275 save_named_trigger(data->named_data->name, data);
1276
1277 event_hist_trigger_init(ops, data->named_data);
1278
1279 return 0;
1280 }
1281
event_hist_trigger_named_free(struct event_trigger_ops * ops,struct event_trigger_data * data)1282 static void event_hist_trigger_named_free(struct event_trigger_ops *ops,
1283 struct event_trigger_data *data)
1284 {
1285 if (WARN_ON_ONCE(data->ref <= 0))
1286 return;
1287
1288 event_hist_trigger_free(ops, data->named_data);
1289
1290 data->ref--;
1291 if (!data->ref) {
1292 del_named_trigger(data);
1293 trigger_data_free(data);
1294 }
1295 }
1296
/* ops for a trigger attached to an existing named trigger's data */
static struct event_trigger_ops event_hist_trigger_named_ops = {
	.func			= event_hist_trigger,
	.print			= event_hist_trigger_print,
	.init			= event_hist_trigger_named_init,
	.free			= event_hist_trigger_named_free,
};
1303
event_hist_get_trigger_ops(char * cmd,char * param)1304 static struct event_trigger_ops *event_hist_get_trigger_ops(char *cmd,
1305 char *param)
1306 {
1307 return &event_hist_trigger_ops;
1308 }
1309
hist_clear(struct event_trigger_data * data)1310 static void hist_clear(struct event_trigger_data *data)
1311 {
1312 struct hist_trigger_data *hist_data = data->private_data;
1313
1314 if (data->name)
1315 pause_named_trigger(data);
1316
1317 synchronize_sched();
1318
1319 tracing_map_clear(hist_data->map);
1320
1321 if (data->name)
1322 unpause_named_trigger(data);
1323 }
1324
compatible_field(struct ftrace_event_field * field,struct ftrace_event_field * test_field)1325 static bool compatible_field(struct ftrace_event_field *field,
1326 struct ftrace_event_field *test_field)
1327 {
1328 if (field == test_field)
1329 return true;
1330 if (field == NULL || test_field == NULL)
1331 return false;
1332 if (strcmp(field->name, test_field->name) != 0)
1333 return false;
1334 if (strcmp(field->type, test_field->type) != 0)
1335 return false;
1336 if (field->size != test_field->size)
1337 return false;
1338 if (field->is_signed != test_field->is_signed)
1339 return false;
1340
1341 return true;
1342 }
1343
hist_trigger_match(struct event_trigger_data * data,struct event_trigger_data * data_test,struct event_trigger_data * named_data,bool ignore_filter)1344 static bool hist_trigger_match(struct event_trigger_data *data,
1345 struct event_trigger_data *data_test,
1346 struct event_trigger_data *named_data,
1347 bool ignore_filter)
1348 {
1349 struct tracing_map_sort_key *sort_key, *sort_key_test;
1350 struct hist_trigger_data *hist_data, *hist_data_test;
1351 struct hist_field *key_field, *key_field_test;
1352 unsigned int i;
1353
1354 if (named_data && (named_data != data_test) &&
1355 (named_data != data_test->named_data))
1356 return false;
1357
1358 if (!named_data && is_named_trigger(data_test))
1359 return false;
1360
1361 hist_data = data->private_data;
1362 hist_data_test = data_test->private_data;
1363
1364 if (hist_data->n_vals != hist_data_test->n_vals ||
1365 hist_data->n_fields != hist_data_test->n_fields ||
1366 hist_data->n_sort_keys != hist_data_test->n_sort_keys)
1367 return false;
1368
1369 if (!ignore_filter) {
1370 if ((data->filter_str && !data_test->filter_str) ||
1371 (!data->filter_str && data_test->filter_str))
1372 return false;
1373 }
1374
1375 for_each_hist_field(i, hist_data) {
1376 key_field = hist_data->fields[i];
1377 key_field_test = hist_data_test->fields[i];
1378
1379 if (key_field->flags != key_field_test->flags)
1380 return false;
1381 if (!compatible_field(key_field->field, key_field_test->field))
1382 return false;
1383 if (key_field->offset != key_field_test->offset)
1384 return false;
1385 }
1386
1387 for (i = 0; i < hist_data->n_sort_keys; i++) {
1388 sort_key = &hist_data->sort_keys[i];
1389 sort_key_test = &hist_data_test->sort_keys[i];
1390
1391 if (sort_key->field_idx != sort_key_test->field_idx ||
1392 sort_key->descending != sort_key_test->descending)
1393 return false;
1394 }
1395
1396 if (!ignore_filter && data->filter_str &&
1397 (strcmp(data->filter_str, data_test->filter_str) != 0))
1398 return false;
1399
1400 return true;
1401 }
1402
hist_register_trigger(char * glob,struct event_trigger_ops * ops,struct event_trigger_data * data,struct trace_event_file * file)1403 static int hist_register_trigger(char *glob, struct event_trigger_ops *ops,
1404 struct event_trigger_data *data,
1405 struct trace_event_file *file)
1406 {
1407 struct hist_trigger_data *hist_data = data->private_data;
1408 struct event_trigger_data *test, *named_data = NULL;
1409 int ret = 0;
1410
1411 if (hist_data->attrs->name) {
1412 named_data = find_named_trigger(hist_data->attrs->name);
1413 if (named_data) {
1414 if (!hist_trigger_match(data, named_data, named_data,
1415 true)) {
1416 ret = -EINVAL;
1417 goto out;
1418 }
1419 }
1420 }
1421
1422 if (hist_data->attrs->name && !named_data)
1423 goto new;
1424
1425 list_for_each_entry_rcu(test, &file->triggers, list) {
1426 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
1427 if (!hist_trigger_match(data, test, named_data, false))
1428 continue;
1429 if (hist_data->attrs->pause)
1430 test->paused = true;
1431 else if (hist_data->attrs->cont)
1432 test->paused = false;
1433 else if (hist_data->attrs->clear)
1434 hist_clear(test);
1435 else
1436 ret = -EEXIST;
1437 goto out;
1438 }
1439 }
1440 new:
1441 if (hist_data->attrs->cont || hist_data->attrs->clear) {
1442 ret = -ENOENT;
1443 goto out;
1444 }
1445
1446 if (hist_data->attrs->pause)
1447 data->paused = true;
1448
1449 if (named_data) {
1450 destroy_hist_data(data->private_data);
1451 data->private_data = named_data->private_data;
1452 set_named_trigger_data(data, named_data);
1453 data->ops = &event_hist_trigger_named_ops;
1454 }
1455
1456 if (data->ops->init) {
1457 ret = data->ops->init(data->ops, data);
1458 if (ret < 0)
1459 goto out;
1460 }
1461
1462 list_add_rcu(&data->list, &file->triggers);
1463 ret++;
1464
1465 update_cond_flag(file);
1466
1467 if (trace_event_trigger_enable_disable(file, 1) < 0) {
1468 list_del_rcu(&data->list);
1469 update_cond_flag(file);
1470 ret--;
1471 }
1472 out:
1473 return ret;
1474 }
1475
hist_unregister_trigger(char * glob,struct event_trigger_ops * ops,struct event_trigger_data * data,struct trace_event_file * file)1476 static void hist_unregister_trigger(char *glob, struct event_trigger_ops *ops,
1477 struct event_trigger_data *data,
1478 struct trace_event_file *file)
1479 {
1480 struct hist_trigger_data *hist_data = data->private_data;
1481 struct event_trigger_data *test, *named_data = NULL;
1482 bool unregistered = false;
1483
1484 if (hist_data->attrs->name)
1485 named_data = find_named_trigger(hist_data->attrs->name);
1486
1487 list_for_each_entry_rcu(test, &file->triggers, list) {
1488 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
1489 if (!hist_trigger_match(data, test, named_data, false))
1490 continue;
1491 unregistered = true;
1492 list_del_rcu(&test->list);
1493 trace_event_trigger_enable_disable(file, 0);
1494 update_cond_flag(file);
1495 break;
1496 }
1497 }
1498
1499 if (unregistered && test->ops->free)
1500 test->ops->free(test->ops, test);
1501 }
1502
hist_unreg_all(struct trace_event_file * file)1503 static void hist_unreg_all(struct trace_event_file *file)
1504 {
1505 struct event_trigger_data *test, *n;
1506
1507 list_for_each_entry_safe(test, n, &file->triggers, list) {
1508 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
1509 list_del_rcu(&test->list);
1510 trace_event_trigger_enable_disable(file, 0);
1511 update_cond_flag(file);
1512 if (test->ops->free)
1513 test->ops->free(test->ops, test);
1514 }
1515 }
1516 }
1517
event_hist_trigger_func(struct event_command * cmd_ops,struct trace_event_file * file,char * glob,char * cmd,char * param)1518 static int event_hist_trigger_func(struct event_command *cmd_ops,
1519 struct trace_event_file *file,
1520 char *glob, char *cmd, char *param)
1521 {
1522 unsigned int hist_trigger_bits = TRACING_MAP_BITS_DEFAULT;
1523 struct event_trigger_data *trigger_data;
1524 struct hist_trigger_attrs *attrs;
1525 struct event_trigger_ops *trigger_ops;
1526 struct hist_trigger_data *hist_data;
1527 char *trigger;
1528 int ret = 0;
1529
1530 if (!param)
1531 return -EINVAL;
1532
1533 /* separate the trigger from the filter (k:v [if filter]) */
1534 trigger = strsep(¶m, " \t");
1535 if (!trigger)
1536 return -EINVAL;
1537
1538 attrs = parse_hist_trigger_attrs(trigger);
1539 if (IS_ERR(attrs))
1540 return PTR_ERR(attrs);
1541
1542 if (attrs->map_bits)
1543 hist_trigger_bits = attrs->map_bits;
1544
1545 hist_data = create_hist_data(hist_trigger_bits, attrs, file);
1546 if (IS_ERR(hist_data)) {
1547 destroy_hist_trigger_attrs(attrs);
1548 return PTR_ERR(hist_data);
1549 }
1550
1551 trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
1552
1553 ret = -ENOMEM;
1554 trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
1555 if (!trigger_data)
1556 goto out_free;
1557
1558 trigger_data->count = -1;
1559 trigger_data->ops = trigger_ops;
1560 trigger_data->cmd_ops = cmd_ops;
1561
1562 INIT_LIST_HEAD(&trigger_data->list);
1563 RCU_INIT_POINTER(trigger_data->filter, NULL);
1564
1565 trigger_data->private_data = hist_data;
1566
1567 /* if param is non-empty, it's supposed to be a filter */
1568 if (param && cmd_ops->set_filter) {
1569 ret = cmd_ops->set_filter(param, trigger_data, file);
1570 if (ret < 0)
1571 goto out_free;
1572 }
1573
1574 if (glob[0] == '!') {
1575 cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
1576 ret = 0;
1577 goto out_free;
1578 }
1579
1580 ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
1581 /*
1582 * The above returns on success the # of triggers registered,
1583 * but if it didn't register any it returns zero. Consider no
1584 * triggers registered a failure too.
1585 */
1586 if (!ret) {
1587 if (!(attrs->pause || attrs->cont || attrs->clear))
1588 ret = -ENOENT;
1589 goto out_free;
1590 } else if (ret < 0)
1591 goto out_free;
1592 /* Just return zero, not the number of registered triggers */
1593 ret = 0;
1594 out:
1595 return ret;
1596 out_free:
1597 if (cmd_ops->set_filter)
1598 cmd_ops->set_filter(NULL, trigger_data, NULL);
1599
1600 kfree(trigger_data);
1601
1602 destroy_hist_data(hist_data);
1603 goto out;
1604 }
1605
/* the 'hist' event command: hooks hist triggers into the trigger core */
static struct event_command trigger_hist_cmd = {
	.name			= "hist",
	.trigger_type		= ETT_EVENT_HIST,
	.flags			= EVENT_CMD_FL_NEEDS_REC,
	.func			= event_hist_trigger_func,
	.reg			= hist_register_trigger,
	.unreg			= hist_unregister_trigger,
	.unreg_all		= hist_unreg_all,
	.get_trigger_ops	= event_hist_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};
1617
register_trigger_hist_cmd(void)1618 __init int register_trigger_hist_cmd(void)
1619 {
1620 int ret;
1621
1622 ret = register_event_command(&trigger_hist_cmd);
1623 WARN_ON(ret < 0);
1624
1625 return ret;
1626 }
1627
1628 static void
hist_enable_trigger(struct event_trigger_data * data,void * rec)1629 hist_enable_trigger(struct event_trigger_data *data, void *rec)
1630 {
1631 struct enable_trigger_data *enable_data = data->private_data;
1632 struct event_trigger_data *test;
1633
1634 list_for_each_entry_rcu(test, &enable_data->file->triggers, list) {
1635 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
1636 if (enable_data->enable)
1637 test->paused = false;
1638 else
1639 test->paused = true;
1640 }
1641 }
1642 }
1643
1644 static void
hist_enable_count_trigger(struct event_trigger_data * data,void * rec)1645 hist_enable_count_trigger(struct event_trigger_data *data, void *rec)
1646 {
1647 if (!data->count)
1648 return;
1649
1650 if (data->count != -1)
1651 (data->count)--;
1652
1653 hist_enable_trigger(data, rec);
1654 }
1655
/*
 * Four ops tables for enable_hist/disable_hist, with and without a
 * count.  Enable vs disable is decided at run time from the command
 * name (see hist_enable_trigger()), so the enable/disable pairs share
 * the same callbacks; only counted vs uncounted differs.
 */
static struct event_trigger_ops hist_enable_trigger_ops = {
	.func			= hist_enable_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};

static struct event_trigger_ops hist_enable_count_trigger_ops = {
	.func			= hist_enable_count_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};

static struct event_trigger_ops hist_disable_trigger_ops = {
	.func			= hist_enable_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};

static struct event_trigger_ops hist_disable_count_trigger_ops = {
	.func			= hist_enable_count_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};
1683
1684 static struct event_trigger_ops *
hist_enable_get_trigger_ops(char * cmd,char * param)1685 hist_enable_get_trigger_ops(char *cmd, char *param)
1686 {
1687 struct event_trigger_ops *ops;
1688 bool enable;
1689
1690 enable = (strcmp(cmd, ENABLE_HIST_STR) == 0);
1691
1692 if (enable)
1693 ops = param ? &hist_enable_count_trigger_ops :
1694 &hist_enable_trigger_ops;
1695 else
1696 ops = param ? &hist_disable_count_trigger_ops :
1697 &hist_disable_trigger_ops;
1698
1699 return ops;
1700 }
1701
hist_enable_unreg_all(struct trace_event_file * file)1702 static void hist_enable_unreg_all(struct trace_event_file *file)
1703 {
1704 struct event_trigger_data *test, *n;
1705
1706 list_for_each_entry_safe(test, n, &file->triggers, list) {
1707 if (test->cmd_ops->trigger_type == ETT_HIST_ENABLE) {
1708 list_del_rcu(&test->list);
1709 update_cond_flag(file);
1710 trace_event_trigger_enable_disable(file, 0);
1711 if (test->ops->free)
1712 test->ops->free(test->ops, test);
1713 }
1714 }
1715 }
1716
/*
 * The enable_hist and disable_hist commands share a trigger type and
 * all callbacks; only the command name differs, which
 * hist_enable_get_trigger_ops() inspects to tell them apart.
 */
static struct event_command trigger_hist_enable_cmd = {
	.name			= ENABLE_HIST_STR,
	.trigger_type		= ETT_HIST_ENABLE,
	.func			= event_enable_trigger_func,
	.reg			= event_enable_register_trigger,
	.unreg			= event_enable_unregister_trigger,
	.unreg_all		= hist_enable_unreg_all,
	.get_trigger_ops	= hist_enable_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};

static struct event_command trigger_hist_disable_cmd = {
	.name			= DISABLE_HIST_STR,
	.trigger_type		= ETT_HIST_ENABLE,
	.func			= event_enable_trigger_func,
	.reg			= event_enable_register_trigger,
	.unreg			= event_enable_unregister_trigger,
	.unreg_all		= hist_enable_unreg_all,
	.get_trigger_ops	= hist_enable_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};
1738
unregister_trigger_hist_enable_disable_cmds(void)1739 static __init void unregister_trigger_hist_enable_disable_cmds(void)
1740 {
1741 unregister_event_command(&trigger_hist_enable_cmd);
1742 unregister_event_command(&trigger_hist_disable_cmd);
1743 }
1744
register_trigger_hist_enable_disable_cmds(void)1745 __init int register_trigger_hist_enable_disable_cmds(void)
1746 {
1747 int ret;
1748
1749 ret = register_event_command(&trigger_hist_enable_cmd);
1750 if (WARN_ON(ret < 0))
1751 return ret;
1752 ret = register_event_command(&trigger_hist_disable_cmd);
1753 if (WARN_ON(ret < 0))
1754 unregister_trigger_hist_enable_disable_cmds();
1755
1756 return ret;
1757 }
1758