1 /*
2 * trace_events_filter - generic event filtering
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com>
19 */
20
21 #include <linux/module.h>
22 #include <linux/ctype.h>
23 #include <linux/mutex.h>
24 #include <linux/perf_event.h>
25 #include <linux/slab.h>
26
27 #include "trace.h"
28 #include "trace_output.h"
29
30 #define DEFAULT_SYS_FILTER_MESSAGE \
31 "### global filter ###\n" \
32 "# Use this to set filters for multiple events.\n" \
33 "# Only events with the given fields will be affected.\n" \
34 "# If no events are modified, an error message will be displayed here"
35
36 enum filter_op_ids
37 {
38 OP_OR,
39 OP_AND,
40 OP_GLOB,
41 OP_NE,
42 OP_EQ,
43 OP_LT,
44 OP_LE,
45 OP_GT,
46 OP_GE,
47 OP_BAND,
48 OP_NOT,
49 OP_NONE,
50 OP_OPEN_PAREN,
51 };
52
53 struct filter_op {
54 int id;
55 char *string;
56 int precedence;
57 };
58
59 /* Order must be the same as enum filter_op_ids above */
60 static struct filter_op filter_ops[] = {
61 { OP_OR, "||", 1 },
62 { OP_AND, "&&", 2 },
63 { OP_GLOB, "~", 4 },
64 { OP_NE, "!=", 4 },
65 { OP_EQ, "==", 4 },
66 { OP_LT, "<", 5 },
67 { OP_LE, "<=", 5 },
68 { OP_GT, ">", 5 },
69 { OP_GE, ">=", 5 },
70 { OP_BAND, "&", 6 },
71 { OP_NOT, "!", 6 },
72 { OP_NONE, "OP_NONE", 0 },
73 { OP_OPEN_PAREN, "(", 0 },
74 };
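/*
 * Example (illustrative only; the field names are just placeholders):
 * with the precedences above, the filter string
 *
 *	prev_prio < 100 && next_prio < 100 || common_pid == 0
 *
 * is parsed as ((prev_prio < 100 && next_prio < 100) || common_pid == 0),
 * because '&&' (precedence 2) binds tighter than '||' (precedence 1), and
 * both bind looser than the comparison operators (precedence 4 and 5).
 */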
75
76 enum {
77 FILT_ERR_NONE,
78 FILT_ERR_INVALID_OP,
79 FILT_ERR_UNBALANCED_PAREN,
80 FILT_ERR_TOO_MANY_OPERANDS,
81 FILT_ERR_OPERAND_TOO_LONG,
82 FILT_ERR_FIELD_NOT_FOUND,
83 FILT_ERR_ILLEGAL_FIELD_OP,
84 FILT_ERR_ILLEGAL_INTVAL,
85 FILT_ERR_BAD_SUBSYS_FILTER,
86 FILT_ERR_TOO_MANY_PREDS,
87 FILT_ERR_MISSING_FIELD,
88 FILT_ERR_INVALID_FILTER,
89 FILT_ERR_IP_FIELD_ONLY,
90 FILT_ERR_ILLEGAL_NOT_OP,
91 };
92
93 static char *err_text[] = {
94 "No error",
95 "Invalid operator",
96 "Unbalanced parens",
97 "Too many operands",
98 "Operand too long",
99 "Field not found",
100 "Illegal operation for field type",
101 "Illegal integer value",
102 "Couldn't find or set field in one of a subsystem's events",
103 "Too many terms in predicate expression",
104 "Missing field name and/or value",
105 "Meaningless filter expression",
106 "Only 'ip' field is supported for function trace",
107 "Illegal use of '!'",
108 };
109
110 struct opstack_op {
111 int op;
112 struct list_head list;
113 };
114
115 struct postfix_elt {
116 int op;
117 char *operand;
118 struct list_head list;
119 };
120
121 struct filter_parse_state {
122 struct filter_op *ops;
123 struct list_head opstack;
124 struct list_head postfix;
125 int lasterr;
126 int lasterr_pos;
127
128 struct {
129 char *string;
130 unsigned int cnt;
131 unsigned int tail;
132 } infix;
133
134 struct {
135 char string[MAX_FILTER_STR_VAL];
136 int pos;
137 unsigned int tail;
138 } operand;
139 };
140
141 struct pred_stack {
142 struct filter_pred **preds;
143 int index;
144 };
145
146 /* The result is a match when the normalized comparison equals the inverse of the 'not' flag */
147 #define DEFINE_COMPARISON_PRED(type) \
148 static int filter_pred_##type(struct filter_pred *pred, void *event) \
149 { \
150 type *addr = (type *)(event + pred->offset); \
151 type val = (type)pred->val; \
152 int match = 0; \
153 \
154 switch (pred->op) { \
155 case OP_LT: \
156 match = (*addr < val); \
157 break; \
158 case OP_LE: \
159 match = (*addr <= val); \
160 break; \
161 case OP_GT: \
162 match = (*addr > val); \
163 break; \
164 case OP_GE: \
165 match = (*addr >= val); \
166 break; \
167 case OP_BAND: \
168 match = (*addr & val); \
169 break; \
170 default: \
171 break; \
172 } \
173 \
174 return !!match == !pred->not; \
175 }
176
177 #define DEFINE_EQUALITY_PRED(size) \
178 static int filter_pred_##size(struct filter_pred *pred, void *event) \
179 { \
180 u##size *addr = (u##size *)(event + pred->offset); \
181 u##size val = (u##size)pred->val; \
182 int match; \
183 \
184 match = (val == *addr) ^ pred->not; \
185 \
186 return match; \
187 }
188
189 DEFINE_COMPARISON_PRED(s64);
190 DEFINE_COMPARISON_PRED(u64);
191 DEFINE_COMPARISON_PRED(s32);
192 DEFINE_COMPARISON_PRED(u32);
193 DEFINE_COMPARISON_PRED(s16);
194 DEFINE_COMPARISON_PRED(u16);
195 DEFINE_COMPARISON_PRED(s8);
196 DEFINE_COMPARISON_PRED(u8);
197
198 DEFINE_EQUALITY_PRED(64);
199 DEFINE_EQUALITY_PRED(32);
200 DEFINE_EQUALITY_PRED(16);
201 DEFINE_EQUALITY_PRED(8);
202
203 /* Filter predicate for fixed sized arrays of characters */
204 static int filter_pred_string(struct filter_pred *pred, void *event)
205 {
206 char *addr = (char *)(event + pred->offset);
207 int cmp, match;
208
209 cmp = pred->regex.match(addr, &pred->regex, pred->regex.field_len);
210
211 match = cmp ^ pred->not;
212
213 return match;
214 }
215
216 /* Filter predicate for char * pointers */
217 static int filter_pred_pchar(struct filter_pred *pred, void *event)
218 {
219 char **addr = (char **)(event + pred->offset);
220 int cmp, match;
221 int len = strlen(*addr) + 1; /* including trailing '\0' */
222
223 cmp = pred->regex.match(*addr, &pred->regex, len);
224
225 match = cmp ^ pred->not;
226
227 return match;
228 }
229
230 /*
231 * Filter predicate for dynamically sized arrays of characters.
232 * These are implemented through a list of strings at the end
233 * of the entry.
234 * Each of these strings has a field in the entry which
235 * contains its offset from the beginning of the entry.
236 * We first read this field, then add its value to the
237 * address of the entry, which finally gives us
238 * the address of the string.
239 */
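/*
 * For example (values are illustrative): if the __data_loc word at
 * pred->offset reads 0x000c0020, the string lives 0x20 bytes from the
 * start of the entry and is 0x0c bytes long (offset in the low 16 bits,
 * length in the high 16 bits), matching the unpacking done below.
 */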
240 static int filter_pred_strloc(struct filter_pred *pred, void *event)
241 {
242 u32 str_item = *(u32 *)(event + pred->offset);
243 int str_loc = str_item & 0xffff;
244 int str_len = str_item >> 16;
245 char *addr = (char *)(event + str_loc);
246 int cmp, match;
247
248 cmp = pred->regex.match(addr, &pred->regex, str_len);
249
250 match = cmp ^ pred->not;
251
252 return match;
253 }
254
255 /* Filter predicate for CPUs. */
256 static int filter_pred_cpu(struct filter_pred *pred, void *event)
257 {
258 int cpu, cmp;
259 int match = 0;
260
261 cpu = raw_smp_processor_id();
262 cmp = pred->val;
263
264 switch (pred->op) {
265 case OP_EQ:
266 match = cpu == cmp;
267 break;
268 case OP_LT:
269 match = cpu < cmp;
270 break;
271 case OP_LE:
272 match = cpu <= cmp;
273 break;
274 case OP_GT:
275 match = cpu > cmp;
276 break;
277 case OP_GE:
278 match = cpu >= cmp;
279 break;
280 default:
281 break;
282 }
283
284 return !!match == !pred->not;
285 }
286
287 /* Filter predicate for COMM. */
288 static int filter_pred_comm(struct filter_pred *pred, void *event)
289 {
290 int cmp, match;
291
292 cmp = pred->regex.match(current->comm, &pred->regex,
293 pred->regex.field_len);
294 match = cmp ^ pred->not;
295
296 return match;
297 }
298
299 static int filter_pred_none(struct filter_pred *pred, void *event)
300 {
301 return 0;
302 }
303
304 /*
305 * regex_match_foo - Basic regex callbacks
306 *
307 * @str: the string to be searched
308 * @r: the regex structure containing the pattern string
309 * @len: the length of the string to be searched (including '\0')
310 *
311 * Note:
312 * - @str might not be NULL-terminated if it's of type DYN_STRING
313 * or STATIC_STRING
314 */
315
316 static int regex_match_full(char *str, struct regex *r, int len)
317 {
318 if (strncmp(str, r->pattern, len) == 0)
319 return 1;
320 return 0;
321 }
322
323 static int regex_match_front(char *str, struct regex *r, int len)
324 {
325 if (strncmp(str, r->pattern, r->len) == 0)
326 return 1;
327 return 0;
328 }
329
330 static int regex_match_middle(char *str, struct regex *r, int len)
331 {
332 if (strnstr(str, r->pattern, len))
333 return 1;
334 return 0;
335 }
336
337 static int regex_match_end(char *str, struct regex *r, int len)
338 {
339 int strlen = len - 1;
340
341 if (strlen >= r->len &&
342 memcmp(str + strlen - r->len, r->pattern, r->len) == 0)
343 return 1;
344 return 0;
345 }
346
347 /**
348 * filter_parse_regex - parse a basic regex
349 * @buff: the raw regex
350 * @len: length of the regex
351 * @search: will point to the beginning of the string to compare
352 * @not: tell whether the match will have to be inverted
353 *
354 * The caller passes in a buffer containing a regex; this function
355 * sets @search to point to the search part of the buffer and
356 * returns the type of search it is (see enum above).
357 * Note that this modifies @buff.
358 *
359 * Returns enum type.
360 * search returns the pointer to use for comparison.
361 * not returns 1 if buff started with a '!'
362 * 0 otherwise.
363 */
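/*
 * Illustrative examples of the parsing done below (patterns are
 * placeholders):
 *
 *	"foo"	-> MATCH_FULL,		search = "foo"
 *	"foo*"	-> MATCH_FRONT_ONLY,	search = "foo"
 *	"*foo"	-> MATCH_END_ONLY,	search = "foo"
 *	"*foo*"	-> MATCH_MIDDLE_ONLY,	search = "foo"
 *	"!foo"	-> sets *not to 1, then parses "foo" as above
 */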
364 enum regex_type filter_parse_regex(char *buff, int len, char **search, int *not)
365 {
366 int type = MATCH_FULL;
367 int i;
368
369 if (buff[0] == '!') {
370 *not = 1;
371 buff++;
372 len--;
373 } else
374 *not = 0;
375
376 *search = buff;
377
378 for (i = 0; i < len; i++) {
379 if (buff[i] == '*') {
380 if (!i) {
381 *search = buff + 1;
382 type = MATCH_END_ONLY;
383 } else {
384 if (type == MATCH_END_ONLY)
385 type = MATCH_MIDDLE_ONLY;
386 else
387 type = MATCH_FRONT_ONLY;
388 buff[i] = 0;
389 break;
390 }
391 }
392 }
393
394 return type;
395 }
396
397 static void filter_build_regex(struct filter_pred *pred)
398 {
399 struct regex *r = &pred->regex;
400 char *search;
401 enum regex_type type = MATCH_FULL;
402 int not = 0;
403
404 if (pred->op == OP_GLOB) {
405 type = filter_parse_regex(r->pattern, r->len, &search, &not);
406 r->len = strlen(search);
407 memmove(r->pattern, search, r->len+1);
408 }
409
410 switch (type) {
411 case MATCH_FULL:
412 r->match = regex_match_full;
413 break;
414 case MATCH_FRONT_ONLY:
415 r->match = regex_match_front;
416 break;
417 case MATCH_MIDDLE_ONLY:
418 r->match = regex_match_middle;
419 break;
420 case MATCH_END_ONLY:
421 r->match = regex_match_end;
422 break;
423 }
424
425 pred->not ^= not;
426 }
427
428 enum move_type {
429 MOVE_DOWN,
430 MOVE_UP_FROM_LEFT,
431 MOVE_UP_FROM_RIGHT
432 };
433
434 static struct filter_pred *
435 get_pred_parent(struct filter_pred *pred, struct filter_pred *preds,
436 int index, enum move_type *move)
437 {
438 if (pred->parent & FILTER_PRED_IS_RIGHT)
439 *move = MOVE_UP_FROM_RIGHT;
440 else
441 *move = MOVE_UP_FROM_LEFT;
442 pred = &preds[pred->parent & ~FILTER_PRED_IS_RIGHT];
443
444 return pred;
445 }
446
447 enum walk_return {
448 WALK_PRED_ABORT,
449 WALK_PRED_PARENT,
450 WALK_PRED_DEFAULT,
451 };
452
453 typedef int (*filter_pred_walkcb_t) (enum move_type move,
454 struct filter_pred *pred,
455 int *err, void *data);
456
457 static int walk_pred_tree(struct filter_pred *preds,
458 struct filter_pred *root,
459 filter_pred_walkcb_t cb, void *data)
460 {
461 struct filter_pred *pred = root;
462 enum move_type move = MOVE_DOWN;
463 int done = 0;
464
465 if (!preds)
466 return -EINVAL;
467
468 do {
469 int err = 0, ret;
470
471 ret = cb(move, pred, &err, data);
472 if (ret == WALK_PRED_ABORT)
473 return err;
474 if (ret == WALK_PRED_PARENT)
475 goto get_parent;
476
477 switch (move) {
478 case MOVE_DOWN:
479 if (pred->left != FILTER_PRED_INVALID) {
480 pred = &preds[pred->left];
481 continue;
482 }
483 goto get_parent;
484 case MOVE_UP_FROM_LEFT:
485 pred = &preds[pred->right];
486 move = MOVE_DOWN;
487 continue;
488 case MOVE_UP_FROM_RIGHT:
489 get_parent:
490 if (pred == root)
491 break;
492 pred = get_pred_parent(pred, preds,
493 pred->parent,
494 &move);
495 continue;
496 }
497 done = 1;
498 } while (!done);
499
500 /* We are fine. */
501 return 0;
502 }
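/*
 * Sketch of the walk order (assuming a tree of the form OR(AND(a, b), c)
 * and a callback that always returns WALK_PRED_DEFAULT): the callback sees
 *
 *	DOWN(OR), DOWN(AND), DOWN(a), UP_FROM_LEFT(AND), DOWN(b),
 *	UP_FROM_RIGHT(AND), UP_FROM_LEFT(OR), DOWN(c), UP_FROM_RIGHT(OR)
 *
 * so every leaf is visited once and every inner node up to three times.
 */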
503
504 /*
505 * A series of ANDs or ORs were found together. Instead of
506 * climbing up and down the tree branches, an array of the
507 * ops was made in the order of the checks. We can just move across
508 * the array and short circuit if needed.
509 */
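/*
 * For instance (field names are illustrative): once "a == 1 || b == 2 ||
 * c == 3" has been folded, the root OR node's ops[] array holds the
 * indices of the three leaf predicates and val holds 3, so the loop
 * below returns as soon as one of them matches.
 */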
510 static int process_ops(struct filter_pred *preds,
511 struct filter_pred *op, void *rec)
512 {
513 struct filter_pred *pred;
514 int match = 0;
515 int type;
516 int i;
517
518 /*
519 * Micro-optimization: We set type to true if op
520 * is an OR and false otherwise (AND). Then we
521 * just need to test if the match is equal to
522 * the type, and if it is, we can short circuit the
523 * rest of the checks:
524 *
525 * if ((match && op->op == OP_OR) ||
526 * (!match && op->op == OP_AND))
527 * return match;
528 */
529 type = op->op == OP_OR;
530
531 for (i = 0; i < op->val; i++) {
532 pred = &preds[op->ops[i]];
533 if (!WARN_ON_ONCE(!pred->fn))
534 match = pred->fn(pred, rec);
535 if (!!match == type)
536 break;
537 }
538 /* A match is reported when the normalized result equals the inverse of op->not */
539 return !!match == !op->not;
540 }
541
542 struct filter_match_preds_data {
543 struct filter_pred *preds;
544 int match;
545 void *rec;
546 };
547
548 static int filter_match_preds_cb(enum move_type move, struct filter_pred *pred,
549 int *err, void *data)
550 {
551 struct filter_match_preds_data *d = data;
552
553 *err = 0;
554 switch (move) {
555 case MOVE_DOWN:
556 /* only AND and OR have children */
557 if (pred->left != FILTER_PRED_INVALID) {
558 /* If ops is set, then it was folded. */
559 if (!pred->ops)
560 return WALK_PRED_DEFAULT;
561 /* We can treat folded ops as a leaf node */
562 d->match = process_ops(d->preds, pred, d->rec);
563 } else {
564 if (!WARN_ON_ONCE(!pred->fn))
565 d->match = pred->fn(pred, d->rec);
566 }
567
568 return WALK_PRED_PARENT;
569 case MOVE_UP_FROM_LEFT:
570 /*
571 * Check for short circuits.
572 *
573 * Optimization: !!match == (pred->op == OP_OR)
574 * is the same as:
575 * if ((match && pred->op == OP_OR) ||
576 * (!match && pred->op == OP_AND))
577 */
578 if (!!d->match == (pred->op == OP_OR))
579 return WALK_PRED_PARENT;
580 break;
581 case MOVE_UP_FROM_RIGHT:
582 break;
583 }
584
585 return WALK_PRED_DEFAULT;
586 }
587
588 /* return 1 if event matches, 0 otherwise (discard) */
589 int filter_match_preds(struct event_filter *filter, void *rec)
590 {
591 struct filter_pred *preds;
592 struct filter_pred *root;
593 struct filter_match_preds_data data = {
594 /* match is currently meaningless */
595 .match = -1,
596 .rec = rec,
597 };
598 int n_preds, ret;
599
600 /* no filter is considered a match */
601 if (!filter)
602 return 1;
603
604 n_preds = filter->n_preds;
605 if (!n_preds)
606 return 1;
607
608 /*
609 * n_preds, root and filter->preds are protected by preemption being disabled.
610 */
611 root = rcu_dereference_sched(filter->root);
612 if (!root)
613 return 1;
614
615 data.preds = preds = rcu_dereference_sched(filter->preds);
616 ret = walk_pred_tree(preds, root, filter_match_preds_cb, &data);
617 WARN_ON(ret);
618 return data.match;
619 }
620 EXPORT_SYMBOL_GPL(filter_match_preds);
621
622 static void parse_error(struct filter_parse_state *ps, int err, int pos)
623 {
624 ps->lasterr = err;
625 ps->lasterr_pos = pos;
626 }
627
628 static void remove_filter_string(struct event_filter *filter)
629 {
630 if (!filter)
631 return;
632
633 kfree(filter->filter_string);
634 filter->filter_string = NULL;
635 }
636
637 static int replace_filter_string(struct event_filter *filter,
638 char *filter_string)
639 {
640 kfree(filter->filter_string);
641 filter->filter_string = kstrdup(filter_string, GFP_KERNEL);
642 if (!filter->filter_string)
643 return -ENOMEM;
644
645 return 0;
646 }
647
648 static int append_filter_string(struct event_filter *filter,
649 char *string)
650 {
651 int newlen;
652 char *new_filter_string;
653
654 BUG_ON(!filter->filter_string);
655 newlen = strlen(filter->filter_string) + strlen(string) + 1;
656 new_filter_string = kmalloc(newlen, GFP_KERNEL);
657 if (!new_filter_string)
658 return -ENOMEM;
659
660 strcpy(new_filter_string, filter->filter_string);
661 strcat(new_filter_string, string);
662 kfree(filter->filter_string);
663 filter->filter_string = new_filter_string;
664
665 return 0;
666 }
667
668 static void append_filter_err(struct filter_parse_state *ps,
669 struct event_filter *filter)
670 {
671 int pos = ps->lasterr_pos;
672 char *buf, *pbuf;
673
674 buf = (char *)__get_free_page(GFP_TEMPORARY);
675 if (!buf)
676 return;
677
678 append_filter_string(filter, "\n");
679 memset(buf, ' ', PAGE_SIZE);
680 if (pos > PAGE_SIZE - 128)
681 pos = 0;
682 buf[pos] = '^';
683 pbuf = &buf[pos] + 1;
684
685 sprintf(pbuf, "\nparse_error: %s\n", err_text[ps->lasterr]);
686 append_filter_string(filter, buf);
687 free_page((unsigned long) buf);
688 }
689
690 static inline struct event_filter *event_filter(struct trace_event_file *file)
691 {
692 return file->filter;
693 }
694
695 /* caller must hold event_mutex */
696 void print_event_filter(struct trace_event_file *file, struct trace_seq *s)
697 {
698 struct event_filter *filter = event_filter(file);
699
700 if (filter && filter->filter_string)
701 trace_seq_printf(s, "%s\n", filter->filter_string);
702 else
703 trace_seq_puts(s, "none\n");
704 }
705
706 void print_subsystem_event_filter(struct event_subsystem *system,
707 struct trace_seq *s)
708 {
709 struct event_filter *filter;
710
711 mutex_lock(&event_mutex);
712 filter = system->filter;
713 if (filter && filter->filter_string)
714 trace_seq_printf(s, "%s\n", filter->filter_string);
715 else
716 trace_seq_puts(s, DEFAULT_SYS_FILTER_MESSAGE "\n");
717 mutex_unlock(&event_mutex);
718 }
719
720 static int __alloc_pred_stack(struct pred_stack *stack, int n_preds)
721 {
722 stack->preds = kcalloc(n_preds + 1, sizeof(*stack->preds), GFP_KERNEL);
723 if (!stack->preds)
724 return -ENOMEM;
725 stack->index = n_preds;
726 return 0;
727 }
728
729 static void __free_pred_stack(struct pred_stack *stack)
730 {
731 kfree(stack->preds);
732 stack->index = 0;
733 }
734
735 static int __push_pred_stack(struct pred_stack *stack,
736 struct filter_pred *pred)
737 {
738 int index = stack->index;
739
740 if (WARN_ON(index == 0))
741 return -ENOSPC;
742
743 stack->preds[--index] = pred;
744 stack->index = index;
745 return 0;
746 }
747
748 static struct filter_pred *
749 __pop_pred_stack(struct pred_stack *stack)
750 {
751 struct filter_pred *pred;
752 int index = stack->index;
753
754 pred = stack->preds[index++];
755 if (!pred)
756 return NULL;
757
758 stack->index = index;
759 return pred;
760 }
761
762 static int filter_set_pred(struct event_filter *filter,
763 int idx,
764 struct pred_stack *stack,
765 struct filter_pred *src)
766 {
767 struct filter_pred *dest = &filter->preds[idx];
768 struct filter_pred *left;
769 struct filter_pred *right;
770
771 *dest = *src;
772 dest->index = idx;
773
774 if (dest->op == OP_OR || dest->op == OP_AND) {
775 right = __pop_pred_stack(stack);
776 left = __pop_pred_stack(stack);
777 if (!left || !right)
778 return -EINVAL;
779 /*
780 * If both children can be folded
781 * and they are the same op as this op or a leaf,
782 * then this op can be folded.
783 */
784 if (left->index & FILTER_PRED_FOLD &&
785 ((left->op == dest->op && !left->not) ||
786 left->left == FILTER_PRED_INVALID) &&
787 right->index & FILTER_PRED_FOLD &&
788 ((right->op == dest->op && !right->not) ||
789 right->left == FILTER_PRED_INVALID))
790 dest->index |= FILTER_PRED_FOLD;
791
792 dest->left = left->index & ~FILTER_PRED_FOLD;
793 dest->right = right->index & ~FILTER_PRED_FOLD;
794 left->parent = dest->index & ~FILTER_PRED_FOLD;
795 right->parent = dest->index | FILTER_PRED_IS_RIGHT;
796 } else {
797 /*
798 * Make dest->left invalid to be used as a quick
799 * way to know this is a leaf node.
800 */
801 dest->left = FILTER_PRED_INVALID;
802
803 /* All leaves allow folding the parent ops. */
804 dest->index |= FILTER_PRED_FOLD;
805 }
806
807 return __push_pred_stack(stack, dest);
808 }
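/*
 * Illustration (operands are placeholders): for the postfix sequence
 * "a 1 == b 2 == && c 3 == ||", the stack evolves as
 *
 *	push leaf(a==1), push leaf(b==2),
 *	pop both and push AND,
 *	push leaf(c==3),
 *	pop leaf(c==3) and AND and push OR,
 *
 * leaving OR on the stack as the root of the tree.
 */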
809
810 static void __free_preds(struct event_filter *filter)
811 {
812 int i;
813
814 if (filter->preds) {
815 for (i = 0; i < filter->n_preds; i++)
816 kfree(filter->preds[i].ops);
817 kfree(filter->preds);
818 filter->preds = NULL;
819 }
820 filter->a_preds = 0;
821 filter->n_preds = 0;
822 }
823
824 static void filter_disable(struct trace_event_file *file)
825 {
826 unsigned long old_flags = file->flags;
827
828 file->flags &= ~EVENT_FILE_FL_FILTERED;
829
830 if (old_flags != file->flags)
831 trace_buffered_event_disable();
832 }
833
834 static void __free_filter(struct event_filter *filter)
835 {
836 if (!filter)
837 return;
838
839 __free_preds(filter);
840 kfree(filter->filter_string);
841 kfree(filter);
842 }
843
844 void free_event_filter(struct event_filter *filter)
845 {
846 __free_filter(filter);
847 }
848
849 static struct event_filter *__alloc_filter(void)
850 {
851 struct event_filter *filter;
852
853 filter = kzalloc(sizeof(*filter), GFP_KERNEL);
854 return filter;
855 }
856
857 static int __alloc_preds(struct event_filter *filter, int n_preds)
858 {
859 struct filter_pred *pred;
860 int i;
861
862 if (filter->preds)
863 __free_preds(filter);
864
865 filter->preds = kcalloc(n_preds, sizeof(*filter->preds), GFP_KERNEL);
866
867 if (!filter->preds)
868 return -ENOMEM;
869
870 filter->a_preds = n_preds;
871 filter->n_preds = 0;
872
873 for (i = 0; i < n_preds; i++) {
874 pred = &filter->preds[i];
875 pred->fn = filter_pred_none;
876 }
877
878 return 0;
879 }
880
881 static inline void __remove_filter(struct trace_event_file *file)
882 {
883 filter_disable(file);
884 remove_filter_string(file->filter);
885 }
886
887 static void filter_free_subsystem_preds(struct trace_subsystem_dir *dir,
888 struct trace_array *tr)
889 {
890 struct trace_event_file *file;
891
892 list_for_each_entry(file, &tr->events, list) {
893 if (file->system != dir)
894 continue;
895 __remove_filter(file);
896 }
897 }
898
899 static inline void __free_subsystem_filter(struct trace_event_file *file)
900 {
901 __free_filter(file->filter);
902 file->filter = NULL;
903 }
904
905 static void filter_free_subsystem_filters(struct trace_subsystem_dir *dir,
906 struct trace_array *tr)
907 {
908 struct trace_event_file *file;
909
910 list_for_each_entry(file, &tr->events, list) {
911 if (file->system != dir)
912 continue;
913 __free_subsystem_filter(file);
914 }
915 }
916
917 static int filter_add_pred(struct filter_parse_state *ps,
918 struct event_filter *filter,
919 struct filter_pred *pred,
920 struct pred_stack *stack)
921 {
922 int err;
923
924 if (WARN_ON(filter->n_preds == filter->a_preds)) {
925 parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
926 return -ENOSPC;
927 }
928
929 err = filter_set_pred(filter, filter->n_preds, stack, pred);
930 if (err)
931 return err;
932
933 filter->n_preds++;
934
935 return 0;
936 }
937
938 int filter_assign_type(const char *type)
939 {
940 if (strstr(type, "__data_loc") && strstr(type, "char"))
941 return FILTER_DYN_STRING;
942
943 if (strchr(type, '[') && strstr(type, "char"))
944 return FILTER_STATIC_STRING;
945
946 return FILTER_OTHER;
947 }
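/*
 * Examples (the field type strings are illustrative):
 *
 *	"__data_loc char[]"  -> FILTER_DYN_STRING
 *	"char[16]"           -> FILTER_STATIC_STRING
 *	"unsigned long"      -> FILTER_OTHER
 */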
948
949 static bool is_legal_op(struct ftrace_event_field *field, int op)
950 {
951 if (is_string_field(field) &&
952 (op != OP_EQ && op != OP_NE && op != OP_GLOB))
953 return false;
954 if (!is_string_field(field) && op == OP_GLOB)
955 return false;
956
957 return true;
958 }
959
960 static filter_pred_fn_t select_comparison_fn(int op, int field_size,
961 int field_is_signed)
962 {
963 filter_pred_fn_t fn = NULL;
964
965 switch (field_size) {
966 case 8:
967 if (op == OP_EQ || op == OP_NE)
968 fn = filter_pred_64;
969 else if (field_is_signed)
970 fn = filter_pred_s64;
971 else
972 fn = filter_pred_u64;
973 break;
974 case 4:
975 if (op == OP_EQ || op == OP_NE)
976 fn = filter_pred_32;
977 else if (field_is_signed)
978 fn = filter_pred_s32;
979 else
980 fn = filter_pred_u32;
981 break;
982 case 2:
983 if (op == OP_EQ || op == OP_NE)
984 fn = filter_pred_16;
985 else if (field_is_signed)
986 fn = filter_pred_s16;
987 else
988 fn = filter_pred_u16;
989 break;
990 case 1:
991 if (op == OP_EQ || op == OP_NE)
992 fn = filter_pred_8;
993 else if (field_is_signed)
994 fn = filter_pred_s8;
995 else
996 fn = filter_pred_u8;
997 break;
998 }
999
1000 return fn;
1001 }
1002
1003 static int init_pred(struct filter_parse_state *ps,
1004 struct ftrace_event_field *field,
1005 struct filter_pred *pred)
1006
1007 {
1008 filter_pred_fn_t fn = filter_pred_none;
1009 unsigned long long val;
1010 int ret;
1011
1012 pred->offset = field->offset;
1013
1014 if (!is_legal_op(field, pred->op)) {
1015 parse_error(ps, FILT_ERR_ILLEGAL_FIELD_OP, 0);
1016 return -EINVAL;
1017 }
1018
1019 if (field->filter_type == FILTER_COMM) {
1020 filter_build_regex(pred);
1021 fn = filter_pred_comm;
1022 pred->regex.field_len = TASK_COMM_LEN;
1023 } else if (is_string_field(field)) {
1024 filter_build_regex(pred);
1025
1026 if (field->filter_type == FILTER_STATIC_STRING) {
1027 fn = filter_pred_string;
1028 pred->regex.field_len = field->size;
1029 } else if (field->filter_type == FILTER_DYN_STRING)
1030 fn = filter_pred_strloc;
1031 else
1032 fn = filter_pred_pchar;
1033 } else if (is_function_field(field)) {
1034 if (strcmp(field->name, "ip")) {
1035 parse_error(ps, FILT_ERR_IP_FIELD_ONLY, 0);
1036 return -EINVAL;
1037 }
1038 } else {
1039 if (field->is_signed)
1040 ret = kstrtoll(pred->regex.pattern, 0, &val);
1041 else
1042 ret = kstrtoull(pred->regex.pattern, 0, &val);
1043 if (ret) {
1044 parse_error(ps, FILT_ERR_ILLEGAL_INTVAL, 0);
1045 return -EINVAL;
1046 }
1047 pred->val = val;
1048
1049 if (field->filter_type == FILTER_CPU)
1050 fn = filter_pred_cpu;
1051 else
1052 fn = select_comparison_fn(pred->op, field->size,
1053 field->is_signed);
1054 if (!fn) {
1055 parse_error(ps, FILT_ERR_INVALID_OP, 0);
1056 return -EINVAL;
1057 }
1058 }
1059
1060 if (pred->op == OP_NE)
1061 pred->not ^= 1;
1062
1063 pred->fn = fn;
1064 return 0;
1065 }
1066
1067 static void parse_init(struct filter_parse_state *ps,
1068 struct filter_op *ops,
1069 char *infix_string)
1070 {
1071 memset(ps, '\0', sizeof(*ps));
1072
1073 ps->infix.string = infix_string;
1074 ps->infix.cnt = strlen(infix_string);
1075 ps->ops = ops;
1076
1077 INIT_LIST_HEAD(&ps->opstack);
1078 INIT_LIST_HEAD(&ps->postfix);
1079 }
1080
1081 static char infix_next(struct filter_parse_state *ps)
1082 {
1083 if (!ps->infix.cnt)
1084 return 0;
1085
1086 ps->infix.cnt--;
1087
1088 return ps->infix.string[ps->infix.tail++];
1089 }
1090
1091 static char infix_peek(struct filter_parse_state *ps)
1092 {
1093 if (ps->infix.tail == strlen(ps->infix.string))
1094 return 0;
1095
1096 return ps->infix.string[ps->infix.tail];
1097 }
1098
1099 static void infix_advance(struct filter_parse_state *ps)
1100 {
1101 if (!ps->infix.cnt)
1102 return;
1103
1104 ps->infix.cnt--;
1105 ps->infix.tail++;
1106 }
1107
1108 static inline int is_precedence_lower(struct filter_parse_state *ps,
1109 int a, int b)
1110 {
1111 return ps->ops[a].precedence < ps->ops[b].precedence;
1112 }
1113
1114 static inline int is_op_char(struct filter_parse_state *ps, char c)
1115 {
1116 int i;
1117
1118 for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
1119 if (ps->ops[i].string[0] == c)
1120 return 1;
1121 }
1122
1123 return 0;
1124 }
1125
1126 static int infix_get_op(struct filter_parse_state *ps, char firstc)
1127 {
1128 char nextc = infix_peek(ps);
1129 char opstr[3];
1130 int i;
1131
1132 opstr[0] = firstc;
1133 opstr[1] = nextc;
1134 opstr[2] = '\0';
1135
1136 for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
1137 if (!strcmp(opstr, ps->ops[i].string)) {
1138 infix_advance(ps);
1139 return ps->ops[i].id;
1140 }
1141 }
1142
1143 opstr[1] = '\0';
1144
1145 for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
1146 if (!strcmp(opstr, ps->ops[i].string))
1147 return ps->ops[i].id;
1148 }
1149
1150 return OP_NONE;
1151 }
1152
1153 static inline void clear_operand_string(struct filter_parse_state *ps)
1154 {
1155 memset(ps->operand.string, '\0', MAX_FILTER_STR_VAL);
1156 ps->operand.tail = 0;
1157 }
1158
1159 static inline int append_operand_char(struct filter_parse_state *ps, char c)
1160 {
1161 if (ps->operand.tail == MAX_FILTER_STR_VAL - 1)
1162 return -EINVAL;
1163
1164 ps->operand.string[ps->operand.tail++] = c;
1165
1166 return 0;
1167 }
1168
1169 static int filter_opstack_push(struct filter_parse_state *ps, int op)
1170 {
1171 struct opstack_op *opstack_op;
1172
1173 opstack_op = kmalloc(sizeof(*opstack_op), GFP_KERNEL);
1174 if (!opstack_op)
1175 return -ENOMEM;
1176
1177 opstack_op->op = op;
1178 list_add(&opstack_op->list, &ps->opstack);
1179
1180 return 0;
1181 }
1182
1183 static int filter_opstack_empty(struct filter_parse_state *ps)
1184 {
1185 return list_empty(&ps->opstack);
1186 }
1187
1188 static int filter_opstack_top(struct filter_parse_state *ps)
1189 {
1190 struct opstack_op *opstack_op;
1191
1192 if (filter_opstack_empty(ps))
1193 return OP_NONE;
1194
1195 opstack_op = list_first_entry(&ps->opstack, struct opstack_op, list);
1196
1197 return opstack_op->op;
1198 }
1199
1200 static int filter_opstack_pop(struct filter_parse_state *ps)
1201 {
1202 struct opstack_op *opstack_op;
1203 int op;
1204
1205 if (filter_opstack_empty(ps))
1206 return OP_NONE;
1207
1208 opstack_op = list_first_entry(&ps->opstack, struct opstack_op, list);
1209 op = opstack_op->op;
1210 list_del(&opstack_op->list);
1211
1212 kfree(opstack_op);
1213
1214 return op;
1215 }
1216
1217 static void filter_opstack_clear(struct filter_parse_state *ps)
1218 {
1219 while (!filter_opstack_empty(ps))
1220 filter_opstack_pop(ps);
1221 }
1222
1223 static char *curr_operand(struct filter_parse_state *ps)
1224 {
1225 return ps->operand.string;
1226 }
1227
1228 static int postfix_append_operand(struct filter_parse_state *ps, char *operand)
1229 {
1230 struct postfix_elt *elt;
1231
1232 elt = kmalloc(sizeof(*elt), GFP_KERNEL);
1233 if (!elt)
1234 return -ENOMEM;
1235
1236 elt->op = OP_NONE;
1237 elt->operand = kstrdup(operand, GFP_KERNEL);
1238 if (!elt->operand) {
1239 kfree(elt);
1240 return -ENOMEM;
1241 }
1242
1243 list_add_tail(&elt->list, &ps->postfix);
1244
1245 return 0;
1246 }
1247
1248 static int postfix_append_op(struct filter_parse_state *ps, int op)
1249 {
1250 struct postfix_elt *elt;
1251
1252 elt = kmalloc(sizeof(*elt), GFP_KERNEL);
1253 if (!elt)
1254 return -ENOMEM;
1255
1256 elt->op = op;
1257 elt->operand = NULL;
1258
1259 list_add_tail(&elt->list, &ps->postfix);
1260
1261 return 0;
1262 }
1263
1264 static void postfix_clear(struct filter_parse_state *ps)
1265 {
1266 struct postfix_elt *elt;
1267
1268 while (!list_empty(&ps->postfix)) {
1269 elt = list_first_entry(&ps->postfix, struct postfix_elt, list);
1270 list_del(&elt->list);
1271 kfree(elt->operand);
1272 kfree(elt);
1273 }
1274 }
1275
1276 static int filter_parse(struct filter_parse_state *ps)
1277 {
1278 int in_string = 0;
1279 int op, top_op;
1280 char ch;
1281
1282 while ((ch = infix_next(ps))) {
1283 if (ch == '"') {
1284 in_string ^= 1;
1285 continue;
1286 }
1287
1288 if (in_string)
1289 goto parse_operand;
1290
1291 if (isspace(ch))
1292 continue;
1293
1294 if (is_op_char(ps, ch)) {
1295 op = infix_get_op(ps, ch);
1296 if (op == OP_NONE) {
1297 parse_error(ps, FILT_ERR_INVALID_OP, 0);
1298 return -EINVAL;
1299 }
1300
1301 if (strlen(curr_operand(ps))) {
1302 postfix_append_operand(ps, curr_operand(ps));
1303 clear_operand_string(ps);
1304 }
1305
1306 while (!filter_opstack_empty(ps)) {
1307 top_op = filter_opstack_top(ps);
1308 if (!is_precedence_lower(ps, top_op, op)) {
1309 top_op = filter_opstack_pop(ps);
1310 postfix_append_op(ps, top_op);
1311 continue;
1312 }
1313 break;
1314 }
1315
1316 filter_opstack_push(ps, op);
1317 continue;
1318 }
1319
1320 if (ch == '(') {
1321 filter_opstack_push(ps, OP_OPEN_PAREN);
1322 continue;
1323 }
1324
1325 if (ch == ')') {
1326 if (strlen(curr_operand(ps))) {
1327 postfix_append_operand(ps, curr_operand(ps));
1328 clear_operand_string(ps);
1329 }
1330
1331 top_op = filter_opstack_pop(ps);
1332 while (top_op != OP_NONE) {
1333 if (top_op == OP_OPEN_PAREN)
1334 break;
1335 postfix_append_op(ps, top_op);
1336 top_op = filter_opstack_pop(ps);
1337 }
1338 if (top_op == OP_NONE) {
1339 parse_error(ps, FILT_ERR_UNBALANCED_PAREN, 0);
1340 return -EINVAL;
1341 }
1342 continue;
1343 }
1344 parse_operand:
1345 if (append_operand_char(ps, ch)) {
1346 parse_error(ps, FILT_ERR_OPERAND_TOO_LONG, 0);
1347 return -EINVAL;
1348 }
1349 }
1350
1351 if (strlen(curr_operand(ps)))
1352 postfix_append_operand(ps, curr_operand(ps));
1353
1354 while (!filter_opstack_empty(ps)) {
1355 top_op = filter_opstack_pop(ps);
1356 if (top_op == OP_NONE)
1357 break;
1358 if (top_op == OP_OPEN_PAREN) {
1359 parse_error(ps, FILT_ERR_UNBALANCED_PAREN, 0);
1360 return -EINVAL;
1361 }
1362 postfix_append_op(ps, top_op);
1363 }
1364
1365 return 0;
1366 }
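/*
 * Worked example (field names are placeholders): the infix string
 *
 *	"common_pid != 0 && flags != 0x1"
 *
 * is converted by filter_parse() above into the postfix list
 *
 *	"common_pid", "0", OP_NE, "flags", "0x1", OP_NE, OP_AND
 *
 * which replace_preds() below turns into a tree with OP_AND at the root.
 */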
1367
1368 static struct filter_pred *create_pred(struct filter_parse_state *ps,
1369 struct trace_event_call *call,
1370 int op, char *operand1, char *operand2)
1371 {
1372 struct ftrace_event_field *field;
1373 static struct filter_pred pred;
1374
1375 memset(&pred, 0, sizeof(pred));
1376 pred.op = op;
1377
1378 if (op == OP_AND || op == OP_OR)
1379 return &pred;
1380
1381 if (!operand1 || !operand2) {
1382 parse_error(ps, FILT_ERR_MISSING_FIELD, 0);
1383 return NULL;
1384 }
1385
1386 field = trace_find_event_field(call, operand1);
1387 if (!field) {
1388 parse_error(ps, FILT_ERR_FIELD_NOT_FOUND, 0);
1389 return NULL;
1390 }
1391
1392 strcpy(pred.regex.pattern, operand2);
1393 pred.regex.len = strlen(pred.regex.pattern);
1394 pred.field = field;
1395 return init_pred(ps, field, &pred) ? NULL : &pred;
1396 }
1397
1398 static int check_preds(struct filter_parse_state *ps)
1399 {
1400 int n_normal_preds = 0, n_logical_preds = 0;
1401 struct postfix_elt *elt;
1402 int cnt = 0;
1403
1404 list_for_each_entry(elt, &ps->postfix, list) {
1405 if (elt->op == OP_NONE) {
1406 cnt++;
1407 continue;
1408 }
1409
1410 if (elt->op == OP_AND || elt->op == OP_OR) {
1411 n_logical_preds++;
1412 cnt--;
1413 continue;
1414 }
1415 if (elt->op != OP_NOT)
1416 cnt--;
1417 n_normal_preds++;
1418 /* all ops should have operands */
1419 if (cnt < 0)
1420 break;
1421 }
1422
1423 if (cnt != 1 || !n_normal_preds || n_logical_preds >= n_normal_preds) {
1424 parse_error(ps, FILT_ERR_INVALID_FILTER, 0);
1425 return -EINVAL;
1426 }
1427
1428 return 0;
1429 }
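/*
 * Continuing the illustrative example above: for the postfix list
 * "common_pid", "0", OP_NE, "flags", "0x1", OP_NE, OP_AND, cnt ends at 1
 * with two normal predicates and one logical one, so the expression is
 * accepted.
 */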
1430
1431 static int count_preds(struct filter_parse_state *ps)
1432 {
1433 struct postfix_elt *elt;
1434 int n_preds = 0;
1435
1436 list_for_each_entry(elt, &ps->postfix, list) {
1437 if (elt->op == OP_NONE)
1438 continue;
1439 n_preds++;
1440 }
1441
1442 return n_preds;
1443 }
1444
1445 struct check_pred_data {
1446 int count;
1447 int max;
1448 };
1449
1450 static int check_pred_tree_cb(enum move_type move, struct filter_pred *pred,
1451 int *err, void *data)
1452 {
1453 struct check_pred_data *d = data;
1454
1455 if (WARN_ON(d->count++ > d->max)) {
1456 *err = -EINVAL;
1457 return WALK_PRED_ABORT;
1458 }
1459 return WALK_PRED_DEFAULT;
1460 }
1461
1462 /*
1463 * The tree is walked when an event is filtered. If the tree is not correctly
1464 * built, it may cause an infinite loop. Check here that the tree does
1465 * indeed terminate.
1466 */
1467 static int check_pred_tree(struct event_filter *filter,
1468 struct filter_pred *root)
1469 {
1470 struct check_pred_data data = {
1471 /*
1472 * A node can be hit at most three times:
1473 * once going down, once coming up from the left, and
1474 * once coming up from the right. This is more than enough
1475 * since leaves are only hit a single time.
1476 */
1477 .max = 3 * filter->n_preds,
1478 .count = 0,
1479 };
1480
1481 return walk_pred_tree(filter->preds, root,
1482 check_pred_tree_cb, &data);
1483 }
1484
1485 static int count_leafs_cb(enum move_type move, struct filter_pred *pred,
1486 int *err, void *data)
1487 {
1488 int *count = data;
1489
1490 if ((move == MOVE_DOWN) &&
1491 (pred->left == FILTER_PRED_INVALID))
1492 (*count)++;
1493
1494 return WALK_PRED_DEFAULT;
1495 }
1496
1497 static int count_leafs(struct filter_pred *preds, struct filter_pred *root)
1498 {
1499 int count = 0, ret;
1500
1501 ret = walk_pred_tree(preds, root, count_leafs_cb, &count);
1502 WARN_ON(ret);
1503 return count;
1504 }
1505
1506 struct fold_pred_data {
1507 struct filter_pred *root;
1508 int count;
1509 int children;
1510 };
1511
1512 static int fold_pred_cb(enum move_type move, struct filter_pred *pred,
1513 int *err, void *data)
1514 {
1515 struct fold_pred_data *d = data;
1516 struct filter_pred *root = d->root;
1517
1518 if (move != MOVE_DOWN)
1519 return WALK_PRED_DEFAULT;
1520 if (pred->left != FILTER_PRED_INVALID)
1521 return WALK_PRED_DEFAULT;
1522
1523 if (WARN_ON(d->count == d->children)) {
1524 *err = -EINVAL;
1525 return WALK_PRED_ABORT;
1526 }
1527
1528 pred->index &= ~FILTER_PRED_FOLD;
1529 root->ops[d->count++] = pred->index;
1530 return WALK_PRED_DEFAULT;
1531 }
1532
1533 static int fold_pred(struct filter_pred *preds, struct filter_pred *root)
1534 {
1535 struct fold_pred_data data = {
1536 .root = root,
1537 .count = 0,
1538 };
1539 int children;
1540
1541 /* No need to keep the fold flag */
1542 root->index &= ~FILTER_PRED_FOLD;
1543
1544 /* If the root is a leaf then do nothing */
1545 if (root->left == FILTER_PRED_INVALID)
1546 return 0;
1547
1548 /* count the children */
1549 children = count_leafs(preds, &preds[root->left]);
1550 children += count_leafs(preds, &preds[root->right]);
1551
1552 root->ops = kcalloc(children, sizeof(*root->ops), GFP_KERNEL);
1553 if (!root->ops)
1554 return -ENOMEM;
1555
1556 root->val = children;
1557 data.children = children;
1558 return walk_pred_tree(preds, root, fold_pred_cb, &data);
1559 }
1560
1561 static int fold_pred_tree_cb(enum move_type move, struct filter_pred *pred,
1562 int *err, void *data)
1563 {
1564 struct filter_pred *preds = data;
1565
1566 if (move != MOVE_DOWN)
1567 return WALK_PRED_DEFAULT;
1568 if (!(pred->index & FILTER_PRED_FOLD))
1569 return WALK_PRED_DEFAULT;
1570
1571 *err = fold_pred(preds, pred);
1572 if (*err)
1573 return WALK_PRED_ABORT;
1574
1575 /* everything below is folded, continue with parent */
1576 return WALK_PRED_PARENT;
1577 }
1578
1579 /*
1580 * To optimize the processing of the ops, if we have several "ors" or
1581 * "ands" together, we can put them in an array and process them all
1582 * together, speeding up the filter logic.
1583 */
1584 static int fold_pred_tree(struct event_filter *filter,
1585 struct filter_pred *root)
1586 {
1587 return walk_pred_tree(filter->preds, root, fold_pred_tree_cb,
1588 filter->preds);
1589 }
1590
1591 static int replace_preds(struct trace_event_call *call,
1592 struct event_filter *filter,
1593 struct filter_parse_state *ps,
1594 bool dry_run)
1595 {
1596 char *operand1 = NULL, *operand2 = NULL;
1597 struct filter_pred *pred;
1598 struct filter_pred *root;
1599 struct postfix_elt *elt;
1600 struct pred_stack stack = { }; /* init to NULL */
1601 int err;
1602 int n_preds = 0;
1603
1604 n_preds = count_preds(ps);
1605 if (n_preds >= MAX_FILTER_PRED) {
1606 parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
1607 return -ENOSPC;
1608 }
1609
1610 err = check_preds(ps);
1611 if (err)
1612 return err;
1613
1614 if (!dry_run) {
1615 err = __alloc_pred_stack(&stack, n_preds);
1616 if (err)
1617 return err;
1618 err = __alloc_preds(filter, n_preds);
1619 if (err)
1620 goto fail;
1621 }
1622
1623 n_preds = 0;
1624 list_for_each_entry(elt, &ps->postfix, list) {
1625 if (elt->op == OP_NONE) {
1626 if (!operand1)
1627 operand1 = elt->operand;
1628 else if (!operand2)
1629 operand2 = elt->operand;
1630 else {
1631 parse_error(ps, FILT_ERR_TOO_MANY_OPERANDS, 0);
1632 err = -EINVAL;
1633 goto fail;
1634 }
1635 continue;
1636 }
1637
1638 if (elt->op == OP_NOT) {
1639 if (!n_preds || operand1 || operand2) {
1640 parse_error(ps, FILT_ERR_ILLEGAL_NOT_OP, 0);
1641 err = -EINVAL;
1642 goto fail;
1643 }
1644 if (!dry_run)
1645 filter->preds[n_preds - 1].not ^= 1;
1646 continue;
1647 }
1648
1649 if (WARN_ON(n_preds++ == MAX_FILTER_PRED)) {
1650 parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
1651 err = -ENOSPC;
1652 goto fail;
1653 }
1654
1655 pred = create_pred(ps, call, elt->op, operand1, operand2);
1656 if (!pred) {
1657 err = -EINVAL;
1658 goto fail;
1659 }
1660
1661 if (!dry_run) {
1662 err = filter_add_pred(ps, filter, pred, &stack);
1663 if (err)
1664 goto fail;
1665 }
1666
1667 operand1 = operand2 = NULL;
1668 }
1669
1670 if (!dry_run) {
1671 /* We should have one item left on the stack */
1672 pred = __pop_pred_stack(&stack);
1673 if (!pred)
1674 return -EINVAL;
1675 /* This item is where we start from in matching */
1676 root = pred;
1677 /* Make sure the stack is empty */
1678 pred = __pop_pred_stack(&stack);
1679 if (WARN_ON(pred)) {
1680 err = -EINVAL;
1681 filter->root = NULL;
1682 goto fail;
1683 }
1684 err = check_pred_tree(filter, root);
1685 if (err)
1686 goto fail;
1687
1688 /* Optimize the tree */
1689 err = fold_pred_tree(filter, root);
1690 if (err)
1691 goto fail;
1692
1693 /* We don't set root until we know it works */
1694 barrier();
1695 filter->root = root;
1696 }
1697
1698 err = 0;
1699 fail:
1700 __free_pred_stack(&stack);
1701 return err;
1702 }
1703
1704 static inline void event_set_filtered_flag(struct trace_event_file *file)
1705 {
1706 unsigned long old_flags = file->flags;
1707
1708 file->flags |= EVENT_FILE_FL_FILTERED;
1709
1710 if (old_flags != file->flags)
1711 trace_buffered_event_enable();
1712 }
1713
1714 static inline void event_set_filter(struct trace_event_file *file,
1715 struct event_filter *filter)
1716 {
1717 rcu_assign_pointer(file->filter, filter);
1718 }
1719
1720 static inline void event_clear_filter(struct trace_event_file *file)
1721 {
1722 RCU_INIT_POINTER(file->filter, NULL);
1723 }
1724
1725 static inline void
1726 event_set_no_set_filter_flag(struct trace_event_file *file)
1727 {
1728 file->flags |= EVENT_FILE_FL_NO_SET_FILTER;
1729 }
1730
1731 static inline void
1732 event_clear_no_set_filter_flag(struct trace_event_file *file)
1733 {
1734 file->flags &= ~EVENT_FILE_FL_NO_SET_FILTER;
1735 }
1736
1737 static inline bool
1738 event_no_set_filter_flag(struct trace_event_file *file)
1739 {
1740 if (file->flags & EVENT_FILE_FL_NO_SET_FILTER)
1741 return true;
1742
1743 return false;
1744 }
1745
1746 struct filter_list {
1747 struct list_head list;
1748 struct event_filter *filter;
1749 };
1750
1751 static int replace_system_preds(struct trace_subsystem_dir *dir,
1752 struct trace_array *tr,
1753 struct filter_parse_state *ps,
1754 char *filter_string)
1755 {
1756 struct trace_event_file *file;
1757 struct filter_list *filter_item;
1758 struct filter_list *tmp;
1759 LIST_HEAD(filter_list);
1760 bool fail = true;
1761 int err;
1762
1763 list_for_each_entry(file, &tr->events, list) {
1764 if (file->system != dir)
1765 continue;
1766
1767 /*
1768 * Try to see if the filter can be applied
1769 * (filter arg is ignored on dry_run)
1770 */
1771 err = replace_preds(file->event_call, NULL, ps, true);
1772 if (err)
1773 event_set_no_set_filter_flag(file);
1774 else
1775 event_clear_no_set_filter_flag(file);
1776 }
1777
1778 list_for_each_entry(file, &tr->events, list) {
1779 struct event_filter *filter;
1780
1781 if (file->system != dir)
1782 continue;
1783
1784 if (event_no_set_filter_flag(file))
1785 continue;
1786
1787 filter_item = kzalloc(sizeof(*filter_item), GFP_KERNEL);
1788 if (!filter_item)
1789 goto fail_mem;
1790
1791 list_add_tail(&filter_item->list, &filter_list);
1792
1793 filter_item->filter = __alloc_filter();
1794 if (!filter_item->filter)
1795 goto fail_mem;
1796 filter = filter_item->filter;
1797
1798 /* Can only fail on no memory */
1799 err = replace_filter_string(filter, filter_string);
1800 if (err)
1801 goto fail_mem;
1802
1803 err = replace_preds(file->event_call, filter, ps, false);
1804 if (err) {
1805 filter_disable(file);
1806 parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
1807 append_filter_err(ps, filter);
1808 } else
1809 event_set_filtered_flag(file);
1810 /*
1811 * Regardless of whether this returned an error, we still
1812 * replace the filter for the call.
1813 */
1814 filter = event_filter(file);
1815 event_set_filter(file, filter_item->filter);
1816 filter_item->filter = filter;
1817
1818 fail = false;
1819 }
1820
1821 if (fail)
1822 goto fail;
1823
1824 /*
1825 * The calls can still be using the old filters.
1826 * Do a synchronize_sched() to ensure all calls are
1827 * done with them before we free them.
1828 */
1829 synchronize_sched();
1830 list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
1831 __free_filter(filter_item->filter);
1832 list_del(&filter_item->list);
1833 kfree(filter_item);
1834 }
1835 return 0;
1836 fail:
1837 /* No call succeeded */
1838 list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
1839 list_del(&filter_item->list);
1840 kfree(filter_item);
1841 }
1842 parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
1843 return -EINVAL;
1844 fail_mem:
1845 /* If any call succeeded, we still need to sync */
1846 if (!fail)
1847 synchronize_sched();
1848 list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
1849 __free_filter(filter_item->filter);
1850 list_del(&filter_item->list);
1851 kfree(filter_item);
1852 }
1853 return -ENOMEM;
1854 }
1855
1856 static int create_filter_start(char *filter_str, bool set_str,
1857 struct filter_parse_state **psp,
1858 struct event_filter **filterp)
1859 {
1860 struct event_filter *filter;
1861 struct filter_parse_state *ps = NULL;
1862 int err = 0;
1863
1864 WARN_ON_ONCE(*psp || *filterp);
1865
1866 /* allocate everything, and if any fails, free all and fail */
1867 filter = __alloc_filter();
1868 if (filter && set_str)
1869 err = replace_filter_string(filter, filter_str);
1870
1871 ps = kzalloc(sizeof(*ps), GFP_KERNEL);
1872
1873 if (!filter || !ps || err) {
1874 kfree(ps);
1875 __free_filter(filter);
1876 return -ENOMEM;
1877 }
1878
1879 /* we're committed to creating a new filter */
1880 *filterp = filter;
1881 *psp = ps;
1882
1883 parse_init(ps, filter_ops, filter_str);
1884 err = filter_parse(ps);
1885 if (err && set_str)
1886 append_filter_err(ps, filter);
1887 return err;
1888 }
1889
1890 static void create_filter_finish(struct filter_parse_state *ps)
1891 {
1892 if (ps) {
1893 filter_opstack_clear(ps);
1894 postfix_clear(ps);
1895 kfree(ps);
1896 }
1897 }
1898
1899 /**
1900 * create_filter - create a filter for a trace_event_call
1901 * @call: trace_event_call to create a filter for
1902 * @filter_str: filter string
1903 * @set_str: remember @filter_str and enable detailed error in filter
1904 * @filterp: out param for created filter (always updated on return)
1905 *
1906 * Creates a filter for @call with @filter_str. If @set_str is %true,
1907 * @filter_str is copied and recorded in the new filter.
1908 *
1909 * On success, returns 0 and *@filterp points to the new filter. On
1910 * failure, returns -errno and *@filterp may point to %NULL or to a new
1911 * filter. In the latter case, the returned filter contains error
1912 * information if @set_str is %true and the caller is responsible for
1913 * freeing it.
1914 */
1915 static int create_filter(struct trace_event_call *call,
1916 char *filter_str, bool set_str,
1917 struct event_filter **filterp)
1918 {
1919 struct event_filter *filter = NULL;
1920 struct filter_parse_state *ps = NULL;
1921 int err;
1922
1923 err = create_filter_start(filter_str, set_str, &ps, &filter);
1924 if (!err) {
1925 err = replace_preds(call, filter, ps, false);
1926 if (err && set_str)
1927 append_filter_err(ps, filter);
1928 }
1929 if (err && !set_str) {
1930 free_event_filter(filter);
1931 filter = NULL;
1932 }
1933 create_filter_finish(ps);
1934
1935 *filterp = filter;
1936 return err;
1937 }
1938
1939 int create_event_filter(struct trace_event_call *call,
1940 char *filter_str, bool set_str,
1941 struct event_filter **filterp)
1942 {
1943 return create_filter(call, filter_str, set_str, filterp);
1944 }
1945
1946 /**
1947 * create_system_filter - create a filter for an event_subsystem
1948 * @dir: the trace_subsystem_dir of the subsystem to create a filter for
1949 * @filter_str: filter string
1950 * @filterp: out param for created filter (always updated on return)
1951 *
1952 * Identical to create_filter() except that it creates a subsystem filter
1953 * and always remembers @filter_str.
1954 */
1955 static int create_system_filter(struct trace_subsystem_dir *dir,
1956 struct trace_array *tr,
1957 char *filter_str, struct event_filter **filterp)
1958 {
1959 struct event_filter *filter = NULL;
1960 struct filter_parse_state *ps = NULL;
1961 int err;
1962
1963 err = create_filter_start(filter_str, true, &ps, &filter);
1964 if (!err) {
1965 err = replace_system_preds(dir, tr, ps, filter_str);
1966 if (!err) {
1967 /* System filters just show a default message */
1968 kfree(filter->filter_string);
1969 filter->filter_string = NULL;
1970 } else {
1971 append_filter_err(ps, filter);
1972 }
1973 }
1974 create_filter_finish(ps);
1975
1976 *filterp = filter;
1977 return err;
1978 }
1979
1980 /* caller must hold event_mutex */
1981 int apply_event_filter(struct trace_event_file *file, char *filter_string)
1982 {
1983 struct trace_event_call *call = file->event_call;
1984 struct event_filter *filter;
1985 int err;
1986
1987 if (!strcmp(strstrip(filter_string), "0")) {
1988 filter_disable(file);
1989 filter = event_filter(file);
1990
1991 if (!filter)
1992 return 0;
1993
1994 event_clear_filter(file);
1995
1996 /* Make sure the filter is not being used */
1997 synchronize_sched();
1998 __free_filter(filter);
1999
2000 return 0;
2001 }
2002
2003 err = create_filter(call, filter_string, true, &filter);
2004
2005 /*
2006 * Always swap the call filter with the new filter
2007 * even if there was an error. If there was an error
2008 * in the filter, we disable the filter and show the error
2009 * string
2010 */
2011 if (filter) {
2012 struct event_filter *tmp;
2013
2014 tmp = event_filter(file);
2015 if (!err)
2016 event_set_filtered_flag(file);
2017 else
2018 filter_disable(file);
2019
2020 event_set_filter(file, filter);
2021
2022 if (tmp) {
2023 /* Make sure the call is done with the filter */
2024 synchronize_sched();
2025 __free_filter(tmp);
2026 }
2027 }
2028
2029 return err;
2030 }
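/*
 * Typical path into this function (paths and filter are illustrative):
 * writing a filter string to the per-event tracefs "filter" file, e.g.
 *
 *	echo 'common_pid != 0' > events/sched/sched_switch/filter
 *	echo 0 > events/sched/sched_switch/filter	# clear the filter
 *
 * ends up here with @filter_string holding what was written.
 */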
2031
2032 int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
2033 char *filter_string)
2034 {
2035 struct event_subsystem *system = dir->subsystem;
2036 struct trace_array *tr = dir->tr;
2037 struct event_filter *filter;
2038 int err = 0;
2039
2040 mutex_lock(&event_mutex);
2041
2042 /* Make sure the system still has events */
2043 if (!dir->nr_events) {
2044 err = -ENODEV;
2045 goto out_unlock;
2046 }
2047
2048 if (!strcmp(strstrip(filter_string), "0")) {
2049 filter_free_subsystem_preds(dir, tr);
2050 remove_filter_string(system->filter);
2051 filter = system->filter;
2052 system->filter = NULL;
2053 /* Ensure all filters are no longer used */
2054 synchronize_sched();
2055 filter_free_subsystem_filters(dir, tr);
2056 __free_filter(filter);
2057 goto out_unlock;
2058 }
2059
2060 err = create_system_filter(dir, tr, filter_string, &filter);
2061 if (filter) {
2062 /*
2063 * No event actually uses the system filter, so
2064 * we can free it without synchronize_sched().
2065 */
2066 __free_filter(system->filter);
2067 system->filter = filter;
2068 }
2069 out_unlock:
2070 mutex_unlock(&event_mutex);
2071
2072 return err;
2073 }
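
/*
 * Illustrative note (an assumption about the usual caller, not stated in
 * this file): subsystem filters are normally driven from the
 * per-subsystem tracefs 'filter' file, e.g.
 *
 *	echo 'common_pid != 0' > /sys/kernel/debug/tracing/events/sched/filter
 *	echo 0 > /sys/kernel/debug/tracing/events/sched/filter
 *
 * The first write reaches apply_subsystem_event_filter() above; the
 * second write ("0") removes the subsystem filter again.
 */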
2074
2075 #ifdef CONFIG_PERF_EVENTS
2076
2077 void ftrace_profile_free_filter(struct perf_event *event)
2078 {
2079 struct event_filter *filter = event->filter;
2080
2081 event->filter = NULL;
2082 __free_filter(filter);
2083 }
2084
2085 struct function_filter_data {
2086 struct ftrace_ops *ops;
2087 int first_filter;
2088 int first_notrace;
2089 };
2090
2091 #ifdef CONFIG_FUNCTION_TRACER
2092 static char **
2093 ftrace_function_filter_re(char *buf, int len, int *count)
2094 {
2095 char *str, **re;
2096
2097 str = kstrndup(buf, len, GFP_KERNEL);
2098 if (!str)
2099 return NULL;
2100
2101 /*
2102 * The argv_split function takes white space
2103 * as a separator, so convert ',' into spaces.
2104 */
2105 strreplace(str, ',', ' ');
2106
2107 re = argv_split(GFP_KERNEL, str, count);
2108 kfree(str);
2109 return re;
2110 }
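
/*
 * Illustrative sketch (hypothetical helper, not in the original file) of
 * what ftrace_function_filter_re() produces: commas are rewritten to
 * spaces first, so mixed separators split into one argv entry per
 * function pattern.
 */
static void __maybe_unused example_split_ip_list(void)
{
	char buf[] = "sys_read,sys_write sys_open";
	char **re;
	int count;

	re = ftrace_function_filter_re(buf, strlen(buf), &count);
	/* re == { "sys_read", "sys_write", "sys_open" }, count == 3 */
	if (re)
		argv_free(re);
}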
2111
2112 static int ftrace_function_set_regexp(struct ftrace_ops *ops, int filter,
2113 int reset, char *re, int len)
2114 {
2115 int ret;
2116
2117 if (filter)
2118 ret = ftrace_set_filter(ops, re, len, reset);
2119 else
2120 ret = ftrace_set_notrace(ops, re, len, reset);
2121
2122 return ret;
2123 }
2124
2125 static int __ftrace_function_set_filter(int filter, char *buf, int len,
2126 struct function_filter_data *data)
2127 {
2128 int i, re_cnt, ret = -EINVAL;
2129 int *reset;
2130 char **re;
2131
2132 reset = filter ? &data->first_filter : &data->first_notrace;
2133
2134 /*
2135 * The 'ip' field could have multiple filters set, separated
2136 * either by space or comma. We first split the filter string and
2137 * apply each piece separately.
2138 */
2139 re = ftrace_function_filter_re(buf, len, &re_cnt);
2140 if (!re)
2141 return -EINVAL;
2142
2143 for (i = 0; i < re_cnt; i++) {
2144 ret = ftrace_function_set_regexp(data->ops, filter, *reset,
2145 re[i], strlen(re[i]));
2146 if (ret)
2147 break;
2148
2149 if (*reset)
2150 *reset = 0;
2151 }
2152
2153 argv_free(re);
2154 return ret;
2155 }
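
/*
 * Worked example (illustrative, not from the original source): for a
 * predicate like ip == "sys_read,sys_write" the loop above makes two
 * calls, ftrace_set_filter(ops, "sys_read", ...) with reset == 1 and
 * then ftrace_set_filter(ops, "sys_write", ...) with reset == 0, so the
 * first piece replaces any existing filter list and later pieces are
 * appended to it.  A '!=' predicate goes through ftrace_set_notrace()
 * in the same way.
 */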
2156
2157 static int ftrace_function_check_pred(struct filter_pred *pred, int leaf)
2158 {
2159 struct ftrace_event_field *field = pred->field;
2160
2161 if (leaf) {
2162 /*
2163 * Check the leaf predicate for function trace, verify:
2164 * - only '==' or '!=' is used
2165 * - the 'ip' field is used
2166 */
2167 if ((pred->op != OP_EQ) && (pred->op != OP_NE))
2168 return -EINVAL;
2169
2170 if (strcmp(field->name, "ip"))
2171 return -EINVAL;
2172 } else {
2173 /*
2174 * Check the non-leaf predicate for function trace, verify:
2175 * - only '||' is used
2176 */
2177 if (pred->op != OP_OR)
2178 return -EINVAL;
2179 }
2180
2181 return 0;
2182 }
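
/*
 * Illustrative examples (not from the original source) of filter
 * strings the checks above allow for the function trace event:
 *
 *	ip == "do_sys_open"
 *	ip == "sys_read" || ip != "sys_write"
 *
 * and of strings they reject:
 *
 *	ip == "sys_read" && ip == "sys_write"	(non-leaf op is not '||')
 *	ip > 0x1000				(leaf op is not '==' or '!=')
 */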
2183
2184 static int ftrace_function_set_filter_cb(enum move_type move,
2185 struct filter_pred *pred,
2186 int *err, void *data)
2187 {
2188 /* Check that the node is valid for function trace. */
2189 if ((move != MOVE_DOWN) ||
2190 (pred->left != FILTER_PRED_INVALID)) {
2191 *err = ftrace_function_check_pred(pred, 0);
2192 } else {
2193 *err = ftrace_function_check_pred(pred, 1);
2194 if (*err)
2195 return WALK_PRED_ABORT;
2196
2197 *err = __ftrace_function_set_filter(pred->op == OP_EQ,
2198 pred->regex.pattern,
2199 pred->regex.len,
2200 data);
2201 }
2202
2203 return (*err) ? WALK_PRED_ABORT : WALK_PRED_DEFAULT;
2204 }
2205
2206 static int ftrace_function_set_filter(struct perf_event *event,
2207 struct event_filter *filter)
2208 {
2209 struct function_filter_data data = {
2210 .first_filter = 1,
2211 .first_notrace = 1,
2212 .ops = &event->ftrace_ops,
2213 };
2214
2215 return walk_pred_tree(filter->preds, filter->root,
2216 ftrace_function_set_filter_cb, &data);
2217 }
2218 #else
2219 static int ftrace_function_set_filter(struct perf_event *event,
2220 struct event_filter *filter)
2221 {
2222 return -ENODEV;
2223 }
2224 #endif /* CONFIG_FUNCTION_TRACER */
2225
2226 int ftrace_profile_set_filter(struct perf_event *event, int event_id,
2227 char *filter_str)
2228 {
2229 int err;
2230 struct event_filter *filter;
2231 struct trace_event_call *call;
2232
2233 mutex_lock(&event_mutex);
2234
2235 call = event->tp_event;
2236
2237 err = -EINVAL;
2238 if (!call)
2239 goto out_unlock;
2240
2241 err = -EEXIST;
2242 if (event->filter)
2243 goto out_unlock;
2244
2245 err = create_filter(call, filter_str, false, &filter);
2246 if (err)
2247 goto free_filter;
2248
2249 if (ftrace_event_is_function(call))
2250 err = ftrace_function_set_filter(event, filter);
2251 else
2252 event->filter = filter;
2253
2254 free_filter:
2255 if (err || ftrace_event_is_function(call))
2256 __free_filter(filter);
2257
2258 out_unlock:
2259 mutex_unlock(&event_mutex);
2260
2261 return err;
2262 }
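
/*
 * Usage note (an assumption about the caller, not stated in this file):
 * this entry point is reached from the perf core when user space does
 *
 *	ioctl(perf_fd, PERF_EVENT_IOC_SET_FILTER, "common_pid != 0");
 *
 * on a tracepoint event.  For function trace events the string is
 * converted into ftrace_ops filter/notrace entries above; for every
 * other tracepoint the compiled filter stays in event->filter and is
 * evaluated with filter_match_preds() when a sample is taken.
 */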
2263
2264 #endif /* CONFIG_PERF_EVENTS */
2265
2266 #ifdef CONFIG_FTRACE_STARTUP_TEST
2267
2268 #include <linux/types.h>
2269 #include <linux/tracepoint.h>
2270
2271 #define CREATE_TRACE_POINTS
2272 #include "trace_events_filter_test.h"
2273
2274 #define DATA_REC(m, va, vb, vc, vd, ve, vf, vg, vh, nvisit) \
2275 { \
2276 .filter = FILTER, \
2277 .rec = { .a = va, .b = vb, .c = vc, .d = vd, \
2278 .e = ve, .f = vf, .g = vg, .h = vh }, \
2279 .match = m, \
2280 .not_visited = nvisit, \
2281 }
2282 #define YES 1
2283 #define NO 0
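
/*
 * Illustrative note (not in the original source): each DATA_REC() row
 * describes one test case - the record values a..h, whether
 * filter_match_preds() is expected to report a match, and a string of
 * field names whose leaf predicates must not be evaluated at all.  For
 * example, with the first FILTER below a record with a == 0 makes the
 * '&&' chain short-circuit, so "bcdefgh" must never be visited.
 */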
2284
2285 static struct test_filter_data_t {
2286 char *filter;
2287 struct trace_event_raw_ftrace_test_filter rec;
2288 int match;
2289 char *not_visited;
2290 } test_filter_data[] = {
2291 #define FILTER "a == 1 && b == 1 && c == 1 && d == 1 && " \
2292 "e == 1 && f == 1 && g == 1 && h == 1"
2293 DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, ""),
2294 DATA_REC(NO, 0, 1, 1, 1, 1, 1, 1, 1, "bcdefgh"),
2295 DATA_REC(NO, 1, 1, 1, 1, 1, 1, 1, 0, ""),
2296 #undef FILTER
2297 #define FILTER "a == 1 || b == 1 || c == 1 || d == 1 || " \
2298 "e == 1 || f == 1 || g == 1 || h == 1"
2299 DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 0, ""),
2300 DATA_REC(YES, 0, 0, 0, 0, 0, 0, 0, 1, ""),
2301 DATA_REC(YES, 1, 0, 0, 0, 0, 0, 0, 0, "bcdefgh"),
2302 #undef FILTER
2303 #define FILTER "(a == 1 || b == 1) && (c == 1 || d == 1) && " \
2304 "(e == 1 || f == 1) && (g == 1 || h == 1)"
2305 DATA_REC(NO, 0, 0, 1, 1, 1, 1, 1, 1, "dfh"),
2306 DATA_REC(YES, 0, 1, 0, 1, 0, 1, 0, 1, ""),
2307 DATA_REC(YES, 1, 0, 1, 0, 0, 1, 0, 1, "bd"),
2308 DATA_REC(NO, 1, 0, 1, 0, 0, 1, 0, 0, "bd"),
2309 #undef FILTER
2310 #define FILTER "(a == 1 && b == 1) || (c == 1 && d == 1) || " \
2311 "(e == 1 && f == 1) || (g == 1 && h == 1)"
2312 DATA_REC(YES, 1, 0, 1, 1, 1, 1, 1, 1, "efgh"),
2313 DATA_REC(YES, 0, 0, 0, 0, 0, 0, 1, 1, ""),
2314 DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 1, ""),
2315 #undef FILTER
2316 #define FILTER "(a == 1 && b == 1) && (c == 1 && d == 1) && " \
2317 "(e == 1 && f == 1) || (g == 1 && h == 1)"
2318 DATA_REC(YES, 1, 1, 1, 1, 1, 1, 0, 0, "gh"),
2319 DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 1, ""),
2320 DATA_REC(YES, 1, 1, 1, 1, 1, 0, 1, 1, ""),
2321 #undef FILTER
2322 #define FILTER "((a == 1 || b == 1) || (c == 1 || d == 1) || " \
2323 "(e == 1 || f == 1)) && (g == 1 || h == 1)"
2324 DATA_REC(YES, 1, 1, 1, 1, 1, 1, 0, 1, "bcdef"),
2325 DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 0, ""),
2326 DATA_REC(YES, 1, 1, 1, 1, 1, 0, 1, 1, "h"),
2327 #undef FILTER
2328 #define FILTER "((((((((a == 1) && (b == 1)) || (c == 1)) && (d == 1)) || " \
2329 "(e == 1)) && (f == 1)) || (g == 1)) && (h == 1))"
2330 DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, "ceg"),
2331 DATA_REC(NO, 0, 1, 0, 1, 0, 1, 0, 1, ""),
2332 DATA_REC(NO, 1, 0, 1, 0, 1, 0, 1, 0, ""),
2333 #undef FILTER
2334 #define FILTER "((((((((a == 1) || (b == 1)) && (c == 1)) || (d == 1)) && " \
2335 "(e == 1)) || (f == 1)) && (g == 1)) || (h == 1))"
2336 DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, "bdfh"),
2337 DATA_REC(YES, 0, 1, 0, 1, 0, 1, 0, 1, ""),
2338 DATA_REC(YES, 1, 0, 1, 0, 1, 0, 1, 0, "bdfh"),
2339 };
2340
2341 #undef DATA_REC
2342 #undef FILTER
2343 #undef YES
2344 #undef NO
2345
2346 #define DATA_CNT ARRAY_SIZE(test_filter_data)
2347
2348 static int test_pred_visited;
2349
2350 static int test_pred_visited_fn(struct filter_pred *pred, void *event)
2351 {
2352 struct ftrace_event_field *field = pred->field;
2353
2354 test_pred_visited = 1;
2355 printk(KERN_INFO "\npred visited %s\n", field->name);
2356 return 1;
2357 }
2358
2359 static int test_walk_pred_cb(enum move_type move, struct filter_pred *pred,
2360 int *err, void *data)
2361 {
2362 char *fields = data;
2363
2364 if ((move == MOVE_DOWN) &&
2365 (pred->left == FILTER_PRED_INVALID)) {
2366 struct ftrace_event_field *field = pred->field;
2367
2368 if (!field) {
2369 WARN(1, "all leaves should have a field defined");
2370 return WALK_PRED_DEFAULT;
2371 }
2372 if (!strchr(fields, *field->name))
2373 return WALK_PRED_DEFAULT;
2374
2375 WARN_ON(!pred->fn);
2376 pred->fn = test_pred_visited_fn;
2377 }
2378 return WALK_PRED_DEFAULT;
2379 }
2380
2381 static __init int ftrace_test_event_filter(void)
2382 {
2383 int i;
2384
2385 printk(KERN_INFO "Testing ftrace filter: ");
2386
2387 for (i = 0; i < DATA_CNT; i++) {
2388 struct event_filter *filter = NULL;
2389 struct test_filter_data_t *d = &test_filter_data[i];
2390 int err;
2391
2392 err = create_filter(&event_ftrace_test_filter, d->filter,
2393 false, &filter);
2394 if (err) {
2395 printk(KERN_INFO
2396 "Failed to get filter for '%s', err %d\n",
2397 d->filter, err);
2398 __free_filter(filter);
2399 break;
2400 }
2401
2402 /*
2403 * Disabling preemption is not really needed for the self
2404 * tests, but the RCU dereference checks will complain without it.
2405 */
2406 preempt_disable();
2407 if (*d->not_visited)
2408 walk_pred_tree(filter->preds, filter->root,
2409 test_walk_pred_cb,
2410 d->not_visited);
2411
2412 test_pred_visited = 0;
2413 err = filter_match_preds(filter, &d->rec);
2414 preempt_enable();
2415
2416 __free_filter(filter);
2417
2418 if (test_pred_visited) {
2419 printk(KERN_INFO
2420 "Failed, unwanted pred visited for filter %s\n",
2421 d->filter);
2422 break;
2423 }
2424
2425 if (err != d->match) {
2426 printk(KERN_INFO
2427 "Failed to match filter '%s', expected %d\n",
2428 d->filter, d->match);
2429 break;
2430 }
2431 }
2432
2433 if (i == DATA_CNT)
2434 printk(KERN_CONT "OK\n");
2435
2436 return 0;
2437 }
2438
2439 late_initcall(ftrace_test_event_filter);
2440
2441 #endif /* CONFIG_FTRACE_STARTUP_TEST */
2442