1 /*
2 * trace_events_filter - generic event filtering
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com>
19 */
20
21 #include <linux/module.h>
22 #include <linux/ctype.h>
23 #include <linux/mutex.h>
24 #include <linux/perf_event.h>
25 #include <linux/slab.h>
26
27 #include "trace.h"
28 #include "trace_output.h"
29
30 #define DEFAULT_SYS_FILTER_MESSAGE \
31 "### global filter ###\n" \
32 "# Use this to set filters for multiple events.\n" \
33 "# Only events with the given fields will be affected.\n" \
34 "# If no events are modified, an error message will be displayed here"
35
36 enum filter_op_ids
37 {
38 OP_OR,
39 OP_AND,
40 OP_GLOB,
41 OP_NE,
42 OP_EQ,
43 OP_LT,
44 OP_LE,
45 OP_GT,
46 OP_GE,
47 OP_BAND,
48 OP_NOT,
49 OP_NONE,
50 OP_OPEN_PAREN,
51 };
52
53 struct filter_op {
54 int id;
55 char *string;
56 int precedence;
57 };
58
59 /* Order must be the same as enum filter_op_ids above */
60 static struct filter_op filter_ops[] = {
61 { OP_OR, "||", 1 },
62 { OP_AND, "&&", 2 },
63 { OP_GLOB, "~", 4 },
64 { OP_NE, "!=", 4 },
65 { OP_EQ, "==", 4 },
66 { OP_LT, "<", 5 },
67 { OP_LE, "<=", 5 },
68 { OP_GT, ">", 5 },
69 { OP_GE, ">=", 5 },
70 { OP_BAND, "&", 6 },
71 { OP_NOT, "!", 6 },
72 { OP_NONE, "OP_NONE", 0 },
73 { OP_OPEN_PAREN, "(", 0 },
74 };
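/*
 * Illustrative examples of expressions built from these operators
 * (the field names are hypothetical):
 *   "prio < 100 && prev_state != 0"
 *   "comm ~ "bash*" || !(flags & 1)"
 */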
75
76 enum {
77 FILT_ERR_NONE,
78 FILT_ERR_INVALID_OP,
79 FILT_ERR_UNBALANCED_PAREN,
80 FILT_ERR_TOO_MANY_OPERANDS,
81 FILT_ERR_OPERAND_TOO_LONG,
82 FILT_ERR_FIELD_NOT_FOUND,
83 FILT_ERR_ILLEGAL_FIELD_OP,
84 FILT_ERR_ILLEGAL_INTVAL,
85 FILT_ERR_BAD_SUBSYS_FILTER,
86 FILT_ERR_TOO_MANY_PREDS,
87 FILT_ERR_MISSING_FIELD,
88 FILT_ERR_INVALID_FILTER,
89 FILT_ERR_IP_FIELD_ONLY,
90 FILT_ERR_ILLEGAL_NOT_OP,
91 };
92
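/* Must be kept in the same order as the FILT_ERR_* enum above */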
93 static char *err_text[] = {
94 "No error",
95 "Invalid operator",
96 "Unbalanced parens",
97 "Too many operands",
98 "Operand too long",
99 "Field not found",
100 "Illegal operation for field type",
101 "Illegal integer value",
102 "Couldn't find or set field in one of a subsystem's events",
103 "Too many terms in predicate expression",
104 "Missing field name and/or value",
105 "Meaningless filter expression",
106 "Only 'ip' field is supported for function trace",
107 "Illegal use of '!'",
108 };
109
110 struct opstack_op {
111 int op;
112 struct list_head list;
113 };
114
115 struct postfix_elt {
116 int op;
117 char *operand;
118 struct list_head list;
119 };
120
121 struct filter_parse_state {
122 struct filter_op *ops;
123 struct list_head opstack;
124 struct list_head postfix;
125 int lasterr;
126 int lasterr_pos;
127
128 struct {
129 char *string;
130 unsigned int cnt;
131 unsigned int tail;
132 } infix;
133
134 struct {
135 char string[MAX_FILTER_STR_VAL];
136 int pos;
137 unsigned int tail;
138 } operand;
139 };
140
141 struct pred_stack {
142 struct filter_pred **preds;
143 int index;
144 };
145
146 /* !!match normalizes the result to 0/1; comparing with !pred->not inverts it when the predicate is negated */
147 #define DEFINE_COMPARISON_PRED(type) \
148 static int filter_pred_##type(struct filter_pred *pred, void *event) \
149 { \
150 type *addr = (type *)(event + pred->offset); \
151 type val = (type)pred->val; \
152 int match = 0; \
153 \
154 switch (pred->op) { \
155 case OP_LT: \
156 match = (*addr < val); \
157 break; \
158 case OP_LE: \
159 match = (*addr <= val); \
160 break; \
161 case OP_GT: \
162 match = (*addr > val); \
163 break; \
164 case OP_GE: \
165 match = (*addr >= val); \
166 break; \
167 case OP_BAND: \
168 match = (*addr & val); \
169 break; \
170 default: \
171 break; \
172 } \
173 \
174 return !!match == !pred->not; \
175 }
176
177 #define DEFINE_EQUALITY_PRED(size) \
178 static int filter_pred_##size(struct filter_pred *pred, void *event) \
179 { \
180 u##size *addr = (u##size *)(event + pred->offset); \
181 u##size val = (u##size)pred->val; \
182 int match; \
183 \
184 match = (val == *addr) ^ pred->not; \
185 \
186 return match; \
187 }
188
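/*
 * Instantiate the comparison and equality predicates for every
 * supported field size (signed and unsigned 8/16/32/64 bit).
 */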
189 DEFINE_COMPARISON_PRED(s64);
190 DEFINE_COMPARISON_PRED(u64);
191 DEFINE_COMPARISON_PRED(s32);
192 DEFINE_COMPARISON_PRED(u32);
193 DEFINE_COMPARISON_PRED(s16);
194 DEFINE_COMPARISON_PRED(u16);
195 DEFINE_COMPARISON_PRED(s8);
196 DEFINE_COMPARISON_PRED(u8);
197
198 DEFINE_EQUALITY_PRED(64);
199 DEFINE_EQUALITY_PRED(32);
200 DEFINE_EQUALITY_PRED(16);
201 DEFINE_EQUALITY_PRED(8);
202
203 /* Filter predicate for fixed sized arrays of characters */
204 static int filter_pred_string(struct filter_pred *pred, void *event)
205 {
206 char *addr = (char *)(event + pred->offset);
207 int cmp, match;
208
209 cmp = pred->regex.match(addr, &pred->regex, pred->regex.field_len);
210
211 match = cmp ^ pred->not;
212
213 return match;
214 }
215
216 /* Filter predicate for char * pointers */
217 static int filter_pred_pchar(struct filter_pred *pred, void *event)
218 {
219 char **addr = (char **)(event + pred->offset);
220 int cmp, match;
221 int len = strlen(*addr) + 1; /* including trailing '\0' */
222
223 cmp = pred->regex.match(*addr, &pred->regex, len);
224
225 match = cmp ^ pred->not;
226
227 return match;
228 }
229
230 /*
231 * Filter predicate for dynamically sized arrays of characters.
232 * These are implemented through a list of strings at the end
233 * of the entry.
234 * Each of these strings has a field in the entry which contains
235 * its offset from the beginning of the entry (low 16 bits) and
236 * its length (high 16 bits).
237 * We first read this field, then add the offset to the address
238 * of the entry to get the address of the string.
239 */
240 static int filter_pred_strloc(struct filter_pred *pred, void *event)
241 {
242 u32 str_item = *(u32 *)(event + pred->offset);
243 int str_loc = str_item & 0xffff;
244 int str_len = str_item >> 16;
245 char *addr = (char *)(event + str_loc);
246 int cmp, match;
247
248 cmp = pred->regex.match(addr, &pred->regex, str_len);
249
250 match = cmp ^ pred->not;
251
252 return match;
253 }
254
255 /* Filter predicate for CPUs. */
256 static int filter_pred_cpu(struct filter_pred *pred, void *event)
257 {
258 int cpu, cmp;
259 int match = 0;
260
261 cpu = raw_smp_processor_id();
262 cmp = pred->val;
263
264 switch (pred->op) {
265 case OP_EQ:
266 match = cpu == cmp;
267 break;
268 case OP_LT:
269 match = cpu < cmp;
270 break;
271 case OP_LE:
272 match = cpu <= cmp;
273 break;
274 case OP_GT:
275 match = cpu > cmp;
276 break;
277 case OP_GE:
278 match = cpu >= cmp;
279 break;
280 default:
281 break;
282 }
283
284 return !!match == !pred->not;
285 }
286
287 /* Filter predicate for COMM. */
288 static int filter_pred_comm(struct filter_pred *pred, void *event)
289 {
290 int cmp, match;
291
292 cmp = pred->regex.match(current->comm, &pred->regex,
293 pred->regex.field_len);
294 match = cmp ^ pred->not;
295
296 return match;
297 }
298
299 static int filter_pred_none(struct filter_pred *pred, void *event)
300 {
301 return 0;
302 }
303
304 /*
305 * regex_match_foo - Basic regex callbacks
306 *
307 * @str: the string to be searched
308 * @r: the regex structure containing the pattern string
309 * @len: the length of the string to be searched (including '\0')
310 *
311 * Note:
312 * - @str might not be NULL-terminated if it's of type DYN_STRING
313 * or STATIC_STRING
314 */
315
316 static int regex_match_full(char *str, struct regex *r, int len)
317 {
318 if (strncmp(str, r->pattern, len) == 0)
319 return 1;
320 return 0;
321 }
322
323 static int regex_match_front(char *str, struct regex *r, int len)
324 {
325 if (len < r->len)
326 return 0;
327
328 if (strncmp(str, r->pattern, r->len) == 0)
329 return 1;
330 return 0;
331 }
332
333 static int regex_match_middle(char *str, struct regex *r, int len)
334 {
335 if (strnstr(str, r->pattern, len))
336 return 1;
337 return 0;
338 }
339
340 static int regex_match_end(char *str, struct regex *r, int len)
341 {
342 int strlen = len - 1;
343
344 if (strlen >= r->len &&
345 memcmp(str + strlen - r->len, r->pattern, r->len) == 0)
346 return 1;
347 return 0;
348 }
349
350 /**
351 * filter_parse_regex - parse a basic regex
352 * @buff: the raw regex
353 * @len: length of the regex
354 * @search: will point to the beginning of the string to compare
355 * @not: tell whether the match will have to be inverted
356 *
357 * The caller passes in a buffer containing a regex; this function
358 * sets *search to point to the search part of the buffer and
359 * returns the type of search it is (see enum regex_type).
360 * This does modify buff.
361 *
362 * Returns the enum regex_type.
363 * *search returns the pointer to use for comparison.
364 * *not returns 1 if buff started with a '!',
365 * 0 otherwise.
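 *
 * Examples:
 *   "foo"   returns MATCH_FULL
 *   "foo*"  returns MATCH_FRONT_ONLY
 *   "*foo"  returns MATCH_END_ONLY
 *   "*foo*" returns MATCH_MIDDLE_ONLY
 *   "!foo"  returns MATCH_FULL with *not set to 1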
366 */
367 enum regex_type filter_parse_regex(char *buff, int len, char **search, int *not)
368 {
369 int type = MATCH_FULL;
370 int i;
371
372 if (buff[0] == '!') {
373 *not = 1;
374 buff++;
375 len--;
376 } else
377 *not = 0;
378
379 *search = buff;
380
381 for (i = 0; i < len; i++) {
382 if (buff[i] == '*') {
383 if (!i) {
384 *search = buff + 1;
385 type = MATCH_END_ONLY;
386 } else {
387 if (type == MATCH_END_ONLY)
388 type = MATCH_MIDDLE_ONLY;
389 else
390 type = MATCH_FRONT_ONLY;
391 buff[i] = 0;
392 break;
393 }
394 }
395 }
396
397 return type;
398 }
399
400 static void filter_build_regex(struct filter_pred *pred)
401 {
402 struct regex *r = &pred->regex;
403 char *search;
404 enum regex_type type = MATCH_FULL;
405 int not = 0;
406
407 if (pred->op == OP_GLOB) {
408 type = filter_parse_regex(r->pattern, r->len, &search, &not);
409 r->len = strlen(search);
410 memmove(r->pattern, search, r->len+1);
411 }
412
413 switch (type) {
414 case MATCH_FULL:
415 r->match = regex_match_full;
416 break;
417 case MATCH_FRONT_ONLY:
418 r->match = regex_match_front;
419 break;
420 case MATCH_MIDDLE_ONLY:
421 r->match = regex_match_middle;
422 break;
423 case MATCH_END_ONLY:
424 r->match = regex_match_end;
425 break;
426 }
427
428 pred->not ^= not;
429 }
430
431 enum move_type {
432 MOVE_DOWN,
433 MOVE_UP_FROM_LEFT,
434 MOVE_UP_FROM_RIGHT
435 };
436
437 static struct filter_pred *
438 get_pred_parent(struct filter_pred *pred, struct filter_pred *preds,
439 int index, enum move_type *move)
440 {
441 if (pred->parent & FILTER_PRED_IS_RIGHT)
442 *move = MOVE_UP_FROM_RIGHT;
443 else
444 *move = MOVE_UP_FROM_LEFT;
445 pred = &preds[pred->parent & ~FILTER_PRED_IS_RIGHT];
446
447 return pred;
448 }
449
450 enum walk_return {
451 WALK_PRED_ABORT,
452 WALK_PRED_PARENT,
453 WALK_PRED_DEFAULT,
454 };
455
456 typedef int (*filter_pred_walkcb_t) (enum move_type move,
457 struct filter_pred *pred,
458 int *err, void *data);
459
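/*
 * Iteratively walk the predicate tree without recursion: descend into
 * the left child first, climb back up through the parent links, then
 * descend into the right child.  At each visit the callback decides
 * whether to continue normally, jump straight to the parent, or abort
 * the walk and return its error.
 */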
460 static int walk_pred_tree(struct filter_pred *preds,
461 struct filter_pred *root,
462 filter_pred_walkcb_t cb, void *data)
463 {
464 struct filter_pred *pred = root;
465 enum move_type move = MOVE_DOWN;
466 int done = 0;
467
468 if (!preds)
469 return -EINVAL;
470
471 do {
472 int err = 0, ret;
473
474 ret = cb(move, pred, &err, data);
475 if (ret == WALK_PRED_ABORT)
476 return err;
477 if (ret == WALK_PRED_PARENT)
478 goto get_parent;
479
480 switch (move) {
481 case MOVE_DOWN:
482 if (pred->left != FILTER_PRED_INVALID) {
483 pred = &preds[pred->left];
484 continue;
485 }
486 goto get_parent;
487 case MOVE_UP_FROM_LEFT:
488 pred = &preds[pred->right];
489 move = MOVE_DOWN;
490 continue;
491 case MOVE_UP_FROM_RIGHT:
492 get_parent:
493 if (pred == root)
494 break;
495 pred = get_pred_parent(pred, preds,
496 pred->parent,
497 &move);
498 continue;
499 }
500 done = 1;
501 } while (!done);
502
503 /* We are fine. */
504 return 0;
505 }
506
507 /*
508 * A series of ANDs or ORs were found together. Instead of
509 * climbing up and down the tree branches, an array of the
510 * ops was made in the order of checks. We can just move across
511 * the array and short circuit if needed.
512 */
513 static int process_ops(struct filter_pred *preds,
514 struct filter_pred *op, void *rec)
515 {
516 struct filter_pred *pred;
517 int match = 0;
518 int type;
519 int i;
520
521 /*
522 * Micro-optimization: We set type to true if op
523 * is an OR and false otherwise (AND). Then we
524 * just need to test if the match is equal to
525 * the type, and if it is, we can short circuit the
526 * rest of the checks:
527 *
528 * if ((match && op->op == OP_OR) ||
529 * (!match && op->op == OP_AND))
530 * return match;
531 */
532 type = op->op == OP_OR;
533
534 for (i = 0; i < op->val; i++) {
535 pred = &preds[op->ops[i]];
536 if (!WARN_ON_ONCE(!pred->fn))
537 match = pred->fn(pred, rec);
538 if (!!match == type)
539 break;
540 }
541 /* !!match normalizes the result to 0/1; comparing with !op->not inverts it when the op is negated */
542 return !!match == !op->not;
543 }
544
545 struct filter_match_preds_data {
546 struct filter_pred *preds;
547 int match;
548 void *rec;
549 };
550
551 static int filter_match_preds_cb(enum move_type move, struct filter_pred *pred,
552 int *err, void *data)
553 {
554 struct filter_match_preds_data *d = data;
555
556 *err = 0;
557 switch (move) {
558 case MOVE_DOWN:
559 /* only AND and OR have children */
560 if (pred->left != FILTER_PRED_INVALID) {
561 /* If ops is set, then it was folded. */
562 if (!pred->ops)
563 return WALK_PRED_DEFAULT;
564 /* We can treat folded ops as a leaf node */
565 d->match = process_ops(d->preds, pred, d->rec);
566 } else {
567 if (!WARN_ON_ONCE(!pred->fn))
568 d->match = pred->fn(pred, d->rec);
569 }
570
571 return WALK_PRED_PARENT;
572 case MOVE_UP_FROM_LEFT:
573 /*
574 * Check for short circuits.
575 *
576 * Optimization: !!match == (pred->op == OP_OR)
577 * is the same as:
578 * if ((match && pred->op == OP_OR) ||
579 * (!match && pred->op == OP_AND))
580 */
581 if (!!d->match == (pred->op == OP_OR))
582 return WALK_PRED_PARENT;
583 break;
584 case MOVE_UP_FROM_RIGHT:
585 break;
586 }
587
588 return WALK_PRED_DEFAULT;
589 }
590
591 /* return 1 if event matches, 0 otherwise (discard) */
592 int filter_match_preds(struct event_filter *filter, void *rec)
593 {
594 struct filter_pred *preds;
595 struct filter_pred *root;
596 struct filter_match_preds_data data = {
597 /* match is currently meaningless */
598 .match = -1,
599 .rec = rec,
600 };
601 int n_preds, ret;
602
603 /* no filter is considered a match */
604 if (!filter)
605 return 1;
606
607 n_preds = filter->n_preds;
608 if (!n_preds)
609 return 1;
610
611 /*
612 * n_preds, root and filter->preds are protected by preemption being disabled.
613 */
614 root = rcu_dereference_sched(filter->root);
615 if (!root)
616 return 1;
617
618 data.preds = preds = rcu_dereference_sched(filter->preds);
619 ret = walk_pred_tree(preds, root, filter_match_preds_cb, &data);
620 WARN_ON(ret);
621 return data.match;
622 }
623 EXPORT_SYMBOL_GPL(filter_match_preds);
624
625 static void parse_error(struct filter_parse_state *ps, int err, int pos)
626 {
627 ps->lasterr = err;
628 ps->lasterr_pos = pos;
629 }
630
631 static void remove_filter_string(struct event_filter *filter)
632 {
633 if (!filter)
634 return;
635
636 kfree(filter->filter_string);
637 filter->filter_string = NULL;
638 }
639
640 static int replace_filter_string(struct event_filter *filter,
641 char *filter_string)
642 {
643 kfree(filter->filter_string);
644 filter->filter_string = kstrdup(filter_string, GFP_KERNEL);
645 if (!filter->filter_string)
646 return -ENOMEM;
647
648 return 0;
649 }
650
651 static int append_filter_string(struct event_filter *filter,
652 char *string)
653 {
654 int newlen;
655 char *new_filter_string;
656
657 BUG_ON(!filter->filter_string);
658 newlen = strlen(filter->filter_string) + strlen(string) + 1;
659 new_filter_string = kmalloc(newlen, GFP_KERNEL);
660 if (!new_filter_string)
661 return -ENOMEM;
662
663 strcpy(new_filter_string, filter->filter_string);
664 strcat(new_filter_string, string);
665 kfree(filter->filter_string);
666 filter->filter_string = new_filter_string;
667
668 return 0;
669 }
670
671 static void append_filter_err(struct filter_parse_state *ps,
672 struct event_filter *filter)
673 {
674 int pos = ps->lasterr_pos;
675 char *buf, *pbuf;
676
677 buf = (char *)__get_free_page(GFP_TEMPORARY);
678 if (!buf)
679 return;
680
681 append_filter_string(filter, "\n");
682 memset(buf, ' ', PAGE_SIZE);
683 if (pos > PAGE_SIZE - 128)
684 pos = 0;
685 buf[pos] = '^';
686 pbuf = &buf[pos] + 1;
687
688 sprintf(pbuf, "\nparse_error: %s\n", err_text[ps->lasterr]);
689 append_filter_string(filter, buf);
690 free_page((unsigned long) buf);
691 }
692
693 static inline struct event_filter *event_filter(struct trace_event_file *file)
694 {
695 if (file->event_call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
696 return file->event_call->filter;
697 else
698 return file->filter;
699 }
700
701 /* caller must hold event_mutex */
702 void print_event_filter(struct trace_event_file *file, struct trace_seq *s)
703 {
704 struct event_filter *filter = event_filter(file);
705
706 if (filter && filter->filter_string)
707 trace_seq_printf(s, "%s\n", filter->filter_string);
708 else
709 trace_seq_puts(s, "none\n");
710 }
711
712 void print_subsystem_event_filter(struct event_subsystem *system,
713 struct trace_seq *s)
714 {
715 struct event_filter *filter;
716
717 mutex_lock(&event_mutex);
718 filter = system->filter;
719 if (filter && filter->filter_string)
720 trace_seq_printf(s, "%s\n", filter->filter_string);
721 else
722 trace_seq_puts(s, DEFAULT_SYS_FILTER_MESSAGE "\n");
723 mutex_unlock(&event_mutex);
724 }
725
726 static int __alloc_pred_stack(struct pred_stack *stack, int n_preds)
727 {
728 stack->preds = kcalloc(n_preds + 1, sizeof(*stack->preds), GFP_KERNEL);
729 if (!stack->preds)
730 return -ENOMEM;
731 stack->index = n_preds;
732 return 0;
733 }
734
735 static void __free_pred_stack(struct pred_stack *stack)
736 {
737 kfree(stack->preds);
738 stack->index = 0;
739 }
740
741 static int __push_pred_stack(struct pred_stack *stack,
742 struct filter_pred *pred)
743 {
744 int index = stack->index;
745
746 if (WARN_ON(index == 0))
747 return -ENOSPC;
748
749 stack->preds[--index] = pred;
750 stack->index = index;
751 return 0;
752 }
753
754 static struct filter_pred *
755 __pop_pred_stack(struct pred_stack *stack)
756 {
757 struct filter_pred *pred;
758 int index = stack->index;
759
760 pred = stack->preds[index++];
761 if (!pred)
762 return NULL;
763
764 stack->index = index;
765 return pred;
766 }
767
768 static int filter_set_pred(struct event_filter *filter,
769 int idx,
770 struct pred_stack *stack,
771 struct filter_pred *src)
772 {
773 struct filter_pred *dest = &filter->preds[idx];
774 struct filter_pred *left;
775 struct filter_pred *right;
776
777 *dest = *src;
778 dest->index = idx;
779
780 if (dest->op == OP_OR || dest->op == OP_AND) {
781 right = __pop_pred_stack(stack);
782 left = __pop_pred_stack(stack);
783 if (!left || !right)
784 return -EINVAL;
785 /*
786 * If both children can be folded
787 * and they are the same op as this op or a leaf,
788 * then this op can be folded.
789 */
790 if (left->index & FILTER_PRED_FOLD &&
791 ((left->op == dest->op && !left->not) ||
792 left->left == FILTER_PRED_INVALID) &&
793 right->index & FILTER_PRED_FOLD &&
794 ((right->op == dest->op && !right->not) ||
795 right->left == FILTER_PRED_INVALID))
796 dest->index |= FILTER_PRED_FOLD;
797
798 dest->left = left->index & ~FILTER_PRED_FOLD;
799 dest->right = right->index & ~FILTER_PRED_FOLD;
800 left->parent = dest->index & ~FILTER_PRED_FOLD;
801 right->parent = dest->index | FILTER_PRED_IS_RIGHT;
802 } else {
803 /*
804 * Make dest->left invalid to be used as a quick
805 * way to know this is a leaf node.
806 */
807 dest->left = FILTER_PRED_INVALID;
808
809 /* All leafs allow folding the parent ops. */
810 dest->index |= FILTER_PRED_FOLD;
811 }
812
813 return __push_pred_stack(stack, dest);
814 }
815
816 static void __free_preds(struct event_filter *filter)
817 {
818 int i;
819
820 if (filter->preds) {
821 for (i = 0; i < filter->n_preds; i++)
822 kfree(filter->preds[i].ops);
823 kfree(filter->preds);
824 filter->preds = NULL;
825 }
826 filter->a_preds = 0;
827 filter->n_preds = 0;
828 }
829
830 static void filter_disable(struct trace_event_file *file)
831 {
832 struct trace_event_call *call = file->event_call;
833
834 if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
835 call->flags &= ~TRACE_EVENT_FL_FILTERED;
836 else
837 file->flags &= ~EVENT_FILE_FL_FILTERED;
838 }
839
840 static void __free_filter(struct event_filter *filter)
841 {
842 if (!filter)
843 return;
844
845 __free_preds(filter);
846 kfree(filter->filter_string);
847 kfree(filter);
848 }
849
850 void free_event_filter(struct event_filter *filter)
851 {
852 __free_filter(filter);
853 }
854
855 static struct event_filter *__alloc_filter(void)
856 {
857 struct event_filter *filter;
858
859 filter = kzalloc(sizeof(*filter), GFP_KERNEL);
860 return filter;
861 }
862
863 static int __alloc_preds(struct event_filter *filter, int n_preds)
864 {
865 struct filter_pred *pred;
866 int i;
867
868 if (filter->preds)
869 __free_preds(filter);
870
871 filter->preds = kcalloc(n_preds, sizeof(*filter->preds), GFP_KERNEL);
872
873 if (!filter->preds)
874 return -ENOMEM;
875
876 filter->a_preds = n_preds;
877 filter->n_preds = 0;
878
879 for (i = 0; i < n_preds; i++) {
880 pred = &filter->preds[i];
881 pred->fn = filter_pred_none;
882 }
883
884 return 0;
885 }
886
887 static inline void __remove_filter(struct trace_event_file *file)
888 {
889 struct trace_event_call *call = file->event_call;
890
891 filter_disable(file);
892 if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
893 remove_filter_string(call->filter);
894 else
895 remove_filter_string(file->filter);
896 }
897
898 static void filter_free_subsystem_preds(struct trace_subsystem_dir *dir,
899 struct trace_array *tr)
900 {
901 struct trace_event_file *file;
902
903 list_for_each_entry(file, &tr->events, list) {
904 if (file->system != dir)
905 continue;
906 __remove_filter(file);
907 }
908 }
909
910 static inline void __free_subsystem_filter(struct trace_event_file *file)
911 {
912 struct trace_event_call *call = file->event_call;
913
914 if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) {
915 __free_filter(call->filter);
916 call->filter = NULL;
917 } else {
918 __free_filter(file->filter);
919 file->filter = NULL;
920 }
921 }
922
923 static void filter_free_subsystem_filters(struct trace_subsystem_dir *dir,
924 struct trace_array *tr)
925 {
926 struct trace_event_file *file;
927
928 list_for_each_entry(file, &tr->events, list) {
929 if (file->system != dir)
930 continue;
931 __free_subsystem_filter(file);
932 }
933 }
934
935 static int filter_add_pred(struct filter_parse_state *ps,
936 struct event_filter *filter,
937 struct filter_pred *pred,
938 struct pred_stack *stack)
939 {
940 int err;
941
942 if (WARN_ON(filter->n_preds == filter->a_preds)) {
943 parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
944 return -ENOSPC;
945 }
946
947 err = filter_set_pred(filter, filter->n_preds, stack, pred);
948 if (err)
949 return err;
950
951 filter->n_preds++;
952
953 return 0;
954 }
955
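/*
 * Map a field's type string onto a filter type, e.g. (illustrative
 * type strings):
 *   "__data_loc char[] msg" -> FILTER_DYN_STRING
 *   "char comm[16]"         -> FILTER_STATIC_STRING
 *   anything else           -> FILTER_OTHER
 */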
956 int filter_assign_type(const char *type)
957 {
958 if (strstr(type, "__data_loc") && strstr(type, "char"))
959 return FILTER_DYN_STRING;
960
961 if (strchr(type, '[') && strstr(type, "char"))
962 return FILTER_STATIC_STRING;
963
964 return FILTER_OTHER;
965 }
966
967 static bool is_function_field(struct ftrace_event_field *field)
968 {
969 return field->filter_type == FILTER_TRACE_FN;
970 }
971
972 static bool is_string_field(struct ftrace_event_field *field)
973 {
974 return field->filter_type == FILTER_DYN_STRING ||
975 field->filter_type == FILTER_STATIC_STRING ||
976 field->filter_type == FILTER_PTR_STRING;
977 }
978
979 static bool is_legal_op(struct ftrace_event_field *field, int op)
980 {
981 if (is_string_field(field) &&
982 (op != OP_EQ && op != OP_NE && op != OP_GLOB))
983 return false;
984 if (!is_string_field(field) && op == OP_GLOB)
985 return false;
986
987 return true;
988 }
989
990 static filter_pred_fn_t select_comparison_fn(int op, int field_size,
991 int field_is_signed)
992 {
993 filter_pred_fn_t fn = NULL;
994
995 switch (field_size) {
996 case 8:
997 if (op == OP_EQ || op == OP_NE)
998 fn = filter_pred_64;
999 else if (field_is_signed)
1000 fn = filter_pred_s64;
1001 else
1002 fn = filter_pred_u64;
1003 break;
1004 case 4:
1005 if (op == OP_EQ || op == OP_NE)
1006 fn = filter_pred_32;
1007 else if (field_is_signed)
1008 fn = filter_pred_s32;
1009 else
1010 fn = filter_pred_u32;
1011 break;
1012 case 2:
1013 if (op == OP_EQ || op == OP_NE)
1014 fn = filter_pred_16;
1015 else if (field_is_signed)
1016 fn = filter_pred_s16;
1017 else
1018 fn = filter_pred_u16;
1019 break;
1020 case 1:
1021 if (op == OP_EQ || op == OP_NE)
1022 fn = filter_pred_8;
1023 else if (field_is_signed)
1024 fn = filter_pred_s8;
1025 else
1026 fn = filter_pred_u8;
1027 break;
1028 }
1029
1030 return fn;
1031 }
1032
1033 static int init_pred(struct filter_parse_state *ps,
1034 struct ftrace_event_field *field,
1035 struct filter_pred *pred)
1036
1037 {
1038 filter_pred_fn_t fn = filter_pred_none;
1039 unsigned long long val;
1040 int ret;
1041
1042 pred->offset = field->offset;
1043
1044 if (!is_legal_op(field, pred->op)) {
1045 parse_error(ps, FILT_ERR_ILLEGAL_FIELD_OP, 0);
1046 return -EINVAL;
1047 }
1048
1049 if (field->filter_type == FILTER_COMM) {
1050 filter_build_regex(pred);
1051 fn = filter_pred_comm;
1052 pred->regex.field_len = TASK_COMM_LEN;
1053 } else if (is_string_field(field)) {
1054 filter_build_regex(pred);
1055
1056 if (field->filter_type == FILTER_STATIC_STRING) {
1057 fn = filter_pred_string;
1058 pred->regex.field_len = field->size;
1059 } else if (field->filter_type == FILTER_DYN_STRING)
1060 fn = filter_pred_strloc;
1061 else
1062 fn = filter_pred_pchar;
1063 } else if (is_function_field(field)) {
1064 if (strcmp(field->name, "ip")) {
1065 parse_error(ps, FILT_ERR_IP_FIELD_ONLY, 0);
1066 return -EINVAL;
1067 }
1068 } else {
1069 if (field->is_signed)
1070 ret = kstrtoll(pred->regex.pattern, 0, &val);
1071 else
1072 ret = kstrtoull(pred->regex.pattern, 0, &val);
1073 if (ret) {
1074 parse_error(ps, FILT_ERR_ILLEGAL_INTVAL, 0);
1075 return -EINVAL;
1076 }
1077 pred->val = val;
1078
1079 if (field->filter_type == FILTER_CPU)
1080 fn = filter_pred_cpu;
1081 else
1082 fn = select_comparison_fn(pred->op, field->size,
1083 field->is_signed);
1084 if (!fn) {
1085 parse_error(ps, FILT_ERR_INVALID_OP, 0);
1086 return -EINVAL;
1087 }
1088 }
1089
1090 if (pred->op == OP_NE)
1091 pred->not ^= 1;
1092
1093 pred->fn = fn;
1094 return 0;
1095 }
1096
1097 static void parse_init(struct filter_parse_state *ps,
1098 struct filter_op *ops,
1099 char *infix_string)
1100 {
1101 memset(ps, '\0', sizeof(*ps));
1102
1103 ps->infix.string = infix_string;
1104 ps->infix.cnt = strlen(infix_string);
1105 ps->ops = ops;
1106
1107 INIT_LIST_HEAD(&ps->opstack);
1108 INIT_LIST_HEAD(&ps->postfix);
1109 }
1110
1111 static char infix_next(struct filter_parse_state *ps)
1112 {
1113 if (!ps->infix.cnt)
1114 return 0;
1115
1116 ps->infix.cnt--;
1117
1118 return ps->infix.string[ps->infix.tail++];
1119 }
1120
1121 static char infix_peek(struct filter_parse_state *ps)
1122 {
1123 if (ps->infix.tail == strlen(ps->infix.string))
1124 return 0;
1125
1126 return ps->infix.string[ps->infix.tail];
1127 }
1128
1129 static void infix_advance(struct filter_parse_state *ps)
1130 {
1131 if (!ps->infix.cnt)
1132 return;
1133
1134 ps->infix.cnt--;
1135 ps->infix.tail++;
1136 }
1137
1138 static inline int is_precedence_lower(struct filter_parse_state *ps,
1139 int a, int b)
1140 {
1141 return ps->ops[a].precedence < ps->ops[b].precedence;
1142 }
1143
1144 static inline int is_op_char(struct filter_parse_state *ps, char c)
1145 {
1146 int i;
1147
1148 for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
1149 if (ps->ops[i].string[0] == c)
1150 return 1;
1151 }
1152
1153 return 0;
1154 }
1155
1156 static int infix_get_op(struct filter_parse_state *ps, char firstc)
1157 {
1158 char nextc = infix_peek(ps);
1159 char opstr[3];
1160 int i;
1161
1162 opstr[0] = firstc;
1163 opstr[1] = nextc;
1164 opstr[2] = '\0';
1165
1166 for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
1167 if (!strcmp(opstr, ps->ops[i].string)) {
1168 infix_advance(ps);
1169 return ps->ops[i].id;
1170 }
1171 }
1172
1173 opstr[1] = '\0';
1174
1175 for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
1176 if (!strcmp(opstr, ps->ops[i].string))
1177 return ps->ops[i].id;
1178 }
1179
1180 return OP_NONE;
1181 }
1182
1183 static inline void clear_operand_string(struct filter_parse_state *ps)
1184 {
1185 memset(ps->operand.string, '\0', MAX_FILTER_STR_VAL);
1186 ps->operand.tail = 0;
1187 }
1188
1189 static inline int append_operand_char(struct filter_parse_state *ps, char c)
1190 {
1191 if (ps->operand.tail == MAX_FILTER_STR_VAL - 1)
1192 return -EINVAL;
1193
1194 ps->operand.string[ps->operand.tail++] = c;
1195
1196 return 0;
1197 }
1198
1199 static int filter_opstack_push(struct filter_parse_state *ps, int op)
1200 {
1201 struct opstack_op *opstack_op;
1202
1203 opstack_op = kmalloc(sizeof(*opstack_op), GFP_KERNEL);
1204 if (!opstack_op)
1205 return -ENOMEM;
1206
1207 opstack_op->op = op;
1208 list_add(&opstack_op->list, &ps->opstack);
1209
1210 return 0;
1211 }
1212
1213 static int filter_opstack_empty(struct filter_parse_state *ps)
1214 {
1215 return list_empty(&ps->opstack);
1216 }
1217
1218 static int filter_opstack_top(struct filter_parse_state *ps)
1219 {
1220 struct opstack_op *opstack_op;
1221
1222 if (filter_opstack_empty(ps))
1223 return OP_NONE;
1224
1225 opstack_op = list_first_entry(&ps->opstack, struct opstack_op, list);
1226
1227 return opstack_op->op;
1228 }
1229
1230 static int filter_opstack_pop(struct filter_parse_state *ps)
1231 {
1232 struct opstack_op *opstack_op;
1233 int op;
1234
1235 if (filter_opstack_empty(ps))
1236 return OP_NONE;
1237
1238 opstack_op = list_first_entry(&ps->opstack, struct opstack_op, list);
1239 op = opstack_op->op;
1240 list_del(&opstack_op->list);
1241
1242 kfree(opstack_op);
1243
1244 return op;
1245 }
1246
1247 static void filter_opstack_clear(struct filter_parse_state *ps)
1248 {
1249 while (!filter_opstack_empty(ps))
1250 filter_opstack_pop(ps);
1251 }
1252
1253 static char *curr_operand(struct filter_parse_state *ps)
1254 {
1255 return ps->operand.string;
1256 }
1257
1258 static int postfix_append_operand(struct filter_parse_state *ps, char *operand)
1259 {
1260 struct postfix_elt *elt;
1261
1262 elt = kmalloc(sizeof(*elt), GFP_KERNEL);
1263 if (!elt)
1264 return -ENOMEM;
1265
1266 elt->op = OP_NONE;
1267 elt->operand = kstrdup(operand, GFP_KERNEL);
1268 if (!elt->operand) {
1269 kfree(elt);
1270 return -ENOMEM;
1271 }
1272
1273 list_add_tail(&elt->list, &ps->postfix);
1274
1275 return 0;
1276 }
1277
1278 static int postfix_append_op(struct filter_parse_state *ps, int op)
1279 {
1280 struct postfix_elt *elt;
1281
1282 elt = kmalloc(sizeof(*elt), GFP_KERNEL);
1283 if (!elt)
1284 return -ENOMEM;
1285
1286 elt->op = op;
1287 elt->operand = NULL;
1288
1289 list_add_tail(&elt->list, &ps->postfix);
1290
1291 return 0;
1292 }
1293
1294 static void postfix_clear(struct filter_parse_state *ps)
1295 {
1296 struct postfix_elt *elt;
1297
1298 while (!list_empty(&ps->postfix)) {
1299 elt = list_first_entry(&ps->postfix, struct postfix_elt, list);
1300 list_del(&elt->list);
1301 kfree(elt->operand);
1302 kfree(elt);
1303 }
1304 }
1305
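/*
 * Convert the infix filter string into a postfix (RPN) list of
 * operands and operators using an operator stack, shunting-yard
 * style.  For example (illustrative field names):
 *   "pid == 1 && prio > 2"  becomes  pid 1 == prio 2 > &&
 */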
1306 static int filter_parse(struct filter_parse_state *ps)
1307 {
1308 int in_string = 0;
1309 int op, top_op;
1310 char ch;
1311
1312 while ((ch = infix_next(ps))) {
1313 if (ch == '"') {
1314 in_string ^= 1;
1315 continue;
1316 }
1317
1318 if (in_string)
1319 goto parse_operand;
1320
1321 if (isspace(ch))
1322 continue;
1323
1324 if (is_op_char(ps, ch)) {
1325 op = infix_get_op(ps, ch);
1326 if (op == OP_NONE) {
1327 parse_error(ps, FILT_ERR_INVALID_OP, 0);
1328 return -EINVAL;
1329 }
1330
1331 if (strlen(curr_operand(ps))) {
1332 postfix_append_operand(ps, curr_operand(ps));
1333 clear_operand_string(ps);
1334 }
1335
1336 while (!filter_opstack_empty(ps)) {
1337 top_op = filter_opstack_top(ps);
1338 if (!is_precedence_lower(ps, top_op, op)) {
1339 top_op = filter_opstack_pop(ps);
1340 postfix_append_op(ps, top_op);
1341 continue;
1342 }
1343 break;
1344 }
1345
1346 filter_opstack_push(ps, op);
1347 continue;
1348 }
1349
1350 if (ch == '(') {
1351 filter_opstack_push(ps, OP_OPEN_PAREN);
1352 continue;
1353 }
1354
1355 if (ch == ')') {
1356 if (strlen(curr_operand(ps))) {
1357 postfix_append_operand(ps, curr_operand(ps));
1358 clear_operand_string(ps);
1359 }
1360
1361 top_op = filter_opstack_pop(ps);
1362 while (top_op != OP_NONE) {
1363 if (top_op == OP_OPEN_PAREN)
1364 break;
1365 postfix_append_op(ps, top_op);
1366 top_op = filter_opstack_pop(ps);
1367 }
1368 if (top_op == OP_NONE) {
1369 parse_error(ps, FILT_ERR_UNBALANCED_PAREN, 0);
1370 return -EINVAL;
1371 }
1372 continue;
1373 }
1374 parse_operand:
1375 if (append_operand_char(ps, ch)) {
1376 parse_error(ps, FILT_ERR_OPERAND_TOO_LONG, 0);
1377 return -EINVAL;
1378 }
1379 }
1380
1381 if (strlen(curr_operand(ps)))
1382 postfix_append_operand(ps, curr_operand(ps));
1383
1384 while (!filter_opstack_empty(ps)) {
1385 top_op = filter_opstack_pop(ps);
1386 if (top_op == OP_NONE)
1387 break;
1388 if (top_op == OP_OPEN_PAREN) {
1389 parse_error(ps, FILT_ERR_UNBALANCED_PAREN, 0);
1390 return -EINVAL;
1391 }
1392 postfix_append_op(ps, top_op);
1393 }
1394
1395 return 0;
1396 }
1397
1398 static struct filter_pred *create_pred(struct filter_parse_state *ps,
1399 struct trace_event_call *call,
1400 int op, char *operand1, char *operand2)
1401 {
1402 struct ftrace_event_field *field;
1403 static struct filter_pred pred;
1404
1405 memset(&pred, 0, sizeof(pred));
1406 pred.op = op;
1407
1408 if (op == OP_AND || op == OP_OR)
1409 return &pred;
1410
1411 if (!operand1 || !operand2) {
1412 parse_error(ps, FILT_ERR_MISSING_FIELD, 0);
1413 return NULL;
1414 }
1415
1416 field = trace_find_event_field(call, operand1);
1417 if (!field) {
1418 parse_error(ps, FILT_ERR_FIELD_NOT_FOUND, 0);
1419 return NULL;
1420 }
1421
1422 strcpy(pred.regex.pattern, operand2);
1423 pred.regex.len = strlen(pred.regex.pattern);
1424 pred.field = field;
1425 return init_pred(ps, field, &pred) ? NULL : &pred;
1426 }
1427
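/*
 * Sanity-check the postfix expression by simulating its evaluation:
 * each operand pushes one value, each binary operator nets minus one,
 * and OP_NOT is neutral.  A valid filter must end with exactly one
 * value and must have more normal predicates than logical AND/OR ops.
 */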
1428 static int check_preds(struct filter_parse_state *ps)
1429 {
1430 int n_normal_preds = 0, n_logical_preds = 0;
1431 struct postfix_elt *elt;
1432 int cnt = 0;
1433
1434 list_for_each_entry(elt, &ps->postfix, list) {
1435 if (elt->op == OP_NONE) {
1436 cnt++;
1437 continue;
1438 }
1439
1440 if (elt->op == OP_AND || elt->op == OP_OR) {
1441 n_logical_preds++;
1442 cnt--;
1443 continue;
1444 }
1445 if (elt->op != OP_NOT)
1446 cnt--;
1447 n_normal_preds++;
1448 /* all ops should have operands */
1449 if (cnt < 0)
1450 break;
1451 }
1452
1453 if (cnt != 1 || !n_normal_preds || n_logical_preds >= n_normal_preds) {
1454 parse_error(ps, FILT_ERR_INVALID_FILTER, 0);
1455 return -EINVAL;
1456 }
1457
1458 return 0;
1459 }
1460
1461 static int count_preds(struct filter_parse_state *ps)
1462 {
1463 struct postfix_elt *elt;
1464 int n_preds = 0;
1465
1466 list_for_each_entry(elt, &ps->postfix, list) {
1467 if (elt->op == OP_NONE)
1468 continue;
1469 n_preds++;
1470 }
1471
1472 return n_preds;
1473 }
1474
1475 struct check_pred_data {
1476 int count;
1477 int max;
1478 };
1479
1480 static int check_pred_tree_cb(enum move_type move, struct filter_pred *pred,
1481 int *err, void *data)
1482 {
1483 struct check_pred_data *d = data;
1484
1485 if (WARN_ON(d->count++ > d->max)) {
1486 *err = -EINVAL;
1487 return WALK_PRED_ABORT;
1488 }
1489 return WALK_PRED_DEFAULT;
1490 }
1491
1492 /*
1493 * The tree is walked at filtering of an event. If the tree is not correctly
1494 * built, it may cause an infinite loop. Check here that the tree does
1495 * indeed terminate.
1496 */
1497 static int check_pred_tree(struct event_filter *filter,
1498 struct filter_pred *root)
1499 {
1500 struct check_pred_data data = {
1501 /*
1502 * A node can be hit at most three times:
1503 * once going down, once coming up from the left, and
1504 * once coming up from the right. This is more than enough
1505 * since leaves are only hit a single time.
1506 */
1507 .max = 3 * filter->n_preds,
1508 .count = 0,
1509 };
1510
1511 return walk_pred_tree(filter->preds, root,
1512 check_pred_tree_cb, &data);
1513 }
1514
1515 static int count_leafs_cb(enum move_type move, struct filter_pred *pred,
1516 int *err, void *data)
1517 {
1518 int *count = data;
1519
1520 if ((move == MOVE_DOWN) &&
1521 (pred->left == FILTER_PRED_INVALID))
1522 (*count)++;
1523
1524 return WALK_PRED_DEFAULT;
1525 }
1526
1527 static int count_leafs(struct filter_pred *preds, struct filter_pred *root)
1528 {
1529 int count = 0, ret;
1530
1531 ret = walk_pred_tree(preds, root, count_leafs_cb, &count);
1532 WARN_ON(ret);
1533 return count;
1534 }
1535
1536 struct fold_pred_data {
1537 struct filter_pred *root;
1538 int count;
1539 int children;
1540 };
1541
1542 static int fold_pred_cb(enum move_type move, struct filter_pred *pred,
1543 int *err, void *data)
1544 {
1545 struct fold_pred_data *d = data;
1546 struct filter_pred *root = d->root;
1547
1548 if (move != MOVE_DOWN)
1549 return WALK_PRED_DEFAULT;
1550 if (pred->left != FILTER_PRED_INVALID)
1551 return WALK_PRED_DEFAULT;
1552
1553 if (WARN_ON(d->count == d->children)) {
1554 *err = -EINVAL;
1555 return WALK_PRED_ABORT;
1556 }
1557
1558 pred->index &= ~FILTER_PRED_FOLD;
1559 root->ops[d->count++] = pred->index;
1560 return WALK_PRED_DEFAULT;
1561 }
1562
1563 static int fold_pred(struct filter_pred *preds, struct filter_pred *root)
1564 {
1565 struct fold_pred_data data = {
1566 .root = root,
1567 .count = 0,
1568 };
1569 int children;
1570
1571 /* No need to keep the fold flag */
1572 root->index &= ~FILTER_PRED_FOLD;
1573
1574 /* If the root is a leaf then do nothing */
1575 if (root->left == FILTER_PRED_INVALID)
1576 return 0;
1577
1578 /* count the children */
1579 children = count_leafs(preds, &preds[root->left]);
1580 children += count_leafs(preds, &preds[root->right]);
1581
1582 root->ops = kcalloc(children, sizeof(*root->ops), GFP_KERNEL);
1583 if (!root->ops)
1584 return -ENOMEM;
1585
1586 root->val = children;
1587 data.children = children;
1588 return walk_pred_tree(preds, root, fold_pred_cb, &data);
1589 }
1590
1591 static int fold_pred_tree_cb(enum move_type move, struct filter_pred *pred,
1592 int *err, void *data)
1593 {
1594 struct filter_pred *preds = data;
1595
1596 if (move != MOVE_DOWN)
1597 return WALK_PRED_DEFAULT;
1598 if (!(pred->index & FILTER_PRED_FOLD))
1599 return WALK_PRED_DEFAULT;
1600
1601 *err = fold_pred(preds, pred);
1602 if (*err)
1603 return WALK_PRED_ABORT;
1604
1605 /* everything below is folded, continue with parent */
1606 return WALK_PRED_PARENT;
1607 }
1608
1609 /*
1610 * To optimize the processing of the ops, if we have several "ors" or
1611 * "ands" together, we can put them in an array and process them all
1612 * together, speeding up the filter logic.
1613 */
1614 static int fold_pred_tree(struct event_filter *filter,
1615 struct filter_pred *root)
1616 {
1617 return walk_pred_tree(filter->preds, root, fold_pred_tree_cb,
1618 filter->preds);
1619 }
1620
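/*
 * Build the predicate tree from the postfix list: each field/value
 * operand pair becomes a leaf predicate, and each AND/OR pops its two
 * children off the pred_stack, exactly like evaluating an RPN
 * expression.  When dry_run is set the expression is only validated;
 * no predicates are allocated and no tree is built.
 */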
1621 static int replace_preds(struct trace_event_call *call,
1622 struct event_filter *filter,
1623 struct filter_parse_state *ps,
1624 bool dry_run)
1625 {
1626 char *operand1 = NULL, *operand2 = NULL;
1627 struct filter_pred *pred;
1628 struct filter_pred *root;
1629 struct postfix_elt *elt;
1630 struct pred_stack stack = { }; /* init to NULL */
1631 int err;
1632 int n_preds = 0;
1633
1634 n_preds = count_preds(ps);
1635 if (n_preds >= MAX_FILTER_PRED) {
1636 parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
1637 return -ENOSPC;
1638 }
1639
1640 err = check_preds(ps);
1641 if (err)
1642 return err;
1643
1644 if (!dry_run) {
1645 err = __alloc_pred_stack(&stack, n_preds);
1646 if (err)
1647 return err;
1648 err = __alloc_preds(filter, n_preds);
1649 if (err)
1650 goto fail;
1651 }
1652
1653 n_preds = 0;
1654 list_for_each_entry(elt, &ps->postfix, list) {
1655 if (elt->op == OP_NONE) {
1656 if (!operand1)
1657 operand1 = elt->operand;
1658 else if (!operand2)
1659 operand2 = elt->operand;
1660 else {
1661 parse_error(ps, FILT_ERR_TOO_MANY_OPERANDS, 0);
1662 err = -EINVAL;
1663 goto fail;
1664 }
1665 continue;
1666 }
1667
1668 if (elt->op == OP_NOT) {
1669 if (!n_preds || operand1 || operand2) {
1670 parse_error(ps, FILT_ERR_ILLEGAL_NOT_OP, 0);
1671 err = -EINVAL;
1672 goto fail;
1673 }
1674 if (!dry_run)
1675 filter->preds[n_preds - 1].not ^= 1;
1676 continue;
1677 }
1678
1679 if (WARN_ON(n_preds++ == MAX_FILTER_PRED)) {
1680 parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
1681 err = -ENOSPC;
1682 goto fail;
1683 }
1684
1685 pred = create_pred(ps, call, elt->op, operand1, operand2);
1686 if (!pred) {
1687 err = -EINVAL;
1688 goto fail;
1689 }
1690
1691 if (!dry_run) {
1692 err = filter_add_pred(ps, filter, pred, &stack);
1693 if (err)
1694 goto fail;
1695 }
1696
1697 operand1 = operand2 = NULL;
1698 }
1699
1700 if (!dry_run) {
1701 /* We should have one item left on the stack */
1702 pred = __pop_pred_stack(&stack);
1703 if (!pred)
1704 return -EINVAL;
1705 /* This item is where we start from in matching */
1706 root = pred;
1707 /* Make sure the stack is empty */
1708 pred = __pop_pred_stack(&stack);
1709 if (WARN_ON(pred)) {
1710 err = -EINVAL;
1711 filter->root = NULL;
1712 goto fail;
1713 }
1714 err = check_pred_tree(filter, root);
1715 if (err)
1716 goto fail;
1717
1718 /* Optimize the tree */
1719 err = fold_pred_tree(filter, root);
1720 if (err)
1721 goto fail;
1722
1723 /* We don't set root until we know it works */
1724 barrier();
1725 filter->root = root;
1726 }
1727
1728 err = 0;
1729 fail:
1730 __free_pred_stack(&stack);
1731 return err;
1732 }
1733
1734 static inline void event_set_filtered_flag(struct trace_event_file *file)
1735 {
1736 struct trace_event_call *call = file->event_call;
1737
1738 if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
1739 call->flags |= TRACE_EVENT_FL_FILTERED;
1740 else
1741 file->flags |= EVENT_FILE_FL_FILTERED;
1742 }
1743
1744 static inline void event_set_filter(struct trace_event_file *file,
1745 struct event_filter *filter)
1746 {
1747 struct trace_event_call *call = file->event_call;
1748
1749 if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
1750 rcu_assign_pointer(call->filter, filter);
1751 else
1752 rcu_assign_pointer(file->filter, filter);
1753 }
1754
1755 static inline void event_clear_filter(struct trace_event_file *file)
1756 {
1757 struct trace_event_call *call = file->event_call;
1758
1759 if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
1760 RCU_INIT_POINTER(call->filter, NULL);
1761 else
1762 RCU_INIT_POINTER(file->filter, NULL);
1763 }
1764
1765 static inline void
1766 event_set_no_set_filter_flag(struct trace_event_file *file)
1767 {
1768 struct trace_event_call *call = file->event_call;
1769
1770 if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
1771 call->flags |= TRACE_EVENT_FL_NO_SET_FILTER;
1772 else
1773 file->flags |= EVENT_FILE_FL_NO_SET_FILTER;
1774 }
1775
1776 static inline void
1777 event_clear_no_set_filter_flag(struct trace_event_file *file)
1778 {
1779 struct trace_event_call *call = file->event_call;
1780
1781 if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
1782 call->flags &= ~TRACE_EVENT_FL_NO_SET_FILTER;
1783 else
1784 file->flags &= ~EVENT_FILE_FL_NO_SET_FILTER;
1785 }
1786
1787 static inline bool
1788 event_no_set_filter_flag(struct trace_event_file *file)
1789 {
1790 struct trace_event_call *call = file->event_call;
1791
1792 if (file->flags & EVENT_FILE_FL_NO_SET_FILTER)
1793 return true;
1794
1795 if ((call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) &&
1796 (call->flags & TRACE_EVENT_FL_NO_SET_FILTER))
1797 return true;
1798
1799 return false;
1800 }
1801
1802 struct filter_list {
1803 struct list_head list;
1804 struct event_filter *filter;
1805 };
1806
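/*
 * Apply a filter string to every event in a subsystem: first dry-run
 * the predicates against each event to see which ones can accept the
 * filter, then install a freshly built filter on those that can.  The
 * displaced filters are collected on a local list and freed only
 * after synchronize_sched(), since readers may still be using them.
 */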
1807 static int replace_system_preds(struct trace_subsystem_dir *dir,
1808 struct trace_array *tr,
1809 struct filter_parse_state *ps,
1810 char *filter_string)
1811 {
1812 struct trace_event_file *file;
1813 struct filter_list *filter_item;
1814 struct filter_list *tmp;
1815 LIST_HEAD(filter_list);
1816 bool fail = true;
1817 int err;
1818
1819 list_for_each_entry(file, &tr->events, list) {
1820 if (file->system != dir)
1821 continue;
1822
1823 /*
1824 * Try to see if the filter can be applied
1825 * (filter arg is ignored on dry_run)
1826 */
1827 err = replace_preds(file->event_call, NULL, ps, true);
1828 if (err)
1829 event_set_no_set_filter_flag(file);
1830 else
1831 event_clear_no_set_filter_flag(file);
1832 }
1833
1834 list_for_each_entry(file, &tr->events, list) {
1835 struct event_filter *filter;
1836
1837 if (file->system != dir)
1838 continue;
1839
1840 if (event_no_set_filter_flag(file))
1841 continue;
1842
1843 filter_item = kzalloc(sizeof(*filter_item), GFP_KERNEL);
1844 if (!filter_item)
1845 goto fail_mem;
1846
1847 list_add_tail(&filter_item->list, &filter_list);
1848
1849 filter_item->filter = __alloc_filter();
1850 if (!filter_item->filter)
1851 goto fail_mem;
1852 filter = filter_item->filter;
1853
1854 /* Can only fail on no memory */
1855 err = replace_filter_string(filter, filter_string);
1856 if (err)
1857 goto fail_mem;
1858
1859 err = replace_preds(file->event_call, filter, ps, false);
1860 if (err) {
1861 filter_disable(file);
1862 parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
1863 append_filter_err(ps, filter);
1864 } else
1865 event_set_filtered_flag(file);
1866 /*
1867 * Regardless of whether this returned an error, we still
1868 * replace the filter for the call.
1869 */
1870 filter = event_filter(file);
1871 event_set_filter(file, filter_item->filter);
1872 filter_item->filter = filter;
1873
1874 fail = false;
1875 }
1876
1877 if (fail)
1878 goto fail;
1879
1880 /*
1881 * The calls can still be using the old filters.
1882 * Do a synchronize_sched() to ensure all calls are
1883 * done with them before we free them.
1884 */
1885 synchronize_sched();
1886 list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
1887 __free_filter(filter_item->filter);
1888 list_del(&filter_item->list);
1889 kfree(filter_item);
1890 }
1891 return 0;
1892 fail:
1893 /* No call succeeded */
1894 list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
1895 list_del(&filter_item->list);
1896 kfree(filter_item);
1897 }
1898 parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
1899 return -EINVAL;
1900 fail_mem:
1901 /* If any call succeeded, we still need to sync */
1902 if (!fail)
1903 synchronize_sched();
1904 list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
1905 __free_filter(filter_item->filter);
1906 list_del(&filter_item->list);
1907 kfree(filter_item);
1908 }
1909 return -ENOMEM;
1910 }
1911
1912 static int create_filter_start(char *filter_str, bool set_str,
1913 struct filter_parse_state **psp,
1914 struct event_filter **filterp)
1915 {
1916 struct event_filter *filter;
1917 struct filter_parse_state *ps = NULL;
1918 int err = 0;
1919
1920 WARN_ON_ONCE(*psp || *filterp);
1921
1922 /* allocate everything, and if any fails, free all and fail */
1923 filter = __alloc_filter();
1924 if (filter && set_str)
1925 err = replace_filter_string(filter, filter_str);
1926
1927 ps = kzalloc(sizeof(*ps), GFP_KERNEL);
1928
1929 if (!filter || !ps || err) {
1930 kfree(ps);
1931 __free_filter(filter);
1932 return -ENOMEM;
1933 }
1934
1935 /* we're committed to creating a new filter */
1936 *filterp = filter;
1937 *psp = ps;
1938
1939 parse_init(ps, filter_ops, filter_str);
1940 err = filter_parse(ps);
1941 if (err && set_str)
1942 append_filter_err(ps, filter);
1943 return err;
1944 }
1945
1946 static void create_filter_finish(struct filter_parse_state *ps)
1947 {
1948 if (ps) {
1949 filter_opstack_clear(ps);
1950 postfix_clear(ps);
1951 kfree(ps);
1952 }
1953 }
1954
1955 /**
1956 * create_filter - create a filter for a trace_event_call
1957 * @call: trace_event_call to create a filter for
1958 * @filter_str: filter string
1959 * @set_str: remember @filter_str and enable detailed error in filter
1960 * @filterp: out param for created filter (always updated on return)
1961 *
1962 * Creates a filter for @call with @filter_str. If @set_str is %true,
1963 * @filter_str is copied and recorded in the new filter.
1964 *
1965 * On success, returns 0 and *@filterp points to the new filter. On
1966 * failure, returns -errno and *@filterp may point to %NULL or to a new
1967 * filter. In the latter case, the returned filter contains error
1968 * information if @set_str is %true and the caller is responsible for
1969 * freeing it.
1970 */
1971 static int create_filter(struct trace_event_call *call,
1972 char *filter_str, bool set_str,
1973 struct event_filter **filterp)
1974 {
1975 struct event_filter *filter = NULL;
1976 struct filter_parse_state *ps = NULL;
1977 int err;
1978
1979 err = create_filter_start(filter_str, set_str, &ps, &filter);
1980 if (!err) {
1981 err = replace_preds(call, filter, ps, false);
1982 if (err && set_str)
1983 append_filter_err(ps, filter);
1984 }
1985 if (err && !set_str) {
1986 free_event_filter(filter);
1987 filter = NULL;
1988 }
1989 create_filter_finish(ps);
1990
1991 *filterp = filter;
1992 return err;
1993 }
1994
1995 int create_event_filter(struct trace_event_call *call,
1996 char *filter_str, bool set_str,
1997 struct event_filter **filterp)
1998 {
1999 return create_filter(call, filter_str, set_str, filterp);
2000 }
2001
2002 /**
2003 * create_system_filter - create a filter for an event_subsystem
2004 * @dir: the trace_subsystem_dir to create a filter for
 * @tr: the trace_array that the subsystem belongs to
2005 * @filter_str: filter string
2006 * @filterp: out param for created filter (always updated on return)
2007 *
2008 * Identical to create_filter() except that it creates a subsystem filter
2009 * and always remembers @filter_str.
2010 */
2011 static int create_system_filter(struct trace_subsystem_dir *dir,
2012 struct trace_array *tr,
2013 char *filter_str, struct event_filter **filterp)
2014 {
2015 struct event_filter *filter = NULL;
2016 struct filter_parse_state *ps = NULL;
2017 int err;
2018
2019 err = create_filter_start(filter_str, true, &ps, &filter);
2020 if (!err) {
2021 err = replace_system_preds(dir, tr, ps, filter_str);
2022 if (!err) {
2023 /* System filters just show a default message */
2024 kfree(filter->filter_string);
2025 filter->filter_string = NULL;
2026 } else {
2027 append_filter_err(ps, filter);
2028 }
2029 }
2030 create_filter_finish(ps);
2031
2032 *filterp = filter;
2033 return err;
2034 }
2035
/* caller must hold event_mutex */
int apply_event_filter(struct trace_event_file *file, char *filter_string)
{
	struct trace_event_call *call = file->event_call;
	struct event_filter *filter;
	int err;

	if (!strcmp(strstrip(filter_string), "0")) {
		filter_disable(file);
		filter = event_filter(file);

		if (!filter)
			return 0;

		event_clear_filter(file);

		/* Make sure the filter is not being used */
		synchronize_sched();
		__free_filter(filter);

		return 0;
	}

	err = create_filter(call, filter_string, true, &filter);

	/*
	 * Always swap the call filter with the new filter
	 * even if there was an error. If there was an error
	 * in the filter, we disable the filter and show the
	 * error string.
	 */
	if (filter) {
		struct event_filter *tmp;

		tmp = event_filter(file);
		if (!err)
			event_set_filtered_flag(file);
		else
			filter_disable(file);

		event_set_filter(file, filter);

		if (tmp) {
			/* Make sure the call is done with the filter */
			synchronize_sched();
			__free_filter(tmp);
		}
	}

	return err;
}
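
/*
 * Illustrative example (for documentation only, not part of this file):
 * the function above backs the per-event "filter" file in tracefs, so a
 * filter is typically set and cleared from user space roughly like this:
 *
 *	# echo 'common_pid != 0 && bytes_req > 256' > \
 *		/sys/kernel/tracing/events/kmem/kmalloc/filter
 *	# echo 0 > /sys/kernel/tracing/events/kmem/kmalloc/filter
 *
 * Writing "0" takes the early branch above: the filter is disabled,
 * detached and freed after a synchronize_sched() grace period.
 */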

int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
				 char *filter_string)
{
	struct event_subsystem *system = dir->subsystem;
	struct trace_array *tr = dir->tr;
	struct event_filter *filter;
	int err = 0;

	mutex_lock(&event_mutex);

	/* Make sure the system still has events */
	if (!dir->nr_events) {
		err = -ENODEV;
		goto out_unlock;
	}

	if (!strcmp(strstrip(filter_string), "0")) {
		filter_free_subsystem_preds(dir, tr);
		remove_filter_string(system->filter);
		filter = system->filter;
		system->filter = NULL;
		/* Ensure all filters are no longer used */
		synchronize_sched();
		filter_free_subsystem_filters(dir, tr);
		__free_filter(filter);
		goto out_unlock;
	}

	err = create_system_filter(dir, tr, filter_string, &filter);
	if (filter) {
		/*
		 * No event actually uses the system filter,
		 * so we can free it without synchronize_sched().
		 */
		__free_filter(system->filter);
		system->filter = filter;
	}
out_unlock:
	mutex_unlock(&event_mutex);

	return err;
}
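
/*
 * Illustrative example (for documentation only, not part of this file):
 * the subsystem-level "filter" file applies one expression to every event
 * in that subsystem that has the referenced fields; if no event is
 * modified, the error is reported in the same file (see the default
 * message at the top of this file):
 *
 *	# echo 'common_pid == 1' > /sys/kernel/tracing/events/sched/filter
 *	# echo 0 > /sys/kernel/tracing/events/sched/filter
 */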

#ifdef CONFIG_PERF_EVENTS

void ftrace_profile_free_filter(struct perf_event *event)
{
	struct event_filter *filter = event->filter;

	event->filter = NULL;
	__free_filter(filter);
}

struct function_filter_data {
	struct ftrace_ops *ops;
	int first_filter;
	int first_notrace;
};

#ifdef CONFIG_FUNCTION_TRACER
static char **
ftrace_function_filter_re(char *buf, int len, int *count)
{
	char *str, **re;

	str = kstrndup(buf, len, GFP_KERNEL);
	if (!str)
		return NULL;

	/*
	 * The argv_split function takes white space
	 * as a separator, so convert ',' into spaces.
	 */
	strreplace(str, ',', ' ');

	re = argv_split(GFP_KERNEL, str, count);
	kfree(str);
	return re;
}

static int ftrace_function_set_regexp(struct ftrace_ops *ops, int filter,
				      int reset, char *re, int len)
{
	int ret;

	if (filter)
		ret = ftrace_set_filter(ops, re, len, reset);
	else
		ret = ftrace_set_notrace(ops, re, len, reset);

	return ret;
}

static int __ftrace_function_set_filter(int filter, char *buf, int len,
					struct function_filter_data *data)
{
	int i, re_cnt, ret = -EINVAL;
	int *reset;
	char **re;

	reset = filter ? &data->first_filter : &data->first_notrace;

	/*
	 * The 'ip' field could have multiple filters set, separated
	 * either by space or comma. We first split the filter string
	 * and apply each piece separately.
	 */
	re = ftrace_function_filter_re(buf, len, &re_cnt);
	if (!re)
		return -EINVAL;

	for (i = 0; i < re_cnt; i++) {
		ret = ftrace_function_set_regexp(data->ops, filter, *reset,
						 re[i], strlen(re[i]));
		if (ret)
			break;

		if (*reset)
			*reset = 0;
	}

	argv_free(re);
	return ret;
}
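
/*
 * Illustrative example (for documentation only, not part of this file):
 * a single 'ip' predicate value may carry several function patterns,
 * separated by commas and/or spaces, e.g.
 *
 *	ip == "vfs_read,vfs_write sys_nanosleep"
 *
 * which the helper above splits into "vfs_read", "vfs_write" and
 * "sys_nanosleep" and passes one by one to ftrace_set_filter() or
 * ftrace_set_notrace().
 */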

static int ftrace_function_check_pred(struct filter_pred *pred, int leaf)
{
	struct ftrace_event_field *field = pred->field;

	if (leaf) {
		/*
		 * Check the leaf predicate for function trace, verify:
		 *  - only '==' and '!=' are used
		 *  - the 'ip' field is used
		 */
		if ((pred->op != OP_EQ) && (pred->op != OP_NE))
			return -EINVAL;

		if (strcmp(field->name, "ip"))
			return -EINVAL;
	} else {
		/*
		 * Check the non-leaf predicate for function trace, verify:
		 *  - only '||' is used
		 */
		if (pred->op != OP_OR)
			return -EINVAL;
	}

	return 0;
}
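
/*
 * Illustrative example (for documentation only, not part of this file):
 * under the checks above, a function-trace filter such as
 *
 *	ip == vfs_read || ip != vfs_write
 *
 * is accepted (leaves use ==/!= on the 'ip' field, the only inner node
 * is '||'), while "ip == vfs_read && common_pid == 1" is rejected both
 * for the '&&' inner node and for the non-'ip' field.
 */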

static int ftrace_function_set_filter_cb(enum move_type move,
					 struct filter_pred *pred,
					 int *err, void *data)
{
	/* Check that the node is valid for function trace. */
	if ((move != MOVE_DOWN) ||
	    (pred->left != FILTER_PRED_INVALID)) {
		*err = ftrace_function_check_pred(pred, 0);
	} else {
		*err = ftrace_function_check_pred(pred, 1);
		if (*err)
			return WALK_PRED_ABORT;

		*err = __ftrace_function_set_filter(pred->op == OP_EQ,
						    pred->regex.pattern,
						    pred->regex.len,
						    data);
	}

	return (*err) ? WALK_PRED_ABORT : WALK_PRED_DEFAULT;
}

static int ftrace_function_set_filter(struct perf_event *event,
				      struct event_filter *filter)
{
	struct function_filter_data data = {
		.first_filter  = 1,
		.first_notrace = 1,
		.ops           = &event->ftrace_ops,
	};

	return walk_pred_tree(filter->preds, filter->root,
			      ftrace_function_set_filter_cb, &data);
}
#else
static int ftrace_function_set_filter(struct perf_event *event,
				      struct event_filter *filter)
{
	return -ENODEV;
}
#endif /* CONFIG_FUNCTION_TRACER */

int ftrace_profile_set_filter(struct perf_event *event, int event_id,
			      char *filter_str)
{
	int err;
	struct event_filter *filter;
	struct trace_event_call *call;

	mutex_lock(&event_mutex);

	call = event->tp_event;

	err = -EINVAL;
	if (!call)
		goto out_unlock;

	err = -EEXIST;
	if (event->filter)
		goto out_unlock;

	err = create_filter(call, filter_str, false, &filter);
	if (err)
		goto free_filter;

	if (ftrace_event_is_function(call))
		err = ftrace_function_set_filter(event, filter);
	else
		event->filter = filter;

free_filter:
	if (err || ftrace_event_is_function(call))
		__free_filter(filter);

out_unlock:
	mutex_unlock(&event_mutex);

	return err;
}

#endif /* CONFIG_PERF_EVENTS */

#ifdef CONFIG_FTRACE_STARTUP_TEST

#include <linux/types.h>
#include <linux/tracepoint.h>

#define CREATE_TRACE_POINTS
#include "trace_events_filter_test.h"

#define DATA_REC(m, va, vb, vc, vd, ve, vf, vg, vh, nvisit) \
{ \
	.filter = FILTER, \
	.rec    = { .a = va, .b = vb, .c = vc, .d = vd, \
		    .e = ve, .f = vf, .g = vg, .h = vh }, \
	.match  = m, \
	.not_visited = nvisit, \
}
#define YES 1
#define NO  0

static struct test_filter_data_t {
	char *filter;
	struct trace_event_raw_ftrace_test_filter rec;
	int match;
	char *not_visited;
} test_filter_data[] = {
#define FILTER "a == 1 && b == 1 && c == 1 && d == 1 && " \
	       "e == 1 && f == 1 && g == 1 && h == 1"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, ""),
	DATA_REC(NO,  0, 1, 1, 1, 1, 1, 1, 1, "bcdefgh"),
	DATA_REC(NO,  1, 1, 1, 1, 1, 1, 1, 0, ""),
#undef FILTER
#define FILTER "a == 1 || b == 1 || c == 1 || d == 1 || " \
	       "e == 1 || f == 1 || g == 1 || h == 1"
	DATA_REC(NO,  0, 0, 0, 0, 0, 0, 0, 0, ""),
	DATA_REC(YES, 0, 0, 0, 0, 0, 0, 0, 1, ""),
	DATA_REC(YES, 1, 0, 0, 0, 0, 0, 0, 0, "bcdefgh"),
#undef FILTER
#define FILTER "(a == 1 || b == 1) && (c == 1 || d == 1) && " \
	       "(e == 1 || f == 1) && (g == 1 || h == 1)"
	DATA_REC(NO,  0, 0, 1, 1, 1, 1, 1, 1, "dfh"),
	DATA_REC(YES, 0, 1, 0, 1, 0, 1, 0, 1, ""),
	DATA_REC(YES, 1, 0, 1, 0, 0, 1, 0, 1, "bd"),
	DATA_REC(NO,  1, 0, 1, 0, 0, 1, 0, 0, "bd"),
#undef FILTER
#define FILTER "(a == 1 && b == 1) || (c == 1 && d == 1) || " \
	       "(e == 1 && f == 1) || (g == 1 && h == 1)"
	DATA_REC(YES, 1, 0, 1, 1, 1, 1, 1, 1, "efgh"),
	DATA_REC(YES, 0, 0, 0, 0, 0, 0, 1, 1, ""),
	DATA_REC(NO,  0, 0, 0, 0, 0, 0, 0, 1, ""),
#undef FILTER
#define FILTER "(a == 1 && b == 1) && (c == 1 && d == 1) && " \
	       "(e == 1 && f == 1) || (g == 1 && h == 1)"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 0, 0, "gh"),
	DATA_REC(NO,  0, 0, 0, 0, 0, 0, 0, 1, ""),
	DATA_REC(YES, 1, 1, 1, 1, 1, 0, 1, 1, ""),
#undef FILTER
#define FILTER "((a == 1 || b == 1) || (c == 1 || d == 1) || " \
	       "(e == 1 || f == 1)) && (g == 1 || h == 1)"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 0, 1, "bcdef"),
	DATA_REC(NO,  0, 0, 0, 0, 0, 0, 0, 0, ""),
	DATA_REC(YES, 1, 1, 1, 1, 1, 0, 1, 1, "h"),
#undef FILTER
#define FILTER "((((((((a == 1) && (b == 1)) || (c == 1)) && (d == 1)) || " \
	       "(e == 1)) && (f == 1)) || (g == 1)) && (h == 1))"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, "ceg"),
	DATA_REC(NO,  0, 1, 0, 1, 0, 1, 0, 1, ""),
	DATA_REC(NO,  1, 0, 1, 0, 1, 0, 1, 0, ""),
#undef FILTER
#define FILTER "((((((((a == 1) || (b == 1)) && (c == 1)) || (d == 1)) && " \
	       "(e == 1)) || (f == 1)) && (g == 1)) || (h == 1))"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, "bdfh"),
	DATA_REC(YES, 0, 1, 0, 1, 0, 1, 0, 1, ""),
	DATA_REC(YES, 1, 0, 1, 0, 1, 0, 1, 0, "bdfh"),
};
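
/*
 * Worked example (for documentation only, not part of this file):
 * "not_visited" lists the fields whose leaf predicates must never be
 * evaluated for that record because the filter short-circuits. For the
 * first AND-only filter above, DATA_REC(NO, 0, 1, 1, 1, 1, 1, 1, 1,
 * "bcdefgh") expects that once "a == 1" fails, the predicates on b..h
 * are skipped entirely; the self test below flags a failure if any of
 * them is visited anyway.
 */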

#undef DATA_REC
#undef FILTER
#undef YES
#undef NO

#define DATA_CNT (sizeof(test_filter_data)/sizeof(struct test_filter_data_t))

static int test_pred_visited;

static int test_pred_visited_fn(struct filter_pred *pred, void *event)
{
	struct ftrace_event_field *field = pred->field;

	test_pred_visited = 1;
	printk(KERN_INFO "\npred visited %s\n", field->name);
	return 1;
}

static int test_walk_pred_cb(enum move_type move, struct filter_pred *pred,
			     int *err, void *data)
{
	char *fields = data;

	if ((move == MOVE_DOWN) &&
	    (pred->left == FILTER_PRED_INVALID)) {
		struct ftrace_event_field *field = pred->field;

		if (!field) {
			WARN(1, "all leaves should have field defined");
			return WALK_PRED_DEFAULT;
		}
		if (!strchr(fields, *field->name))
			return WALK_PRED_DEFAULT;

		WARN_ON(!pred->fn);
		pred->fn = test_pred_visited_fn;
	}
	return WALK_PRED_DEFAULT;
}

static __init int ftrace_test_event_filter(void)
{
	int i;

	printk(KERN_INFO "Testing ftrace filter: ");

	for (i = 0; i < DATA_CNT; i++) {
		struct event_filter *filter = NULL;
		struct test_filter_data_t *d = &test_filter_data[i];
		int err;

		err = create_filter(&event_ftrace_test_filter, d->filter,
				    false, &filter);
		if (err) {
			printk(KERN_INFO
			       "Failed to get filter for '%s', err %d\n",
			       d->filter, err);
			__free_filter(filter);
			break;
		}

		/*
		 * The preemption disabling is not really needed for self
		 * tests, but the rcu dereference will complain without it.
		 */
		preempt_disable();
		if (*d->not_visited)
			walk_pred_tree(filter->preds, filter->root,
				       test_walk_pred_cb,
				       d->not_visited);

		test_pred_visited = 0;
		err = filter_match_preds(filter, &d->rec);
		preempt_enable();

		__free_filter(filter);

		if (test_pred_visited) {
			printk(KERN_INFO
			       "Failed, unwanted pred visited for filter %s\n",
			       d->filter);
			break;
		}

		if (err != d->match) {
			printk(KERN_INFO
			       "Failed to match filter '%s', expected %d\n",
			       d->filter, d->match);
			break;
		}
	}

	if (i == DATA_CNT)
		printk(KERN_CONT "OK\n");

	return 0;
}

late_initcall(ftrace_test_event_filter);

#endif /* CONFIG_FTRACE_STARTUP_TEST */