1 /*
2 * trace_events_filter - generic event filtering
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com>
19 */
20
21 #include <linux/module.h>
22 #include <linux/ctype.h>
23 #include <linux/mutex.h>
24 #include <linux/perf_event.h>
25 #include <linux/slab.h>
26
27 #include "trace.h"
28 #include "trace_output.h"
29
30 #define DEFAULT_SYS_FILTER_MESSAGE \
31 "### global filter ###\n" \
32 "# Use this to set filters for multiple events.\n" \
33 "# Only events with the given fields will be affected.\n" \
34 "# If no events are modified, an error message will be displayed here"
35
36 enum filter_op_ids
37 {
38 OP_OR,
39 OP_AND,
40 OP_GLOB,
41 OP_NE,
42 OP_EQ,
43 OP_LT,
44 OP_LE,
45 OP_GT,
46 OP_GE,
47 OP_NONE,
48 OP_OPEN_PAREN,
49 };
50
51 struct filter_op {
52 int id;
53 char *string;
54 int precedence;
55 };
56
57 static struct filter_op filter_ops[] = {
58 { OP_OR, "||", 1 },
59 { OP_AND, "&&", 2 },
60 { OP_GLOB, "~", 4 },
61 { OP_NE, "!=", 4 },
62 { OP_EQ, "==", 4 },
63 { OP_LT, "<", 5 },
64 { OP_LE, "<=", 5 },
65 { OP_GT, ">", 5 },
66 { OP_GE, ">=", 5 },
67 { OP_NONE, "OP_NONE", 0 },
68 { OP_OPEN_PAREN, "(", 0 },
69 };
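
/*
 * Higher precedence binds tighter: in "x == 1 || y == 2 && z == 3"
 * (field names purely illustrative) the comparisons (precedence 4)
 * are grouped before "&&" (2), and "&&" before "||" (1), so the
 * expression reads as x == 1 || (y == 2 && z == 3). Parentheses
 * override this as usual.
 */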
70
71 enum {
72 FILT_ERR_NONE,
73 FILT_ERR_INVALID_OP,
74 FILT_ERR_UNBALANCED_PAREN,
75 FILT_ERR_TOO_MANY_OPERANDS,
76 FILT_ERR_OPERAND_TOO_LONG,
77 FILT_ERR_FIELD_NOT_FOUND,
78 FILT_ERR_ILLEGAL_FIELD_OP,
79 FILT_ERR_ILLEGAL_INTVAL,
80 FILT_ERR_BAD_SUBSYS_FILTER,
81 FILT_ERR_TOO_MANY_PREDS,
82 FILT_ERR_MISSING_FIELD,
83 FILT_ERR_INVALID_FILTER,
84 FILT_ERR_IP_FIELD_ONLY,
85 };
86
87 static char *err_text[] = {
88 "No error",
89 "Invalid operator",
90 "Unbalanced parens",
91 "Too many operands",
92 "Operand too long",
93 "Field not found",
94 "Illegal operation for field type",
95 "Illegal integer value",
96 "Couldn't find or set field in one of a subsystem's events",
97 "Too many terms in predicate expression",
98 "Missing field name and/or value",
99 "Meaningless filter expression",
100 "Only 'ip' field is supported for function trace",
101 };
102
103 struct opstack_op {
104 int op;
105 struct list_head list;
106 };
107
108 struct postfix_elt {
109 int op;
110 char *operand;
111 struct list_head list;
112 };
113
114 struct filter_parse_state {
115 struct filter_op *ops;
116 struct list_head opstack;
117 struct list_head postfix;
118 int lasterr;
119 int lasterr_pos;
120
121 struct {
122 char *string;
123 unsigned int cnt;
124 unsigned int tail;
125 } infix;
126
127 struct {
128 char string[MAX_FILTER_STR_VAL];
129 int pos;
130 unsigned int tail;
131 } operand;
132 };
133
134 struct pred_stack {
135 struct filter_pred **preds;
136 int index;
137 };
138
139 #define DEFINE_COMPARISON_PRED(type) \
140 static int filter_pred_##type(struct filter_pred *pred, void *event) \
141 { \
142 type *addr = (type *)(event + pred->offset); \
143 type val = (type)pred->val; \
144 int match = 0; \
145 \
146 switch (pred->op) { \
147 case OP_LT: \
148 match = (*addr < val); \
149 break; \
150 case OP_LE: \
151 match = (*addr <= val); \
152 break; \
153 case OP_GT: \
154 match = (*addr > val); \
155 break; \
156 case OP_GE: \
157 match = (*addr >= val); \
158 break; \
159 default: \
160 break; \
161 } \
162 \
163 return match; \
164 }
165
166 #define DEFINE_EQUALITY_PRED(size) \
167 static int filter_pred_##size(struct filter_pred *pred, void *event) \
168 { \
169 u##size *addr = (u##size *)(event + pred->offset); \
170 u##size val = (u##size)pred->val; \
171 int match; \
172 \
173 match = (val == *addr) ^ pred->not; \
174 \
175 return match; \
176 }
177
178 DEFINE_COMPARISON_PRED(s64);
179 DEFINE_COMPARISON_PRED(u64);
180 DEFINE_COMPARISON_PRED(s32);
181 DEFINE_COMPARISON_PRED(u32);
182 DEFINE_COMPARISON_PRED(s16);
183 DEFINE_COMPARISON_PRED(u16);
184 DEFINE_COMPARISON_PRED(s8);
185 DEFINE_COMPARISON_PRED(u8);
186
187 DEFINE_EQUALITY_PRED(64);
188 DEFINE_EQUALITY_PRED(32);
189 DEFINE_EQUALITY_PRED(16);
190 DEFINE_EQUALITY_PRED(8);
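
/*
 * The macros above expand to one predicate helper per type:
 * DEFINE_COMPARISON_PRED(s64) generates filter_pred_s64() handling
 * OP_LT/OP_LE/OP_GT/OP_GE on signed 64-bit fields, while
 * DEFINE_EQUALITY_PRED(64) generates filter_pred_64(), whose result
 * is inverted by pred->not for OP_NE.
 */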
191
192 /* Filter predicate for fixed-size arrays of characters */
193 static int filter_pred_string(struct filter_pred *pred, void *event)
194 {
195 char *addr = (char *)(event + pred->offset);
196 int cmp, match;
197
198 cmp = pred->regex.match(addr, &pred->regex, pred->regex.field_len);
199
200 match = cmp ^ pred->not;
201
202 return match;
203 }
204
205 /* Filter predicate for char * pointers */
206 static int filter_pred_pchar(struct filter_pred *pred, void *event)
207 {
208 char **addr = (char **)(event + pred->offset);
209 int cmp, match;
210 int len = strlen(*addr) + 1; /* including trailing '\0' */
211
212 cmp = pred->regex.match(*addr, &pred->regex, len);
213
214 match = cmp ^ pred->not;
215
216 return match;
217 }
218
219 /*
220 * Filter predicate for dynamically sized arrays of characters.
221 * These are implemented through a list of strings at the end
222 * of the entry.
223 * Also, each of these strings has a field in the entry which
224 * contains its offset from the beginning of the entry.
225 * We therefore first read this field, dereference it to get the
226 * offset, add that to the address of the entry, and finally
227 * arrive at the address of the string.
228 */
229 static int filter_pred_strloc(struct filter_pred *pred, void *event)
230 {
231 u32 str_item = *(u32 *)(event + pred->offset);
232 int str_loc = str_item & 0xffff;
233 int str_len = str_item >> 16;
234 char *addr = (char *)(event + str_loc);
235 int cmp, match;
236
237 cmp = pred->regex.match(addr, &pred->regex, str_len);
238
239 match = cmp ^ pred->not;
240
241 return match;
242 }
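
/*
 * Example of the packing decoded above: a str_item of 0x000a0020
 * means the string starts at offset 0x20 from the beginning of the
 * entry and is 10 bytes long (length in the upper 16 bits, offset in
 * the lower 16 bits).
 */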
243
244 static int filter_pred_none(struct filter_pred *pred, void *event)
245 {
246 return 0;
247 }
248
249 /*
250 * regex_match_foo - Basic regex callbacks
251 *
252 * @str: the string to be searched
253 * @r: the regex structure containing the pattern string
254 * @len: the length of the string to be searched (including '\0')
255 *
256 * Note:
257 * - @str might not be NULL-terminated if it's of type DYN_STRING
258 * or STATIC_STRING
259 */
260
261 static int regex_match_full(char *str, struct regex *r, int len)
262 {
263 if (strncmp(str, r->pattern, len) == 0)
264 return 1;
265 return 0;
266 }
267
268 static int regex_match_front(char *str, struct regex *r, int len)
269 {
270 if (strncmp(str, r->pattern, r->len) == 0)
271 return 1;
272 return 0;
273 }
274
275 static int regex_match_middle(char *str, struct regex *r, int len)
276 {
277 if (strnstr(str, r->pattern, len))
278 return 1;
279 return 0;
280 }
281
282 static int regex_match_end(char *str, struct regex *r, int len)
283 {
284 int strlen = len - 1;
285
286 if (strlen >= r->len &&
287 memcmp(str + strlen - r->len, r->pattern, r->len) == 0)
288 return 1;
289 return 0;
290 }
291
292 /**
293 * filter_parse_regex - parse a basic regex
294 * @buff: the raw regex
295 * @len: length of the regex
296 * @search: will point to the beginning of the string to compare
297 * @not: tell whether the match will have to be inverted
298 *
299 * The caller passes in a buffer containing a regex; this function
300 * sets @search to point to the search part of the buffer and
301 * returns the type of search it is (see the enum above).
302 * Note that this modifies @buff.
303 *
304 * Returns enum type.
305 * search returns the pointer to use for comparison.
306 * not returns 1 if buff started with a '!'
307 * 0 otherwise.
308 */
309 enum regex_type filter_parse_regex(char *buff, int len, char **search, int *not)
310 {
311 int type = MATCH_FULL;
312 int i;
313
314 if (buff[0] == '!') {
315 *not = 1;
316 buff++;
317 len--;
318 } else
319 *not = 0;
320
321 *search = buff;
322
323 for (i = 0; i < len; i++) {
324 if (buff[i] == '*') {
325 if (!i) {
326 *search = buff + 1;
327 type = MATCH_END_ONLY;
328 } else {
329 if (type == MATCH_END_ONLY)
330 type = MATCH_MIDDLE_ONLY;
331 else
332 type = MATCH_FRONT_ONLY;
333 buff[i] = 0;
334 break;
335 }
336 }
337 }
338
339 return type;
340 }
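
/*
 * A few illustrative inputs and the resulting match types:
 *   "glob"   -> MATCH_FULL,        *search = "glob"
 *   "glob*"  -> MATCH_FRONT_ONLY,  *search = "glob"
 *   "*glob"  -> MATCH_END_ONLY,    *search = "glob"
 *   "*glob*" -> MATCH_MIDDLE_ONLY, *search = "glob"
 *   "!glob"  -> sets *not and then handles "glob" as above
 */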
341
342 static void filter_build_regex(struct filter_pred *pred)
343 {
344 struct regex *r = &pred->regex;
345 char *search;
346 enum regex_type type = MATCH_FULL;
347 int not = 0;
348
349 if (pred->op == OP_GLOB) {
350 type = filter_parse_regex(r->pattern, r->len, &search, &not);
351 r->len = strlen(search);
352 memmove(r->pattern, search, r->len+1);
353 }
354
355 switch (type) {
356 case MATCH_FULL:
357 r->match = regex_match_full;
358 break;
359 case MATCH_FRONT_ONLY:
360 r->match = regex_match_front;
361 break;
362 case MATCH_MIDDLE_ONLY:
363 r->match = regex_match_middle;
364 break;
365 case MATCH_END_ONLY:
366 r->match = regex_match_end;
367 break;
368 }
369
370 pred->not ^= not;
371 }
372
373 enum move_type {
374 MOVE_DOWN,
375 MOVE_UP_FROM_LEFT,
376 MOVE_UP_FROM_RIGHT
377 };
378
379 static struct filter_pred *
380 get_pred_parent(struct filter_pred *pred, struct filter_pred *preds,
381 int index, enum move_type *move)
382 {
383 if (pred->parent & FILTER_PRED_IS_RIGHT)
384 *move = MOVE_UP_FROM_RIGHT;
385 else
386 *move = MOVE_UP_FROM_LEFT;
387 pred = &preds[pred->parent & ~FILTER_PRED_IS_RIGHT];
388
389 return pred;
390 }
391
392 enum walk_return {
393 WALK_PRED_ABORT,
394 WALK_PRED_PARENT,
395 WALK_PRED_DEFAULT,
396 };
397
398 typedef int (*filter_pred_walkcb_t) (enum move_type move,
399 struct filter_pred *pred,
400 int *err, void *data);
401
402 static int walk_pred_tree(struct filter_pred *preds,
403 struct filter_pred *root,
404 filter_pred_walkcb_t cb, void *data)
405 {
406 struct filter_pred *pred = root;
407 enum move_type move = MOVE_DOWN;
408 int done = 0;
409
410 if (!preds)
411 return -EINVAL;
412
413 do {
414 int err = 0, ret;
415
416 ret = cb(move, pred, &err, data);
417 if (ret == WALK_PRED_ABORT)
418 return err;
419 if (ret == WALK_PRED_PARENT)
420 goto get_parent;
421
422 switch (move) {
423 case MOVE_DOWN:
424 if (pred->left != FILTER_PRED_INVALID) {
425 pred = &preds[pred->left];
426 continue;
427 }
428 goto get_parent;
429 case MOVE_UP_FROM_LEFT:
430 pred = &preds[pred->right];
431 move = MOVE_DOWN;
432 continue;
433 case MOVE_UP_FROM_RIGHT:
434 get_parent:
435 if (pred == root)
436 break;
437 pred = get_pred_parent(pred, preds,
438 pred->parent,
439 &move);
440 continue;
441 }
442 done = 1;
443 } while (!done);
444
445 /* We are fine. */
446 return 0;
447 }
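
/*
 * Walk order illustration: for the tree built from "a == 1 && b == 2"
 * (fields illustrative), the callback sees the AND node with MOVE_DOWN,
 * the left leaf with MOVE_DOWN, the AND again with MOVE_UP_FROM_LEFT,
 * the right leaf with MOVE_DOWN, and finally the AND with
 * MOVE_UP_FROM_RIGHT, assuming the callback keeps returning
 * WALK_PRED_DEFAULT.
 */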
448
449 /*
450 * A series of ANDs or ORs was found together. Instead of
451 * climbing up and down the tree branches, an array of the
452 * ops was made in the order of the checks. We can just move
453 * across the array and short-circuit if needed.
454 */
455 static int process_ops(struct filter_pred *preds,
456 struct filter_pred *op, void *rec)
457 {
458 struct filter_pred *pred;
459 int match = 0;
460 int type;
461 int i;
462
463 /*
464 * Micro-optimization: We set type to true if op
465 * is an OR and false otherwise (AND). Then we
466 * just need to test if the match is equal to
467 * the type, and if it is, we can short circuit the
468 * rest of the checks:
469 *
470 * if ((match && op->op == OP_OR) ||
471 * (!match && op->op == OP_AND))
472 * return match;
473 */
474 type = op->op == OP_OR;
475
476 for (i = 0; i < op->val; i++) {
477 pred = &preds[op->ops[i]];
478 if (!WARN_ON_ONCE(!pred->fn))
479 match = pred->fn(pred, rec);
480 if (!!match == type)
481 return match;
482 }
483 return match;
484 }
485
486 struct filter_match_preds_data {
487 struct filter_pred *preds;
488 int match;
489 void *rec;
490 };
491
492 static int filter_match_preds_cb(enum move_type move, struct filter_pred *pred,
493 int *err, void *data)
494 {
495 struct filter_match_preds_data *d = data;
496
497 *err = 0;
498 switch (move) {
499 case MOVE_DOWN:
500 /* only AND and OR have children */
501 if (pred->left != FILTER_PRED_INVALID) {
502 /* If ops is set, then it was folded. */
503 if (!pred->ops)
504 return WALK_PRED_DEFAULT;
505 /* We can treat folded ops as a leaf node */
506 d->match = process_ops(d->preds, pred, d->rec);
507 } else {
508 if (!WARN_ON_ONCE(!pred->fn))
509 d->match = pred->fn(pred, d->rec);
510 }
511
512 return WALK_PRED_PARENT;
513 case MOVE_UP_FROM_LEFT:
514 /*
515 * Check for short circuits.
516 *
517 * Optimization: !!match == (pred->op == OP_OR)
518 * is the same as:
519 * if ((match && pred->op == OP_OR) ||
520 * (!match && pred->op == OP_AND))
521 */
522 if (!!d->match == (pred->op == OP_OR))
523 return WALK_PRED_PARENT;
524 break;
525 case MOVE_UP_FROM_RIGHT:
526 break;
527 }
528
529 return WALK_PRED_DEFAULT;
530 }
531
532 /* return 1 if event matches, 0 otherwise (discard) */
533 int filter_match_preds(struct event_filter *filter, void *rec)
534 {
535 struct filter_pred *preds;
536 struct filter_pred *root;
537 struct filter_match_preds_data data = {
538 /* match is currently meaningless */
539 .match = -1,
540 .rec = rec,
541 };
542 int n_preds, ret;
543
544 /* no filter is considered a match */
545 if (!filter)
546 return 1;
547
548 n_preds = filter->n_preds;
549 if (!n_preds)
550 return 1;
551
552 /*
553 * n_preds, root and filter->preds are protected with preemption disabled.
554 */
555 root = rcu_dereference_sched(filter->root);
556 if (!root)
557 return 1;
558
559 data.preds = preds = rcu_dereference_sched(filter->preds);
560 ret = walk_pred_tree(preds, root, filter_match_preds_cb, &data);
561 WARN_ON(ret);
562 return data.match;
563 }
564 EXPORT_SYMBOL_GPL(filter_match_preds);
565
566 static void parse_error(struct filter_parse_state *ps, int err, int pos)
567 {
568 ps->lasterr = err;
569 ps->lasterr_pos = pos;
570 }
571
572 static void remove_filter_string(struct event_filter *filter)
573 {
574 if (!filter)
575 return;
576
577 kfree(filter->filter_string);
578 filter->filter_string = NULL;
579 }
580
581 static int replace_filter_string(struct event_filter *filter,
582 char *filter_string)
583 {
584 kfree(filter->filter_string);
585 filter->filter_string = kstrdup(filter_string, GFP_KERNEL);
586 if (!filter->filter_string)
587 return -ENOMEM;
588
589 return 0;
590 }
591
592 static int append_filter_string(struct event_filter *filter,
593 char *string)
594 {
595 int newlen;
596 char *new_filter_string;
597
598 BUG_ON(!filter->filter_string);
599 newlen = strlen(filter->filter_string) + strlen(string) + 1;
600 new_filter_string = kmalloc(newlen, GFP_KERNEL);
601 if (!new_filter_string)
602 return -ENOMEM;
603
604 strcpy(new_filter_string, filter->filter_string);
605 strcat(new_filter_string, string);
606 kfree(filter->filter_string);
607 filter->filter_string = new_filter_string;
608
609 return 0;
610 }
611
612 static void append_filter_err(struct filter_parse_state *ps,
613 struct event_filter *filter)
614 {
615 int pos = ps->lasterr_pos;
616 char *buf, *pbuf;
617
618 buf = (char *)__get_free_page(GFP_TEMPORARY);
619 if (!buf)
620 return;
621
622 append_filter_string(filter, "\n");
623 memset(buf, ' ', PAGE_SIZE);
624 if (pos > PAGE_SIZE - 128)
625 pos = 0;
626 buf[pos] = '^';
627 pbuf = &buf[pos] + 1;
628
629 sprintf(pbuf, "\nparse_error: %s\n", err_text[ps->lasterr]);
630 append_filter_string(filter, buf);
631 free_page((unsigned long) buf);
632 }
633
634 void print_event_filter(struct ftrace_event_call *call, struct trace_seq *s)
635 {
636 struct event_filter *filter;
637
638 mutex_lock(&event_mutex);
639 filter = call->filter;
640 if (filter && filter->filter_string)
641 trace_seq_printf(s, "%s\n", filter->filter_string);
642 else
643 trace_seq_printf(s, "none\n");
644 mutex_unlock(&event_mutex);
645 }
646
647 void print_subsystem_event_filter(struct event_subsystem *system,
648 struct trace_seq *s)
649 {
650 struct event_filter *filter;
651
652 mutex_lock(&event_mutex);
653 filter = system->filter;
654 if (filter && filter->filter_string)
655 trace_seq_printf(s, "%s\n", filter->filter_string);
656 else
657 trace_seq_printf(s, DEFAULT_SYS_FILTER_MESSAGE "\n");
658 mutex_unlock(&event_mutex);
659 }
660
661 static int __alloc_pred_stack(struct pred_stack *stack, int n_preds)
662 {
663 stack->preds = kcalloc(n_preds + 1, sizeof(*stack->preds), GFP_KERNEL);
664 if (!stack->preds)
665 return -ENOMEM;
666 stack->index = n_preds;
667 return 0;
668 }
669
670 static void __free_pred_stack(struct pred_stack *stack)
671 {
672 kfree(stack->preds);
673 stack->index = 0;
674 }
675
676 static int __push_pred_stack(struct pred_stack *stack,
677 struct filter_pred *pred)
678 {
679 int index = stack->index;
680
681 if (WARN_ON(index == 0))
682 return -ENOSPC;
683
684 stack->preds[--index] = pred;
685 stack->index = index;
686 return 0;
687 }
688
689 static struct filter_pred *
690 __pop_pred_stack(struct pred_stack *stack)
691 {
692 struct filter_pred *pred;
693 int index = stack->index;
694
695 pred = stack->preds[index++];
696 if (!pred)
697 return NULL;
698
699 stack->index = index;
700 return pred;
701 }
702
703 static int filter_set_pred(struct event_filter *filter,
704 int idx,
705 struct pred_stack *stack,
706 struct filter_pred *src)
707 {
708 struct filter_pred *dest = &filter->preds[idx];
709 struct filter_pred *left;
710 struct filter_pred *right;
711
712 *dest = *src;
713 dest->index = idx;
714
715 if (dest->op == OP_OR || dest->op == OP_AND) {
716 right = __pop_pred_stack(stack);
717 left = __pop_pred_stack(stack);
718 if (!left || !right)
719 return -EINVAL;
720 /*
721 * If both children can be folded
722 * and they are the same op as this op or a leaf,
723 * then this op can be folded.
724 */
725 if (left->index & FILTER_PRED_FOLD &&
726 (left->op == dest->op ||
727 left->left == FILTER_PRED_INVALID) &&
728 right->index & FILTER_PRED_FOLD &&
729 (right->op == dest->op ||
730 right->left == FILTER_PRED_INVALID))
731 dest->index |= FILTER_PRED_FOLD;
732
733 dest->left = left->index & ~FILTER_PRED_FOLD;
734 dest->right = right->index & ~FILTER_PRED_FOLD;
735 left->parent = dest->index & ~FILTER_PRED_FOLD;
736 right->parent = dest->index | FILTER_PRED_IS_RIGHT;
737 } else {
738 /*
739 * Make dest->left invalid to be used as a quick
740 * way to know this is a leaf node.
741 */
742 dest->left = FILTER_PRED_INVALID;
743
744 /* All leaves allow folding the parent ops. */
745 dest->index |= FILTER_PRED_FOLD;
746 }
747
748 return __push_pred_stack(stack, dest);
749 }
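
/*
 * The predicates arrive here in postfix order, so the stack mirrors
 * the expression: for "a == 1 && b == 2" (fields illustrative) the two
 * '==' leaves are pushed first, then the AND pops both, links them as
 * its left and right children, and pushes itself. Whatever remains on
 * the stack at the end becomes the root.
 */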
750
751 static void __free_preds(struct event_filter *filter)
752 {
753 int i;
754
755 if (filter->preds) {
756 for (i = 0; i < filter->n_preds; i++)
757 kfree(filter->preds[i].ops);
758 kfree(filter->preds);
759 filter->preds = NULL;
760 }
761 filter->a_preds = 0;
762 filter->n_preds = 0;
763 }
764
765 static void filter_disable(struct ftrace_event_call *call)
766 {
767 call->flags &= ~TRACE_EVENT_FL_FILTERED;
768 }
769
770 static void __free_filter(struct event_filter *filter)
771 {
772 if (!filter)
773 return;
774
775 __free_preds(filter);
776 kfree(filter->filter_string);
777 kfree(filter);
778 }
779
780 /*
781 * Called when destroying the ftrace_event_call.
782 * The call is being freed, so we do not need to worry about
783 * the call being currently used. This is for module code removing
784 * the tracepoints from within it.
785 */
786 void destroy_preds(struct ftrace_event_call *call)
787 {
788 __free_filter(call->filter);
789 call->filter = NULL;
790 }
791
792 static struct event_filter *__alloc_filter(void)
793 {
794 struct event_filter *filter;
795
796 filter = kzalloc(sizeof(*filter), GFP_KERNEL);
797 return filter;
798 }
799
800 static int __alloc_preds(struct event_filter *filter, int n_preds)
801 {
802 struct filter_pred *pred;
803 int i;
804
805 if (filter->preds)
806 __free_preds(filter);
807
808 filter->preds = kcalloc(n_preds, sizeof(*filter->preds), GFP_KERNEL);
809
810 if (!filter->preds)
811 return -ENOMEM;
812
813 filter->a_preds = n_preds;
814 filter->n_preds = 0;
815
816 for (i = 0; i < n_preds; i++) {
817 pred = &filter->preds[i];
818 pred->fn = filter_pred_none;
819 }
820
821 return 0;
822 }
823
824 static void filter_free_subsystem_preds(struct event_subsystem *system)
825 {
826 struct ftrace_event_call *call;
827
828 list_for_each_entry(call, &ftrace_events, list) {
829 if (strcmp(call->class->system, system->name) != 0)
830 continue;
831
832 filter_disable(call);
833 remove_filter_string(call->filter);
834 }
835 }
836
837 static void filter_free_subsystem_filters(struct event_subsystem *system)
838 {
839 struct ftrace_event_call *call;
840
841 list_for_each_entry(call, &ftrace_events, list) {
842 if (strcmp(call->class->system, system->name) != 0)
843 continue;
844 __free_filter(call->filter);
845 call->filter = NULL;
846 }
847 }
848
849 static int filter_add_pred(struct filter_parse_state *ps,
850 struct event_filter *filter,
851 struct filter_pred *pred,
852 struct pred_stack *stack)
853 {
854 int err;
855
856 if (WARN_ON(filter->n_preds == filter->a_preds)) {
857 parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
858 return -ENOSPC;
859 }
860
861 err = filter_set_pred(filter, filter->n_preds, stack, pred);
862 if (err)
863 return err;
864
865 filter->n_preds++;
866
867 return 0;
868 }
869
870 int filter_assign_type(const char *type)
871 {
872 if (strstr(type, "__data_loc") && strstr(type, "char"))
873 return FILTER_DYN_STRING;
874
875 if (strchr(type, '[') && strstr(type, "char"))
876 return FILTER_STATIC_STRING;
877
878 return FILTER_OTHER;
879 }
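
/*
 * Examples of the mapping above (type strings illustrative):
 *   "__data_loc char[]" -> FILTER_DYN_STRING
 *   "char[16]"          -> FILTER_STATIC_STRING
 *   "unsigned long"     -> FILTER_OTHER
 */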
880
881 static bool is_function_field(struct ftrace_event_field *field)
882 {
883 return field->filter_type == FILTER_TRACE_FN;
884 }
885
886 static bool is_string_field(struct ftrace_event_field *field)
887 {
888 return field->filter_type == FILTER_DYN_STRING ||
889 field->filter_type == FILTER_STATIC_STRING ||
890 field->filter_type == FILTER_PTR_STRING;
891 }
892
893 static int is_legal_op(struct ftrace_event_field *field, int op)
894 {
895 if (is_string_field(field) &&
896 (op != OP_EQ && op != OP_NE && op != OP_GLOB))
897 return 0;
898 if (!is_string_field(field) && op == OP_GLOB)
899 return 0;
900
901 return 1;
902 }
903
904 static filter_pred_fn_t select_comparison_fn(int op, int field_size,
905 int field_is_signed)
906 {
907 filter_pred_fn_t fn = NULL;
908
909 switch (field_size) {
910 case 8:
911 if (op == OP_EQ || op == OP_NE)
912 fn = filter_pred_64;
913 else if (field_is_signed)
914 fn = filter_pred_s64;
915 else
916 fn = filter_pred_u64;
917 break;
918 case 4:
919 if (op == OP_EQ || op == OP_NE)
920 fn = filter_pred_32;
921 else if (field_is_signed)
922 fn = filter_pred_s32;
923 else
924 fn = filter_pred_u32;
925 break;
926 case 2:
927 if (op == OP_EQ || op == OP_NE)
928 fn = filter_pred_16;
929 else if (field_is_signed)
930 fn = filter_pred_s16;
931 else
932 fn = filter_pred_u16;
933 break;
934 case 1:
935 if (op == OP_EQ || op == OP_NE)
936 fn = filter_pred_8;
937 else if (field_is_signed)
938 fn = filter_pred_s8;
939 else
940 fn = filter_pred_u8;
941 break;
942 }
943
944 return fn;
945 }
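
/*
 * Note that OP_EQ and OP_NE select the size-only filter_pred_<size>()
 * helpers regardless of signedness: equality does not depend on the
 * sign, while the ordered comparisons need the signed or unsigned
 * variant generated above.
 */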
946
947 static int init_pred(struct filter_parse_state *ps,
948 struct ftrace_event_field *field,
949 struct filter_pred *pred)
950
951 {
952 filter_pred_fn_t fn = filter_pred_none;
953 unsigned long long val;
954 int ret;
955
956 pred->offset = field->offset;
957
958 if (!is_legal_op(field, pred->op)) {
959 parse_error(ps, FILT_ERR_ILLEGAL_FIELD_OP, 0);
960 return -EINVAL;
961 }
962
963 if (is_string_field(field)) {
964 filter_build_regex(pred);
965
966 if (field->filter_type == FILTER_STATIC_STRING) {
967 fn = filter_pred_string;
968 pred->regex.field_len = field->size;
969 } else if (field->filter_type == FILTER_DYN_STRING)
970 fn = filter_pred_strloc;
971 else
972 fn = filter_pred_pchar;
973 } else if (is_function_field(field)) {
974 if (strcmp(field->name, "ip")) {
975 parse_error(ps, FILT_ERR_IP_FIELD_ONLY, 0);
976 return -EINVAL;
977 }
978 } else {
979 if (field->is_signed)
980 ret = kstrtoll(pred->regex.pattern, 0, &val);
981 else
982 ret = kstrtoull(pred->regex.pattern, 0, &val);
983 if (ret) {
984 parse_error(ps, FILT_ERR_ILLEGAL_INTVAL, 0);
985 return -EINVAL;
986 }
987 pred->val = val;
988
989 fn = select_comparison_fn(pred->op, field->size,
990 field->is_signed);
991 if (!fn) {
992 parse_error(ps, FILT_ERR_INVALID_OP, 0);
993 return -EINVAL;
994 }
995 }
996
997 if (pred->op == OP_NE)
998 pred->not = 1;
999
1000 pred->fn = fn;
1001 return 0;
1002 }
1003
1004 static void parse_init(struct filter_parse_state *ps,
1005 struct filter_op *ops,
1006 char *infix_string)
1007 {
1008 memset(ps, '\0', sizeof(*ps));
1009
1010 ps->infix.string = infix_string;
1011 ps->infix.cnt = strlen(infix_string);
1012 ps->ops = ops;
1013
1014 INIT_LIST_HEAD(&ps->opstack);
1015 INIT_LIST_HEAD(&ps->postfix);
1016 }
1017
1018 static char infix_next(struct filter_parse_state *ps)
1019 {
1020 ps->infix.cnt--;
1021
1022 return ps->infix.string[ps->infix.tail++];
1023 }
1024
1025 static char infix_peek(struct filter_parse_state *ps)
1026 {
1027 if (ps->infix.tail == strlen(ps->infix.string))
1028 return 0;
1029
1030 return ps->infix.string[ps->infix.tail];
1031 }
1032
1033 static void infix_advance(struct filter_parse_state *ps)
1034 {
1035 ps->infix.cnt--;
1036 ps->infix.tail++;
1037 }
1038
1039 static inline int is_precedence_lower(struct filter_parse_state *ps,
1040 int a, int b)
1041 {
1042 return ps->ops[a].precedence < ps->ops[b].precedence;
1043 }
1044
1045 static inline int is_op_char(struct filter_parse_state *ps, char c)
1046 {
1047 int i;
1048
1049 for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
1050 if (ps->ops[i].string[0] == c)
1051 return 1;
1052 }
1053
1054 return 0;
1055 }
1056
1057 static int infix_get_op(struct filter_parse_state *ps, char firstc)
1058 {
1059 char nextc = infix_peek(ps);
1060 char opstr[3];
1061 int i;
1062
1063 opstr[0] = firstc;
1064 opstr[1] = nextc;
1065 opstr[2] = '\0';
1066
1067 for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
1068 if (!strcmp(opstr, ps->ops[i].string)) {
1069 infix_advance(ps);
1070 return ps->ops[i].id;
1071 }
1072 }
1073
1074 opstr[1] = '\0';
1075
1076 for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
1077 if (!strcmp(opstr, ps->ops[i].string))
1078 return ps->ops[i].id;
1079 }
1080
1081 return OP_NONE;
1082 }
1083
1084 static inline void clear_operand_string(struct filter_parse_state *ps)
1085 {
1086 memset(ps->operand.string, '\0', MAX_FILTER_STR_VAL);
1087 ps->operand.tail = 0;
1088 }
1089
1090 static inline int append_operand_char(struct filter_parse_state *ps, char c)
1091 {
1092 if (ps->operand.tail == MAX_FILTER_STR_VAL - 1)
1093 return -EINVAL;
1094
1095 ps->operand.string[ps->operand.tail++] = c;
1096
1097 return 0;
1098 }
1099
1100 static int filter_opstack_push(struct filter_parse_state *ps, int op)
1101 {
1102 struct opstack_op *opstack_op;
1103
1104 opstack_op = kmalloc(sizeof(*opstack_op), GFP_KERNEL);
1105 if (!opstack_op)
1106 return -ENOMEM;
1107
1108 opstack_op->op = op;
1109 list_add(&opstack_op->list, &ps->opstack);
1110
1111 return 0;
1112 }
1113
1114 static int filter_opstack_empty(struct filter_parse_state *ps)
1115 {
1116 return list_empty(&ps->opstack);
1117 }
1118
1119 static int filter_opstack_top(struct filter_parse_state *ps)
1120 {
1121 struct opstack_op *opstack_op;
1122
1123 if (filter_opstack_empty(ps))
1124 return OP_NONE;
1125
1126 opstack_op = list_first_entry(&ps->opstack, struct opstack_op, list);
1127
1128 return opstack_op->op;
1129 }
1130
1131 static int filter_opstack_pop(struct filter_parse_state *ps)
1132 {
1133 struct opstack_op *opstack_op;
1134 int op;
1135
1136 if (filter_opstack_empty(ps))
1137 return OP_NONE;
1138
1139 opstack_op = list_first_entry(&ps->opstack, struct opstack_op, list);
1140 op = opstack_op->op;
1141 list_del(&opstack_op->list);
1142
1143 kfree(opstack_op);
1144
1145 return op;
1146 }
1147
1148 static void filter_opstack_clear(struct filter_parse_state *ps)
1149 {
1150 while (!filter_opstack_empty(ps))
1151 filter_opstack_pop(ps);
1152 }
1153
1154 static char *curr_operand(struct filter_parse_state *ps)
1155 {
1156 return ps->operand.string;
1157 }
1158
1159 static int postfix_append_operand(struct filter_parse_state *ps, char *operand)
1160 {
1161 struct postfix_elt *elt;
1162
1163 elt = kmalloc(sizeof(*elt), GFP_KERNEL);
1164 if (!elt)
1165 return -ENOMEM;
1166
1167 elt->op = OP_NONE;
1168 elt->operand = kstrdup(operand, GFP_KERNEL);
1169 if (!elt->operand) {
1170 kfree(elt);
1171 return -ENOMEM;
1172 }
1173
1174 list_add_tail(&elt->list, &ps->postfix);
1175
1176 return 0;
1177 }
1178
1179 static int postfix_append_op(struct filter_parse_state *ps, int op)
1180 {
1181 struct postfix_elt *elt;
1182
1183 elt = kmalloc(sizeof(*elt), GFP_KERNEL);
1184 if (!elt)
1185 return -ENOMEM;
1186
1187 elt->op = op;
1188 elt->operand = NULL;
1189
1190 list_add_tail(&elt->list, &ps->postfix);
1191
1192 return 0;
1193 }
1194
1195 static void postfix_clear(struct filter_parse_state *ps)
1196 {
1197 struct postfix_elt *elt;
1198
1199 while (!list_empty(&ps->postfix)) {
1200 elt = list_first_entry(&ps->postfix, struct postfix_elt, list);
1201 list_del(&elt->list);
1202 kfree(elt->operand);
1203 kfree(elt);
1204 }
1205 }
1206
1207 static int filter_parse(struct filter_parse_state *ps)
1208 {
1209 int in_string = 0;
1210 int op, top_op;
1211 char ch;
1212
1213 while ((ch = infix_next(ps))) {
1214 if (ch == '"') {
1215 in_string ^= 1;
1216 continue;
1217 }
1218
1219 if (in_string)
1220 goto parse_operand;
1221
1222 if (isspace(ch))
1223 continue;
1224
1225 if (is_op_char(ps, ch)) {
1226 op = infix_get_op(ps, ch);
1227 if (op == OP_NONE) {
1228 parse_error(ps, FILT_ERR_INVALID_OP, 0);
1229 return -EINVAL;
1230 }
1231
1232 if (strlen(curr_operand(ps))) {
1233 postfix_append_operand(ps, curr_operand(ps));
1234 clear_operand_string(ps);
1235 }
1236
1237 while (!filter_opstack_empty(ps)) {
1238 top_op = filter_opstack_top(ps);
1239 if (!is_precedence_lower(ps, top_op, op)) {
1240 top_op = filter_opstack_pop(ps);
1241 postfix_append_op(ps, top_op);
1242 continue;
1243 }
1244 break;
1245 }
1246
1247 filter_opstack_push(ps, op);
1248 continue;
1249 }
1250
1251 if (ch == '(') {
1252 filter_opstack_push(ps, OP_OPEN_PAREN);
1253 continue;
1254 }
1255
1256 if (ch == ')') {
1257 if (strlen(curr_operand(ps))) {
1258 postfix_append_operand(ps, curr_operand(ps));
1259 clear_operand_string(ps);
1260 }
1261
1262 top_op = filter_opstack_pop(ps);
1263 while (top_op != OP_NONE) {
1264 if (top_op == OP_OPEN_PAREN)
1265 break;
1266 postfix_append_op(ps, top_op);
1267 top_op = filter_opstack_pop(ps);
1268 }
1269 if (top_op == OP_NONE) {
1270 parse_error(ps, FILT_ERR_UNBALANCED_PAREN, 0);
1271 return -EINVAL;
1272 }
1273 continue;
1274 }
1275 parse_operand:
1276 if (append_operand_char(ps, ch)) {
1277 parse_error(ps, FILT_ERR_OPERAND_TOO_LONG, 0);
1278 return -EINVAL;
1279 }
1280 }
1281
1282 if (strlen(curr_operand(ps)))
1283 postfix_append_operand(ps, curr_operand(ps));
1284
1285 while (!filter_opstack_empty(ps)) {
1286 top_op = filter_opstack_pop(ps);
1287 if (top_op == OP_NONE)
1288 break;
1289 if (top_op == OP_OPEN_PAREN) {
1290 parse_error(ps, FILT_ERR_UNBALANCED_PAREN, 0);
1291 return -EINVAL;
1292 }
1293 postfix_append_op(ps, top_op);
1294 }
1295
1296 return 0;
1297 }
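
/*
 * Example of the conversion above, with illustrative field names:
 * the infix input "a == 1 && b == 2" leaves ps->postfix holding
 *   "a", "1", OP_EQ, "b", "2", OP_EQ, OP_AND
 * which replace_preds() below consumes to build the predicate tree.
 */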
1298
1299 static struct filter_pred *create_pred(struct filter_parse_state *ps,
1300 struct ftrace_event_call *call,
1301 int op, char *operand1, char *operand2)
1302 {
1303 struct ftrace_event_field *field;
1304 static struct filter_pred pred;
1305
1306 memset(&pred, 0, sizeof(pred));
1307 pred.op = op;
1308
1309 if (op == OP_AND || op == OP_OR)
1310 return &pred;
1311
1312 if (!operand1 || !operand2) {
1313 parse_error(ps, FILT_ERR_MISSING_FIELD, 0);
1314 return NULL;
1315 }
1316
1317 field = trace_find_event_field(call, operand1);
1318 if (!field) {
1319 parse_error(ps, FILT_ERR_FIELD_NOT_FOUND, 0);
1320 return NULL;
1321 }
1322
1323 strcpy(pred.regex.pattern, operand2);
1324 pred.regex.len = strlen(pred.regex.pattern);
1325 pred.field = field;
1326 return init_pred(ps, field, &pred) ? NULL : &pred;
1327 }
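
/*
 * Note: the static pred above is only scratch storage; filter_set_pred()
 * copies it by value (*dest = *src) into filter->preds[] before
 * create_pred() is called for the next postfix element.
 */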
1328
1329 static int check_preds(struct filter_parse_state *ps)
1330 {
1331 int n_normal_preds = 0, n_logical_preds = 0;
1332 struct postfix_elt *elt;
1333
1334 list_for_each_entry(elt, &ps->postfix, list) {
1335 if (elt->op == OP_NONE)
1336 continue;
1337
1338 if (elt->op == OP_AND || elt->op == OP_OR) {
1339 n_logical_preds++;
1340 continue;
1341 }
1342 n_normal_preds++;
1343 }
1344
1345 if (!n_normal_preds || n_logical_preds >= n_normal_preds) {
1346 parse_error(ps, FILT_ERR_INVALID_FILTER, 0);
1347 return -EINVAL;
1348 }
1349
1350 return 0;
1351 }
1352
1353 static int count_preds(struct filter_parse_state *ps)
1354 {
1355 struct postfix_elt *elt;
1356 int n_preds = 0;
1357
1358 list_for_each_entry(elt, &ps->postfix, list) {
1359 if (elt->op == OP_NONE)
1360 continue;
1361 n_preds++;
1362 }
1363
1364 return n_preds;
1365 }
1366
1367 struct check_pred_data {
1368 int count;
1369 int max;
1370 };
1371
1372 static int check_pred_tree_cb(enum move_type move, struct filter_pred *pred,
1373 int *err, void *data)
1374 {
1375 struct check_pred_data *d = data;
1376
1377 if (WARN_ON(d->count++ > d->max)) {
1378 *err = -EINVAL;
1379 return WALK_PRED_ABORT;
1380 }
1381 return WALK_PRED_DEFAULT;
1382 }
1383
1384 /*
1385 * The tree is walked at filtering of an event. If the tree is not correctly
1386 * built, it may cause an infinite loop. Check here that the tree does
1387 * indeed terminate.
1388 */
1389 static int check_pred_tree(struct event_filter *filter,
1390 struct filter_pred *root)
1391 {
1392 struct check_pred_data data = {
1393 /*
1394 * The maximum number of times we can hit a node is three:
1395 * once going down, once coming up from the left, and
1396 * once coming up from the right. This is more than enough
1397 * since leaves are only hit a single time.
1398 */
1399 .max = 3 * filter->n_preds,
1400 .count = 0,
1401 };
1402
1403 return walk_pred_tree(filter->preds, root,
1404 check_pred_tree_cb, &data);
1405 }
1406
1407 static int count_leafs_cb(enum move_type move, struct filter_pred *pred,
1408 int *err, void *data)
1409 {
1410 int *count = data;
1411
1412 if ((move == MOVE_DOWN) &&
1413 (pred->left == FILTER_PRED_INVALID))
1414 (*count)++;
1415
1416 return WALK_PRED_DEFAULT;
1417 }
1418
1419 static int count_leafs(struct filter_pred *preds, struct filter_pred *root)
1420 {
1421 int count = 0, ret;
1422
1423 ret = walk_pred_tree(preds, root, count_leafs_cb, &count);
1424 WARN_ON(ret);
1425 return count;
1426 }
1427
1428 struct fold_pred_data {
1429 struct filter_pred *root;
1430 int count;
1431 int children;
1432 };
1433
1434 static int fold_pred_cb(enum move_type move, struct filter_pred *pred,
1435 int *err, void *data)
1436 {
1437 struct fold_pred_data *d = data;
1438 struct filter_pred *root = d->root;
1439
1440 if (move != MOVE_DOWN)
1441 return WALK_PRED_DEFAULT;
1442 if (pred->left != FILTER_PRED_INVALID)
1443 return WALK_PRED_DEFAULT;
1444
1445 if (WARN_ON(d->count == d->children)) {
1446 *err = -EINVAL;
1447 return WALK_PRED_ABORT;
1448 }
1449
1450 pred->index &= ~FILTER_PRED_FOLD;
1451 root->ops[d->count++] = pred->index;
1452 return WALK_PRED_DEFAULT;
1453 }
1454
1455 static int fold_pred(struct filter_pred *preds, struct filter_pred *root)
1456 {
1457 struct fold_pred_data data = {
1458 .root = root,
1459 .count = 0,
1460 };
1461 int children;
1462
1463 /* No need to keep the fold flag */
1464 root->index &= ~FILTER_PRED_FOLD;
1465
1466 /* If the root is a leaf then do nothing */
1467 if (root->left == FILTER_PRED_INVALID)
1468 return 0;
1469
1470 /* count the children */
1471 children = count_leafs(preds, &preds[root->left]);
1472 children += count_leafs(preds, &preds[root->right]);
1473
1474 root->ops = kcalloc(children, sizeof(*root->ops), GFP_KERNEL);
1475 if (!root->ops)
1476 return -ENOMEM;
1477
1478 root->val = children;
1479 data.children = children;
1480 return walk_pred_tree(preds, root, fold_pred_cb, &data);
1481 }
1482
1483 static int fold_pred_tree_cb(enum move_type move, struct filter_pred *pred,
1484 int *err, void *data)
1485 {
1486 struct filter_pred *preds = data;
1487
1488 if (move != MOVE_DOWN)
1489 return WALK_PRED_DEFAULT;
1490 if (!(pred->index & FILTER_PRED_FOLD))
1491 return WALK_PRED_DEFAULT;
1492
1493 *err = fold_pred(preds, pred);
1494 if (*err)
1495 return WALK_PRED_ABORT;
1496
1497 /* everything below is folded, continue with parent */
1498 return WALK_PRED_PARENT;
1499 }
1500
1501 /*
1502 * To optimize the processing of the ops, if we have several "ors" or
1503 * "ands" together, we can put them in an array and process them all
1504 * together, speeding up the filter logic.
1505 */
1506 static int fold_pred_tree(struct event_filter *filter,
1507 struct filter_pred *root)
1508 {
1509 return walk_pred_tree(filter->preds, root, fold_pred_tree_cb,
1510 filter->preds);
1511 }
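
/*
 * For example (fields illustrative), "a == 1 && b == 2 && c == 3"
 * folds into a single AND node whose ops[] array holds the indexes of
 * the three leaves; process_ops() then scans that array and
 * short-circuits on the first leaf that evaluates to 0.
 */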
1512
1513 static int replace_preds(struct ftrace_event_call *call,
1514 struct event_filter *filter,
1515 struct filter_parse_state *ps,
1516 char *filter_string,
1517 bool dry_run)
1518 {
1519 char *operand1 = NULL, *operand2 = NULL;
1520 struct filter_pred *pred;
1521 struct filter_pred *root;
1522 struct postfix_elt *elt;
1523 struct pred_stack stack = { }; /* init to NULL */
1524 int err;
1525 int n_preds = 0;
1526
1527 n_preds = count_preds(ps);
1528 if (n_preds >= MAX_FILTER_PRED) {
1529 parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
1530 return -ENOSPC;
1531 }
1532
1533 err = check_preds(ps);
1534 if (err)
1535 return err;
1536
1537 if (!dry_run) {
1538 err = __alloc_pred_stack(&stack, n_preds);
1539 if (err)
1540 return err;
1541 err = __alloc_preds(filter, n_preds);
1542 if (err)
1543 goto fail;
1544 }
1545
1546 n_preds = 0;
1547 list_for_each_entry(elt, &ps->postfix, list) {
1548 if (elt->op == OP_NONE) {
1549 if (!operand1)
1550 operand1 = elt->operand;
1551 else if (!operand2)
1552 operand2 = elt->operand;
1553 else {
1554 parse_error(ps, FILT_ERR_TOO_MANY_OPERANDS, 0);
1555 err = -EINVAL;
1556 goto fail;
1557 }
1558 continue;
1559 }
1560
1561 if (WARN_ON(n_preds++ == MAX_FILTER_PRED)) {
1562 parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
1563 err = -ENOSPC;
1564 goto fail;
1565 }
1566
1567 pred = create_pred(ps, call, elt->op, operand1, operand2);
1568 if (!pred) {
1569 err = -EINVAL;
1570 goto fail;
1571 }
1572
1573 if (!dry_run) {
1574 err = filter_add_pred(ps, filter, pred, &stack);
1575 if (err)
1576 goto fail;
1577 }
1578
1579 operand1 = operand2 = NULL;
1580 }
1581
1582 if (!dry_run) {
1583 /* We should have one item left on the stack */
1584 pred = __pop_pred_stack(&stack);
1585 if (!pred)
1586 return -EINVAL;
1587 /* This item is where we start from in matching */
1588 root = pred;
1589 /* Make sure the stack is empty */
1590 pred = __pop_pred_stack(&stack);
1591 if (WARN_ON(pred)) {
1592 err = -EINVAL;
1593 filter->root = NULL;
1594 goto fail;
1595 }
1596 err = check_pred_tree(filter, root);
1597 if (err)
1598 goto fail;
1599
1600 /* Optimize the tree */
1601 err = fold_pred_tree(filter, root);
1602 if (err)
1603 goto fail;
1604
1605 /* We don't set root until we know it works */
1606 barrier();
1607 filter->root = root;
1608 }
1609
1610 err = 0;
1611 fail:
1612 __free_pred_stack(&stack);
1613 return err;
1614 }
1615
1616 struct filter_list {
1617 struct list_head list;
1618 struct event_filter *filter;
1619 };
1620
1621 static int replace_system_preds(struct event_subsystem *system,
1622 struct filter_parse_state *ps,
1623 char *filter_string)
1624 {
1625 struct ftrace_event_call *call;
1626 struct filter_list *filter_item;
1627 struct filter_list *tmp;
1628 LIST_HEAD(filter_list);
1629 bool fail = true;
1630 int err;
1631
1632 list_for_each_entry(call, &ftrace_events, list) {
1633
1634 if (strcmp(call->class->system, system->name) != 0)
1635 continue;
1636
1637 /*
1638 * Try to see if the filter can be applied
1639 * (filter arg is ignored on dry_run)
1640 */
1641 err = replace_preds(call, NULL, ps, filter_string, true);
1642 if (err)
1643 call->flags |= TRACE_EVENT_FL_NO_SET_FILTER;
1644 else
1645 call->flags &= ~TRACE_EVENT_FL_NO_SET_FILTER;
1646 }
1647
1648 list_for_each_entry(call, &ftrace_events, list) {
1649 struct event_filter *filter;
1650
1651 if (strcmp(call->class->system, system->name) != 0)
1652 continue;
1653
1654 if (call->flags & TRACE_EVENT_FL_NO_SET_FILTER)
1655 continue;
1656
1657 filter_item = kzalloc(sizeof(*filter_item), GFP_KERNEL);
1658 if (!filter_item)
1659 goto fail_mem;
1660
1661 list_add_tail(&filter_item->list, &filter_list);
1662
1663 filter_item->filter = __alloc_filter();
1664 if (!filter_item->filter)
1665 goto fail_mem;
1666 filter = filter_item->filter;
1667
1668 /* Can only fail on no memory */
1669 err = replace_filter_string(filter, filter_string);
1670 if (err)
1671 goto fail_mem;
1672
1673 err = replace_preds(call, filter, ps, filter_string, false);
1674 if (err) {
1675 filter_disable(call);
1676 parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
1677 append_filter_err(ps, filter);
1678 } else
1679 call->flags |= TRACE_EVENT_FL_FILTERED;
1680 /*
1681 * Regardless of whether this returned an error, we still
1682 * replace the filter for the call.
1683 */
1684 filter = call->filter;
1685 rcu_assign_pointer(call->filter, filter_item->filter);
1686 filter_item->filter = filter;
1687
1688 fail = false;
1689 }
1690
1691 if (fail)
1692 goto fail;
1693
1694 /*
1695 * The calls can still be using the old filters.
1696 * Do a synchronize_sched() to ensure all calls are
1697 * done with them before we free them.
1698 */
1699 synchronize_sched();
1700 list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
1701 __free_filter(filter_item->filter);
1702 list_del(&filter_item->list);
1703 kfree(filter_item);
1704 }
1705 return 0;
1706 fail:
1707 /* No call succeeded */
1708 list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
1709 list_del(&filter_item->list);
1710 kfree(filter_item);
1711 }
1712 parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
1713 return -EINVAL;
1714 fail_mem:
1715 /* If any call succeeded, we still need to sync */
1716 if (!fail)
1717 synchronize_sched();
1718 list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
1719 __free_filter(filter_item->filter);
1720 list_del(&filter_item->list);
1721 kfree(filter_item);
1722 }
1723 return -ENOMEM;
1724 }
1725
1726 static int create_filter_start(char *filter_str, bool set_str,
1727 struct filter_parse_state **psp,
1728 struct event_filter **filterp)
1729 {
1730 struct event_filter *filter;
1731 struct filter_parse_state *ps = NULL;
1732 int err = 0;
1733
1734 WARN_ON_ONCE(*psp || *filterp);
1735
1736 /* allocate everything, and if any fails, free all and fail */
1737 filter = __alloc_filter();
1738 if (filter && set_str)
1739 err = replace_filter_string(filter, filter_str);
1740
1741 ps = kzalloc(sizeof(*ps), GFP_KERNEL);
1742
1743 if (!filter || !ps || err) {
1744 kfree(ps);
1745 __free_filter(filter);
1746 return -ENOMEM;
1747 }
1748
1749 /* we're committed to creating a new filter */
1750 *filterp = filter;
1751 *psp = ps;
1752
1753 parse_init(ps, filter_ops, filter_str);
1754 err = filter_parse(ps);
1755 if (err && set_str)
1756 append_filter_err(ps, filter);
1757 return err;
1758 }
1759
1760 static void create_filter_finish(struct filter_parse_state *ps)
1761 {
1762 if (ps) {
1763 filter_opstack_clear(ps);
1764 postfix_clear(ps);
1765 kfree(ps);
1766 }
1767 }
1768
1769 /**
1770 * create_filter - create a filter for a ftrace_event_call
1771 * @call: ftrace_event_call to create a filter for
1772 * @filter_str: filter string
1773 * @set_str: remember @filter_str and enable detailed error in filter
1774 * @filterp: out param for created filter (always updated on return)
1775 *
1776 * Creates a filter for @call with @filter_str. If @set_str is %true,
1777 * @filter_str is copied and recorded in the new filter.
1778 *
1779 * On success, returns 0 and *@filterp points to the new filter. On
1780 * failure, returns -errno and *@filterp may point to %NULL or to a new
1781 * filter. In the latter case, the returned filter contains error
1782 * information if @set_str is %true and the caller is responsible for
1783 * freeing it.
1784 */
1785 static int create_filter(struct ftrace_event_call *call,
1786 char *filter_str, bool set_str,
1787 struct event_filter **filterp)
1788 {
1789 struct event_filter *filter = NULL;
1790 struct filter_parse_state *ps = NULL;
1791 int err;
1792
1793 err = create_filter_start(filter_str, set_str, &ps, &filter);
1794 if (!err) {
1795 err = replace_preds(call, filter, ps, filter_str, false);
1796 if (err && set_str)
1797 append_filter_err(ps, filter);
1798 }
1799 create_filter_finish(ps);
1800
1801 *filterp = filter;
1802 return err;
1803 }
1804
1805 /**
1806 * create_system_filter - create a filter for an event_subsystem
1807 * @system: event_subsystem to create a filter for
1808 * @filter_str: filter string
1809 * @filterp: out param for created filter (always updated on return)
1810 *
1811 * Identical to create_filter() except that it creates a subsystem filter
1812 * and always remembers @filter_str.
1813 */
1814 static int create_system_filter(struct event_subsystem *system,
1815 char *filter_str, struct event_filter **filterp)
1816 {
1817 struct event_filter *filter = NULL;
1818 struct filter_parse_state *ps = NULL;
1819 int err;
1820
1821 err = create_filter_start(filter_str, true, &ps, &filter);
1822 if (!err) {
1823 err = replace_system_preds(system, ps, filter_str);
1824 if (!err) {
1825 /* System filters just show a default message */
1826 kfree(filter->filter_string);
1827 filter->filter_string = NULL;
1828 } else {
1829 append_filter_err(ps, filter);
1830 }
1831 }
1832 create_filter_finish(ps);
1833
1834 *filterp = filter;
1835 return err;
1836 }
1837
1838 int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
1839 {
1840 struct event_filter *filter;
1841 int err = 0;
1842
1843 mutex_lock(&event_mutex);
1844
1845 if (!strcmp(strstrip(filter_string), "0")) {
1846 filter_disable(call);
1847 filter = call->filter;
1848 if (!filter)
1849 goto out_unlock;
1850 RCU_INIT_POINTER(call->filter, NULL);
1851 /* Make sure the filter is not being used */
1852 synchronize_sched();
1853 __free_filter(filter);
1854 goto out_unlock;
1855 }
1856
1857 err = create_filter(call, filter_string, true, &filter);
1858
1859 /*
1860 * Always swap the call filter with the new filter
1861 * even if there was an error. If there was an error
1862 * in the filter, we disable the filter and show the error
1863 * string.
1864 */
1865 if (filter) {
1866 struct event_filter *tmp = call->filter;
1867
1868 if (!err)
1869 call->flags |= TRACE_EVENT_FL_FILTERED;
1870 else
1871 filter_disable(call);
1872
1873 rcu_assign_pointer(call->filter, filter);
1874
1875 if (tmp) {
1876 /* Make sure the call is done with the filter */
1877 synchronize_sched();
1878 __free_filter(tmp);
1879 }
1880 }
1881 out_unlock:
1882 mutex_unlock(&event_mutex);
1883
1884 return err;
1885 }
1886
1887 int apply_subsystem_event_filter(struct ftrace_subsystem_dir *dir,
1888 char *filter_string)
1889 {
1890 struct event_subsystem *system = dir->subsystem;
1891 struct event_filter *filter;
1892 int err = 0;
1893
1894 mutex_lock(&event_mutex);
1895
1896 /* Make sure the system still has events */
1897 if (!dir->nr_events) {
1898 err = -ENODEV;
1899 goto out_unlock;
1900 }
1901
1902 if (!strcmp(strstrip(filter_string), "0")) {
1903 filter_free_subsystem_preds(system);
1904 remove_filter_string(system->filter);
1905 filter = system->filter;
1906 system->filter = NULL;
1907 /* Ensure all filters are no longer used */
1908 synchronize_sched();
1909 filter_free_subsystem_filters(system);
1910 __free_filter(filter);
1911 goto out_unlock;
1912 }
1913
1914 err = create_system_filter(system, filter_string, &filter);
1915 if (filter) {
1916 /*
1917 * No event actually uses the system filter,
1918 * so we can free it without synchronize_sched().
1919 */
1920 __free_filter(system->filter);
1921 system->filter = filter;
1922 }
1923 out_unlock:
1924 mutex_unlock(&event_mutex);
1925
1926 return err;
1927 }
1928
1929 #ifdef CONFIG_PERF_EVENTS
1930
1931 void ftrace_profile_free_filter(struct perf_event *event)
1932 {
1933 struct event_filter *filter = event->filter;
1934
1935 event->filter = NULL;
1936 __free_filter(filter);
1937 }
1938
1939 struct function_filter_data {
1940 struct ftrace_ops *ops;
1941 int first_filter;
1942 int first_notrace;
1943 };
1944
1945 #ifdef CONFIG_FUNCTION_TRACER
1946 static char **
1947 ftrace_function_filter_re(char *buf, int len, int *count)
1948 {
1949 char *str, *sep, **re;
1950
1951 str = kstrndup(buf, len, GFP_KERNEL);
1952 if (!str)
1953 return NULL;
1954
1955 /*
1956 * The argv_split function takes white space
1957 * as a separator, so convert ',' into spaces.
1958 */
1959 while ((sep = strchr(str, ',')))
1960 *sep = ' ';
1961
1962 re = argv_split(GFP_KERNEL, str, count);
1963 kfree(str);
1964 return re;
1965 }
1966
1967 static int ftrace_function_set_regexp(struct ftrace_ops *ops, int filter,
1968 int reset, char *re, int len)
1969 {
1970 int ret;
1971
1972 if (filter)
1973 ret = ftrace_set_filter(ops, re, len, reset);
1974 else
1975 ret = ftrace_set_notrace(ops, re, len, reset);
1976
1977 return ret;
1978 }
1979
1980 static int __ftrace_function_set_filter(int filter, char *buf, int len,
1981 struct function_filter_data *data)
1982 {
1983 int i, re_cnt, ret = -EINVAL;
1984 int *reset;
1985 char **re;
1986
1987 reset = filter ? &data->first_filter : &data->first_notrace;
1988
1989 /*
1990 * The 'ip' field could have multiple filters set, separated
1991 * either by space or comma. We first split the filter and apply
1992 * all pieces separately.
1993 */
1994 re = ftrace_function_filter_re(buf, len, &re_cnt);
1995 if (!re)
1996 return -EINVAL;
1997
1998 for (i = 0; i < re_cnt; i++) {
1999 ret = ftrace_function_set_regexp(data->ops, filter, *reset,
2000 re[i], strlen(re[i]));
2001 if (ret)
2002 break;
2003
2004 if (*reset)
2005 *reset = 0;
2006 }
2007
2008 argv_free(re);
2009 return ret;
2010 }
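
/*
 * For example, a filter of ip == "func_a,func_b" (function names
 * purely illustrative) is split here into two patterns, both passed
 * to ftrace_set_filter(); an "ip !=" predicate is routed to
 * ftrace_set_notrace() instead (see the callback below).
 */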
2011
2012 static int ftrace_function_check_pred(struct filter_pred *pred, int leaf)
2013 {
2014 struct ftrace_event_field *field = pred->field;
2015
2016 if (leaf) {
2017 /*
2018 * Check the leaf predicate for function trace, verify:
2019 * - only '==' and '!=' are used
2020 * - the 'ip' field is used
2021 */
2022 if ((pred->op != OP_EQ) && (pred->op != OP_NE))
2023 return -EINVAL;
2024
2025 if (strcmp(field->name, "ip"))
2026 return -EINVAL;
2027 } else {
2028 /*
2029 * Check the non-leaf predicate for function trace, verify:
2030 * - only '||' is used
2031 */
2032 if (pred->op != OP_OR)
2033 return -EINVAL;
2034 }
2035
2036 return 0;
2037 }
2038
2039 static int ftrace_function_set_filter_cb(enum move_type move,
2040 struct filter_pred *pred,
2041 int *err, void *data)
2042 {
2043 /* Checking the node is valid for function trace. */
2044 if ((move != MOVE_DOWN) ||
2045 (pred->left != FILTER_PRED_INVALID)) {
2046 *err = ftrace_function_check_pred(pred, 0);
2047 } else {
2048 *err = ftrace_function_check_pred(pred, 1);
2049 if (*err)
2050 return WALK_PRED_ABORT;
2051
2052 *err = __ftrace_function_set_filter(pred->op == OP_EQ,
2053 pred->regex.pattern,
2054 pred->regex.len,
2055 data);
2056 }
2057
2058 return (*err) ? WALK_PRED_ABORT : WALK_PRED_DEFAULT;
2059 }
2060
2061 static int ftrace_function_set_filter(struct perf_event *event,
2062 struct event_filter *filter)
2063 {
2064 struct function_filter_data data = {
2065 .first_filter = 1,
2066 .first_notrace = 1,
2067 .ops = &event->ftrace_ops,
2068 };
2069
2070 return walk_pred_tree(filter->preds, filter->root,
2071 ftrace_function_set_filter_cb, &data);
2072 }
2073 #else
2074 static int ftrace_function_set_filter(struct perf_event *event,
2075 struct event_filter *filter)
2076 {
2077 return -ENODEV;
2078 }
2079 #endif /* CONFIG_FUNCTION_TRACER */
2080
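/*
 * Overview (sketch): ftrace_profile_set_filter() is typically reached
 * via the PERF_EVENT_IOC_SET_FILTER ioctl on a tracepoint perf event.
 * The filter string is parsed with create_filter(); for function
 * events the resulting predicate tree is translated into ftrace_ops
 * filter/notrace patterns and then freed, for all other events the
 * filter is attached to the perf event and evaluated for each record.
 */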
2081 int ftrace_profile_set_filter(struct perf_event *event, int event_id,
2082 char *filter_str)
2083 {
2084 int err;
2085 struct event_filter *filter;
2086 struct ftrace_event_call *call;
2087
2088 mutex_lock(&event_mutex);
2089
2090 call = event->tp_event;
2091
2092 err = -EINVAL;
2093 if (!call)
2094 goto out_unlock;
2095
2096 err = -EEXIST;
2097 if (event->filter)
2098 goto out_unlock;
2099
2100 err = create_filter(call, filter_str, false, &filter);
2101 if (err)
2102 goto free_filter;
2103
2104 if (ftrace_event_is_function(call))
2105 err = ftrace_function_set_filter(event, filter);
2106 else
2107 event->filter = filter;
2108
2109 free_filter:
2110 if (err || ftrace_event_is_function(call))
2111 __free_filter(filter);
2112
2113 out_unlock:
2114 mutex_unlock(&event_mutex);
2115
2116 return err;
2117 }
2118
2119 #endif /* CONFIG_PERF_EVENTS */
2120
2121 #ifdef CONFIG_FTRACE_STARTUP_TEST
2122
2123 #include <linux/types.h>
2124 #include <linux/tracepoint.h>
2125
2126 #define CREATE_TRACE_POINTS
2127 #include "trace_events_filter_test.h"
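/*
 * Defining CREATE_TRACE_POINTS before including the header above
 * instantiates the ftrace_test_filter test event that the sample
 * records below are matched against.
 */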
2128
2129 #define DATA_REC(m, va, vb, vc, vd, ve, vf, vg, vh, nvisit) \
2130 { \
2131 .filter = FILTER, \
2132 .rec = { .a = va, .b = vb, .c = vc, .d = vd, \
2133 .e = ve, .f = vf, .g = vg, .h = vh }, \
2134 .match = m, \
2135 .not_visited = nvisit, \
2136 }
2137 #define YES 1
2138 #define NO 0
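/*
 * Each entry below describes one self test: 'filter' is the expression
 * under test, 'rec' is the synthetic record it is matched against,
 * 'match' is the result expected from filter_match_preds(), and
 * 'not_visited' lists the fields whose leaf predicates are expected to
 * be skipped by short-circuit evaluation.
 */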
2139
2140 static struct test_filter_data_t {
2141 char *filter;
2142 struct ftrace_raw_ftrace_test_filter rec;
2143 int match;
2144 char *not_visited;
2145 } test_filter_data[] = {
2146 #define FILTER "a == 1 && b == 1 && c == 1 && d == 1 && " \
2147 "e == 1 && f == 1 && g == 1 && h == 1"
2148 DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, ""),
2149 DATA_REC(NO, 0, 1, 1, 1, 1, 1, 1, 1, "bcdefgh"),
2150 DATA_REC(NO, 1, 1, 1, 1, 1, 1, 1, 0, ""),
2151 #undef FILTER
2152 #define FILTER "a == 1 || b == 1 || c == 1 || d == 1 || " \
2153 "e == 1 || f == 1 || g == 1 || h == 1"
2154 DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 0, ""),
2155 DATA_REC(YES, 0, 0, 0, 0, 0, 0, 0, 1, ""),
2156 DATA_REC(YES, 1, 0, 0, 0, 0, 0, 0, 0, "bcdefgh"),
2157 #undef FILTER
2158 #define FILTER "(a == 1 || b == 1) && (c == 1 || d == 1) && " \
2159 "(e == 1 || f == 1) && (g == 1 || h == 1)"
2160 DATA_REC(NO, 0, 0, 1, 1, 1, 1, 1, 1, "dfh"),
2161 DATA_REC(YES, 0, 1, 0, 1, 0, 1, 0, 1, ""),
2162 DATA_REC(YES, 1, 0, 1, 0, 0, 1, 0, 1, "bd"),
2163 DATA_REC(NO, 1, 0, 1, 0, 0, 1, 0, 0, "bd"),
2164 #undef FILTER
2165 #define FILTER "(a == 1 && b == 1) || (c == 1 && d == 1) || " \
2166 "(e == 1 && f == 1) || (g == 1 && h == 1)"
2167 DATA_REC(YES, 1, 0, 1, 1, 1, 1, 1, 1, "efgh"),
2168 DATA_REC(YES, 0, 0, 0, 0, 0, 0, 1, 1, ""),
2169 DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 1, ""),
2170 #undef FILTER
2171 #define FILTER "(a == 1 && b == 1) && (c == 1 && d == 1) && " \
2172 "(e == 1 && f == 1) || (g == 1 && h == 1)"
2173 DATA_REC(YES, 1, 1, 1, 1, 1, 1, 0, 0, "gh"),
2174 DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 1, ""),
2175 DATA_REC(YES, 1, 1, 1, 1, 1, 0, 1, 1, ""),
2176 #undef FILTER
2177 #define FILTER "((a == 1 || b == 1) || (c == 1 || d == 1) || " \
2178 "(e == 1 || f == 1)) && (g == 1 || h == 1)"
2179 DATA_REC(YES, 1, 1, 1, 1, 1, 1, 0, 1, "bcdef"),
2180 DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 0, ""),
2181 DATA_REC(YES, 1, 1, 1, 1, 1, 0, 1, 1, "h"),
2182 #undef FILTER
2183 #define FILTER "((((((((a == 1) && (b == 1)) || (c == 1)) && (d == 1)) || " \
2184 "(e == 1)) && (f == 1)) || (g == 1)) && (h == 1))"
2185 DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, "ceg"),
2186 DATA_REC(NO, 0, 1, 0, 1, 0, 1, 0, 1, ""),
2187 DATA_REC(NO, 1, 0, 1, 0, 1, 0, 1, 0, ""),
2188 #undef FILTER
2189 #define FILTER "((((((((a == 1) || (b == 1)) && (c == 1)) || (d == 1)) && " \
2190 "(e == 1)) || (f == 1)) && (g == 1)) || (h == 1))"
2191 DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, "bdfh"),
2192 DATA_REC(YES, 0, 1, 0, 1, 0, 1, 0, 1, ""),
2193 DATA_REC(YES, 1, 0, 1, 0, 1, 0, 1, 0, "bdfh"),
2194 };
2195
2196 #undef DATA_REC
2197 #undef FILTER
2198 #undef YES
2199 #undef NO
2200
2201 #define DATA_CNT (sizeof(test_filter_data)/sizeof(struct test_filter_data_t))
2202
2203 static int test_pred_visited;
2204
2205 static int test_pred_visited_fn(struct filter_pred *pred, void *event)
2206 {
2207 struct ftrace_event_field *field = pred->field;
2208
2209 test_pred_visited = 1;
2210 printk(KERN_INFO "\npred visited %s\n", field->name);
2211 return 1;
2212 }
2213
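/*
 * Instrument the predicate tree before matching: every leaf whose
 * field name appears in the 'not_visited' string gets its match
 * function replaced with test_pred_visited_fn().  If any of those
 * predicates is still called during filter_match_preds(), the
 * short-circuit logic is broken and the test below reports a failure.
 */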
2214 static int test_walk_pred_cb(enum move_type move, struct filter_pred *pred,
2215 int *err, void *data)
2216 {
2217 char *fields = data;
2218
2219 if ((move == MOVE_DOWN) &&
2220 (pred->left == FILTER_PRED_INVALID)) {
2221 struct ftrace_event_field *field = pred->field;
2222
2223 if (!field) {
2224 WARN(1, "all leaves should have a field defined");
2225 return WALK_PRED_DEFAULT;
2226 }
2227 if (!strchr(fields, *field->name))
2228 return WALK_PRED_DEFAULT;
2229
2230 WARN_ON(!pred->fn);
2231 pred->fn = test_pred_visited_fn;
2232 }
2233 return WALK_PRED_DEFAULT;
2234 }
2235
2236 static __init int ftrace_test_event_filter(void)
2237 {
2238 int i;
2239
2240 printk(KERN_INFO "Testing ftrace filter: ");
2241
2242 for (i = 0; i < DATA_CNT; i++) {
2243 struct event_filter *filter = NULL;
2244 struct test_filter_data_t *d = &test_filter_data[i];
2245 int err;
2246
2247 err = create_filter(&event_ftrace_test_filter, d->filter,
2248 false, &filter);
2249 if (err) {
2250 printk(KERN_INFO
2251 "Failed to get filter for '%s', err %d\n",
2252 d->filter, err);
2253 __free_filter(filter);
2254 break;
2255 }
2256
2257 /*
2258 * Disabling preemption is not really needed for the self
2259 * tests, but the RCU dereference will complain without it.
2260 */
2261 preempt_disable();
2262 if (*d->not_visited)
2263 walk_pred_tree(filter->preds, filter->root,
2264 test_walk_pred_cb,
2265 d->not_visited);
2266
2267 test_pred_visited = 0;
2268 err = filter_match_preds(filter, &d->rec);
2269 preempt_enable();
2270
2271 __free_filter(filter);
2272
2273 if (test_pred_visited) {
2274 printk(KERN_INFO
2275 "Failed, unwanted pred visited for filter %s\n",
2276 d->filter);
2277 break;
2278 }
2279
2280 if (err != d->match) {
2281 printk(KERN_INFO
2282 "Failed to match filter '%s', expected %d\n",
2283 d->filter, d->match);
2284 break;
2285 }
2286 }
2287
2288 if (i == DATA_CNT)
2289 printk(KERN_CONT "OK\n");
2290
2291 return 0;
2292 }
2293
2294 late_initcall(ftrace_test_event_filter);
2295
2296 #endif /* CONFIG_FTRACE_STARTUP_TEST */
2297