1  // SPDX-License-Identifier: GPL-2.0
2  /*
3   * trace_events_trigger - trace event triggers
4   *
5   * Copyright (C) 2013 Tom Zanussi <tom.zanussi@linux.intel.com>
6   */
7  
8  #include <linux/security.h>
9  #include <linux/module.h>
10  #include <linux/ctype.h>
11  #include <linux/mutex.h>
12  #include <linux/slab.h>
13  #include <linux/rculist.h>
14  
15  #include "trace.h"
16  
17  static LIST_HEAD(trigger_commands);
18  static DEFINE_MUTEX(trigger_cmd_mutex);
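/*
 * trigger_commands is the global list of registered trigger commands
 * (traceon, traceoff, snapshot, stacktrace, enable_event/disable_event,
 * hist, ...), protected by trigger_cmd_mutex.  Per-event trigger
 * instances live on each trace_event_file's 'triggers' list instead.
 */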
19  
20  void trigger_data_free(struct event_trigger_data *data)
21  {
22  	if (data->cmd_ops->set_filter)
23  		data->cmd_ops->set_filter(NULL, data, NULL);
24  
25  	/* make sure current triggers exit before free */
26  	tracepoint_synchronize_unregister();
27  
28  	kfree(data);
29  }
30  
31  /**
32   * event_triggers_call - Call triggers associated with a trace event
33   * @file: The trace_event_file associated with the event
34   * @rec: The trace entry for the event, NULL for unconditional invocation
35   *
36   * For each trigger associated with an event, invoke the trigger
37   * function registered with the associated trigger command.  If rec is
38   * non-NULL, it means that the trigger requires further processing and
39   * shouldn't be unconditionally invoked.  If rec is non-NULL and the
40   * trigger has a filter associated with it, rec will be checked against
41   * the filter, and if the record matches, the trigger will be invoked.
42   * If the trigger is a 'post_trigger', meaning it shouldn't be invoked
43   * in any case until the current event is written, the trigger
44   * function isn't invoked but the bit associated with the deferred
45   * trigger is set in the return value.
46   *
47   * Returns an enum event_trigger_type value containing a set bit for
48   * any trigger that should be deferred, ETT_NONE if nothing to defer.
49   *
50   * Called from tracepoint handlers (with rcu_read_lock_sched() held).
51   *
52   * Return: an enum event_trigger_type value containing a set bit for
53   * any trigger that should be deferred, ETT_NONE if nothing to defer.
54   */
55  enum event_trigger_type
56  event_triggers_call(struct trace_event_file *file, void *rec,
57  		    struct ring_buffer_event *event)
58  {
59  	struct event_trigger_data *data;
60  	enum event_trigger_type tt = ETT_NONE;
61  	struct event_filter *filter;
62  
63  	if (list_empty(&file->triggers))
64  		return tt;
65  
66  	list_for_each_entry_rcu(data, &file->triggers, list) {
67  		if (data->paused)
68  			continue;
69  		if (!rec) {
70  			data->ops->func(data, rec, event);
71  			continue;
72  		}
73  		filter = rcu_dereference_sched(data->filter);
74  		if (filter && !filter_match_preds(filter, rec))
75  			continue;
76  		if (event_command_post_trigger(data->cmd_ops)) {
77  			tt |= data->cmd_ops->trigger_type;
78  			continue;
79  		}
80  		data->ops->func(data, rec, event);
81  	}
82  	return tt;
83  }
84  EXPORT_SYMBOL_GPL(event_triggers_call);
85  
86  /**
87   * event_triggers_post_call - Call 'post_triggers' for a trace event
88   * @file: The trace_event_file associated with the event
89   * @tt: enum event_trigger_type containing a set bit for each trigger to invoke
90   *
91   * For each trigger associated with an event, invoke the trigger
92   * function registered with the associated trigger command, if the
93   * corresponding bit is set in the tt enum passed into this function.
94   * See event_triggers_call() for details on how those bits are set.
95   *
96   * Called from tracepoint handlers (with rcu_read_lock_sched() held).
97   */
98  void
99  event_triggers_post_call(struct trace_event_file *file,
100  			 enum event_trigger_type tt)
101  {
102  	struct event_trigger_data *data;
103  
104  	list_for_each_entry_rcu(data, &file->triggers, list) {
105  		if (data->paused)
106  			continue;
107  		if (data->cmd_ops->trigger_type & tt)
108  			data->ops->func(data, NULL, NULL);
109  	}
110  }
111  EXPORT_SYMBOL_GPL(event_triggers_post_call);
112  
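/*
 * Sentinel passed from trigger_start() to trigger_show() when an event has
 * no triggers set, telling ->show() to print the list of available trigger
 * commands instead of the (empty) set of active triggers.
 */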
113  #define SHOW_AVAILABLE_TRIGGERS	(void *)(1UL)
114  
115  static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
116  {
117  	struct trace_event_file *event_file = event_file_data(m->private);
118  
119  	if (t == SHOW_AVAILABLE_TRIGGERS) {
120  		(*pos)++;
121  		return NULL;
122  	}
123  	return seq_list_next(t, &event_file->triggers, pos);
124  }
125  
126  static void *trigger_start(struct seq_file *m, loff_t *pos)
127  {
128  	struct trace_event_file *event_file;
129  
130  	/* ->stop() is called even if ->start() fails */
131  	mutex_lock(&event_mutex);
132  	event_file = event_file_data(m->private);
133  	if (unlikely(!event_file))
134  		return ERR_PTR(-ENODEV);
135  
136  	if (list_empty(&event_file->triggers))
137  		return *pos == 0 ? SHOW_AVAILABLE_TRIGGERS : NULL;
138  
139  	return seq_list_start(&event_file->triggers, *pos);
140  }
141  
142  static void trigger_stop(struct seq_file *m, void *t)
143  {
144  	mutex_unlock(&event_mutex);
145  }
146  
147  static int trigger_show(struct seq_file *m, void *v)
148  {
149  	struct event_trigger_data *data;
150  	struct event_command *p;
151  
152  	if (v == SHOW_AVAILABLE_TRIGGERS) {
153  		seq_puts(m, "# Available triggers:\n");
154  		seq_putc(m, '#');
155  		mutex_lock(&trigger_cmd_mutex);
156  		list_for_each_entry_reverse(p, &trigger_commands, list)
157  			seq_printf(m, " %s", p->name);
158  		seq_putc(m, '\n');
159  		mutex_unlock(&trigger_cmd_mutex);
160  		return 0;
161  	}
162  
163  	data = list_entry(v, struct event_trigger_data, list);
164  	data->ops->print(m, data->ops, data);
165  
166  	return 0;
167  }
168  
169  static const struct seq_operations event_triggers_seq_ops = {
170  	.start = trigger_start,
171  	.next = trigger_next,
172  	.stop = trigger_stop,
173  	.show = trigger_show,
174  };
175  
176  static int event_trigger_regex_open(struct inode *inode, struct file *file)
177  {
178  	int ret;
179  
180  	ret = security_locked_down(LOCKDOWN_TRACEFS);
181  	if (ret)
182  		return ret;
183  
184  	mutex_lock(&event_mutex);
185  
186  	if (unlikely(!event_file_data(file))) {
187  		mutex_unlock(&event_mutex);
188  		return -ENODEV;
189  	}
190  
191  	if ((file->f_mode & FMODE_WRITE) &&
192  	    (file->f_flags & O_TRUNC)) {
193  		struct trace_event_file *event_file;
194  		struct event_command *p;
195  
196  		event_file = event_file_data(file);
197  
198  		list_for_each_entry(p, &trigger_commands, list) {
199  			if (p->unreg_all)
200  				p->unreg_all(event_file);
201  		}
202  	}
203  
204  	if (file->f_mode & FMODE_READ) {
205  		ret = seq_open(file, &event_triggers_seq_ops);
206  		if (!ret) {
207  			struct seq_file *m = file->private_data;
208  			m->private = file;
209  		}
210  	}
211  
212  	mutex_unlock(&event_mutex);
213  
214  	return ret;
215  }
216  
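/*
 * Parse one trigger command string written to an event's 'trigger' file.
 * Typical inputs (see Documentation/trace/events.rst) look like "traceon",
 * "stacktrace:5" or "snapshot if common_pid != 0": a command name, an
 * optional ":count", and an optional "if <filter>" clause.  A leading '!'
 * (e.g. "!traceon") requests removal of an existing trigger.
 */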
217  int trigger_process_regex(struct trace_event_file *file, char *buff)
218  {
219  	char *command, *next;
220  	struct event_command *p;
221  	int ret = -EINVAL;
222  
223  	next = buff = skip_spaces(buff);
224  	command = strsep(&next, ": \t");
225  	if (next) {
226  		next = skip_spaces(next);
227  		if (!*next)
228  			next = NULL;
229  	}
230  	command = (command[0] != '!') ? command : command + 1;
231  
232  	mutex_lock(&trigger_cmd_mutex);
233  	list_for_each_entry(p, &trigger_commands, list) {
234  		if (strcmp(p->name, command) == 0) {
235  			ret = p->func(p, file, buff, command, next);
236  			goto out_unlock;
237  		}
238  	}
239   out_unlock:
240  	mutex_unlock(&trigger_cmd_mutex);
241  
242  	return ret;
243  }
244  
245  static ssize_t event_trigger_regex_write(struct file *file,
246  					 const char __user *ubuf,
247  					 size_t cnt, loff_t *ppos)
248  {
249  	struct trace_event_file *event_file;
250  	ssize_t ret;
251  	char *buf;
252  
253  	if (!cnt)
254  		return 0;
255  
256  	if (cnt >= PAGE_SIZE)
257  		return -EINVAL;
258  
259  	buf = memdup_user_nul(ubuf, cnt);
260  	if (IS_ERR(buf))
261  		return PTR_ERR(buf);
262  
263  	strim(buf);
264  
265  	mutex_lock(&event_mutex);
266  	event_file = event_file_data(file);
267  	if (unlikely(!event_file)) {
268  		mutex_unlock(&event_mutex);
269  		kfree(buf);
270  		return -ENODEV;
271  	}
272  	ret = trigger_process_regex(event_file, buf);
273  	mutex_unlock(&event_mutex);
274  
275  	kfree(buf);
276  	if (ret < 0)
277  		goto out;
278  
279  	*ppos += cnt;
280  	ret = cnt;
281   out:
282  	return ret;
283  }
284  
285  static int event_trigger_regex_release(struct inode *inode, struct file *file)
286  {
287  	mutex_lock(&event_mutex);
288  
289  	if (file->f_mode & FMODE_READ)
290  		seq_release(inode, file);
291  
292  	mutex_unlock(&event_mutex);
293  
294  	return 0;
295  }
296  
297  static ssize_t
298  event_trigger_write(struct file *filp, const char __user *ubuf,
299  		    size_t cnt, loff_t *ppos)
300  {
301  	return event_trigger_regex_write(filp, ubuf, cnt, ppos);
302  }
303  
304  static int
305  event_trigger_open(struct inode *inode, struct file *filp)
306  {
307  	/* Checks for tracefs lockdown */
308  	return event_trigger_regex_open(inode, filp);
309  }
310  
311  static int
312  event_trigger_release(struct inode *inode, struct file *file)
313  {
314  	return event_trigger_regex_release(inode, file);
315  }
316  
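/*
 * fops backing each event's per-event 'trigger' file in tracefs
 * (events/<system>/<event>/trigger): writes are parsed by
 * event_trigger_regex_write()/trigger_process_regex(), reads list the
 * active triggers via the seq_file operations above.
 */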
317  const struct file_operations event_trigger_fops = {
318  	.open = event_trigger_open,
319  	.read = seq_read,
320  	.write = event_trigger_write,
321  	.llseek = tracing_lseek,
322  	.release = event_trigger_release,
323  };
324  
325  /*
326   * Currently we only register event commands from __init, so mark this
327   * __init too.
328   */
329  __init int register_event_command(struct event_command *cmd)
330  {
331  	struct event_command *p;
332  	int ret = 0;
333  
334  	mutex_lock(&trigger_cmd_mutex);
335  	list_for_each_entry(p, &trigger_commands, list) {
336  		if (strcmp(cmd->name, p->name) == 0) {
337  			ret = -EBUSY;
338  			goto out_unlock;
339  		}
340  	}
341  	list_add(&cmd->list, &trigger_commands);
342   out_unlock:
343  	mutex_unlock(&trigger_cmd_mutex);
344  
345  	return ret;
346  }
347  
348  /*
349   * Currently we only unregister event commands from __init, so mark
350   * this __init too.
351   */
352  __init int unregister_event_command(struct event_command *cmd)
353  {
354  	struct event_command *p, *n;
355  	int ret = -ENODEV;
356  
357  	mutex_lock(&trigger_cmd_mutex);
358  	list_for_each_entry_safe(p, n, &trigger_commands, list) {
359  		if (strcmp(cmd->name, p->name) == 0) {
360  			ret = 0;
361  			list_del_init(&p->list);
362  			goto out_unlock;
363  		}
364  	}
365   out_unlock:
366  	mutex_unlock(&trigger_cmd_mutex);
367  
368  	return ret;
369  }
370  
371  /**
372   * event_trigger_print - Generic event_trigger_ops @print implementation
373   * @name: The name of the event trigger
374   * @m: The seq_file being printed to
375   * @data: Trigger-specific data
376   * @filter_str: filter_str to print, if present
377   *
378   * Common implementation for event triggers to print themselves.
379   *
380   * Usually wrapped by a function that simply sets the @name of the
381   * trigger command and then invokes this.
382   *
383   * Return: 0 on success, errno otherwise
384   */
385  static int
386  event_trigger_print(const char *name, struct seq_file *m,
387  		    void *data, char *filter_str)
388  {
389  	long count = (long)data;
390  
391  	seq_puts(m, name);
392  
393  	if (count == -1)
394  		seq_puts(m, ":unlimited");
395  	else
396  		seq_printf(m, ":count=%ld", count);
397  
398  	if (filter_str)
399  		seq_printf(m, " if %s\n", filter_str);
400  	else
401  		seq_putc(m, '\n');
402  
403  	return 0;
404  }
405  
406  /**
407   * event_trigger_init - Generic event_trigger_ops @init implementation
408   * @ops: The trigger ops associated with the trigger
409   * @data: Trigger-specific data
410   *
411   * Common implementation of event trigger initialization.
412   *
413   * Usually used directly as the @init method in event trigger
414   * implementations.
415   *
416   * Return: 0 on success, errno otherwise
417   */
418  int event_trigger_init(struct event_trigger_ops *ops,
419  		       struct event_trigger_data *data)
420  {
421  	data->ref++;
422  	return 0;
423  }
424  
425  /**
426   * event_trigger_free - Generic event_trigger_ops @free implementation
427   * @ops: The trigger ops associated with the trigger
428   * @data: Trigger-specific data
429   *
430   * Common implementation of event trigger de-initialization.
431   *
432   * Usually used directly as the @free method in event trigger
433   * implementations.
434   */
435  static void
436  event_trigger_free(struct event_trigger_ops *ops,
437  		   struct event_trigger_data *data)
438  {
439  	if (WARN_ON_ONCE(data->ref <= 0))
440  		return;
441  
442  	data->ref--;
443  	if (!data->ref)
444  		trigger_data_free(data);
445  }
446  
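/*
 * Attach/detach trigger handling for an event.  file->tm_ref counts the
 * triggers attached to the event: only the first attach sets
 * EVENT_FILE_FL_TRIGGER_MODE_BIT and (soft-)enables the event so the
 * triggers get called, and only the last detach undoes that.
 */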
447  int trace_event_trigger_enable_disable(struct trace_event_file *file,
448  				       int trigger_enable)
449  {
450  	int ret = 0;
451  
452  	if (trigger_enable) {
453  		if (atomic_inc_return(&file->tm_ref) > 1)
454  			return ret;
455  		set_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
456  		ret = trace_event_enable_disable(file, 1, 1);
457  	} else {
458  		if (atomic_dec_return(&file->tm_ref) > 0)
459  			return ret;
460  		clear_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
461  		ret = trace_event_enable_disable(file, 0, 1);
462  	}
463  
464  	return ret;
465  }
466  
467  /**
468   * clear_event_triggers - Clear all triggers associated with a trace array
469   * @tr: The trace array to clear
470   *
471   * For each trigger, the triggering event has its tm_ref decremented
472   * via trace_event_trigger_enable_disable(), and any associated event
473   * (in the case of enable/disable_event triggers) will have its sm_ref
474   * decremented via free()->trace_event_enable_disable().  That
475   * combination effectively reverses the soft-mode/trigger state added
476   * by trigger registration.
477   *
478   * Must be called with event_mutex held.
479   */
480  void
481  clear_event_triggers(struct trace_array *tr)
482  {
483  	struct trace_event_file *file;
484  
485  	list_for_each_entry(file, &tr->events, list) {
486  		struct event_trigger_data *data, *n;
487  		list_for_each_entry_safe(data, n, &file->triggers, list) {
488  			trace_event_trigger_enable_disable(file, 0);
489  			list_del_rcu(&data->list);
490  			if (data->ops->free)
491  				data->ops->free(data->ops, data);
492  		}
493  	}
494  }
495  
496  /**
497   * update_cond_flag - Set or reset the TRIGGER_COND bit
498   * @file: The trace_event_file associated with the event
499   *
500   * If an event has triggers and any of those triggers has a filter or
501   * a post_trigger, trigger invocation needs to be deferred until after
502   * the current event has logged its data, and the event should have
503   * its TRIGGER_COND bit set, otherwise the TRIGGER_COND bit should be
504   * cleared.
505   */
506  void update_cond_flag(struct trace_event_file *file)
507  {
508  	struct event_trigger_data *data;
509  	bool set_cond = false;
510  
511  	lockdep_assert_held(&event_mutex);
512  
513  	list_for_each_entry(data, &file->triggers, list) {
514  		if (data->filter || event_command_post_trigger(data->cmd_ops) ||
515  		    event_command_needs_rec(data->cmd_ops)) {
516  			set_cond = true;
517  			break;
518  		}
519  	}
520  
521  	if (set_cond)
522  		set_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
523  	else
524  		clear_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
525  }
526  
527  /**
528   * register_trigger - Generic event_command @reg implementation
529   * @glob: The raw string used to register the trigger
530   * @ops: The trigger ops associated with the trigger
531   * @data: Trigger-specific data to associate with the trigger
532   * @file: The trace_event_file associated with the event
533   *
534   * Common implementation for event trigger registration.
535   *
536   * Usually used directly as the @reg method in event command
537   * implementations.
538   *
539   * Return: 0 on success, errno otherwise
540   */
541  static int register_trigger(char *glob, struct event_trigger_ops *ops,
542  			    struct event_trigger_data *data,
543  			    struct trace_event_file *file)
544  {
545  	struct event_trigger_data *test;
546  	int ret = 0;
547  
548  	lockdep_assert_held(&event_mutex);
549  
550  	list_for_each_entry(test, &file->triggers, list) {
551  		if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) {
552  			ret = -EEXIST;
553  			goto out;
554  		}
555  	}
556  
557  	if (data->ops->init) {
558  		ret = data->ops->init(data->ops, data);
559  		if (ret < 0)
560  			goto out;
561  	}
562  
563  	list_add_rcu(&data->list, &file->triggers);
564  	ret++;
565  
566  	update_cond_flag(file);
567  	if (trace_event_trigger_enable_disable(file, 1) < 0) {
568  		list_del_rcu(&data->list);
569  		update_cond_flag(file);
570  		ret--;
571  	}
572  out:
573  	return ret;
574  }
575  
576  /**
577   * unregister_trigger - Generic event_command @unreg implementation
578   * @glob: The raw string used to register the trigger
579   * @ops: The trigger ops associated with the trigger
580   * @test: Trigger-specific data used to find the trigger to remove
581   * @file: The trace_event_file associated with the event
582   *
583   * Common implementation for event trigger unregistration.
584   *
585   * Usually used directly as the @unreg method in event command
586   * implementations.
587   */
588  static void unregister_trigger(char *glob, struct event_trigger_ops *ops,
589  			       struct event_trigger_data *test,
590  			       struct trace_event_file *file)
591  {
592  	struct event_trigger_data *data;
593  	bool unregistered = false;
594  
595  	lockdep_assert_held(&event_mutex);
596  
597  	list_for_each_entry(data, &file->triggers, list) {
598  		if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
599  			unregistered = true;
600  			list_del_rcu(&data->list);
601  			trace_event_trigger_enable_disable(file, 0);
602  			update_cond_flag(file);
603  			break;
604  		}
605  	}
606  
607  	if (unregistered && data->ops->free)
608  		data->ops->free(data->ops, data);
609  }
610  
611  /**
612   * event_trigger_callback - Generic event_command @func implementation
613   * @cmd_ops: The command ops, used for trigger registration
614   * @file: The trace_event_file associated with the event
615   * @glob: The raw string used to register the trigger
616   * @cmd: The cmd portion of the string used to register the trigger
617   * @param: The params portion of the string used to register the trigger
618   *
619   * Common implementation for event command parsing and trigger
620   * instantiation.
621   *
622   * Usually used directly as the @func method in event command
623   * implementations.
624   *
625   * Return: 0 on success, errno otherwise
626   */
627  static int
628  event_trigger_callback(struct event_command *cmd_ops,
629  		       struct trace_event_file *file,
630  		       char *glob, char *cmd, char *param)
631  {
632  	struct event_trigger_data *trigger_data;
633  	struct event_trigger_ops *trigger_ops;
634  	char *trigger = NULL;
635  	char *number;
636  	int ret;
637  
638  	/* separate the trigger from the filter (t:n [if filter]) */
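	/*
	 * e.g. for "traceon:5 if common_pid != 0" written to the trigger
	 * file, 'trigger' becomes "5" and 'param' "if common_pid != 0".
	 */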
639  	if (param && isdigit(param[0])) {
640  		trigger = strsep(&param, " \t");
641  		if (param) {
642  			param = skip_spaces(param);
643  			if (!*param)
644  				param = NULL;
645  		}
646  	}
647  
648  	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
649  
650  	ret = -ENOMEM;
651  	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
652  	if (!trigger_data)
653  		goto out;
654  
655  	trigger_data->count = -1;
656  	trigger_data->ops = trigger_ops;
657  	trigger_data->cmd_ops = cmd_ops;
658  	trigger_data->private_data = file;
659  	INIT_LIST_HEAD(&trigger_data->list);
660  	INIT_LIST_HEAD(&trigger_data->named_list);
661  
662  	if (glob[0] == '!') {
663  		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
664  		kfree(trigger_data);
665  		ret = 0;
666  		goto out;
667  	}
668  
669  	if (trigger) {
670  		number = strsep(&trigger, ":");
671  
672  		ret = -EINVAL;
673  		if (!strlen(number))
674  			goto out_free;
675  
676  		/*
677  		 * We use the callback data field (which is a pointer)
678  		 * as our counter.
679  		 */
680  		ret = kstrtoul(number, 0, &trigger_data->count);
681  		if (ret)
682  			goto out_free;
683  	}
684  
685  	if (!param) /* if param is non-empty, it's supposed to be a filter */
686  		goto out_reg;
687  
688  	if (!cmd_ops->set_filter)
689  		goto out_reg;
690  
691  	ret = cmd_ops->set_filter(param, trigger_data, file);
692  	if (ret < 0)
693  		goto out_free;
694  
695   out_reg:
696  	/* Up the trigger_data count to make sure reg doesn't free it on failure */
697  	event_trigger_init(trigger_ops, trigger_data);
698  	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
699  	/*
700  	 * The above returns on success the number of triggers registered,
701  	 * but if it registered none it returns zero.  Consider
702  	 * registering no triggers a failure too.
703  	 */
704  	if (!ret) {
705  		cmd_ops->unreg(glob, trigger_ops, trigger_data, file);
706  		ret = -ENOENT;
707  	} else if (ret > 0)
708  		ret = 0;
709  
710  	/* Down the counter of trigger_data or free it if not used anymore */
711  	event_trigger_free(trigger_ops, trigger_data);
712   out:
713  	return ret;
714  
715   out_free:
716  	if (cmd_ops->set_filter)
717  		cmd_ops->set_filter(NULL, trigger_data, NULL);
718  	kfree(trigger_data);
719  	goto out;
720  }
721  
722  /**
723   * set_trigger_filter - Generic event_command @set_filter implementation
724   * @filter_str: The filter string for the trigger, NULL to remove filter
725   * @trigger_data: Trigger-specific data
726   * @file: The trace_event_file associated with the event
727   *
728   * Common implementation for event command filter parsing and filter
729   * instantiation.
730   *
731   * Usually used directly as the @set_filter method in event command
732   * implementations.
733   *
734   * Also used to remove a filter (if filter_str = NULL).
735   *
736   * Return: 0 on success, errno otherwise
737   */
738  int set_trigger_filter(char *filter_str,
739  		       struct event_trigger_data *trigger_data,
740  		       struct trace_event_file *file)
741  {
742  	struct event_trigger_data *data = trigger_data;
743  	struct event_filter *filter = NULL, *tmp;
744  	int ret = -EINVAL;
745  	char *s;
746  
747  	if (!filter_str) /* clear the current filter */
748  		goto assign;
749  
750  	s = strsep(&filter_str, " \t");
751  
752  	if (!strlen(s) || strcmp(s, "if") != 0)
753  		goto out;
754  
755  	if (!filter_str)
756  		goto out;
757  
758  	/* The filter is for the 'trigger' event, not the triggered event */
759  	ret = create_event_filter(file->tr, file->event_call,
760  				  filter_str, false, &filter);
761  	/*
762  	 * If create_event_filter() fails, the filter still needs to be freed;
763  	 * the calling code will do that via data->filter.
764  	 */
765   assign:
766  	tmp = rcu_access_pointer(data->filter);
767  
768  	rcu_assign_pointer(data->filter, filter);
769  
770  	if (tmp) {
771  		/* Make sure the call is done with the filter */
772  		tracepoint_synchronize_unregister();
773  		free_event_filter(tmp);
774  	}
775  
776  	kfree(data->filter_str);
777  	data->filter_str = NULL;
778  
779  	if (filter_str) {
780  		data->filter_str = kstrdup(filter_str, GFP_KERNEL);
781  		if (!data->filter_str) {
782  			free_event_filter(rcu_access_pointer(data->filter));
783  			data->filter = NULL;
784  			ret = -ENOMEM;
785  		}
786  	}
787   out:
788  	return ret;
789  }
790  
791  static LIST_HEAD(named_triggers);
792  
793  /**
794   * find_named_trigger - Find the common named trigger associated with @name
795   * @name: The name of the set of named triggers to find the common data for
796   *
797   * Named triggers are sets of triggers that share a common set of
798   * trigger data.  The first named trigger registered with a given name
799   * owns the common trigger data that the others subsequently
800   * registered with the same name will reference.  This function
801   * returns the common trigger data associated with that first
802   * registered instance.
803   *
804   * Return: the common trigger data for the given named trigger on
805   * success, NULL otherwise.
806   */
807  struct event_trigger_data *find_named_trigger(const char *name)
808  {
809  	struct event_trigger_data *data;
810  
811  	if (!name)
812  		return NULL;
813  
814  	list_for_each_entry(data, &named_triggers, named_list) {
815  		if (data->named_data)
816  			continue;
817  		if (strcmp(data->name, name) == 0)
818  			return data;
819  	}
820  
821  	return NULL;
822  }
823  
824  /**
825   * is_named_trigger - determine if a given trigger is a named trigger
826   * @test: The trigger data to test
827   *
828   * Return: true if 'test' is a named trigger, false otherwise.
829   */
830  bool is_named_trigger(struct event_trigger_data *test)
831  {
832  	struct event_trigger_data *data;
833  
834  	list_for_each_entry(data, &named_triggers, named_list) {
835  		if (test == data)
836  			return true;
837  	}
838  
839  	return false;
840  }
841  
842  /**
843   * save_named_trigger - save the trigger in the named trigger list
844   * @name: The name of the named trigger set
845   * @data: The trigger data to save
846   *
847   * Return: 0 if successful, negative error otherwise.
848   */
849  int save_named_trigger(const char *name, struct event_trigger_data *data)
850  {
851  	data->name = kstrdup(name, GFP_KERNEL);
852  	if (!data->name)
853  		return -ENOMEM;
854  
855  	list_add(&data->named_list, &named_triggers);
856  
857  	return 0;
858  }
859  
860  /**
861   * del_named_trigger - delete a trigger from the named trigger list
862   * @data: The trigger data to delete
863   */
864  void del_named_trigger(struct event_trigger_data *data)
865  {
866  	kfree(data->name);
867  	data->name = NULL;
868  
869  	list_del(&data->named_list);
870  }
871  
872  static void __pause_named_trigger(struct event_trigger_data *data, bool pause)
873  {
874  	struct event_trigger_data *test;
875  
876  	list_for_each_entry(test, &named_triggers, named_list) {
877  		if (strcmp(test->name, data->name) == 0) {
878  			if (pause) {
879  				test->paused_tmp = test->paused;
880  				test->paused = true;
881  			} else {
882  				test->paused = test->paused_tmp;
883  			}
884  		}
885  	}
886  }
887  
888  /**
889   * pause_named_trigger - Pause all named triggers with the same name
890   * @data: The trigger data of a named trigger to pause
891   *
892   * Pauses a named trigger along with all other triggers having the
893   * same name.  Because named triggers share a common set of data,
894   * pausing only one is meaningless, so pausing one named trigger needs
895   * to pause all triggers with the same name.
896   */
897  void pause_named_trigger(struct event_trigger_data *data)
898  {
899  	__pause_named_trigger(data, true);
900  }
901  
902  /**
903   * unpause_named_trigger - Un-pause all named triggers with the same name
904   * @data: The trigger data of a named trigger to unpause
905   *
906   * Un-pauses a named trigger along with all other triggers having the
907   * same name.  Because named triggers share a common set of data,
908   * unpausing only one is meaningless, so unpausing one named trigger
909   * needs to unpause all triggers with the same name.
910   */
911  void unpause_named_trigger(struct event_trigger_data *data)
912  {
913  	__pause_named_trigger(data, false);
914  }
915  
916  /**
917   * set_named_trigger_data - Associate common named trigger data
918   * @data: The trigger data to associate the common named trigger data with
919   *
920   * Named triggers are sets of triggers that share a common set of
921   * trigger data.  The first named trigger registered with a given name
922   * owns the common trigger data that the others subsequently
923   * registered with the same name will reference.  This function
924   * associates the common trigger data from the first trigger with the
925   * given trigger.
926   */
927  void set_named_trigger_data(struct event_trigger_data *data,
928  			    struct event_trigger_data *named_data)
929  {
930  	data->named_data = named_data;
931  }
932  
933  struct event_trigger_data *
934  get_named_trigger_data(struct event_trigger_data *data)
935  {
936  	return data->named_data;
937  }
938  
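/*
 * traceon/traceoff triggers: switch tracing on or off for the trace
 * instance owning the triggering event (or for the global buffer when no
 * private_data file is set).  The *_count_trigger variants do the same but
 * only for the first 'count' invocations, e.g. "traceon:5".
 */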
939  static void
940  traceon_trigger(struct event_trigger_data *data, void *rec,
941  		struct ring_buffer_event *event)
942  {
943  	struct trace_event_file *file = data->private_data;
944  
945  	if (file) {
946  		if (tracer_tracing_is_on(file->tr))
947  			return;
948  
949  		tracer_tracing_on(file->tr);
950  		return;
951  	}
952  
953  	if (tracing_is_on())
954  		return;
955  
956  	tracing_on();
957  }
958  
959  static void
960  traceon_count_trigger(struct event_trigger_data *data, void *rec,
961  		      struct ring_buffer_event *event)
962  {
963  	struct trace_event_file *file = data->private_data;
964  
965  	if (file) {
966  		if (tracer_tracing_is_on(file->tr))
967  			return;
968  	} else {
969  		if (tracing_is_on())
970  			return;
971  	}
972  
973  	if (!data->count)
974  		return;
975  
976  	if (data->count != -1)
977  		(data->count)--;
978  
979  	if (file)
980  		tracer_tracing_on(file->tr);
981  	else
982  		tracing_on();
983  }
984  
985  static void
986  traceoff_trigger(struct event_trigger_data *data, void *rec,
987  		 struct ring_buffer_event *event)
988  {
989  	struct trace_event_file *file = data->private_data;
990  
991  	if (file) {
992  		if (!tracer_tracing_is_on(file->tr))
993  			return;
994  
995  		tracer_tracing_off(file->tr);
996  		return;
997  	}
998  
999  	if (!tracing_is_on())
1000  		return;
1001  
1002  	tracing_off();
1003  }
1004  
1005  static void
1006  traceoff_count_trigger(struct event_trigger_data *data, void *rec,
1007  		       struct ring_buffer_event *event)
1008  {
1009  	struct trace_event_file *file = data->private_data;
1010  
1011  	if (file) {
1012  		if (!tracer_tracing_is_on(file->tr))
1013  			return;
1014  	} else {
1015  		if (!tracing_is_on())
1016  			return;
1017  	}
1018  
1019  	if (!data->count)
1020  		return;
1021  
1022  	if (data->count != -1)
1023  		(data->count)--;
1024  
1025  	if (file)
1026  		tracer_tracing_off(file->tr);
1027  	else
1028  		tracing_off();
1029  }
1030  
1031  static int
1032  traceon_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
1033  		      struct event_trigger_data *data)
1034  {
1035  	return event_trigger_print("traceon", m, (void *)data->count,
1036  				   data->filter_str);
1037  }
1038  
1039  static int
1040  traceoff_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
1041  		       struct event_trigger_data *data)
1042  {
1043  	return event_trigger_print("traceoff", m, (void *)data->count,
1044  				   data->filter_str);
1045  }
1046  
1047  static struct event_trigger_ops traceon_trigger_ops = {
1048  	.func			= traceon_trigger,
1049  	.print			= traceon_trigger_print,
1050  	.init			= event_trigger_init,
1051  	.free			= event_trigger_free,
1052  };
1053  
1054  static struct event_trigger_ops traceon_count_trigger_ops = {
1055  	.func			= traceon_count_trigger,
1056  	.print			= traceon_trigger_print,
1057  	.init			= event_trigger_init,
1058  	.free			= event_trigger_free,
1059  };
1060  
1061  static struct event_trigger_ops traceoff_trigger_ops = {
1062  	.func			= traceoff_trigger,
1063  	.print			= traceoff_trigger_print,
1064  	.init			= event_trigger_init,
1065  	.free			= event_trigger_free,
1066  };
1067  
1068  static struct event_trigger_ops traceoff_count_trigger_ops = {
1069  	.func			= traceoff_count_trigger,
1070  	.print			= traceoff_trigger_print,
1071  	.init			= event_trigger_init,
1072  	.free			= event_trigger_free,
1073  };
1074  
1075  static struct event_trigger_ops *
1076  onoff_get_trigger_ops(char *cmd, char *param)
1077  {
1078  	struct event_trigger_ops *ops;
1079  
1080  	/* we register both traceon and traceoff to this callback */
1081  	if (strcmp(cmd, "traceon") == 0)
1082  		ops = param ? &traceon_count_trigger_ops :
1083  			&traceon_trigger_ops;
1084  	else
1085  		ops = param ? &traceoff_count_trigger_ops :
1086  			&traceoff_trigger_ops;
1087  
1088  	return ops;
1089  }
1090  
1091  static struct event_command trigger_traceon_cmd = {
1092  	.name			= "traceon",
1093  	.trigger_type		= ETT_TRACE_ONOFF,
1094  	.func			= event_trigger_callback,
1095  	.reg			= register_trigger,
1096  	.unreg			= unregister_trigger,
1097  	.get_trigger_ops	= onoff_get_trigger_ops,
1098  	.set_filter		= set_trigger_filter,
1099  };
1100  
1101  static struct event_command trigger_traceoff_cmd = {
1102  	.name			= "traceoff",
1103  	.trigger_type		= ETT_TRACE_ONOFF,
1104  	.flags			= EVENT_CMD_FL_POST_TRIGGER,
1105  	.func			= event_trigger_callback,
1106  	.reg			= register_trigger,
1107  	.unreg			= unregister_trigger,
1108  	.get_trigger_ops	= onoff_get_trigger_ops,
1109  	.set_filter		= set_trigger_filter,
1110  };
1111  
1112  #ifdef CONFIG_TRACER_SNAPSHOT
1113  static void
1114  snapshot_trigger(struct event_trigger_data *data, void *rec,
1115  		 struct ring_buffer_event *event)
1116  {
1117  	struct trace_event_file *file = data->private_data;
1118  
1119  	if (file)
1120  		tracing_snapshot_instance(file->tr);
1121  	else
1122  		tracing_snapshot();
1123  }
1124  
1125  static void
1126  snapshot_count_trigger(struct event_trigger_data *data, void *rec,
1127  		       struct ring_buffer_event *event)
1128  {
1129  	if (!data->count)
1130  		return;
1131  
1132  	if (data->count != -1)
1133  		(data->count)--;
1134  
1135  	snapshot_trigger(data, rec, event);
1136  }
1137  
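/*
 * The snapshot trigger needs the snapshot (max) buffer of the instance to
 * be allocated before the trigger can fire, so allocate it at registration
 * time and then fall back to the generic register_trigger().
 */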
1138  static int
1139  register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
1140  			  struct event_trigger_data *data,
1141  			  struct trace_event_file *file)
1142  {
1143  	int ret = tracing_alloc_snapshot_instance(file->tr);
1144  
1145  	if (ret < 0)
1146  		return ret;
1147  
1148  	return register_trigger(glob, ops, data, file);
1149  }
1150  
1151  static int
1152  snapshot_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
1153  		       struct event_trigger_data *data)
1154  {
1155  	return event_trigger_print("snapshot", m, (void *)data->count,
1156  				   data->filter_str);
1157  }
1158  
1159  static struct event_trigger_ops snapshot_trigger_ops = {
1160  	.func			= snapshot_trigger,
1161  	.print			= snapshot_trigger_print,
1162  	.init			= event_trigger_init,
1163  	.free			= event_trigger_free,
1164  };
1165  
1166  static struct event_trigger_ops snapshot_count_trigger_ops = {
1167  	.func			= snapshot_count_trigger,
1168  	.print			= snapshot_trigger_print,
1169  	.init			= event_trigger_init,
1170  	.free			= event_trigger_free,
1171  };
1172  
1173  static struct event_trigger_ops *
1174  snapshot_get_trigger_ops(char *cmd, char *param)
1175  {
1176  	return param ? &snapshot_count_trigger_ops : &snapshot_trigger_ops;
1177  }
1178  
1179  static struct event_command trigger_snapshot_cmd = {
1180  	.name			= "snapshot",
1181  	.trigger_type		= ETT_SNAPSHOT,
1182  	.func			= event_trigger_callback,
1183  	.reg			= register_snapshot_trigger,
1184  	.unreg			= unregister_trigger,
1185  	.get_trigger_ops	= snapshot_get_trigger_ops,
1186  	.set_filter		= set_trigger_filter,
1187  };
1188  
1189  static __init int register_trigger_snapshot_cmd(void)
1190  {
1191  	int ret;
1192  
1193  	ret = register_event_command(&trigger_snapshot_cmd);
1194  	WARN_ON(ret < 0);
1195  
1196  	return ret;
1197  }
1198  #else
1199  static __init int register_trigger_snapshot_cmd(void) { return 0; }
1200  #endif /* CONFIG_TRACER_SNAPSHOT */
1201  
1202  #ifdef CONFIG_STACKTRACE
1203  #ifdef CONFIG_UNWINDER_ORC
1204  /* Skip 2:
1205   *   event_triggers_post_call()
1206   *   trace_event_raw_event_xxx()
1207   */
1208  # define STACK_SKIP 2
1209  #else
1210  /*
1211   * Skip 4:
1212   *   stacktrace_trigger()
1213   *   event_triggers_post_call()
1214   *   trace_event_buffer_commit()
1215   *   trace_event_raw_event_xxx()
1216   */
1217  #define STACK_SKIP 4
1218  #endif
1219  
1220  static void
1221  stacktrace_trigger(struct event_trigger_data *data, void *rec,
1222  		   struct ring_buffer_event *event)
1223  {
1224  	struct trace_event_file *file = data->private_data;
1225  	unsigned long flags;
1226  
1227  	if (file) {
1228  		local_save_flags(flags);
1229  		__trace_stack(file->tr, flags, STACK_SKIP, preempt_count());
1230  	} else
1231  		trace_dump_stack(STACK_SKIP);
1232  }
1233  
1234  static void
1235  stacktrace_count_trigger(struct event_trigger_data *data, void *rec,
1236  			 struct ring_buffer_event *event)
1237  {
1238  	if (!data->count)
1239  		return;
1240  
1241  	if (data->count != -1)
1242  		(data->count)--;
1243  
1244  	stacktrace_trigger(data, rec, event);
1245  }
1246  
1247  static int
1248  stacktrace_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
1249  			 struct event_trigger_data *data)
1250  {
1251  	return event_trigger_print("stacktrace", m, (void *)data->count,
1252  				   data->filter_str);
1253  }
1254  
1255  static struct event_trigger_ops stacktrace_trigger_ops = {
1256  	.func			= stacktrace_trigger,
1257  	.print			= stacktrace_trigger_print,
1258  	.init			= event_trigger_init,
1259  	.free			= event_trigger_free,
1260  };
1261  
1262  static struct event_trigger_ops stacktrace_count_trigger_ops = {
1263  	.func			= stacktrace_count_trigger,
1264  	.print			= stacktrace_trigger_print,
1265  	.init			= event_trigger_init,
1266  	.free			= event_trigger_free,
1267  };
1268  
1269  static struct event_trigger_ops *
1270  stacktrace_get_trigger_ops(char *cmd, char *param)
1271  {
1272  	return param ? &stacktrace_count_trigger_ops : &stacktrace_trigger_ops;
1273  }
1274  
1275  static struct event_command trigger_stacktrace_cmd = {
1276  	.name			= "stacktrace",
1277  	.trigger_type		= ETT_STACKTRACE,
1278  	.flags			= EVENT_CMD_FL_POST_TRIGGER,
1279  	.func			= event_trigger_callback,
1280  	.reg			= register_trigger,
1281  	.unreg			= unregister_trigger,
1282  	.get_trigger_ops	= stacktrace_get_trigger_ops,
1283  	.set_filter		= set_trigger_filter,
1284  };
1285  
1286  static __init int register_trigger_stacktrace_cmd(void)
1287  {
1288  	int ret;
1289  
1290  	ret = register_event_command(&trigger_stacktrace_cmd);
1291  	WARN_ON(ret < 0);
1292  
1293  	return ret;
1294  }
1295  #else
1296  static __init int register_trigger_stacktrace_cmd(void) { return 0; }
1297  #endif /* CONFIG_STACKTRACE */
1298  
1299  static __init void unregister_trigger_traceon_traceoff_cmds(void)
1300  {
1301  	unregister_event_command(&trigger_traceon_cmd);
1302  	unregister_event_command(&trigger_traceoff_cmd);
1303  }
1304  
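/*
 * enable_event/disable_event triggers: when the triggering event fires,
 * clear or set the SOFT_DISABLED bit of another event (enable_data->file),
 * soft-enabling or soft-disabling that target event.
 */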
1305  static void
1306  event_enable_trigger(struct event_trigger_data *data, void *rec,
1307  		     struct ring_buffer_event *event)
1308  {
1309  	struct enable_trigger_data *enable_data = data->private_data;
1310  
1311  	if (enable_data->enable)
1312  		clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1313  	else
1314  		set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1315  }
1316  
1317  static void
1318  event_enable_count_trigger(struct event_trigger_data *data, void *rec,
1319  			   struct ring_buffer_event *event)
1320  {
1321  	struct enable_trigger_data *enable_data = data->private_data;
1322  
1323  	if (!data->count)
1324  		return;
1325  
1326  	/* Skip if the event is in a state we want to switch to */
1327  	if (enable_data->enable == !(enable_data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
1328  		return;
1329  
1330  	if (data->count != -1)
1331  		(data->count)--;
1332  
1333  	event_enable_trigger(data, rec, event);
1334  }
1335  
1336  int event_enable_trigger_print(struct seq_file *m,
1337  			       struct event_trigger_ops *ops,
1338  			       struct event_trigger_data *data)
1339  {
1340  	struct enable_trigger_data *enable_data = data->private_data;
1341  
1342  	seq_printf(m, "%s:%s:%s",
1343  		   enable_data->hist ?
1344  		   (enable_data->enable ? ENABLE_HIST_STR : DISABLE_HIST_STR) :
1345  		   (enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR),
1346  		   enable_data->file->event_call->class->system,
1347  		   trace_event_name(enable_data->file->event_call));
1348  
1349  	if (data->count == -1)
1350  		seq_puts(m, ":unlimited");
1351  	else
1352  		seq_printf(m, ":count=%ld", data->count);
1353  
1354  	if (data->filter_str)
1355  		seq_printf(m, " if %s\n", data->filter_str);
1356  	else
1357  		seq_putc(m, '\n');
1358  
1359  	return 0;
1360  }
1361  
1362  void event_enable_trigger_free(struct event_trigger_ops *ops,
1363  			       struct event_trigger_data *data)
1364  {
1365  	struct enable_trigger_data *enable_data = data->private_data;
1366  
1367  	if (WARN_ON_ONCE(data->ref <= 0))
1368  		return;
1369  
1370  	data->ref--;
1371  	if (!data->ref) {
1372  		/* Remove the SOFT_MODE flag */
1373  		trace_event_enable_disable(enable_data->file, 0, 1);
1374  		module_put(enable_data->file->event_call->mod);
1375  		trigger_data_free(data);
1376  		kfree(enable_data);
1377  	}
1378  }
1379  
1380  static struct event_trigger_ops event_enable_trigger_ops = {
1381  	.func			= event_enable_trigger,
1382  	.print			= event_enable_trigger_print,
1383  	.init			= event_trigger_init,
1384  	.free			= event_enable_trigger_free,
1385  };
1386  
1387  static struct event_trigger_ops event_enable_count_trigger_ops = {
1388  	.func			= event_enable_count_trigger,
1389  	.print			= event_enable_trigger_print,
1390  	.init			= event_trigger_init,
1391  	.free			= event_enable_trigger_free,
1392  };
1393  
1394  static struct event_trigger_ops event_disable_trigger_ops = {
1395  	.func			= event_enable_trigger,
1396  	.print			= event_enable_trigger_print,
1397  	.init			= event_trigger_init,
1398  	.free			= event_enable_trigger_free,
1399  };
1400  
1401  static struct event_trigger_ops event_disable_count_trigger_ops = {
1402  	.func			= event_enable_count_trigger,
1403  	.print			= event_enable_trigger_print,
1404  	.init			= event_trigger_init,
1405  	.free			= event_enable_trigger_free,
1406  };
1407  
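/*
 * Parse and register an enable_event/disable_event trigger.  The parameter
 * names the target event as "<system>:<event>[:count]", e.g. (illustrative)
 * "enable_event:sched:sched_switch:5"; an optional "if <filter>" clause is
 * matched against the triggering event, not the target.
 */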
1408  int event_enable_trigger_func(struct event_command *cmd_ops,
1409  			      struct trace_event_file *file,
1410  			      char *glob, char *cmd, char *param)
1411  {
1412  	struct trace_event_file *event_enable_file;
1413  	struct enable_trigger_data *enable_data;
1414  	struct event_trigger_data *trigger_data;
1415  	struct event_trigger_ops *trigger_ops;
1416  	struct trace_array *tr = file->tr;
1417  	const char *system;
1418  	const char *event;
1419  	bool hist = false;
1420  	char *trigger;
1421  	char *number;
1422  	bool enable;
1423  	int ret;
1424  
1425  	if (!param)
1426  		return -EINVAL;
1427  
1428  	/* separate the trigger from the filter (s:e:n [if filter]) */
1429  	trigger = strsep(&param, " \t");
1430  	if (!trigger)
1431  		return -EINVAL;
1432  	if (param) {
1433  		param = skip_spaces(param);
1434  		if (!*param)
1435  			param = NULL;
1436  	}
1437  
1438  	system = strsep(&trigger, ":");
1439  	if (!trigger)
1440  		return -EINVAL;
1441  
1442  	event = strsep(&trigger, ":");
1443  
1444  	ret = -EINVAL;
1445  	event_enable_file = find_event_file(tr, system, event);
1446  	if (!event_enable_file)
1447  		goto out;
1448  
1449  #ifdef CONFIG_HIST_TRIGGERS
1450  	hist = ((strcmp(cmd, ENABLE_HIST_STR) == 0) ||
1451  		(strcmp(cmd, DISABLE_HIST_STR) == 0));
1452  
1453  	enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
1454  		  (strcmp(cmd, ENABLE_HIST_STR) == 0));
1455  #else
1456  	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
1457  #endif
1458  	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
1459  
1460  	ret = -ENOMEM;
1461  	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
1462  	if (!trigger_data)
1463  		goto out;
1464  
1465  	enable_data = kzalloc(sizeof(*enable_data), GFP_KERNEL);
1466  	if (!enable_data) {
1467  		kfree(trigger_data);
1468  		goto out;
1469  	}
1470  
1471  	trigger_data->count = -1;
1472  	trigger_data->ops = trigger_ops;
1473  	trigger_data->cmd_ops = cmd_ops;
1474  	INIT_LIST_HEAD(&trigger_data->list);
1475  	RCU_INIT_POINTER(trigger_data->filter, NULL);
1476  
1477  	enable_data->hist = hist;
1478  	enable_data->enable = enable;
1479  	enable_data->file = event_enable_file;
1480  	trigger_data->private_data = enable_data;
1481  
1482  	if (glob[0] == '!') {
1483  		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
1484  		kfree(trigger_data);
1485  		kfree(enable_data);
1486  		ret = 0;
1487  		goto out;
1488  	}
1489  
1490  	/* Up the trigger_data count to make sure nothing frees it on failure */
1491  	event_trigger_init(trigger_ops, trigger_data);
1492  
1493  	if (trigger) {
1494  		number = strsep(&trigger, ":");
1495  
1496  		ret = -EINVAL;
1497  		if (!strlen(number))
1498  			goto out_free;
1499  
1500  		/*
1501  		 * We use the callback data field (which is a pointer)
1502  		 * as our counter.
1503  		 */
1504  		ret = kstrtoul(number, 0, &trigger_data->count);
1505  		if (ret)
1506  			goto out_free;
1507  	}
1508  
1509  	if (!param) /* if param is non-empty, it's supposed to be a filter */
1510  		goto out_reg;
1511  
1512  	if (!cmd_ops->set_filter)
1513  		goto out_reg;
1514  
1515  	ret = cmd_ops->set_filter(param, trigger_data, file);
1516  	if (ret < 0)
1517  		goto out_free;
1518  
1519   out_reg:
1520  	/* Don't let event modules unload while probe registered */
1521  	ret = try_module_get(event_enable_file->event_call->mod);
1522  	if (!ret) {
1523  		ret = -EBUSY;
1524  		goto out_free;
1525  	}
1526  
1527  	ret = trace_event_enable_disable(event_enable_file, 1, 1);
1528  	if (ret < 0)
1529  		goto out_put;
1530  	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
1531  	/*
1532  	 * The above returns on success the number of triggers registered,
1533  	 * but if it registered none it returns zero.  Consider
1534  	 * registering no triggers a failure too.
1535  	 */
1536  	if (!ret) {
1537  		ret = -ENOENT;
1538  		goto out_disable;
1539  	} else if (ret < 0)
1540  		goto out_disable;
1541  	/* Just return zero, not the number of registered triggers */
1542  	ret = 0;
1543  	event_trigger_free(trigger_ops, trigger_data);
1544   out:
1545  	return ret;
1546  
1547   out_disable:
1548  	trace_event_enable_disable(event_enable_file, 0, 1);
1549   out_put:
1550  	module_put(event_enable_file->event_call->mod);
1551   out_free:
1552  	if (cmd_ops->set_filter)
1553  		cmd_ops->set_filter(NULL, trigger_data, NULL);
1554  	event_trigger_free(trigger_ops, trigger_data);
1555  	kfree(enable_data);
1556  	goto out;
1557  }
1558  
1559  int event_enable_register_trigger(char *glob,
1560  				  struct event_trigger_ops *ops,
1561  				  struct event_trigger_data *data,
1562  				  struct trace_event_file *file)
1563  {
1564  	struct enable_trigger_data *enable_data = data->private_data;
1565  	struct enable_trigger_data *test_enable_data;
1566  	struct event_trigger_data *test;
1567  	int ret = 0;
1568  
1569  	lockdep_assert_held(&event_mutex);
1570  
1571  	list_for_each_entry(test, &file->triggers, list) {
1572  		test_enable_data = test->private_data;
1573  		if (test_enable_data &&
1574  		    (test->cmd_ops->trigger_type ==
1575  		     data->cmd_ops->trigger_type) &&
1576  		    (test_enable_data->file == enable_data->file)) {
1577  			ret = -EEXIST;
1578  			goto out;
1579  		}
1580  	}
1581  
1582  	if (data->ops->init) {
1583  		ret = data->ops->init(data->ops, data);
1584  		if (ret < 0)
1585  			goto out;
1586  	}
1587  
1588  	list_add_rcu(&data->list, &file->triggers);
1589  	ret++;
1590  
1591  	update_cond_flag(file);
1592  	if (trace_event_trigger_enable_disable(file, 1) < 0) {
1593  		list_del_rcu(&data->list);
1594  		update_cond_flag(file);
1595  		ret--;
1596  	}
1597  out:
1598  	return ret;
1599  }
1600  
1601  void event_enable_unregister_trigger(char *glob,
1602  				     struct event_trigger_ops *ops,
1603  				     struct event_trigger_data *test,
1604  				     struct trace_event_file *file)
1605  {
1606  	struct enable_trigger_data *test_enable_data = test->private_data;
1607  	struct enable_trigger_data *enable_data;
1608  	struct event_trigger_data *data;
1609  	bool unregistered = false;
1610  
1611  	lockdep_assert_held(&event_mutex);
1612  
1613  	list_for_each_entry(data, &file->triggers, list) {
1614  		enable_data = data->private_data;
1615  		if (enable_data &&
1616  		    (data->cmd_ops->trigger_type ==
1617  		     test->cmd_ops->trigger_type) &&
1618  		    (enable_data->file == test_enable_data->file)) {
1619  			unregistered = true;
1620  			list_del_rcu(&data->list);
1621  			trace_event_trigger_enable_disable(file, 0);
1622  			update_cond_flag(file);
1623  			break;
1624  		}
1625  	}
1626  
1627  	if (unregistered && data->ops->free)
1628  		data->ops->free(data->ops, data);
1629  }
1630  
1631  static struct event_trigger_ops *
1632  event_enable_get_trigger_ops(char *cmd, char *param)
1633  {
1634  	struct event_trigger_ops *ops;
1635  	bool enable;
1636  
1637  #ifdef CONFIG_HIST_TRIGGERS
1638  	enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
1639  		  (strcmp(cmd, ENABLE_HIST_STR) == 0));
1640  #else
1641  	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
1642  #endif
1643  	if (enable)
1644  		ops = param ? &event_enable_count_trigger_ops :
1645  			&event_enable_trigger_ops;
1646  	else
1647  		ops = param ? &event_disable_count_trigger_ops :
1648  			&event_disable_trigger_ops;
1649  
1650  	return ops;
1651  }
1652  
1653  static struct event_command trigger_enable_cmd = {
1654  	.name			= ENABLE_EVENT_STR,
1655  	.trigger_type		= ETT_EVENT_ENABLE,
1656  	.func			= event_enable_trigger_func,
1657  	.reg			= event_enable_register_trigger,
1658  	.unreg			= event_enable_unregister_trigger,
1659  	.get_trigger_ops	= event_enable_get_trigger_ops,
1660  	.set_filter		= set_trigger_filter,
1661  };
1662  
1663  static struct event_command trigger_disable_cmd = {
1664  	.name			= DISABLE_EVENT_STR,
1665  	.trigger_type		= ETT_EVENT_ENABLE,
1666  	.func			= event_enable_trigger_func,
1667  	.reg			= event_enable_register_trigger,
1668  	.unreg			= event_enable_unregister_trigger,
1669  	.get_trigger_ops	= event_enable_get_trigger_ops,
1670  	.set_filter		= set_trigger_filter,
1671  };
1672  
1673  static __init void unregister_trigger_enable_disable_cmds(void)
1674  {
1675  	unregister_event_command(&trigger_enable_cmd);
1676  	unregister_event_command(&trigger_disable_cmd);
1677  }
1678  
1679  static __init int register_trigger_enable_disable_cmds(void)
1680  {
1681  	int ret;
1682  
1683  	ret = register_event_command(&trigger_enable_cmd);
1684  	if (WARN_ON(ret < 0))
1685  		return ret;
1686  	ret = register_event_command(&trigger_disable_cmd);
1687  	if (WARN_ON(ret < 0))
1688  		unregister_trigger_enable_disable_cmds();
1689  
1690  	return ret;
1691  }
1692  
1693  static __init int register_trigger_traceon_traceoff_cmds(void)
1694  {
1695  	int ret;
1696  
1697  	ret = register_event_command(&trigger_traceon_cmd);
1698  	if (WARN_ON(ret < 0))
1699  		return ret;
1700  	ret = register_event_command(&trigger_traceoff_cmd);
1701  	if (WARN_ON(ret < 0))
1702  		unregister_trigger_traceon_traceoff_cmds();
1703  
1704  	return ret;
1705  }
1706  
1707  __init int register_trigger_cmds(void)
1708  {
1709  	register_trigger_traceon_traceoff_cmds();
1710  	register_trigger_snapshot_cmd();
1711  	register_trigger_stacktrace_cmd();
1712  	register_trigger_enable_disable_cmds();
1713  	register_trigger_hist_enable_disable_cmds();
1714  	register_trigger_hist_cmd();
1715  
1716  	return 0;
1717  }
1718