1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * trace_events_trigger - trace event triggers
4  *
5  * Copyright (C) 2013 Tom Zanussi <tom.zanussi@linux.intel.com>
6  */
7 
8 #include <linux/security.h>
9 #include <linux/module.h>
10 #include <linux/ctype.h>
11 #include <linux/mutex.h>
12 #include <linux/slab.h>
13 #include <linux/rculist.h>
14 
15 #include "trace.h"
16 
17 static LIST_HEAD(trigger_commands);
18 static DEFINE_MUTEX(trigger_cmd_mutex);
19 
20 void trigger_data_free(struct event_trigger_data *data)
21 {
22 	if (data->cmd_ops->set_filter)
23 		data->cmd_ops->set_filter(NULL, data, NULL);
24 
25 	/* make sure current triggers exit before free */
26 	tracepoint_synchronize_unregister();
27 
28 	kfree(data);
29 }
30 
31 /**
32  * event_triggers_call - Call triggers associated with a trace event
33  * @file: The trace_event_file associated with the event
34  * @rec: The trace entry for the event, NULL for unconditional invocation
35  *
36  * For each trigger associated with an event, invoke the trigger
37  * function registered with the associated trigger command.  If rec is
38  * non-NULL, it means that the trigger requires further processing and
39  * shouldn't be unconditionally invoked.  If rec is non-NULL and the
40  * trigger has a filter associated with it, rec will be checked against
41  * the filter, and if the record matches, the trigger will be invoked.
42  * If the trigger is a 'post_trigger', meaning it shouldn't be invoked
43  * in any case until the current event is written, the trigger
44  * function isn't invoked but the bit associated with the deferred
45  * trigger is set in the return value.
46  *
47  * Returns an enum event_trigger_type value containing a set bit for
48  * any trigger that should be deferred, ETT_NONE if nothing to defer.
49  *
50  * Called from tracepoint handlers (with rcu_read_lock_sched() held).
51  *
52  * Return: an enum event_trigger_type value containing a set bit for
53  * any trigger that should be deferred, ETT_NONE if nothing to defer.
54  */
55 enum event_trigger_type
56 event_triggers_call(struct trace_event_file *file,
57 		    struct trace_buffer *buffer, void *rec,
58 		    struct ring_buffer_event *event)
59 {
60 	struct event_trigger_data *data;
61 	enum event_trigger_type tt = ETT_NONE;
62 	struct event_filter *filter;
63 
64 	if (list_empty(&file->triggers))
65 		return tt;
66 
67 	list_for_each_entry_rcu(data, &file->triggers, list) {
68 		if (data->paused)
69 			continue;
70 		if (!rec) {
71 			data->ops->func(data, buffer, rec, event);
72 			continue;
73 		}
74 		filter = rcu_dereference_sched(data->filter);
75 		if (filter && !filter_match_preds(filter, rec))
76 			continue;
77 		if (event_command_post_trigger(data->cmd_ops)) {
78 			tt |= data->cmd_ops->trigger_type;
79 			continue;
80 		}
81 		data->ops->func(data, buffer, rec, event);
82 	}
83 	return tt;
84 }
85 EXPORT_SYMBOL_GPL(event_triggers_call);
86 
87 /**
88  * event_triggers_post_call - Call 'post_triggers' for a trace event
89  * @file: The trace_event_file associated with the event
90  * @tt: enum event_trigger_type containing a set bit for each trigger to invoke
91  *
92  * For each trigger associated with an event, invoke the trigger
93  * function registered with the associated trigger command, if the
94  * corresponding bit is set in the tt enum passed into this function.
95  * See @event_triggers_call for details on how those bits are set.
96  *
97  * Called from tracepoint handlers (with rcu_read_lock_sched() held).
98  */
99 void
100 event_triggers_post_call(struct trace_event_file *file,
101 			 enum event_trigger_type tt)
102 {
103 	struct event_trigger_data *data;
104 
105 	list_for_each_entry_rcu(data, &file->triggers, list) {
106 		if (data->paused)
107 			continue;
108 		if (data->cmd_ops->trigger_type & tt)
109 			data->ops->func(data, NULL, NULL, NULL);
110 	}
111 }
112 EXPORT_SYMBOL_GPL(event_triggers_post_call);
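
/*
 * (Illustrative sketch) A tracepoint handler typically uses the two
 * calls above in sequence, roughly:
 *
 *	tt = event_triggers_call(file, buffer, entry, event);
 *	... write and commit the event to the ring buffer ...
 *	if (tt)
 *		event_triggers_post_call(file, tt);
 */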
113 
114 #define SHOW_AVAILABLE_TRIGGERS	(void *)(1UL)
115 
116 static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
117 {
118 	struct trace_event_file *event_file = event_file_data(m->private);
119 
120 	if (t == SHOW_AVAILABLE_TRIGGERS) {
121 		(*pos)++;
122 		return NULL;
123 	}
124 	return seq_list_next(t, &event_file->triggers, pos);
125 }
126 
127 static bool check_user_trigger(struct trace_event_file *file)
128 {
129 	struct event_trigger_data *data;
130 
131 	list_for_each_entry_rcu(data, &file->triggers, list,
132 				lockdep_is_held(&event_mutex)) {
133 		if (data->flags & EVENT_TRIGGER_FL_PROBE)
134 			continue;
135 		return true;
136 	}
137 	return false;
138 }
139 
140 static void *trigger_start(struct seq_file *m, loff_t *pos)
141 {
142 	struct trace_event_file *event_file;
143 
144 	/* ->stop() is called even if ->start() fails */
145 	mutex_lock(&event_mutex);
146 	event_file = event_file_data(m->private);
147 	if (unlikely(!event_file))
148 		return ERR_PTR(-ENODEV);
149 
150 	if (list_empty(&event_file->triggers) || !check_user_trigger(event_file))
151 		return *pos == 0 ? SHOW_AVAILABLE_TRIGGERS : NULL;
152 
153 	return seq_list_start(&event_file->triggers, *pos);
154 }
155 
156 static void trigger_stop(struct seq_file *m, void *t)
157 {
158 	mutex_unlock(&event_mutex);
159 }
160 
161 static int trigger_show(struct seq_file *m, void *v)
162 {
163 	struct event_trigger_data *data;
164 	struct event_command *p;
165 
166 	if (v == SHOW_AVAILABLE_TRIGGERS) {
167 		seq_puts(m, "# Available triggers:\n");
168 		seq_putc(m, '#');
169 		mutex_lock(&trigger_cmd_mutex);
170 		list_for_each_entry_reverse(p, &trigger_commands, list)
171 			seq_printf(m, " %s", p->name);
172 		seq_putc(m, '\n');
173 		mutex_unlock(&trigger_cmd_mutex);
174 		return 0;
175 	}
176 
177 	data = list_entry(v, struct event_trigger_data, list);
178 	data->ops->print(m, data->ops, data);
179 
180 	return 0;
181 }
182 
183 static const struct seq_operations event_triggers_seq_ops = {
184 	.start = trigger_start,
185 	.next = trigger_next,
186 	.stop = trigger_stop,
187 	.show = trigger_show,
188 };
189 
190 static int event_trigger_regex_open(struct inode *inode, struct file *file)
191 {
192 	int ret;
193 
194 	ret = security_locked_down(LOCKDOWN_TRACEFS);
195 	if (ret)
196 		return ret;
197 
198 	mutex_lock(&event_mutex);
199 
200 	if (unlikely(!event_file_data(file))) {
201 		mutex_unlock(&event_mutex);
202 		return -ENODEV;
203 	}
204 
205 	if ((file->f_mode & FMODE_WRITE) &&
206 	    (file->f_flags & O_TRUNC)) {
207 		struct trace_event_file *event_file;
208 		struct event_command *p;
209 
210 		event_file = event_file_data(file);
211 
212 		list_for_each_entry(p, &trigger_commands, list) {
213 			if (p->unreg_all)
214 				p->unreg_all(event_file);
215 		}
216 	}
217 
218 	if (file->f_mode & FMODE_READ) {
219 		ret = seq_open(file, &event_triggers_seq_ops);
220 		if (!ret) {
221 			struct seq_file *m = file->private_data;
222 			m->private = file;
223 		}
224 	}
225 
226 	mutex_unlock(&event_mutex);
227 
228 	return ret;
229 }
230 
231 int trigger_process_regex(struct trace_event_file *file, char *buff)
232 {
233 	char *command, *next;
234 	struct event_command *p;
235 	int ret = -EINVAL;
236 
237 	next = buff = skip_spaces(buff);
238 	command = strsep(&next, ": \t");
239 	if (next) {
240 		next = skip_spaces(next);
241 		if (!*next)
242 			next = NULL;
243 	}
244 	command = (command[0] != '!') ? command : command + 1;
245 
246 	mutex_lock(&trigger_cmd_mutex);
247 	list_for_each_entry(p, &trigger_commands, list) {
248 		if (strcmp(p->name, command) == 0) {
249 			ret = p->func(p, file, buff, command, next);
250 			goto out_unlock;
251 		}
252 	}
253  out_unlock:
254 	mutex_unlock(&trigger_cmd_mutex);
255 
256 	return ret;
257 }
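
/*
 * (Illustrative) The command strings parsed above follow the general
 * form documented in Documentation/trace/events.rst:
 *
 *	<command>[:count] [if <filter>]
 *
 * with a leading '!' to remove an existing trigger, e.g. via tracefs
 * (exact mount point may vary):
 *
 *	echo 'traceoff:5 if prev_pid == 42' > \
 *		/sys/kernel/tracing/events/sched/sched_switch/trigger
 *	echo '!traceoff' > \
 *		/sys/kernel/tracing/events/sched/sched_switch/trigger
 */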
258 
259 static ssize_t event_trigger_regex_write(struct file *file,
260 					 const char __user *ubuf,
261 					 size_t cnt, loff_t *ppos)
262 {
263 	struct trace_event_file *event_file;
264 	ssize_t ret;
265 	char *buf;
266 
267 	if (!cnt)
268 		return 0;
269 
270 	if (cnt >= PAGE_SIZE)
271 		return -EINVAL;
272 
273 	buf = memdup_user_nul(ubuf, cnt);
274 	if (IS_ERR(buf))
275 		return PTR_ERR(buf);
276 
277 	strim(buf);
278 
279 	mutex_lock(&event_mutex);
280 	event_file = event_file_data(file);
281 	if (unlikely(!event_file)) {
282 		mutex_unlock(&event_mutex);
283 		kfree(buf);
284 		return -ENODEV;
285 	}
286 	ret = trigger_process_regex(event_file, buf);
287 	mutex_unlock(&event_mutex);
288 
289 	kfree(buf);
290 	if (ret < 0)
291 		goto out;
292 
293 	*ppos += cnt;
294 	ret = cnt;
295  out:
296 	return ret;
297 }
298 
299 static int event_trigger_regex_release(struct inode *inode, struct file *file)
300 {
301 	mutex_lock(&event_mutex);
302 
303 	if (file->f_mode & FMODE_READ)
304 		seq_release(inode, file);
305 
306 	mutex_unlock(&event_mutex);
307 
308 	return 0;
309 }
310 
311 static ssize_t
312 event_trigger_write(struct file *filp, const char __user *ubuf,
313 		    size_t cnt, loff_t *ppos)
314 {
315 	return event_trigger_regex_write(filp, ubuf, cnt, ppos);
316 }
317 
318 static int
319 event_trigger_open(struct inode *inode, struct file *filp)
320 {
321 	/* Checks for tracefs lockdown */
322 	return event_trigger_regex_open(inode, filp);
323 }
324 
325 static int
326 event_trigger_release(struct inode *inode, struct file *file)
327 {
328 	return event_trigger_regex_release(inode, file);
329 }
330 
331 const struct file_operations event_trigger_fops = {
332 	.open = event_trigger_open,
333 	.read = seq_read,
334 	.write = event_trigger_write,
335 	.llseek = tracing_lseek,
336 	.release = event_trigger_release,
337 };
338 
339 /*
340  * Currently we only register event commands from __init, so mark this
341  * __init too.
342  */
343 __init int register_event_command(struct event_command *cmd)
344 {
345 	struct event_command *p;
346 	int ret = 0;
347 
348 	mutex_lock(&trigger_cmd_mutex);
349 	list_for_each_entry(p, &trigger_commands, list) {
350 		if (strcmp(cmd->name, p->name) == 0) {
351 			ret = -EBUSY;
352 			goto out_unlock;
353 		}
354 	}
355 	list_add(&cmd->list, &trigger_commands);
356  out_unlock:
357 	mutex_unlock(&trigger_cmd_mutex);
358 
359 	return ret;
360 }
361 
362 /*
363  * Currently we only unregister event commands from __init, so mark
364  * this __init too.
365  */
366 __init int unregister_event_command(struct event_command *cmd)
367 {
368 	struct event_command *p, *n;
369 	int ret = -ENODEV;
370 
371 	mutex_lock(&trigger_cmd_mutex);
372 	list_for_each_entry_safe(p, n, &trigger_commands, list) {
373 		if (strcmp(cmd->name, p->name) == 0) {
374 			ret = 0;
375 			list_del_init(&p->list);
376 			goto out_unlock;
377 		}
378 	}
379  out_unlock:
380 	mutex_unlock(&trigger_cmd_mutex);
381 
382 	return ret;
383 }
384 
385 /**
386  * event_trigger_print - Generic event_trigger_ops @print implementation
387  * @name: The name of the event trigger
388  * @m: The seq_file being printed to
389  * @data: Trigger-specific data
390  * @filter_str: filter_str to print, if present
391  *
392  * Common implementation for event triggers to print themselves.
393  *
394  * Usually wrapped by a function that simply sets the @name of the
395  * trigger command and then invokes this.
396  *
397  * Return: 0 on success, errno otherwise
398  */
399 static int
400 event_trigger_print(const char *name, struct seq_file *m,
401 		    void *data, char *filter_str)
402 {
403 	long count = (long)data;
404 
405 	seq_puts(m, name);
406 
407 	if (count == -1)
408 		seq_puts(m, ":unlimited");
409 	else
410 		seq_printf(m, ":count=%ld", count);
411 
412 	if (filter_str)
413 		seq_printf(m, " if %s\n", filter_str);
414 	else
415 		seq_putc(m, '\n');
416 
417 	return 0;
418 }
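
/*
 * (Illustrative) Example lines produced by the helper above when a
 * trigger file is read:
 *
 *	traceon:unlimited
 *	traceoff:count=5 if common_pid == 1
 */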
419 
420 /**
421  * event_trigger_init - Generic event_trigger_ops @init implementation
422  * @ops: The trigger ops associated with the trigger
423  * @data: Trigger-specific data
424  *
425  * Common implementation of event trigger initialization.
426  *
427  * Usually used directly as the @init method in event trigger
428  * implementations.
429  *
430  * Return: 0 on success, errno otherwise
431  */
432 int event_trigger_init(struct event_trigger_ops *ops,
433 		       struct event_trigger_data *data)
434 {
435 	data->ref++;
436 	return 0;
437 }
438 
439 /**
440  * event_trigger_free - Generic event_trigger_ops @free implementation
441  * @ops: The trigger ops associated with the trigger
442  * @data: Trigger-specific data
443  *
444  * Common implementation of event trigger de-initialization.
445  *
446  * Usually used directly as the @free method in event trigger
447  * implementations.
448  */
449 static void
450 event_trigger_free(struct event_trigger_ops *ops,
451 		   struct event_trigger_data *data)
452 {
453 	if (WARN_ON_ONCE(data->ref <= 0))
454 		return;
455 
456 	data->ref--;
457 	if (!data->ref)
458 		trigger_data_free(data);
459 }
460 
461 int trace_event_trigger_enable_disable(struct trace_event_file *file,
462 				       int trigger_enable)
463 {
464 	int ret = 0;
465 
466 	if (trigger_enable) {
467 		if (atomic_inc_return(&file->tm_ref) > 1)
468 			return ret;
469 		set_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
470 		ret = trace_event_enable_disable(file, 1, 1);
471 	} else {
472 		if (atomic_dec_return(&file->tm_ref) > 0)
473 			return ret;
474 		clear_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
475 		ret = trace_event_enable_disable(file, 0, 1);
476 	}
477 
478 	return ret;
479 }
480 
481 /**
482  * clear_event_triggers - Clear all triggers associated with a trace array
483  * @tr: The trace array to clear
484  *
485  * For each trigger, the triggering event has its tm_ref decremented
486  * via trace_event_trigger_enable_disable(), and any associated event
487  * (in the case of enable/disable_event triggers) will have its sm_ref
488  * decremented via free()->trace_event_enable_disable().  That
489  * combination effectively reverses the soft-mode/trigger state added
490  * by trigger registration.
491  *
492  * Must be called with event_mutex held.
493  */
494 void
495 clear_event_triggers(struct trace_array *tr)
496 {
497 	struct trace_event_file *file;
498 
499 	list_for_each_entry(file, &tr->events, list) {
500 		struct event_trigger_data *data, *n;
501 		list_for_each_entry_safe(data, n, &file->triggers, list) {
502 			trace_event_trigger_enable_disable(file, 0);
503 			list_del_rcu(&data->list);
504 			if (data->ops->free)
505 				data->ops->free(data->ops, data);
506 		}
507 	}
508 }
509 
510 /**
511  * update_cond_flag - Set or reset the TRIGGER_COND bit
512  * @file: The trace_event_file associated with the event
513  *
514  * If an event has triggers and any of those triggers has a filter or
515  * a post_trigger, trigger invocation needs to be deferred until after
516  * the current event has logged its data, and the event should have
517  * its TRIGGER_COND bit set, otherwise the TRIGGER_COND bit should be
518  * cleared.
519  */
520 void update_cond_flag(struct trace_event_file *file)
521 {
522 	struct event_trigger_data *data;
523 	bool set_cond = false;
524 
525 	lockdep_assert_held(&event_mutex);
526 
527 	list_for_each_entry(data, &file->triggers, list) {
528 		if (data->filter || event_command_post_trigger(data->cmd_ops) ||
529 		    event_command_needs_rec(data->cmd_ops)) {
530 			set_cond = true;
531 			break;
532 		}
533 	}
534 
535 	if (set_cond)
536 		set_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
537 	else
538 		clear_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
539 }
540 
541 /**
542  * register_trigger - Generic event_command @reg implementation
543  * @glob: The raw string used to register the trigger
544  * @ops: The trigger ops associated with the trigger
545  * @data: Trigger-specific data to associate with the trigger
546  * @file: The trace_event_file associated with the event
547  *
548  * Common implementation for event trigger registration.
549  *
550  * Usually used directly as the @reg method in event command
551  * implementations.
552  *
553  * Return: 0 on success, errno otherwise
554  */
555 static int register_trigger(char *glob, struct event_trigger_ops *ops,
556 			    struct event_trigger_data *data,
557 			    struct trace_event_file *file)
558 {
559 	struct event_trigger_data *test;
560 	int ret = 0;
561 
562 	lockdep_assert_held(&event_mutex);
563 
564 	list_for_each_entry(test, &file->triggers, list) {
565 		if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) {
566 			ret = -EEXIST;
567 			goto out;
568 		}
569 	}
570 
571 	if (data->ops->init) {
572 		ret = data->ops->init(data->ops, data);
573 		if (ret < 0)
574 			goto out;
575 	}
576 
577 	list_add_rcu(&data->list, &file->triggers);
578 	ret++;
579 
580 	update_cond_flag(file);
581 	if (trace_event_trigger_enable_disable(file, 1) < 0) {
582 		list_del_rcu(&data->list);
583 		update_cond_flag(file);
584 		ret--;
585 	}
586 out:
587 	return ret;
588 }
589 
590 /**
591  * unregister_trigger - Generic event_command @unreg implementation
592  * @glob: The raw string used to register the trigger
593  * @ops: The trigger ops associated with the trigger
594  * @test: Trigger-specific data used to find the trigger to remove
595  * @file: The trace_event_file associated with the event
596  *
597  * Common implementation for event trigger unregistration.
598  *
599  * Usually used directly as the @unreg method in event command
600  * implementations.
601  */
602 static void unregister_trigger(char *glob, struct event_trigger_ops *ops,
603 			       struct event_trigger_data *test,
604 			       struct trace_event_file *file)
605 {
606 	struct event_trigger_data *data;
607 	bool unregistered = false;
608 
609 	lockdep_assert_held(&event_mutex);
610 
611 	list_for_each_entry(data, &file->triggers, list) {
612 		if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
613 			unregistered = true;
614 			list_del_rcu(&data->list);
615 			trace_event_trigger_enable_disable(file, 0);
616 			update_cond_flag(file);
617 			break;
618 		}
619 	}
620 
621 	if (unregistered && data->ops->free)
622 		data->ops->free(data->ops, data);
623 }
624 
625 /**
626  * event_trigger_callback - Generic event_command @func implementation
627  * @cmd_ops: The command ops, used for trigger registration
628  * @file: The trace_event_file associated with the event
629  * @glob: The raw string used to register the trigger
630  * @cmd: The cmd portion of the string used to register the trigger
631  * @param: The params portion of the string used to register the trigger
632  *
633  * Common implementation for event command parsing and trigger
634  * instantiation.
635  *
636  * Usually used directly as the @func method in event command
637  * implementations.
638  *
639  * Return: 0 on success, errno otherwise
640  */
641 static int
642 event_trigger_callback(struct event_command *cmd_ops,
643 		       struct trace_event_file *file,
644 		       char *glob, char *cmd, char *param)
645 {
646 	struct event_trigger_data *trigger_data;
647 	struct event_trigger_ops *trigger_ops;
648 	char *trigger = NULL;
649 	char *number;
650 	int ret;
651 
652 	/* separate the trigger from the filter (t:n [if filter]) */
653 	if (param && isdigit(param[0])) {
654 		trigger = strsep(&param, " \t");
655 		if (param) {
656 			param = skip_spaces(param);
657 			if (!*param)
658 				param = NULL;
659 		}
660 	}
661 
662 	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
663 
664 	ret = -ENOMEM;
665 	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
666 	if (!trigger_data)
667 		goto out;
668 
669 	trigger_data->count = -1;
670 	trigger_data->ops = trigger_ops;
671 	trigger_data->cmd_ops = cmd_ops;
672 	trigger_data->private_data = file;
673 	INIT_LIST_HEAD(&trigger_data->list);
674 	INIT_LIST_HEAD(&trigger_data->named_list);
675 
676 	if (glob[0] == '!') {
677 		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
678 		kfree(trigger_data);
679 		ret = 0;
680 		goto out;
681 	}
682 
683 	if (trigger) {
684 		number = strsep(&trigger, ":");
685 
686 		ret = -EINVAL;
687 		if (!strlen(number))
688 			goto out_free;
689 
690 		/*
691 		 * We use the callback data field (which is a pointer)
692 		 * as our counter.
693 		 */
694 		ret = kstrtoul(number, 0, &trigger_data->count);
695 		if (ret)
696 			goto out_free;
697 	}
698 
699 	if (!param) /* if param is non-empty, it's supposed to be a filter */
700 		goto out_reg;
701 
702 	if (!cmd_ops->set_filter)
703 		goto out_reg;
704 
705 	ret = cmd_ops->set_filter(param, trigger_data, file);
706 	if (ret < 0)
707 		goto out_free;
708 
709  out_reg:
710 	/* Up the trigger_data count to make sure reg doesn't free it on failure */
711 	event_trigger_init(trigger_ops, trigger_data);
712 	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
713 	/*
714 	 * The above returns on success the # of functions enabled,
715 	 * but if it didn't find any functions it returns zero.
716 	 * Consider no functions a failure too.
717 	 */
718 	if (!ret) {
719 		cmd_ops->unreg(glob, trigger_ops, trigger_data, file);
720 		ret = -ENOENT;
721 	} else if (ret > 0)
722 		ret = 0;
723 
724 	/* Down the counter of trigger_data or free it if not used anymore */
725 	event_trigger_free(trigger_ops, trigger_data);
726  out:
727 	return ret;
728 
729  out_free:
730 	if (cmd_ops->set_filter)
731 		cmd_ops->set_filter(NULL, trigger_data, NULL);
732 	kfree(trigger_data);
733 	goto out;
734 }
735 
736 /**
737  * set_trigger_filter - Generic event_command @set_filter implementation
738  * @filter_str: The filter string for the trigger, NULL to remove filter
739  * @trigger_data: Trigger-specific data
740  * @file: The trace_event_file associated with the event
741  *
742  * Common implementation for event command filter parsing and filter
743  * instantiation.
744  *
745  * Usually used directly as the @set_filter method in event command
746  * implementations.
747  *
748  * Also used to remove a filter (if filter_str = NULL).
749  *
750  * Return: 0 on success, errno otherwise
751  */
752 int set_trigger_filter(char *filter_str,
753 		       struct event_trigger_data *trigger_data,
754 		       struct trace_event_file *file)
755 {
756 	struct event_trigger_data *data = trigger_data;
757 	struct event_filter *filter = NULL, *tmp;
758 	int ret = -EINVAL;
759 	char *s;
760 
761 	if (!filter_str) /* clear the current filter */
762 		goto assign;
763 
764 	s = strsep(&filter_str, " \t");
765 
766 	if (!strlen(s) || strcmp(s, "if") != 0)
767 		goto out;
768 
769 	if (!filter_str)
770 		goto out;
771 
772 	/* The filter is for the 'trigger' event, not the triggered event */
773 	ret = create_event_filter(file->tr, file->event_call,
774 				  filter_str, false, &filter);
775 	/*
776 	 * If create_event_filter() fails, filter still needs to be freed.
777 	 * Which the calling code will do with data->filter.
778 	 */
779  assign:
780 	tmp = rcu_access_pointer(data->filter);
781 
782 	rcu_assign_pointer(data->filter, filter);
783 
784 	if (tmp) {
785 		/* Make sure the call is done with the filter */
786 		tracepoint_synchronize_unregister();
787 		free_event_filter(tmp);
788 	}
789 
790 	kfree(data->filter_str);
791 	data->filter_str = NULL;
792 
793 	if (filter_str) {
794 		data->filter_str = kstrdup(filter_str, GFP_KERNEL);
795 		if (!data->filter_str) {
796 			free_event_filter(rcu_access_pointer(data->filter));
797 			data->filter = NULL;
798 			ret = -ENOMEM;
799 		}
800 	}
801  out:
802 	return ret;
803 }
804 
805 static LIST_HEAD(named_triggers);
806 
807 /**
808  * find_named_trigger - Find the common named trigger associated with @name
809  * @name: The name of the set of named triggers to find the common data for
810  *
811  * Named triggers are sets of triggers that share a common set of
812  * trigger data.  The first named trigger registered with a given name
813  * owns the common trigger data that the others subsequently
814  * registered with the same name will reference.  This function
815  * returns the common trigger data associated with that first
816  * registered instance.
817  *
818  * Return: the common trigger data for the given named trigger on
819  * success, NULL otherwise.
820  */
821 struct event_trigger_data *find_named_trigger(const char *name)
822 {
823 	struct event_trigger_data *data;
824 
825 	if (!name)
826 		return NULL;
827 
828 	list_for_each_entry(data, &named_triggers, named_list) {
829 		if (data->named_data)
830 			continue;
831 		if (strcmp(data->name, name) == 0)
832 			return data;
833 	}
834 
835 	return NULL;
836 }
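
/*
 * (Illustrative) In current kernels named triggers are created by
 * hist triggers carrying a 'name=' parameter; triggers registered
 * with the same name update the same shared histogram data, e.g.:
 *
 *	echo 'hist:name=foo:keys=common_pid' > \
 *		events/sched/sched_wakeup/trigger
 *	echo 'hist:name=foo:keys=common_pid' > \
 *		events/sched/sched_switch/trigger
 */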
837 
838 /**
839  * is_named_trigger - determine if a given trigger is a named trigger
840  * @test: The trigger data to test
841  *
842  * Return: true if 'test' is a named trigger, false otherwise.
843  */
844 bool is_named_trigger(struct event_trigger_data *test)
845 {
846 	struct event_trigger_data *data;
847 
848 	list_for_each_entry(data, &named_triggers, named_list) {
849 		if (test == data)
850 			return true;
851 	}
852 
853 	return false;
854 }
855 
856 /**
857  * save_named_trigger - save the trigger in the named trigger list
858  * @name: The name of the named trigger set
859  * @data: The trigger data to save
860  *
861  * Return: 0 if successful, negative error otherwise.
862  */
863 int save_named_trigger(const char *name, struct event_trigger_data *data)
864 {
865 	data->name = kstrdup(name, GFP_KERNEL);
866 	if (!data->name)
867 		return -ENOMEM;
868 
869 	list_add(&data->named_list, &named_triggers);
870 
871 	return 0;
872 }
873 
874 /**
875  * del_named_trigger - delete a trigger from the named trigger list
876  * @data: The trigger data to delete
877  */
878 void del_named_trigger(struct event_trigger_data *data)
879 {
880 	kfree(data->name);
881 	data->name = NULL;
882 
883 	list_del(&data->named_list);
884 }
885 
886 static void __pause_named_trigger(struct event_trigger_data *data, bool pause)
887 {
888 	struct event_trigger_data *test;
889 
890 	list_for_each_entry(test, &named_triggers, named_list) {
891 		if (strcmp(test->name, data->name) == 0) {
892 			if (pause) {
893 				test->paused_tmp = test->paused;
894 				test->paused = true;
895 			} else {
896 				test->paused = test->paused_tmp;
897 			}
898 		}
899 	}
900 }
901 
902 /**
903  * pause_named_trigger - Pause all named triggers with the same name
904  * @data: The trigger data of a named trigger to pause
905  *
906  * Pauses a named trigger along with all other triggers having the
907  * same name.  Because named triggers share a common set of data,
908  * pausing only one is meaningless, so pausing one named trigger needs
909  * to pause all triggers with the same name.
910  */
911 void pause_named_trigger(struct event_trigger_data *data)
912 {
913 	__pause_named_trigger(data, true);
914 }
915 
916 /**
917  * unpause_named_trigger - Un-pause all named triggers with the same name
918  * @data: The trigger data of a named trigger to unpause
919  *
920  * Un-pauses a named trigger along with all other triggers having the
921  * same name.  Because named triggers share a common set of data,
922  * unpausing only one is meaningless, so unpausing one named trigger
923  * needs to unpause all triggers with the same name.
924  */
925 void unpause_named_trigger(struct event_trigger_data *data)
926 {
927 	__pause_named_trigger(data, false);
928 }
929 
930 /**
931  * set_named_trigger_data - Associate common named trigger data
932  * @data: The trigger data to associate
933  * @named_data: The common named trigger to be associated
934  *
935  * Named triggers are sets of triggers that share a common set of
936  * trigger data.  The first named trigger registered with a given name
937  * owns the common trigger data that the others subsequently
938  * registered with the same name will reference.  This function
939  * associates the common trigger data from the first trigger with the
940  * given trigger.
941  */
942 void set_named_trigger_data(struct event_trigger_data *data,
943 			    struct event_trigger_data *named_data)
944 {
945 	data->named_data = named_data;
946 }
947 
948 struct event_trigger_data *
949 get_named_trigger_data(struct event_trigger_data *data)
950 {
951 	return data->named_data;
952 }
953 
954 static void
955 traceon_trigger(struct event_trigger_data *data,
956 		struct trace_buffer *buffer, void *rec,
957 		struct ring_buffer_event *event)
958 {
959 	struct trace_event_file *file = data->private_data;
960 
961 	if (file) {
962 		if (tracer_tracing_is_on(file->tr))
963 			return;
964 
965 		tracer_tracing_on(file->tr);
966 		return;
967 	}
968 
969 	if (tracing_is_on())
970 		return;
971 
972 	tracing_on();
973 }
974 
975 static void
976 traceon_count_trigger(struct event_trigger_data *data,
977 		      struct trace_buffer *buffer, void *rec,
978 		      struct ring_buffer_event *event)
979 {
980 	struct trace_event_file *file = data->private_data;
981 
982 	if (file) {
983 		if (tracer_tracing_is_on(file->tr))
984 			return;
985 	} else {
986 		if (tracing_is_on())
987 			return;
988 	}
989 
990 	if (!data->count)
991 		return;
992 
993 	if (data->count != -1)
994 		(data->count)--;
995 
996 	if (file)
997 		tracer_tracing_on(file->tr);
998 	else
999 		tracing_on();
1000 }
1001 
1002 static void
1003 traceoff_trigger(struct event_trigger_data *data,
1004 		 struct trace_buffer *buffer, void *rec,
1005 		 struct ring_buffer_event *event)
1006 {
1007 	struct trace_event_file *file = data->private_data;
1008 
1009 	if (file) {
1010 		if (!tracer_tracing_is_on(file->tr))
1011 			return;
1012 
1013 		tracer_tracing_off(file->tr);
1014 		return;
1015 	}
1016 
1017 	if (!tracing_is_on())
1018 		return;
1019 
1020 	tracing_off();
1021 }
1022 
1023 static void
1024 traceoff_count_trigger(struct event_trigger_data *data,
1025 		       struct trace_buffer *buffer, void *rec,
1026 		       struct ring_buffer_event *event)
1027 {
1028 	struct trace_event_file *file = data->private_data;
1029 
1030 	if (file) {
1031 		if (!tracer_tracing_is_on(file->tr))
1032 			return;
1033 	} else {
1034 		if (!tracing_is_on())
1035 			return;
1036 	}
1037 
1038 	if (!data->count)
1039 		return;
1040 
1041 	if (data->count != -1)
1042 		(data->count)--;
1043 
1044 	if (file)
1045 		tracer_tracing_off(file->tr);
1046 	else
1047 		tracing_off();
1048 }
1049 
1050 static int
1051 traceon_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
1052 		      struct event_trigger_data *data)
1053 {
1054 	return event_trigger_print("traceon", m, (void *)data->count,
1055 				   data->filter_str);
1056 }
1057 
1058 static int
1059 traceoff_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
1060 		       struct event_trigger_data *data)
1061 {
1062 	return event_trigger_print("traceoff", m, (void *)data->count,
1063 				   data->filter_str);
1064 }
1065 
1066 static struct event_trigger_ops traceon_trigger_ops = {
1067 	.func			= traceon_trigger,
1068 	.print			= traceon_trigger_print,
1069 	.init			= event_trigger_init,
1070 	.free			= event_trigger_free,
1071 };
1072 
1073 static struct event_trigger_ops traceon_count_trigger_ops = {
1074 	.func			= traceon_count_trigger,
1075 	.print			= traceon_trigger_print,
1076 	.init			= event_trigger_init,
1077 	.free			= event_trigger_free,
1078 };
1079 
1080 static struct event_trigger_ops traceoff_trigger_ops = {
1081 	.func			= traceoff_trigger,
1082 	.print			= traceoff_trigger_print,
1083 	.init			= event_trigger_init,
1084 	.free			= event_trigger_free,
1085 };
1086 
1087 static struct event_trigger_ops traceoff_count_trigger_ops = {
1088 	.func			= traceoff_count_trigger,
1089 	.print			= traceoff_trigger_print,
1090 	.init			= event_trigger_init,
1091 	.free			= event_trigger_free,
1092 };
1093 
1094 static struct event_trigger_ops *
1095 onoff_get_trigger_ops(char *cmd, char *param)
1096 {
1097 	struct event_trigger_ops *ops;
1098 
1099 	/* we register both traceon and traceoff to this callback */
1100 	if (strcmp(cmd, "traceon") == 0)
1101 		ops = param ? &traceon_count_trigger_ops :
1102 			&traceon_trigger_ops;
1103 	else
1104 		ops = param ? &traceoff_count_trigger_ops :
1105 			&traceoff_trigger_ops;
1106 
1107 	return ops;
1108 }
1109 
1110 static struct event_command trigger_traceon_cmd = {
1111 	.name			= "traceon",
1112 	.trigger_type		= ETT_TRACE_ONOFF,
1113 	.func			= event_trigger_callback,
1114 	.reg			= register_trigger,
1115 	.unreg			= unregister_trigger,
1116 	.get_trigger_ops	= onoff_get_trigger_ops,
1117 	.set_filter		= set_trigger_filter,
1118 };
1119 
1120 static struct event_command trigger_traceoff_cmd = {
1121 	.name			= "traceoff",
1122 	.trigger_type		= ETT_TRACE_ONOFF,
1123 	.flags			= EVENT_CMD_FL_POST_TRIGGER,
1124 	.func			= event_trigger_callback,
1125 	.reg			= register_trigger,
1126 	.unreg			= unregister_trigger,
1127 	.get_trigger_ops	= onoff_get_trigger_ops,
1128 	.set_filter		= set_trigger_filter,
1129 };
1130 
1131 #ifdef CONFIG_TRACER_SNAPSHOT
1132 static void
1133 snapshot_trigger(struct event_trigger_data *data,
1134 		 struct trace_buffer *buffer, void *rec,
1135 		 struct ring_buffer_event *event)
1136 {
1137 	struct trace_event_file *file = data->private_data;
1138 
1139 	if (file)
1140 		tracing_snapshot_instance(file->tr);
1141 	else
1142 		tracing_snapshot();
1143 }
1144 
1145 static void
1146 snapshot_count_trigger(struct event_trigger_data *data,
1147 		       struct trace_buffer *buffer, void *rec,
1148 		       struct ring_buffer_event *event)
1149 {
1150 	if (!data->count)
1151 		return;
1152 
1153 	if (data->count != -1)
1154 		(data->count)--;
1155 
1156 	snapshot_trigger(data, buffer, rec, event);
1157 }
1158 
1159 static int
1160 register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
1161 			  struct event_trigger_data *data,
1162 			  struct trace_event_file *file)
1163 {
1164 	int ret = tracing_alloc_snapshot_instance(file->tr);
1165 
1166 	if (ret < 0)
1167 		return ret;
1168 
1169 	return register_trigger(glob, ops, data, file);
1170 }
1171 
1172 static int
1173 snapshot_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
1174 		       struct event_trigger_data *data)
1175 {
1176 	return event_trigger_print("snapshot", m, (void *)data->count,
1177 				   data->filter_str);
1178 }
1179 
1180 static struct event_trigger_ops snapshot_trigger_ops = {
1181 	.func			= snapshot_trigger,
1182 	.print			= snapshot_trigger_print,
1183 	.init			= event_trigger_init,
1184 	.free			= event_trigger_free,
1185 };
1186 
1187 static struct event_trigger_ops snapshot_count_trigger_ops = {
1188 	.func			= snapshot_count_trigger,
1189 	.print			= snapshot_trigger_print,
1190 	.init			= event_trigger_init,
1191 	.free			= event_trigger_free,
1192 };
1193 
1194 static struct event_trigger_ops *
1195 snapshot_get_trigger_ops(char *cmd, char *param)
1196 {
1197 	return param ? &snapshot_count_trigger_ops : &snapshot_trigger_ops;
1198 }
1199 
1200 static struct event_command trigger_snapshot_cmd = {
1201 	.name			= "snapshot",
1202 	.trigger_type		= ETT_SNAPSHOT,
1203 	.func			= event_trigger_callback,
1204 	.reg			= register_snapshot_trigger,
1205 	.unreg			= unregister_trigger,
1206 	.get_trigger_ops	= snapshot_get_trigger_ops,
1207 	.set_filter		= set_trigger_filter,
1208 };
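
/*
 * (Illustrative) Example: take one snapshot of the trace buffer the
 * first time the event fires:
 *
 *	echo 'snapshot:1' > \
 *		/sys/kernel/tracing/events/sched/sched_process_exit/trigger
 */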
1209 
1210 static __init int register_trigger_snapshot_cmd(void)
1211 {
1212 	int ret;
1213 
1214 	ret = register_event_command(&trigger_snapshot_cmd);
1215 	WARN_ON(ret < 0);
1216 
1217 	return ret;
1218 }
1219 #else
1220 static __init int register_trigger_snapshot_cmd(void) { return 0; }
1221 #endif /* CONFIG_TRACER_SNAPSHOT */
1222 
1223 #ifdef CONFIG_STACKTRACE
1224 #ifdef CONFIG_UNWINDER_ORC
1225 /* Skip 2:
1226  *   event_triggers_post_call()
1227  *   trace_event_raw_event_xxx()
1228  */
1229 # define STACK_SKIP 2
1230 #else
1231 /*
1232  * Skip 4:
1233  *   stacktrace_trigger()
1234  *   event_triggers_post_call()
1235  *   trace_event_buffer_commit()
1236  *   trace_event_raw_event_xxx()
1237  */
1238 #define STACK_SKIP 4
1239 #endif
1240 
1241 static void
1242 stacktrace_trigger(struct event_trigger_data *data,
1243 		   struct trace_buffer *buffer,  void *rec,
1244 		   struct ring_buffer_event *event)
1245 {
1246 	struct trace_event_file *file = data->private_data;
1247 
1248 	if (file)
1249 		__trace_stack(file->tr, tracing_gen_ctx(), STACK_SKIP);
1250 	else
1251 		trace_dump_stack(STACK_SKIP);
1252 }
1253 
1254 static void
1255 stacktrace_count_trigger(struct event_trigger_data *data,
1256 			 struct trace_buffer *buffer, void *rec,
1257 			 struct ring_buffer_event *event)
1258 {
1259 	if (!data->count)
1260 		return;
1261 
1262 	if (data->count != -1)
1263 		(data->count)--;
1264 
1265 	stacktrace_trigger(data, buffer, rec, event);
1266 }
1267 
1268 static int
1269 stacktrace_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
1270 			 struct event_trigger_data *data)
1271 {
1272 	return event_trigger_print("stacktrace", m, (void *)data->count,
1273 				   data->filter_str);
1274 }
1275 
1276 static struct event_trigger_ops stacktrace_trigger_ops = {
1277 	.func			= stacktrace_trigger,
1278 	.print			= stacktrace_trigger_print,
1279 	.init			= event_trigger_init,
1280 	.free			= event_trigger_free,
1281 };
1282 
1283 static struct event_trigger_ops stacktrace_count_trigger_ops = {
1284 	.func			= stacktrace_count_trigger,
1285 	.print			= stacktrace_trigger_print,
1286 	.init			= event_trigger_init,
1287 	.free			= event_trigger_free,
1288 };
1289 
1290 static struct event_trigger_ops *
1291 stacktrace_get_trigger_ops(char *cmd, char *param)
1292 {
1293 	return param ? &stacktrace_count_trigger_ops : &stacktrace_trigger_ops;
1294 }
1295 
1296 static struct event_command trigger_stacktrace_cmd = {
1297 	.name			= "stacktrace",
1298 	.trigger_type		= ETT_STACKTRACE,
1299 	.flags			= EVENT_CMD_FL_POST_TRIGGER,
1300 	.func			= event_trigger_callback,
1301 	.reg			= register_trigger,
1302 	.unreg			= unregister_trigger,
1303 	.get_trigger_ops	= stacktrace_get_trigger_ops,
1304 	.set_filter		= set_trigger_filter,
1305 };
1306 
1307 static __init int register_trigger_stacktrace_cmd(void)
1308 {
1309 	int ret;
1310 
1311 	ret = register_event_command(&trigger_stacktrace_cmd);
1312 	WARN_ON(ret < 0);
1313 
1314 	return ret;
1315 }
1316 #else
1317 static __init int register_trigger_stacktrace_cmd(void) { return 0; }
1318 #endif /* CONFIG_STACKTRACE */
1319 
1320 static __init void unregister_trigger_traceon_traceoff_cmds(void)
1321 {
1322 	unregister_event_command(&trigger_traceon_cmd);
1323 	unregister_event_command(&trigger_traceoff_cmd);
1324 }
1325 
1326 static void
1327 event_enable_trigger(struct event_trigger_data *data,
1328 		     struct trace_buffer *buffer,  void *rec,
1329 		     struct ring_buffer_event *event)
1330 {
1331 	struct enable_trigger_data *enable_data = data->private_data;
1332 
1333 	if (enable_data->enable)
1334 		clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1335 	else
1336 		set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1337 }
1338 
1339 static void
1340 event_enable_count_trigger(struct event_trigger_data *data,
1341 			   struct trace_buffer *buffer,  void *rec,
1342 			   struct ring_buffer_event *event)
1343 {
1344 	struct enable_trigger_data *enable_data = data->private_data;
1345 
1346 	if (!data->count)
1347 		return;
1348 
1349 	/* Skip if the event is in a state we want to switch to */
1350 	if (enable_data->enable == !(enable_data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
1351 		return;
1352 
1353 	if (data->count != -1)
1354 		(data->count)--;
1355 
1356 	event_enable_trigger(data, buffer, rec, event);
1357 }
1358 
1359 int event_enable_trigger_print(struct seq_file *m,
1360 			       struct event_trigger_ops *ops,
1361 			       struct event_trigger_data *data)
1362 {
1363 	struct enable_trigger_data *enable_data = data->private_data;
1364 
1365 	seq_printf(m, "%s:%s:%s",
1366 		   enable_data->hist ?
1367 		   (enable_data->enable ? ENABLE_HIST_STR : DISABLE_HIST_STR) :
1368 		   (enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR),
1369 		   enable_data->file->event_call->class->system,
1370 		   trace_event_name(enable_data->file->event_call));
1371 
1372 	if (data->count == -1)
1373 		seq_puts(m, ":unlimited");
1374 	else
1375 		seq_printf(m, ":count=%ld", data->count);
1376 
1377 	if (data->filter_str)
1378 		seq_printf(m, " if %s\n", data->filter_str);
1379 	else
1380 		seq_putc(m, '\n');
1381 
1382 	return 0;
1383 }
1384 
1385 void event_enable_trigger_free(struct event_trigger_ops *ops,
1386 			       struct event_trigger_data *data)
1387 {
1388 	struct enable_trigger_data *enable_data = data->private_data;
1389 
1390 	if (WARN_ON_ONCE(data->ref <= 0))
1391 		return;
1392 
1393 	data->ref--;
1394 	if (!data->ref) {
1395 		/* Remove the SOFT_MODE flag */
1396 		trace_event_enable_disable(enable_data->file, 0, 1);
1397 		trace_event_put_ref(enable_data->file->event_call);
1398 		trigger_data_free(data);
1399 		kfree(enable_data);
1400 	}
1401 }
1402 
1403 static struct event_trigger_ops event_enable_trigger_ops = {
1404 	.func			= event_enable_trigger,
1405 	.print			= event_enable_trigger_print,
1406 	.init			= event_trigger_init,
1407 	.free			= event_enable_trigger_free,
1408 };
1409 
1410 static struct event_trigger_ops event_enable_count_trigger_ops = {
1411 	.func			= event_enable_count_trigger,
1412 	.print			= event_enable_trigger_print,
1413 	.init			= event_trigger_init,
1414 	.free			= event_enable_trigger_free,
1415 };
1416 
1417 static struct event_trigger_ops event_disable_trigger_ops = {
1418 	.func			= event_enable_trigger,
1419 	.print			= event_enable_trigger_print,
1420 	.init			= event_trigger_init,
1421 	.free			= event_enable_trigger_free,
1422 };
1423 
1424 static struct event_trigger_ops event_disable_count_trigger_ops = {
1425 	.func			= event_enable_count_trigger,
1426 	.print			= event_enable_trigger_print,
1427 	.init			= event_trigger_init,
1428 	.free			= event_enable_trigger_free,
1429 };
1430 
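/*
 * (Illustrative) The parameter parsed below, following an
 * 'enable_event' or 'disable_event' command, has the form
 *
 *	<system>:<event>[:count] [if <filter>]
 *
 * e.g. (exact tracefs path may vary):
 *
 *	echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \
 *		/sys/kernel/tracing/events/block/block_unplug/trigger
 */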
1431 int event_enable_trigger_func(struct event_command *cmd_ops,
1432 			      struct trace_event_file *file,
1433 			      char *glob, char *cmd, char *param)
1434 {
1435 	struct trace_event_file *event_enable_file;
1436 	struct enable_trigger_data *enable_data;
1437 	struct event_trigger_data *trigger_data;
1438 	struct event_trigger_ops *trigger_ops;
1439 	struct trace_array *tr = file->tr;
1440 	const char *system;
1441 	const char *event;
1442 	bool hist = false;
1443 	char *trigger;
1444 	char *number;
1445 	bool enable;
1446 	int ret;
1447 
1448 	if (!param)
1449 		return -EINVAL;
1450 
1451 	/* separate the trigger from the filter (s:e:n [if filter]) */
1452 	trigger = strsep(&param, " \t");
1453 	if (!trigger)
1454 		return -EINVAL;
1455 	if (param) {
1456 		param = skip_spaces(param);
1457 		if (!*param)
1458 			param = NULL;
1459 	}
1460 
1461 	system = strsep(&trigger, ":");
1462 	if (!trigger)
1463 		return -EINVAL;
1464 
1465 	event = strsep(&trigger, ":");
1466 
1467 	ret = -EINVAL;
1468 	event_enable_file = find_event_file(tr, system, event);
1469 	if (!event_enable_file)
1470 		goto out;
1471 
1472 #ifdef CONFIG_HIST_TRIGGERS
1473 	hist = ((strcmp(cmd, ENABLE_HIST_STR) == 0) ||
1474 		(strcmp(cmd, DISABLE_HIST_STR) == 0));
1475 
1476 	enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
1477 		  (strcmp(cmd, ENABLE_HIST_STR) == 0));
1478 #else
1479 	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
1480 #endif
1481 	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
1482 
1483 	ret = -ENOMEM;
1484 	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
1485 	if (!trigger_data)
1486 		goto out;
1487 
1488 	enable_data = kzalloc(sizeof(*enable_data), GFP_KERNEL);
1489 	if (!enable_data) {
1490 		kfree(trigger_data);
1491 		goto out;
1492 	}
1493 
1494 	trigger_data->count = -1;
1495 	trigger_data->ops = trigger_ops;
1496 	trigger_data->cmd_ops = cmd_ops;
1497 	INIT_LIST_HEAD(&trigger_data->list);
1498 	RCU_INIT_POINTER(trigger_data->filter, NULL);
1499 
1500 	enable_data->hist = hist;
1501 	enable_data->enable = enable;
1502 	enable_data->file = event_enable_file;
1503 	trigger_data->private_data = enable_data;
1504 
1505 	if (glob[0] == '!') {
1506 		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
1507 		kfree(trigger_data);
1508 		kfree(enable_data);
1509 		ret = 0;
1510 		goto out;
1511 	}
1512 
1513 	/* Up the trigger_data count to make sure nothing frees it on failure */
1514 	event_trigger_init(trigger_ops, trigger_data);
1515 
1516 	if (trigger) {
1517 		number = strsep(&trigger, ":");
1518 
1519 		ret = -EINVAL;
1520 		if (!strlen(number))
1521 			goto out_free;
1522 
1523 		/*
1524 		 * We use the callback data field (which is a pointer)
1525 		 * as our counter.
1526 		 */
1527 		ret = kstrtoul(number, 0, &trigger_data->count);
1528 		if (ret)
1529 			goto out_free;
1530 	}
1531 
1532 	if (!param) /* if param is non-empty, it's supposed to be a filter */
1533 		goto out_reg;
1534 
1535 	if (!cmd_ops->set_filter)
1536 		goto out_reg;
1537 
1538 	ret = cmd_ops->set_filter(param, trigger_data, file);
1539 	if (ret < 0)
1540 		goto out_free;
1541 
1542  out_reg:
1543 	/* Don't let event modules unload while probe registered */
1544 	ret = trace_event_try_get_ref(event_enable_file->event_call);
1545 	if (!ret) {
1546 		ret = -EBUSY;
1547 		goto out_free;
1548 	}
1549 
1550 	ret = trace_event_enable_disable(event_enable_file, 1, 1);
1551 	if (ret < 0)
1552 		goto out_put;
1553 	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
1554 	/*
1555 	 * The above returns on success the # of functions enabled,
1556 	 * but if it didn't find any functions it returns zero.
1557 	 * Consider no functions a failure too.
1558 	 */
1559 	if (!ret) {
1560 		ret = -ENOENT;
1561 		goto out_disable;
1562 	} else if (ret < 0)
1563 		goto out_disable;
1564 	/* Just return zero, not the number of enabled functions */
1565 	ret = 0;
1566 	event_trigger_free(trigger_ops, trigger_data);
1567  out:
1568 	return ret;
1569 
1570  out_disable:
1571 	trace_event_enable_disable(event_enable_file, 0, 1);
1572  out_put:
1573 	trace_event_put_ref(event_enable_file->event_call);
1574  out_free:
1575 	if (cmd_ops->set_filter)
1576 		cmd_ops->set_filter(NULL, trigger_data, NULL);
1577 	event_trigger_free(trigger_ops, trigger_data);
1578 	kfree(enable_data);
1579 	goto out;
1580 }
1581 
1582 int event_enable_register_trigger(char *glob,
1583 				  struct event_trigger_ops *ops,
1584 				  struct event_trigger_data *data,
1585 				  struct trace_event_file *file)
1586 {
1587 	struct enable_trigger_data *enable_data = data->private_data;
1588 	struct enable_trigger_data *test_enable_data;
1589 	struct event_trigger_data *test;
1590 	int ret = 0;
1591 
1592 	lockdep_assert_held(&event_mutex);
1593 
1594 	list_for_each_entry(test, &file->triggers, list) {
1595 		test_enable_data = test->private_data;
1596 		if (test_enable_data &&
1597 		    (test->cmd_ops->trigger_type ==
1598 		     data->cmd_ops->trigger_type) &&
1599 		    (test_enable_data->file == enable_data->file)) {
1600 			ret = -EEXIST;
1601 			goto out;
1602 		}
1603 	}
1604 
1605 	if (data->ops->init) {
1606 		ret = data->ops->init(data->ops, data);
1607 		if (ret < 0)
1608 			goto out;
1609 	}
1610 
1611 	list_add_rcu(&data->list, &file->triggers);
1612 	ret++;
1613 
1614 	update_cond_flag(file);
1615 	if (trace_event_trigger_enable_disable(file, 1) < 0) {
1616 		list_del_rcu(&data->list);
1617 		update_cond_flag(file);
1618 		ret--;
1619 	}
1620 out:
1621 	return ret;
1622 }
1623 
1624 void event_enable_unregister_trigger(char *glob,
1625 				     struct event_trigger_ops *ops,
1626 				     struct event_trigger_data *test,
1627 				     struct trace_event_file *file)
1628 {
1629 	struct enable_trigger_data *test_enable_data = test->private_data;
1630 	struct enable_trigger_data *enable_data;
1631 	struct event_trigger_data *data;
1632 	bool unregistered = false;
1633 
1634 	lockdep_assert_held(&event_mutex);
1635 
1636 	list_for_each_entry(data, &file->triggers, list) {
1637 		enable_data = data->private_data;
1638 		if (enable_data &&
1639 		    (data->cmd_ops->trigger_type ==
1640 		     test->cmd_ops->trigger_type) &&
1641 		    (enable_data->file == test_enable_data->file)) {
1642 			unregistered = true;
1643 			list_del_rcu(&data->list);
1644 			trace_event_trigger_enable_disable(file, 0);
1645 			update_cond_flag(file);
1646 			break;
1647 		}
1648 	}
1649 
1650 	if (unregistered && data->ops->free)
1651 		data->ops->free(data->ops, data);
1652 }
1653 
1654 static struct event_trigger_ops *
1655 event_enable_get_trigger_ops(char *cmd, char *param)
1656 {
1657 	struct event_trigger_ops *ops;
1658 	bool enable;
1659 
1660 #ifdef CONFIG_HIST_TRIGGERS
1661 	enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
1662 		  (strcmp(cmd, ENABLE_HIST_STR) == 0));
1663 #else
1664 	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
1665 #endif
1666 	if (enable)
1667 		ops = param ? &event_enable_count_trigger_ops :
1668 			&event_enable_trigger_ops;
1669 	else
1670 		ops = param ? &event_disable_count_trigger_ops :
1671 			&event_disable_trigger_ops;
1672 
1673 	return ops;
1674 }
1675 
1676 static struct event_command trigger_enable_cmd = {
1677 	.name			= ENABLE_EVENT_STR,
1678 	.trigger_type		= ETT_EVENT_ENABLE,
1679 	.func			= event_enable_trigger_func,
1680 	.reg			= event_enable_register_trigger,
1681 	.unreg			= event_enable_unregister_trigger,
1682 	.get_trigger_ops	= event_enable_get_trigger_ops,
1683 	.set_filter		= set_trigger_filter,
1684 };
1685 
1686 static struct event_command trigger_disable_cmd = {
1687 	.name			= DISABLE_EVENT_STR,
1688 	.trigger_type		= ETT_EVENT_ENABLE,
1689 	.func			= event_enable_trigger_func,
1690 	.reg			= event_enable_register_trigger,
1691 	.unreg			= event_enable_unregister_trigger,
1692 	.get_trigger_ops	= event_enable_get_trigger_ops,
1693 	.set_filter		= set_trigger_filter,
1694 };
1695 
1696 static __init void unregister_trigger_enable_disable_cmds(void)
1697 {
1698 	unregister_event_command(&trigger_enable_cmd);
1699 	unregister_event_command(&trigger_disable_cmd);
1700 }
1701 
1702 static __init int register_trigger_enable_disable_cmds(void)
1703 {
1704 	int ret;
1705 
1706 	ret = register_event_command(&trigger_enable_cmd);
1707 	if (WARN_ON(ret < 0))
1708 		return ret;
1709 	ret = register_event_command(&trigger_disable_cmd);
1710 	if (WARN_ON(ret < 0))
1711 		unregister_trigger_enable_disable_cmds();
1712 
1713 	return ret;
1714 }
1715 
1716 static __init int register_trigger_traceon_traceoff_cmds(void)
1717 {
1718 	int ret;
1719 
1720 	ret = register_event_command(&trigger_traceon_cmd);
1721 	if (WARN_ON(ret < 0))
1722 		return ret;
1723 	ret = register_event_command(&trigger_traceoff_cmd);
1724 	if (WARN_ON(ret < 0))
1725 		unregister_trigger_traceon_traceoff_cmds();
1726 
1727 	return ret;
1728 }
1729 
1730 __init int register_trigger_cmds(void)
1731 {
1732 	register_trigger_traceon_traceoff_cmds();
1733 	register_trigger_snapshot_cmd();
1734 	register_trigger_stacktrace_cmd();
1735 	register_trigger_enable_disable_cmds();
1736 	register_trigger_hist_enable_disable_cmds();
1737 	register_trigger_hist_cmd();
1738 
1739 	return 0;
1740 }
1741