// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2008-2014 Mathieu Desnoyers
 */
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/jhash.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/tracepoint.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/static_key.h>

enum tp_func_state {
	TP_FUNC_0,
	TP_FUNC_1,
	TP_FUNC_2,
	TP_FUNC_N,
};

extern tracepoint_ptr_t __start___tracepoints_ptrs[];
extern tracepoint_ptr_t __stop___tracepoints_ptrs[];

DEFINE_SRCU(tracepoint_srcu);
EXPORT_SYMBOL_GPL(tracepoint_srcu);

enum tp_transition_sync {
	TP_TRANSITION_SYNC_1_0_1,
	TP_TRANSITION_SYNC_N_2_1,

	_NR_TP_TRANSITION_SYNC,
};

struct tp_transition_snapshot {
	unsigned long rcu;
	unsigned long srcu;
	bool ongoing;
};

/* Protected by tracepoints_mutex */
static struct tp_transition_snapshot tp_transition_snapshot[_NR_TP_TRANSITION_SYNC];

static void tp_rcu_get_state(enum tp_transition_sync sync)
{
	struct tp_transition_snapshot *snapshot = &tp_transition_snapshot[sync];

	/* Keep the latest get_state snapshot. */
	snapshot->rcu = get_state_synchronize_rcu();
	snapshot->srcu = start_poll_synchronize_srcu(&tracepoint_srcu);
	snapshot->ongoing = true;
}

static void tp_rcu_cond_sync(enum tp_transition_sync sync)
{
	struct tp_transition_snapshot *snapshot = &tp_transition_snapshot[sync];

	if (!snapshot->ongoing)
		return;
	cond_synchronize_rcu(snapshot->rcu);
	if (!poll_state_synchronize_srcu(&tracepoint_srcu, snapshot->srcu))
		synchronize_srcu(&tracepoint_srcu);
	snapshot->ongoing = false;
}
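
/*
 * For illustration: a writer taking a tracepoint through a 1->0 transition
 * records the grace period state with
 *
 *	tp_rcu_get_state(TP_TRANSITION_SYNC_1_0_1);
 *
 * and a later 0->1 writer calls
 *
 *	tp_rcu_cond_sync(TP_TRANSITION_SYNC_1_0_1);
 *
 * which only blocks if the RCU and SRCU grace periods started by
 * tp_rcu_get_state() have not yet elapsed, so the common case (no recent
 * transition) does not block at all.
 */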

/* Set to 1 to enable tracepoint debug output */
static const int tracepoint_debug;

#ifdef CONFIG_MODULES
/*
 * Tracepoint module list mutex protects the local module list.
 */
static DEFINE_MUTEX(tracepoint_module_list_mutex);

/* Local list of struct tp_module */
static LIST_HEAD(tracepoint_module_list);
#endif /* CONFIG_MODULES */

/*
 * tracepoints_mutex protects the builtin and module tracepoints.
 * tracepoints_mutex nests inside tracepoint_module_list_mutex.
 */
static DEFINE_MUTEX(tracepoints_mutex);

static struct rcu_head *early_probes;
static bool ok_to_free_tracepoints;

/*
 * Note about RCU: it is used to delay freeing of old probe arrays until
 * a quiescent state is reached.
 */
struct tp_probes {
	struct rcu_head rcu;
	struct tracepoint_func probes[];
};

/* Stub installed when removing a probe but allocating a new tp_funcs fails */
static void tp_stub_func(void)
{
	return;
}

static inline void *allocate_probes(int count)
{
	struct tp_probes *p = kmalloc(struct_size(p, probes, count),
				      GFP_KERNEL);
	return p == NULL ? NULL : p->probes;
}

static void srcu_free_old_probes(struct rcu_head *head)
{
	kfree(container_of(head, struct tp_probes, rcu));
}

static void rcu_free_old_probes(struct rcu_head *head)
{
	call_srcu(&tracepoint_srcu, head, srcu_free_old_probes);
}

static __init int release_early_probes(void)
{
	struct rcu_head *tmp;

	ok_to_free_tracepoints = true;

	while (early_probes) {
		tmp = early_probes;
		early_probes = tmp->next;
		call_rcu(tmp, rcu_free_old_probes);
	}

	return 0;
}

/* SRCU is initialized at core_initcall */
postcore_initcall(release_early_probes);

static inline void release_probes(struct tracepoint_func *old)
{
	if (old) {
		struct tp_probes *tp_probes = container_of(old,
			struct tp_probes, probes[0]);

		/*
		 * We can't free probes if SRCU is not initialized yet.
		 * Postpone the freeing till after SRCU is initialized.
		 */
		if (unlikely(!ok_to_free_tracepoints)) {
			tp_probes->rcu.next = early_probes;
			early_probes = &tp_probes->rcu;
			return;
		}

		/*
		 * Tracepoint probes are protected by both sched RCU and SRCU,
		 * by calling the SRCU callback in the sched RCU callback we
		 * cover both cases. So let us chain the SRCU and sched RCU
		 * callbacks to wait for both grace periods.
		 */
		call_rcu(&tp_probes->rcu, rcu_free_old_probes);
	}
}

static void debug_print_probes(struct tracepoint_func *funcs)
{
	int i;

	if (!tracepoint_debug || !funcs)
		return;

	for (i = 0; funcs[i].func; i++)
		printk(KERN_DEBUG "Probe %d : %p\n", i, funcs[i].func);
}

static struct tracepoint_func *
func_add(struct tracepoint_func **funcs, struct tracepoint_func *tp_func,
	 int prio)
{
	struct tracepoint_func *old, *new;
	int nr_probes = 0;
	int stub_funcs = 0;
	int pos = -1;

	if (WARN_ON(!tp_func->func))
		return ERR_PTR(-EINVAL);

	debug_print_probes(*funcs);
	old = *funcs;
	if (old) {
		/* (N -> N+1), (N != 0, 1) probes */
		for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
			/* Insert before probes of lower priority */
			if (pos < 0 && old[nr_probes].prio < prio)
				pos = nr_probes;
			if (old[nr_probes].func == tp_func->func &&
			    old[nr_probes].data == tp_func->data)
				return ERR_PTR(-EEXIST);
			if (old[nr_probes].func == tp_stub_func)
				stub_funcs++;
		}
	}
	/* + 2: one for the new probe, one for the NULL terminator, minus the stubs */
	new = allocate_probes(nr_probes + 2 - stub_funcs);
	if (new == NULL)
		return ERR_PTR(-ENOMEM);
	if (old) {
		if (stub_funcs) {
			/* Need to copy one at a time to remove stubs */
			int probes = 0;

			pos = -1;
			for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
				if (old[nr_probes].func == tp_stub_func)
					continue;
				if (pos < 0 && old[nr_probes].prio < prio)
					pos = probes++;
				new[probes++] = old[nr_probes];
			}
			nr_probes = probes;
			if (pos < 0)
				pos = probes;
			else
				nr_probes--; /* Account for insertion */

		} else if (pos < 0) {
			pos = nr_probes;
			memcpy(new, old, nr_probes * sizeof(struct tracepoint_func));
		} else {
			/* Copy higher priority probes ahead of the new probe */
			memcpy(new, old, pos * sizeof(struct tracepoint_func));
			/* Copy the rest after it. */
			memcpy(new + pos + 1, old + pos,
			       (nr_probes - pos) * sizeof(struct tracepoint_func));
		}
	} else
		pos = 0;
	new[pos] = *tp_func;
	new[nr_probes + 1].func = NULL;
	*funcs = new;
	debug_print_probes(*funcs);
	return old;
}
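
/*
 * Worked example of the priority ordering above: starting from
 * old = { A(prio=20), B(prio=10), NULL }, adding C(prio=15) finds pos = 1
 * (the first entry of strictly lower priority) and yields
 * { A(20), C(15), B(10), NULL }, while adding D(prio=10) leaves pos < 0,
 * so D lands after the existing prio-10 entry:
 * { A(20), B(10), D(10), NULL }.
 */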

static void *func_remove(struct tracepoint_func **funcs,
		struct tracepoint_func *tp_func)
{
	int nr_probes = 0, nr_del = 0, i;
	struct tracepoint_func *old, *new;

	old = *funcs;

	if (!old)
		return ERR_PTR(-ENOENT);

	debug_print_probes(*funcs);
	/* (N -> M), (N > 1, M >= 0) probes */
	if (tp_func->func) {
		for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
			if ((old[nr_probes].func == tp_func->func &&
			     old[nr_probes].data == tp_func->data) ||
			    old[nr_probes].func == tp_stub_func)
				nr_del++;
		}
	}

	/*
	 * If probe is NULL, then nr_probes = nr_del = 0, and then the
	 * entire entry will be removed.
	 */
	if (nr_probes - nr_del == 0) {
		/* N -> 0, (N > 1) */
		*funcs = NULL;
		debug_print_probes(*funcs);
		return old;
	} else {
		int j = 0;
		/* N -> M, (N > 1, M > 0) */
		/* + 1 for NULL */
		new = allocate_probes(nr_probes - nr_del + 1);
		if (new) {
			for (i = 0; old[i].func; i++)
				if ((old[i].func != tp_func->func
				     || old[i].data != tp_func->data)
				    && old[i].func != tp_stub_func)
					new[j++] = old[i];
			new[nr_probes - nr_del].func = NULL;
			*funcs = new;
		} else {
			/*
			 * Failed to allocate, replace the old function
			 * with calls to tp_stub_func.
			 */
			for (i = 0; old[i].func; i++)
				if (old[i].func == tp_func->func &&
				    old[i].data == tp_func->data) {
					old[i].func = tp_stub_func;
					/* Set the prio to the next event. */
					if (old[i + 1].func)
						old[i].prio = old[i + 1].prio;
					else
						old[i].prio = -1;
				}
			*funcs = old;
		}
	}
	debug_print_probes(*funcs);
	return old;
}
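
/*
 * Note that when allocate_probes() fails above, the removed probe is not
 * simply leaked: its slot is overwritten in place with tp_stub_func, so
 * readers still see a valid array, and the stubs are compacted away by
 * the next successful func_add() (see the stub_funcs accounting there).
 */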

/*
 * Count the number of functions (enum tp_func_state) in a tp_funcs array.
 */
static enum tp_func_state nr_func_state(const struct tracepoint_func *tp_funcs)
{
	if (!tp_funcs)
		return TP_FUNC_0;
	if (!tp_funcs[1].func)
		return TP_FUNC_1;
	if (!tp_funcs[2].func)
		return TP_FUNC_2;
	return TP_FUNC_N;	/* 3 or more */
}

static void tracepoint_update_call(struct tracepoint *tp, struct tracepoint_func *tp_funcs)
{
	void *func = tp->iterator;

	/* Synthetic events do not have static call sites */
	if (!tp->static_call_key)
		return;
	if (nr_func_state(tp_funcs) == TP_FUNC_1)
		func = tp_funcs[0].func;
	__static_call_update(tp->static_call_key, tp->static_call_tramp, func);
}
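
/*
 * In other words: with exactly one registered probe, the static call is
 * patched to jump directly to that probe, bypassing the iterator; in every
 * other state it points at tp->iterator, which walks the tp->funcs array.
 */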

/*
 * Add the probe function to a tracepoint.
 */
static int tracepoint_add_func(struct tracepoint *tp,
			       struct tracepoint_func *func, int prio,
			       bool warn)
{
	struct tracepoint_func *old, *tp_funcs;
	int ret;

	if (tp->regfunc && !static_key_enabled(&tp->key)) {
		ret = tp->regfunc();
		if (ret < 0)
			return ret;
	}

	tp_funcs = rcu_dereference_protected(tp->funcs,
			lockdep_is_held(&tracepoints_mutex));
	old = func_add(&tp_funcs, func, prio);
	if (IS_ERR(old)) {
		WARN_ON_ONCE(warn && PTR_ERR(old) != -ENOMEM);
		return PTR_ERR(old);
	}
	/*
	 * rcu_assign_pointer has an smp_store_release() which makes sure
	 * that the new probe callbacks array is consistent before setting
	 * a pointer to it.  This array is referenced by __DO_TRACE from
	 * include/linux/tracepoint.h using rcu_dereference_sched().
	 */
	switch (nr_func_state(tp_funcs)) {
	case TP_FUNC_1:		/* 0->1 */
		/*
		 * Make sure new static func never uses old data after a
		 * 1->0->1 transition sequence.
		 */
		tp_rcu_cond_sync(TP_TRANSITION_SYNC_1_0_1);
		/* Set static call to first function */
		tracepoint_update_call(tp, tp_funcs);
		/* Both iterator and static call handle NULL tp->funcs */
		rcu_assign_pointer(tp->funcs, tp_funcs);
		static_key_enable(&tp->key);
		break;
	case TP_FUNC_2:		/* 1->2 */
		/* Set iterator static call */
		tracepoint_update_call(tp, tp_funcs);
		/*
		 * Iterator callback installed before updating tp->funcs.
		 * Requires ordering between RCU assign/dereference and
		 * static call update/call.
		 */
		fallthrough;
	case TP_FUNC_N:		/* N->N+1 (N>1) */
		rcu_assign_pointer(tp->funcs, tp_funcs);
		/*
		 * Make sure static func never uses incorrect data after a
		 * N->...->2->1 (N>1) transition sequence.
		 */
		if (tp_funcs[0].data != old[0].data)
			tp_rcu_get_state(TP_TRANSITION_SYNC_N_2_1);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	release_probes(old);
	return 0;
}

/*
 * Remove a probe function from a tracepoint.
 * Note: only waiting for an RCU grace period after setting elem->call to
 * the empty function ensures that the original callback is not used
 * anymore. This is guaranteed by the preempt_disable() around the call
 * site.
 */
static int tracepoint_remove_func(struct tracepoint *tp,
		struct tracepoint_func *func)
{
	struct tracepoint_func *old, *tp_funcs;

	tp_funcs = rcu_dereference_protected(tp->funcs,
			lockdep_is_held(&tracepoints_mutex));
	old = func_remove(&tp_funcs, func);
	if (WARN_ON_ONCE(IS_ERR(old)))
		return PTR_ERR(old);

	if (tp_funcs == old)
		/* Failed allocating new tp_funcs, replaced func with stub */
		return 0;

	switch (nr_func_state(tp_funcs)) {
	case TP_FUNC_0:		/* 1->0 */
		/* Removed last function */
		if (tp->unregfunc && static_key_enabled(&tp->key))
			tp->unregfunc();

		static_key_disable(&tp->key);
		/* Set iterator static call */
		tracepoint_update_call(tp, tp_funcs);
		/* Both iterator and static call handle NULL tp->funcs */
		rcu_assign_pointer(tp->funcs, NULL);
		/*
		 * Make sure new static func never uses old data after a
		 * 1->0->1 transition sequence.
		 */
		tp_rcu_get_state(TP_TRANSITION_SYNC_1_0_1);
		break;
	case TP_FUNC_1:		/* 2->1 */
		rcu_assign_pointer(tp->funcs, tp_funcs);
		/*
		 * Make sure static func never uses incorrect data after a
		 * N->...->2->1 (N>2) transition sequence. If the first
		 * element's data has changed, then force the synchronization
		 * to prevent current readers that have loaded the old data
		 * from calling the new function.
		 */
		if (tp_funcs[0].data != old[0].data)
			tp_rcu_get_state(TP_TRANSITION_SYNC_N_2_1);
		tp_rcu_cond_sync(TP_TRANSITION_SYNC_N_2_1);
		/* Set static call to first function */
		tracepoint_update_call(tp, tp_funcs);
		break;
	case TP_FUNC_2:		/* N->N-1 (N>2) */
		fallthrough;
	case TP_FUNC_N:
		rcu_assign_pointer(tp->funcs, tp_funcs);
		/*
		 * Make sure static func never uses incorrect data after a
		 * N->...->2->1 (N>2) transition sequence.
		 */
		if (tp_funcs[0].data != old[0].data)
			tp_rcu_get_state(TP_TRANSITION_SYNC_N_2_1);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}
	release_probes(old);
	return 0;
}

/**
 * tracepoint_probe_register_prio_may_exist - Connect a probe to a tracepoint with priority
 * @tp: tracepoint
 * @probe: probe handler
 * @data: tracepoint data
 * @prio: priority of this function over other registered functions
 *
 * Same as tracepoint_probe_register_prio() except that it will not warn
 * if the tracepoint is already registered.
 */
int tracepoint_probe_register_prio_may_exist(struct tracepoint *tp, void *probe,
					     void *data, int prio)
{
	struct tracepoint_func tp_func;
	int ret;

	mutex_lock(&tracepoints_mutex);
	tp_func.func = probe;
	tp_func.data = data;
	tp_func.prio = prio;
	ret = tracepoint_add_func(tp, &tp_func, prio, false);
	mutex_unlock(&tracepoints_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(tracepoint_probe_register_prio_may_exist);

/**
 * tracepoint_probe_register_prio - Connect a probe to a tracepoint with priority
 * @tp: tracepoint
 * @probe: probe handler
 * @data: tracepoint data
 * @prio: priority of this function over other registered functions
 *
 * Returns 0 if ok, error value on error.
 * Note: if @tp is within a module, the caller is responsible for
 * unregistering the probe before the module is gone. This can be
 * performed either with a tracepoint module going notifier, or from
 * within module exit functions.
 */
int tracepoint_probe_register_prio(struct tracepoint *tp, void *probe,
				   void *data, int prio)
{
	struct tracepoint_func tp_func;
	int ret;

	mutex_lock(&tracepoints_mutex);
	tp_func.func = probe;
	tp_func.data = data;
	tp_func.prio = prio;
	ret = tracepoint_add_func(tp, &tp_func, prio, true);
	mutex_unlock(&tracepoints_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(tracepoint_probe_register_prio);

/**
 * tracepoint_probe_register - Connect a probe to a tracepoint
 * @tp: tracepoint
 * @probe: probe handler
 * @data: tracepoint data
 *
 * Returns 0 if ok, error value on error.
 * Note: if @tp is within a module, the caller is responsible for
 * unregistering the probe before the module is gone. This can be
 * performed either with a tracepoint module going notifier, or from
 * within module exit functions.
 */
int tracepoint_probe_register(struct tracepoint *tp, void *probe, void *data)
{
	return tracepoint_probe_register_prio(tp, probe, data, TRACEPOINT_DEFAULT_PRIO);
}
EXPORT_SYMBOL_GPL(tracepoint_probe_register);
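
/*
 * Usage sketch: a hypothetical probe for a tracepoint declared with a
 * single 'int' argument. Most callers use the generated
 * register_trace_<name>()/unregister_trace_<name>() wrappers, which pass
 * &__tracepoint_<name> as @tp.
 *
 *	static void my_probe(void *data, int arg)
 *	{
 *		pr_debug("arg=%d\n", arg);
 *	}
 *
 *	ret = tracepoint_probe_register(tp, (void *)my_probe, my_data);
 *	...
 *	tracepoint_probe_unregister(tp, (void *)my_probe, my_data);
 *
 * The probe receives the registered @data pointer as its first argument,
 * followed by the tracepoint's own arguments.
 */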

/**
 * tracepoint_probe_unregister - Disconnect a probe from a tracepoint
 * @tp: tracepoint
 * @probe: probe function pointer
 * @data: tracepoint data
 *
 * Returns 0 if ok, error value on error.
 */
int tracepoint_probe_unregister(struct tracepoint *tp, void *probe, void *data)
{
	struct tracepoint_func tp_func;
	int ret;

	mutex_lock(&tracepoints_mutex);
	tp_func.func = probe;
	tp_func.data = data;
	ret = tracepoint_remove_func(tp, &tp_func);
	mutex_unlock(&tracepoints_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(tracepoint_probe_unregister);

static void for_each_tracepoint_range(
		tracepoint_ptr_t *begin, tracepoint_ptr_t *end,
		void (*fct)(struct tracepoint *tp, void *priv),
		void *priv)
{
	tracepoint_ptr_t *iter;

	if (!begin)
		return;
	for (iter = begin; iter < end; iter++)
		fct(tracepoint_ptr_deref(iter), priv);
}

#ifdef CONFIG_MODULES
bool trace_module_has_bad_taint(struct module *mod)
{
	return mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP) |
			       (1 << TAINT_UNSIGNED_MODULE));
}

static BLOCKING_NOTIFIER_HEAD(tracepoint_notify_list);

/**
 * register_tracepoint_module_notifier - register tracepoint coming/going notifier
 * @nb: notifier block
 *
 * Notifiers registered with this function are called on module
 * coming/going with the tracepoint_module_list_mutex held.
 * The notifier block callback should expect a "struct tp_module" data
 * pointer.
 */
int register_tracepoint_module_notifier(struct notifier_block *nb)
{
	struct tp_module *tp_mod;
	int ret;

	mutex_lock(&tracepoint_module_list_mutex);
	ret = blocking_notifier_chain_register(&tracepoint_notify_list, nb);
	if (ret)
		goto end;
	list_for_each_entry(tp_mod, &tracepoint_module_list, list)
		(void) nb->notifier_call(nb, MODULE_STATE_COMING, tp_mod);
end:
	mutex_unlock(&tracepoint_module_list_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(register_tracepoint_module_notifier);
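
/*
 * Usage sketch (hypothetical callback and helper): @data is the struct
 * tp_module of the module coming or going, so a tracer can attach or
 * detach its probes on tp_mod->mod from here.
 *
 *	static int my_tp_module_cb(struct notifier_block *nb,
 *				   unsigned long action, void *data)
 *	{
 *		struct tp_module *tp_mod = data;
 *
 *		if (action == MODULE_STATE_COMING)
 *			attach_my_probes(tp_mod->mod);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = { .notifier_call = my_tp_module_cb };
 *
 *	register_tracepoint_module_notifier(&my_nb);
 */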

/**
 * unregister_tracepoint_module_notifier - unregister tracepoint coming/going notifier
 * @nb: notifier block
 *
 * The notifier block callback should expect a "struct tp_module" data
 * pointer.
 */
int unregister_tracepoint_module_notifier(struct notifier_block *nb)
{
	struct tp_module *tp_mod;
	int ret;

	mutex_lock(&tracepoint_module_list_mutex);
	ret = blocking_notifier_chain_unregister(&tracepoint_notify_list, nb);
	if (ret)
		goto end;
	list_for_each_entry(tp_mod, &tracepoint_module_list, list)
		(void) nb->notifier_call(nb, MODULE_STATE_GOING, tp_mod);
end:
	mutex_unlock(&tracepoint_module_list_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(unregister_tracepoint_module_notifier);

/*
 * Ensure the tracer unregistered the module's probes before the module
 * teardown is performed. Prevents leaks of probe and data pointers.
 */
static void tp_module_going_check_quiescent(struct tracepoint *tp, void *priv)
{
	WARN_ON_ONCE(tp->funcs);
}

static int tracepoint_module_coming(struct module *mod)
{
	struct tp_module *tp_mod;
	int ret = 0;

	if (!mod->num_tracepoints)
		return 0;

	/*
	 * We skip modules that taint the kernel, especially those with different
	 * module headers (for forced load), to make sure we don't cause a crash.
	 * Staging, out-of-tree, and unsigned GPL modules are fine.
	 */
	if (trace_module_has_bad_taint(mod))
		return 0;
	mutex_lock(&tracepoint_module_list_mutex);
	tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL);
	if (!tp_mod) {
		ret = -ENOMEM;
		goto end;
	}
	tp_mod->mod = mod;
	list_add_tail(&tp_mod->list, &tracepoint_module_list);
	blocking_notifier_call_chain(&tracepoint_notify_list,
			MODULE_STATE_COMING, tp_mod);
end:
	mutex_unlock(&tracepoint_module_list_mutex);
	return ret;
}

static void tracepoint_module_going(struct module *mod)
{
	struct tp_module *tp_mod;

	if (!mod->num_tracepoints)
		return;

	mutex_lock(&tracepoint_module_list_mutex);
	list_for_each_entry(tp_mod, &tracepoint_module_list, list) {
		if (tp_mod->mod == mod) {
			blocking_notifier_call_chain(&tracepoint_notify_list,
					MODULE_STATE_GOING, tp_mod);
			list_del(&tp_mod->list);
			kfree(tp_mod);
			/*
			 * Called the going notifier before checking for
			 * quiescence.
			 */
			for_each_tracepoint_range(mod->tracepoints_ptrs,
				mod->tracepoints_ptrs + mod->num_tracepoints,
				tp_module_going_check_quiescent, NULL);
			break;
		}
	}
	/*
	 * In the case of modules that were tainted at "coming", we'll simply
	 * walk through the list without finding it. We cannot use the "tainted"
	 * flag on "going", in case a module taints the kernel only after being
	 * loaded.
	 */
	mutex_unlock(&tracepoint_module_list_mutex);
}

static int tracepoint_module_notify(struct notifier_block *self,
		unsigned long val, void *data)
{
	struct module *mod = data;
	int ret = 0;

	switch (val) {
	case MODULE_STATE_COMING:
		ret = tracepoint_module_coming(mod);
		break;
	case MODULE_STATE_LIVE:
		break;
	case MODULE_STATE_GOING:
		tracepoint_module_going(mod);
		break;
	case MODULE_STATE_UNFORMED:
		break;
	}
	return notifier_from_errno(ret);
}

static struct notifier_block tracepoint_module_nb = {
	.notifier_call = tracepoint_module_notify,
	.priority = 0,
};

static __init int init_tracepoints(void)
{
	int ret;

	ret = register_module_notifier(&tracepoint_module_nb);
	if (ret)
		pr_warn("Failed to register tracepoint module enter notifier\n");

	return ret;
}
__initcall(init_tracepoints);
#endif /* CONFIG_MODULES */

/**
 * for_each_kernel_tracepoint - iteration on all kernel tracepoints
 * @fct: callback
 * @priv: private data
 */
void for_each_kernel_tracepoint(void (*fct)(struct tracepoint *tp, void *priv),
		void *priv)
{
	for_each_tracepoint_range(__start___tracepoints_ptrs,
		__stop___tracepoints_ptrs, fct, priv);
}
EXPORT_SYMBOL_GPL(for_each_kernel_tracepoint);
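
/*
 * Usage sketch (hypothetical callback): print the name of every core
 * kernel tracepoint.
 *
 *	static void show_tp(struct tracepoint *tp, void *priv)
 *	{
 *		pr_info("%s\n", tp->name);
 *	}
 *
 *	for_each_kernel_tracepoint(show_tp, NULL);
 */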

#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS

/* NB: reg/unreg are called while guarded with the tracepoints_mutex */
static int sys_tracepoint_refcount;

int syscall_regfunc(void)
{
	struct task_struct *p, *t;

	if (!sys_tracepoint_refcount) {
		read_lock(&tasklist_lock);
		for_each_process_thread(p, t) {
			set_tsk_thread_flag(t, TIF_SYSCALL_TRACEPOINT);
		}
		read_unlock(&tasklist_lock);
	}
	sys_tracepoint_refcount++;

	return 0;
}

void syscall_unregfunc(void)
{
	struct task_struct *p, *t;

	sys_tracepoint_refcount--;
	if (!sys_tracepoint_refcount) {
		read_lock(&tasklist_lock);
		for_each_process_thread(p, t) {
			clear_tsk_thread_flag(t, TIF_SYSCALL_TRACEPOINT);
		}
		read_unlock(&tasklist_lock);
	}
}
#endif

#ifdef CONFIG_ANDROID_VENDOR_HOOKS

static void *rvh_zalloc_funcs(int count)
{
	return kzalloc(sizeof(struct tracepoint_func) * count, GFP_KERNEL);
}

#define ANDROID_RVH_NR_PROBES_MAX	2
static int rvh_func_add(struct tracepoint *tp, struct tracepoint_func *func)
{
	int i;

	if (!static_key_enabled(&tp->key)) {
		/* '+ 1' for the last NULL element */
		tp->funcs = rvh_zalloc_funcs(ANDROID_RVH_NR_PROBES_MAX + 1);
		if (!tp->funcs)
			return -ENOMEM;
	}

	for (i = 0; i < ANDROID_RVH_NR_PROBES_MAX; i++) {
		if (!tp->funcs[i].func) {
			if (!static_key_enabled(&tp->key))
				tp->funcs[i].data = func->data;
			WRITE_ONCE(tp->funcs[i].func, func->func);

			return 0;
		}
	}

	return -EBUSY;
}

static int android_rvh_add_func(struct tracepoint *tp, struct tracepoint_func *func)
{
	int ret;

	if (tp->regfunc && !static_key_enabled(&tp->key)) {
		ret = tp->regfunc();
		if (ret < 0)
			return ret;
	}

	ret = rvh_func_add(tp, func);
	if (ret)
		return ret;
	tracepoint_update_call(tp, tp->funcs);
	static_key_enable(&tp->key);

	return 0;
}

int android_rvh_probe_register(struct tracepoint *tp, void *probe, void *data)
{
	struct tracepoint_func tp_func;
	int ret;

	/*
	 * Once the static key has been flipped, the array may be read
	 * concurrently. Although __traceiter_*() always checks .func first,
	 * it doesn't enforce read->read dependencies, and we can't strongly
	 * guarantee it will see the correct .data for the second element
	 * without adding smp_load_acquire() in the fast path. But this is a
	 * corner case which is unlikely to be needed by anybody in practice,
	 * so let's just forbid it and keep the fast path clean.
	 */
	if (WARN_ON(static_key_enabled(&tp->key) && data))
		return -EINVAL;

	mutex_lock(&tracepoints_mutex);
	tp_func.func = probe;
	tp_func.data = data;
	ret = android_rvh_add_func(tp, &tp_func);
	mutex_unlock(&tracepoints_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(android_rvh_probe_register);
#endif