/* Include in trace.c */

#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>

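/*
 * Note: DYN_FTRACE_TEST_NAME() and DYN_FTRACE_TEST_NAME2() are assumed to
 * be real traceable functions built in a separate unit (typically
 * trace_selftest_dynamic.c, compiled with the ftrace cflags) so that they
 * keep their mcount/fentry call sites, while this file itself is built
 * without tracing.
 */
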
static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_BRANCH:
	case TRACE_GRAPH_ENT:
	case TRACE_GRAPH_RET:
		return 1;
	}
	return 0;
}

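/*
 * Consume every event queued on one CPU's ring buffer and check that
 * each entry carries one of the known trace entry types.
 */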
static int trace_test_buffer_cpu(struct trace_buffer *buf, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;
	unsigned int loops = 0;

	while ((event = ring_buffer_consume(buf->buffer, cpu, NULL, NULL))) {
		entry = ring_buffer_event_data(event);

		/*
		 * The ring buffer holds at most trace_buf_size entries;
		 * if we loop more times than that, something is wrong
		 * with the ring buffer.
		 */
		if (loops++ > trace_buf_size) {
			printk(KERN_CONT ".. bad ring buffer ");
			goto failed;
		}
		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
				entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_buffer *buf, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	arch_spin_lock(&buf->tr->max_lock);

	cnt = ring_buffer_entries(buf->buffer);

	/*
	 * trace_test_buffer_cpu() runs a while loop to consume all data.
	 * If the calling tracer is broken and constantly fills the
	 * buffer, that loop will run forever and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lockup.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(buf, cpu);
		if (ret)
			break;
	}
	tracing_on();
	arch_spin_unlock(&buf->tr->max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
		trace->name, init_ret);
}
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

static int trace_selftest_test_probe1_cnt;
static void trace_selftest_test_probe1_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe1_cnt++;
}

static int trace_selftest_test_probe2_cnt;
static void trace_selftest_test_probe2_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe2_cnt++;
}

static int trace_selftest_test_probe3_cnt;
static void trace_selftest_test_probe3_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe3_cnt++;
}

static int trace_selftest_test_global_cnt;
static void trace_selftest_test_global_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_global_cnt++;
}

static int trace_selftest_test_dyn_cnt;
static void trace_selftest_test_dyn_func(unsigned long ip,
					 unsigned long pip,
					 struct ftrace_ops *op,
					 struct pt_regs *pt_regs)
{
	trace_selftest_test_dyn_cnt++;
}

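/*
 * Three fixed ops used to verify filter routing. All three are marked
 * RECURSION_SAFE; their callbacks do nothing but increment a counter.
 */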
static struct ftrace_ops test_probe1 = {
	.func			= trace_selftest_test_probe1_func,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_probe2 = {
	.func			= trace_selftest_test_probe2_func,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_probe3 = {
	.func			= trace_selftest_test_probe3_func,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
};

static void print_counts(void)
{
	printk("(%d %d %d %d %d) ",
	       trace_selftest_test_probe1_cnt,
	       trace_selftest_test_probe2_cnt,
	       trace_selftest_test_probe3_cnt,
	       trace_selftest_test_global_cnt,
	       trace_selftest_test_dyn_cnt);
}

static void reset_counts(void)
{
	trace_selftest_test_probe1_cnt = 0;
	trace_selftest_test_probe2_cnt = 0;
	trace_selftest_test_probe3_cnt = 0;
	trace_selftest_test_global_cnt = 0;
	trace_selftest_test_dyn_cnt = 0;
}

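/*
 * Exercise filtered ftrace_ops: register the three static probes with
 * different filters plus one dynamically allocated probe, trigger the
 * two test functions, and verify each counter matches the expected
 * number of hits. Called twice: cnt == 1 with global tracing running,
 * cnt == 2 with it off.
 */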
static int trace_selftest_ops(struct trace_array *tr, int cnt)
{
	int save_ftrace_enabled = ftrace_enabled;
	struct ftrace_ops *dyn_ops;
	char *func1_name;
	char *func2_name;
	int len1;
	int len2;
	int ret = -1;

	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace ops #%d: ", cnt);

	ftrace_enabled = 1;
	reset_counts();

	/* Handle PPC64 '.' name */
	func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2);
	len1 = strlen(func1_name);
	len2 = strlen(func2_name);

	/*
	 * Probe 1 will trace function 1.
	 * Probe 2 will trace function 2.
	 * Probe 3 will trace functions 1 and 2.
	 */
	ftrace_set_filter(&test_probe1, func1_name, len1, 1);
	ftrace_set_filter(&test_probe2, func2_name, len2, 1);
	ftrace_set_filter(&test_probe3, func1_name, len1, 1);
	ftrace_set_filter(&test_probe3, func2_name, len2, 0);

	register_ftrace_function(&test_probe1);
	register_ftrace_function(&test_probe2);
	register_ftrace_function(&test_probe3);
	/* First time we are running with main function */
	if (cnt > 1) {
		ftrace_init_array_ops(tr, trace_selftest_test_global_func);
		register_ftrace_function(tr->ops);
	}

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 0)
		goto out;
	if (trace_selftest_test_probe3_cnt != 1)
		goto out;
	if (cnt > 1) {
		if (trace_selftest_test_global_cnt == 0)
			goto out;
	}

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out;
	if (trace_selftest_test_probe3_cnt != 2)
		goto out;

	/* Add a dynamic probe */
	dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
	if (!dyn_ops) {
		printk("MEMORY ERROR ");
		goto out;
	}

	dyn_ops->func = trace_selftest_test_dyn_func;

	register_ftrace_function(dyn_ops);

	trace_selftest_test_global_cnt = 0;

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 3)
		goto out_free;
	if (cnt > 1) {
		if (trace_selftest_test_global_cnt == 0)
			goto out_free;
	}
	if (trace_selftest_test_dyn_cnt == 0)
		goto out_free;

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 4)
		goto out_free;

	ret = 0;
 out_free:
	unregister_ftrace_function(dyn_ops);
	kfree(dyn_ops);

 out:
	/* Purposely unregister in the same order */
	unregister_ftrace_function(&test_probe1);
	unregister_ftrace_function(&test_probe2);
	unregister_ftrace_function(&test_probe3);
	if (cnt > 1)
		unregister_ftrace_function(tr->ops);
	ftrace_reset_array_ops(tr);

	/* Make sure everything is off */
	reset_counts();
	DYN_FTRACE_TEST_NAME();
	DYN_FTRACE_TEST_NAME();

	if (trace_selftest_test_probe1_cnt ||
	    trace_selftest_test_probe2_cnt ||
	    trace_selftest_test_probe3_cnt ||
	    trace_selftest_test_global_cnt ||
	    trace_selftest_test_dyn_cnt)
		ret = -1;

	ftrace_enabled = save_ftrace_enabled;

	return ret;
}

/* Test dynamic code modification and ftrace filters */
static int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
						  struct trace_array *tr,
						  int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* passed in by parameter to keep gcc from optimizing it away */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_global_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);

	ftrace_enabled = 1;
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		trace->reset(tr);
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

	/* Test the ops with global tracing running */
	ret = trace_selftest_ops(tr, 1);
	trace->reset(tr);

 out:
	ftrace_enabled = save_ftrace_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_global_filter(NULL, 0, 1);

	/* Test the ops with global tracing off */
	if (!ret)
		ret = trace_selftest_ops(tr, 2);

	return ret;
}

static int trace_selftest_recursion_cnt;
static void trace_selftest_test_recursion_func(unsigned long ip,
					       unsigned long pip,
					       struct ftrace_ops *op,
					       struct pt_regs *pt_regs)
{
	/*
	 * This function is registered without the recursion safe flag.
	 * The ftrace infrastructure should provide the recursion
	 * protection. If not, this will crash the kernel!
	 */
	if (trace_selftest_recursion_cnt++ > 10)
		return;
	DYN_FTRACE_TEST_NAME();
}

static void trace_selftest_test_recursion_safe_func(unsigned long ip,
						    unsigned long pip,
						    struct ftrace_ops *op,
						    struct pt_regs *pt_regs)
{
	/*
	 * We said we would provide our own recursion. By calling
	 * this function again, we should recurse back into this function
	 * and count again. But this only happens if the arch supports
	 * all of the ftrace features and nothing else is using the
	 * function tracing utility.
	 */
	if (trace_selftest_recursion_cnt++)
		return;
	DYN_FTRACE_TEST_NAME();
}

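/*
 * test_rec_probe is deliberately registered without
 * FTRACE_OPS_FL_RECURSION_SAFE: ftrace itself must supply the recursion
 * protection. test_recsafe_probe claims to handle recursion on its own.
 */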
static struct ftrace_ops test_rec_probe = {
	.func			= trace_selftest_test_recursion_func,
};

static struct ftrace_ops test_recsafe_probe = {
	.func			= trace_selftest_test_recursion_safe_func,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
};

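/*
 * Run both recursion tests: first let ftrace itself guard the unsafe
 * probe, then run the self-guarded probe and check that the counter
 * ends at exactly 2.
 */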
static int
trace_selftest_function_recursion(void)
{
	int save_ftrace_enabled = ftrace_enabled;
	char *func_name;
	int len;
	int ret;

	/* The previous test PASSED */
	pr_cont("PASSED\n");
	pr_info("Testing ftrace recursion: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* Handle PPC64 '.' name */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	len = strlen(func_name);

	ret = ftrace_set_filter(&test_rec_probe, func_name, len, 1);
	if (ret) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_rec_probe);
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_rec_probe);

	ret = -1;
	/*
	 * Recursion allows for transitions between contexts,
	 * and may call the callback twice.
	 */
	if (trace_selftest_recursion_cnt != 1 &&
	    trace_selftest_recursion_cnt != 2) {
		pr_cont("*callback not called once (or twice) (%d)* ",
			trace_selftest_recursion_cnt);
		goto out;
	}

	trace_selftest_recursion_cnt = 1;

	pr_cont("PASSED\n");
	pr_info("Testing ftrace recursion safe: ");

	ret = ftrace_set_filter(&test_recsafe_probe, func_name, len, 1);
	if (ret) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_recsafe_probe);
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_recsafe_probe);

	ret = -1;
	if (trace_selftest_recursion_cnt != 2) {
		pr_cont("*callback not called expected 2 times (%d)* ",
			trace_selftest_recursion_cnt);
		goto out;
	}

	ret = 0;
out:
	ftrace_enabled = save_ftrace_enabled;

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
# define trace_selftest_function_recursion() ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

static enum {
	TRACE_SELFTEST_REGS_START,
	TRACE_SELFTEST_REGS_FOUND,
	TRACE_SELFTEST_REGS_NOT_FOUND,
} trace_selftest_regs_stat;

static void trace_selftest_test_regs_func(unsigned long ip,
					  unsigned long pip,
					  struct ftrace_ops *op,
					  struct pt_regs *pt_regs)
{
	if (pt_regs)
		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_FOUND;
	else
		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_NOT_FOUND;
}

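/*
 * FTRACE_OPS_FL_SAVE_REGS asks ftrace to hand the callback a full
 * pt_regs; on arches without support, registering such an ops is
 * expected to fail (checked below).
 */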
static struct ftrace_ops test_regs_probe = {
	.func		= trace_selftest_test_regs_func,
	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_SAVE_REGS,
};

static int
trace_selftest_function_regs(void)
{
	int save_ftrace_enabled = ftrace_enabled;
	char *func_name;
	int len;
	int ret;
	int supported = 0;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	supported = 1;
#endif

	/* The previous test PASSED */
	pr_cont("PASSED\n");
	pr_info("Testing ftrace regs%s: ",
		!supported ? "(no arch support)" : "");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* Handle PPC64 '.' name */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	len = strlen(func_name);

	ret = ftrace_set_filter(&test_regs_probe, func_name, len, 1);
	/*
	 * If DYNAMIC_FTRACE is not set, then we just trace all functions.
	 * This test really doesn't care.
	 */
	if (ret && ret != -ENODEV) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_regs_probe);
	/*
	 * Now if the arch does not support passing regs, then this should
	 * have failed.
	 */
	if (!supported) {
		if (!ret) {
			pr_cont("*registered save-regs without arch support* ");
			goto out;
		}
		test_regs_probe.flags |= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED;
		ret = register_ftrace_function(&test_regs_probe);
	}
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_regs_probe);

	ret = -1;

	switch (trace_selftest_regs_stat) {
	case TRACE_SELFTEST_REGS_START:
		pr_cont("*callback never called* ");
		goto out;

	case TRACE_SELFTEST_REGS_FOUND:
		if (supported)
			break;
		pr_cont("*callback received regs without arch support* ");
		goto out;

	case TRACE_SELFTEST_REGS_NOT_FOUND:
		if (!supported)
			break;
		pr_cont("*callback received NULL regs* ");
		goto out;
	}

	ret = 0;
out:
	ftrace_enabled = save_ftrace_enabled;

	return ret;
}

/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
__init int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	unsigned long count;
	int ret;

#ifdef CONFIG_DYNAMIC_FTRACE
	if (ftrace_filter_param) {
		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
		return 0;
	}
#endif

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);

	ftrace_enabled = 1;
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);
	if (ret)
		goto out;

	ret = trace_selftest_function_recursion();
	if (ret)
		goto out;

	ret = trace_selftest_function_regs();
 out:
	ftrace_enabled = save_ftrace_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */


#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST	100000000

static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
	/* This is harmlessly racy, we want to approximately detect a hang */
	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
		ftrace_graph_stop();
		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
		if (ftrace_dump_on_oops) {
			ftrace_dump(DUMP_ALL);
			/* ftrace_dump() disables tracing */
			tracing_on();
		}
		return 0;
	}

	return trace_graph_entry(trace);
}

/*
 * Pretty much the same as for the function tracer, from which this
 * selftest has been borrowed.
 */
__init int
trace_selftest_startup_function_graph(struct tracer *trace,
					struct trace_array *tr)
{
	int ret;
	unsigned long count;

#ifdef CONFIG_DYNAMIC_FTRACE
	if (ftrace_filter_param) {
		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
		return 0;
	}
#endif

	/*
	 * Simulate the init() callback but we attach a watchdog callback
	 * to detect and recover from possible hangs
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);
	set_graph_array(tr);
	ret = register_ftrace_graph(&trace_graph_return,
				    &trace_graph_entry_watchdog);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}
	tracing_start_cmdline_record();

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* Have we just recovered from a hang? */
	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
		tracing_selftest_disabled = true;
		ret = -1;
		goto out;
	}

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);

	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* Don't test dynamic tracing, the function tracer already did */

out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */


#ifdef CONFIG_IRQSOFF_TRACER
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tr->max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max irqs-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->trace_buffer, NULL);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tr->max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tr->max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max preempt-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->trace_buffer, NULL);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tr->max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out_no_start;
	}

	/* reset the max latency */
	tr->max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max irqs/preempt off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->trace_buffer, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&tr->max_buffer, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tr->max_latency = 0;
	tracing_start();
	trace->start(tr);

	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->trace_buffer, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&tr->max_buffer, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

out:
	tracing_start();
out_no_start:
	trace->reset(tr);
	tr->max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER

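/*
 * Protocol between the test and its kthread: is_ready is completed once
 * when the thread has switched to SCHED_DEADLINE and again after it has
 * been woken; the go flag tells the thread to stop sleeping.
 */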
struct wakeup_test_data {
	struct completion	is_ready;
	int			go;
};

static int trace_wakeup_test_thread(void *data)
{
	/* Make this a -deadline thread */
	static const struct sched_attr attr = {
		.sched_policy = SCHED_DEADLINE,
		.sched_runtime = 100000ULL,
		.sched_deadline = 10000000ULL,
		.sched_period = 10000000ULL
	};
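	/* i.e. 100 us of runtime every 10 ms period */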
	struct wakeup_test_data *x = data;

	sched_setattr(current, &attr);

	/* Let the waiter know we are running with the new policy */
	complete(&x->is_ready);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!x->go) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}

	complete(&x->is_ready);

	set_current_state(TASK_INTERRUPTIBLE);

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}

	__set_current_state(TASK_RUNNING);

	return 0;
}

int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	struct task_struct *p;
	struct wakeup_test_data data;
	unsigned long count;
	int ret;

	memset(&data, 0, sizeof(data));

	init_completion(&data.is_ready);

	/* create a -deadline thread */
	p = kthread_run(trace_wakeup_test_thread, &data, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at -deadline policy */
	wait_for_completion(&data.is_ready);

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tr->max_latency = 0;

	while (p->on_rq) {
		/*
		 * Sleep to make sure the -deadline thread is asleep too.
		 * On virtual machines we can't rely on timings,
		 * but we want to make sure this test still works.
		 */
		msleep(100);
	}

	init_completion(&data.is_ready);

	data.go = 1;
	/* memory barrier is in the wake_up_process() */

	wake_up_process(p);

	/* Wait for the task to wake up */
	wait_for_completion(&data.is_ready);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->trace_buffer, NULL);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);

	trace->reset(tr);
	tracing_start();

	tr->max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */