#
# Architectures that offer a FUNCTION_TRACER implementation should
#  select HAVE_FUNCTION_TRACER:
#

config USER_STACKTRACE_SUPPORT
	bool

config NOP_TRACER
	bool

config HAVE_FTRACE_NMI_ENTER
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_FUNCTION_TRACER
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_FUNCTION_GRAPH_TRACER
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_FUNCTION_GRAPH_FP_TEST
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_DYNAMIC_FTRACE
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_DYNAMIC_FTRACE_WITH_REGS
	bool

config HAVE_FTRACE_MCOUNT_RECORD
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_SYSCALL_TRACEPOINTS
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_FENTRY
	bool
	help
	  Arch supports the gcc options -pg with -mfentry

config HAVE_C_RECORDMCOUNT
	bool
	help
	  C version of recordmcount available?

config TRACER_MAX_TRACE
	bool

config TRACE_CLOCK
	bool

config RING_BUFFER
	bool
	select TRACE_CLOCK
	select IRQ_WORK

config FTRACE_NMI_ENTER
	bool
	depends on HAVE_FTRACE_NMI_ENTER
	default y

config EVENT_TRACING
	select CONTEXT_SWITCH_TRACER
	bool

config GPU_TRACEPOINTS
	bool

config CONTEXT_SWITCH_TRACER
	bool

config RING_BUFFER_ALLOW_SWAP
	bool
	help
	 Allow the use of ring_buffer_swap_cpu.
	 Adds a very slight overhead to tracing when enabled.

# All tracer options should select GENERIC_TRACER. Options that are
# enabled by all tracers (context switch and event tracer) select TRACING
# instead. This allows those options to appear when no other tracer is
# selected, but not to appear when something else selects them. We need the
# two options GENERIC_TRACER and TRACING to avoid circular dependencies
# while still hiding the automatic options.

config TRACING
	bool
	select DEBUG_FS
	select RING_BUFFER
	select STACKTRACE if STACKTRACE_SUPPORT
	select TRACEPOINTS
	select NOP_TRACER
	select BINARY_PRINTF
	select EVENT_TRACING
	select TRACE_CLOCK

config GENERIC_TRACER
	bool
	select TRACING

#
# Minimum requirements an architecture has to meet for us to
# be able to offer generic tracing facilities:
#
config TRACING_SUPPORT
	bool
	# PPC32 has no irqflags tracing support, but it can use most of the
	# tracers anyway, they were tested to build and work. Note that new
	# exceptions to this list aren't welcomed, better implement the
	# irqflags tracing for your architecture.
	depends on TRACE_IRQFLAGS_SUPPORT || PPC32
	depends on STACKTRACE_SUPPORT
	default y

if TRACING_SUPPORT

menuconfig FTRACE
	bool "Tracers"
	default y if DEBUG_KERNEL
	help
	  Enable the kernel tracing infrastructure.

if FTRACE

config FUNCTION_TRACER
	bool "Kernel Function Tracer"
	depends on HAVE_FUNCTION_TRACER
	select KALLSYMS
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	help
	  Enable the kernel to trace every kernel function. This is done
	  by using a compiler feature to insert a small, 5-byte No-Operation
	  instruction at the beginning of every kernel function. This NOP
	  sequence is then dynamically patched into a tracer call when
	  tracing is enabled by the administrator. If it's runtime disabled
	  (the bootup default), then the overhead of the instructions is very
	  small and not measurable even in micro-benchmarks.
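
	  For example, with debugfs mounted at /sys/kernel/debug, the tracer
	  can typically be enabled at runtime with:

	      echo function > /sys/kernel/debug/tracing/current_tracer
	      cat /sys/kernel/debug/tracing/trace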

config FUNCTION_GRAPH_TRACER
	bool "Kernel Function Graph Tracer"
	depends on HAVE_FUNCTION_GRAPH_TRACER
	depends on FUNCTION_TRACER
	depends on !X86_32 || !CC_OPTIMIZE_FOR_SIZE
	default y
	help
	  Enable the kernel to trace a function at both its return
	  and its entry.
	  Its first purpose is to trace the duration of functions and
	  draw a call graph for each thread with some information like
	  the return value. This is done by saving the current return
	  address into a stack of calls in the current task structure.
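
	  For example, this tracer can typically be selected at runtime with:

	      echo function_graph > /sys/kernel/debug/tracing/current_tracer
	      cat /sys/kernel/debug/tracing/trace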


config IRQSOFF_TRACER
	bool "Interrupts-off Latency Tracer"
	default n
	depends on TRACE_IRQFLAGS_SUPPORT
	depends on !ARCH_USES_GETTIMEOFFSET
	select TRACE_IRQFLAGS
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	select RING_BUFFER_ALLOW_SWAP
	select TRACER_SNAPSHOT
	select TRACER_SNAPSHOT_PER_CPU_SWAP
	help
	  This option measures the time spent in irqs-off critical
	  sections, with microsecond accuracy.

	  The default measurement method is a maximum search, which is
	  disabled by default and can be runtime (re-)started
	  via:

	      echo 0 > /sys/kernel/debug/tracing/tracing_max_latency

	  (Note that kernel size and overhead increase with this option
	  enabled. This option and the preempt-off timing option can be
	  used together or separately.)
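
	  For example, the tracer itself is typically selected with:

	      echo irqsoff > /sys/kernel/debug/tracing/current_tracer
	      cat /sys/kernel/debug/tracing/tracing_max_latency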

config PREEMPT_TRACER
	bool "Preemption-off Latency Tracer"
	default n
	depends on !ARCH_USES_GETTIMEOFFSET
	depends on PREEMPT
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	select RING_BUFFER_ALLOW_SWAP
	select TRACER_SNAPSHOT
	select TRACER_SNAPSHOT_PER_CPU_SWAP
	help
	  This option measures the time spent in preemption-off critical
	  sections, with microsecond accuracy.

	  The default measurement method is a maximum search, which is
	  disabled by default and can be runtime (re-)started
	  via:

	      echo 0 > /sys/kernel/debug/tracing/tracing_max_latency

	  (Note that kernel size and overhead increase with this option
	  enabled. This option and the irqs-off timing option can be
	  used together or separately.)
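
	  For example, the tracer itself is typically selected with:

	      echo preemptoff > /sys/kernel/debug/tracing/current_tracer

	  (or "preemptirqsoff" when the irqs-off tracer is also enabled).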

config SCHED_TRACER
	bool "Scheduling Latency Tracer"
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	select TRACER_MAX_TRACE
	select TRACER_SNAPSHOT
	help
	  This tracer tracks the latency of the highest priority task
	  to be scheduled in, starting from the point it has woken up.
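
	  For example, the wakeup latency tracer is typically selected with:

	      echo wakeup > /sys/kernel/debug/tracing/current_tracer
	      cat /sys/kernel/debug/tracing/tracing_max_latency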

config ENABLE_DEFAULT_TRACERS
	bool "Trace process context switches and events"
	depends on !GENERIC_TRACER
	select TRACING
	help
	  This tracer hooks to various trace points in the kernel,
	  allowing the user to pick and choose which trace point they
	  want to trace. It also includes the sched_switch tracer plugin.
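
	  For example, individual events can typically be enabled with:

	      echo 1 > /sys/kernel/debug/tracing/events/sched/sched_switch/enable
	      cat /sys/kernel/debug/tracing/trace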

config FTRACE_SYSCALLS
	bool "Trace syscalls"
	depends on HAVE_SYSCALL_TRACEPOINTS
	select GENERIC_TRACER
	select KALLSYMS
	help
	  Basic tracer to catch the syscall entry and exit events.
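
	  For example, all syscall events can typically be enabled with:

	      echo 1 > /sys/kernel/debug/tracing/events/syscalls/enable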

config TRACER_SNAPSHOT
	bool "Create a snapshot trace buffer"
	select TRACER_MAX_TRACE
	help
	  Allow tracing users to take a snapshot of the current buffer using
	  the ftrace interface, e.g.:

	      echo 1 > /sys/kernel/debug/tracing/snapshot
	      cat snapshot

config TRACER_SNAPSHOT_PER_CPU_SWAP
	bool "Allow snapshot to swap per CPU"
	depends on TRACER_SNAPSHOT
	select RING_BUFFER_ALLOW_SWAP
	help
	  Allow doing a snapshot of a single CPU buffer instead of a
	  full swap (all buffers). If this is set, then the following is
	  allowed:

	      echo 1 > /sys/kernel/debug/tracing/per_cpu/cpu2/snapshot

	  After which, only the tracing buffer for CPU 2 will be swapped with
	  the main tracing buffer, and the other CPU buffers will remain the same.

	  When this is enabled, it adds a little more overhead to the
	  trace recording, as it needs to add some checks to synchronize
	  recording with swaps. But this does not affect the performance
	  of the overall system. This is enabled by default when the preempt
	  or irq latency tracers are enabled, as those need to swap as well
	  and already add the overhead (plus a lot more).

config TRACE_BRANCH_PROFILING
	bool
	select GENERIC_TRACER

choice
	prompt "Branch Profiling"
	default BRANCH_PROFILE_NONE
	help
	 Branch profiling is a software profiler. It will add hooks
	 into the C conditionals to test which path a branch takes.

	 The likely/unlikely profiler only looks at the conditions that
	 are annotated with a likely or unlikely macro.

	 The "all branch" profiler will profile every if-statement in the
	 kernel. This profiler will also enable the likely/unlikely
	 profiler.

	 Either of the above profilers adds a bit of overhead to the system.
	 If unsure, choose "No branch profiling".

config BRANCH_PROFILE_NONE
	bool "No branch profiling"
	help
	  No branch profiling. Branch profiling adds a bit of overhead.
	  Only enable it if you want to analyse the branching behavior.
	  Otherwise keep it disabled.

config PROFILE_ANNOTATED_BRANCHES
	bool "Trace likely/unlikely profiler"
	select TRACE_BRANCH_PROFILING
	help
	  This tracer profiles all likely and unlikely macros
	  in the kernel. It will display the results in:

	  /sys/kernel/debug/tracing/trace_stat/branch_annotated

	  Note: this will add a significant overhead; only turn this
	  on if you need to profile the system's use of these macros.

config PROFILE_ALL_BRANCHES
	bool "Profile all if conditionals"
	select TRACE_BRANCH_PROFILING
	help
	  This tracer profiles all branch conditions. Every if ()
	  executed in the kernel is recorded, whether the branch was
	  taken or not. The results will be displayed in:

	  /sys/kernel/debug/tracing/trace_stat/branch_all

	  This option also enables the likely/unlikely profiler.

	  This configuration, when enabled, will impose a significant
	  overhead on the system. This should only be enabled when the
	  system is to be analyzed in much detail.
endchoice

config TRACING_BRANCHES
	bool
	help
	  Selected by tracers that will trace the likely and unlikely
	  conditions. This prevents the tracers themselves from being
	  profiled. Profiling the tracing infrastructure can only happen
	  when the likelys and unlikelys are not being traced.

config BRANCH_TRACER
	bool "Trace likely/unlikely instances"
	depends on TRACE_BRANCH_PROFILING
	select TRACING_BRANCHES
	help
	  This traces the events of likely and unlikely condition
	  calls in the kernel.  The difference between this and the
	  "Trace likely/unlikely profiler" is that this is not a
	  histogram of the callers, but actually places the calling
	  events into a running trace buffer to see when and where the
	  events happened, as well as their results.
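
	  For example, it is typically selected with:

	      echo branch > /sys/kernel/debug/tracing/current_tracer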

	  Say N if unsure.

config STACK_TRACER
	bool "Trace max stack"
	depends on HAVE_FUNCTION_TRACER
	select FUNCTION_TRACER
	select STACKTRACE
	select KALLSYMS
	help
	  This special tracer records the maximum stack footprint of the
	  kernel and displays it in /sys/kernel/debug/tracing/stack_trace.

	  This tracer works by hooking into every function call that the
	  kernel executes, and keeping a maximum stack depth value and
	  stack-trace saved.  If this is configured with DYNAMIC_FTRACE
	  then it will not have any overhead while the stack tracer
	  is disabled.

	  To enable the stack tracer on bootup, pass in 'stacktrace'
	  on the kernel command line.

	  The stack tracer can also be enabled or disabled via the
	  sysctl kernel.stack_tracer_enabled
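
	  For example:

	      sysctl kernel.stack_tracer_enabled=1
	      cat /sys/kernel/debug/tracing/stack_trace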

	  Say N if unsure.

config BLK_DEV_IO_TRACE
	bool "Support for tracing block IO actions"
	depends on SYSFS
	depends on BLOCK
	select RELAY
	select DEBUG_FS
	select TRACEPOINTS
	select GENERIC_TRACER
	select STACKTRACE
	help
	  Say Y here if you want to be able to trace the block layer actions
	  on a given queue. Tracing allows you to see any traffic happening
	  on a block device queue. For more information (and the userspace
	  support tools needed), fetch the blktrace tools from:

	  git://git.kernel.dk/blktrace.git

	  Tracing also is possible using the ftrace interface, e.g.:

	    echo 1 > /sys/block/sda/sda1/trace/enable
	    echo blk > /sys/kernel/debug/tracing/current_tracer
	    cat /sys/kernel/debug/tracing/trace_pipe

	  If unsure, say N.

config KPROBE_EVENT
	depends on KPROBES
	depends on HAVE_REGS_AND_STACK_ACCESS_API
	bool "Enable kprobes-based dynamic events"
	select TRACING
	select PROBE_EVENTS
	default y
	help
	  This allows the user to add tracing events (similar to tracepoints)
	  on the fly via the ftrace interface. See
	  Documentation/trace/kprobetrace.txt for more details.

	  Those events can be inserted wherever kprobes can probe, and record
	  various register and memory values.

	  This option is also required by perf-probe subcommand of perf tools.
	  If you want to use perf tools, this option is strongly recommended.
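
	  For example (the event name "myprobe" and the probed function
	  are just an illustration):

	      echo 'p:myprobe do_sys_open' > /sys/kernel/debug/tracing/kprobe_events
	      echo 1 > /sys/kernel/debug/tracing/events/kprobes/myprobe/enable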

config UPROBE_EVENT
	bool "Enable uprobes-based dynamic events"
	depends on ARCH_SUPPORTS_UPROBES
	depends on MMU
	depends on PERF_EVENTS
	select UPROBES
	select PROBE_EVENTS
	select TRACING
	default n
	help
	  This allows the user to add tracing events on top of userspace
	  dynamic events (similar to tracepoints) on the fly via the trace
	  events interface. Those events can be inserted wherever uprobes
	  can probe, and record various registers.
	  This option is required if you plan to use perf-probe subcommand
	  of perf tools on user space applications.
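
	  For example (the event name, binary path and offset below are
	  purely illustrative):

	      echo 'p:mybash /bin/bash:0x4245c0' > /sys/kernel/debug/tracing/uprobe_events
	      echo 1 > /sys/kernel/debug/tracing/events/uprobes/mybash/enable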

config PROBE_EVENTS
	def_bool n

config DYNAMIC_FTRACE
	bool "enable/disable function tracing dynamically"
	depends on FUNCTION_TRACER
	depends on HAVE_DYNAMIC_FTRACE
	default y
	help
	  This option will modify all the calls to function tracing
	  dynamically (will patch them out of the binary image and
	  replace them with a No-Op instruction) on boot up. During
	  compile time, a table is made of all the locations that ftrace
	  can function trace, and this table is linked into the kernel
	  image. When this is enabled, functions can be individually
	  enabled, and the functions not enabled will not affect
	  performance of the system.

	  See the files in /sys/kernel/debug/tracing:
	    available_filter_functions
	    set_ftrace_filter
	    set_ftrace_notrace

	  This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but
	  otherwise has native performance as long as no tracing is active.
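
	  For example, to limit tracing to functions whose names begin
	  with "sched" (set_ftrace_filter accepts glob patterns):

	      echo 'sched*' > /sys/kernel/debug/tracing/set_ftrace_filter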

config DYNAMIC_FTRACE_WITH_REGS
	def_bool y
	depends on DYNAMIC_FTRACE
	depends on HAVE_DYNAMIC_FTRACE_WITH_REGS

config FUNCTION_PROFILER
	bool "Kernel function profiler"
	depends on FUNCTION_TRACER
	default n
	help
	  This option enables the kernel function profiler. A file is created
	  in debugfs called function_profile_enabled which defaults to zero.
	  When a 1 is echoed into this file profiling begins, and when a
	  zero is entered, profiling stops. A "functions" file is created in
	  the trace_stat directory; this file shows the list of functions that
	  have been hit and their counters.
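
	  For example, profiling is typically started with:

	      echo 1 > /sys/kernel/debug/tracing/function_profile_enabled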

	  If in doubt, say N.

config FTRACE_MCOUNT_RECORD
	def_bool y
	depends on DYNAMIC_FTRACE
	depends on HAVE_FTRACE_MCOUNT_RECORD

config FTRACE_SELFTEST
	bool

config FTRACE_STARTUP_TEST
	bool "Perform a startup test on ftrace"
	depends on GENERIC_TRACER
	select FTRACE_SELFTEST
	help
	  This option performs a series of startup tests on ftrace. On bootup
	  a series of tests are made to verify that the tracer is
	  functioning properly. It will do tests on all the configured
	  tracers of ftrace.

config EVENT_TRACE_TEST_SYSCALLS
	bool "Run selftest on syscall events"
	depends on FTRACE_STARTUP_TEST
	help
	 This option will also enable testing every syscall event.
	 It only enables each event, runs various loads with the event
	 enabled, and then disables it again. This adds a bit more time to
	 kernel boot up since it does this for every system call defined.

	 TBD - enable a way to actually call the syscalls as we test their
	       events

config MMIOTRACE
	bool "Memory mapped IO tracing"
	depends on HAVE_MMIOTRACE_SUPPORT && PCI
	select GENERIC_TRACER
	help
	  Mmiotrace traces Memory Mapped I/O access and is meant for
	  debugging and reverse engineering. It is called from the ioremap
	  implementation and works via page faults. Tracing is disabled by
	  default and can be enabled at run-time.
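
	  For example, it can typically be enabled at run-time with:

	      echo mmiotrace > /sys/kernel/debug/tracing/current_tracer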

	  See Documentation/trace/mmiotrace.txt.
	  If you are not helping to develop drivers, say N.

config MMIOTRACE_TEST
	tristate "Test module for mmiotrace"
	depends on MMIOTRACE && m
	help
	  This is a dumb module for testing mmiotrace. It is very dangerous
	  as it will write garbage to IO memory starting at a given address.
	  However, it should be safe to use on e.g. an unused portion of VRAM.

	  Say N, unless you absolutely know what you are doing.

config TRACEPOINT_BENCHMARK
	bool "Add tracepoint that benchmarks tracepoints"
	help
	 This option creates the tracepoint "benchmark:benchmark_event".
	 When the tracepoint is enabled, it kicks off a kernel thread that
	 goes into an infinite loop (calling cond_resched() to let other tasks
	 run), and calls the tracepoint. Each iteration will record the time
	 it took to write to the tracepoint, and on the next iteration that
	 data will be passed to the tracepoint itself. That is, the tracepoint
	 will report the time it took to do the previous tracepoint call.
	 The string written to the tracepoint is a static string of 128 bytes
	 to keep the time the same. The initial string is simply a write of
	 "START". The second string records the cold cache time of the first
	 write, which is not added to the rest of the calculations.

	 As it is a tight loop, it benchmarks as hot cache. That's fine because
	 we care most about hot paths that are probably in cache already.
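
	 For example, the benchmark is typically started by enabling the
	 event:

	     echo 1 > /sys/kernel/debug/tracing/events/benchmark/benchmark_event/enable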

	 An example of the output:

	      START
	      first=3672 [COLD CACHED]
	      last=632 first=3672 max=632 min=632 avg=316 std=446 std^2=199712
	      last=278 first=3672 max=632 min=278 avg=303 std=316 std^2=100337
	      last=277 first=3672 max=632 min=277 avg=296 std=258 std^2=67064
	      last=273 first=3672 max=632 min=273 avg=292 std=224 std^2=50411
	      last=273 first=3672 max=632 min=273 avg=288 std=200 std^2=40389
	      last=281 first=3672 max=632 min=273 avg=287 std=183 std^2=33666


config RING_BUFFER_BENCHMARK
	tristate "Ring buffer benchmark stress tester"
	depends on RING_BUFFER
	help
	  This option creates a test to stress the ring buffer and benchmark it.
	  It creates its own ring buffer such that it will not interfere with
	  any other users of the ring buffer (such as ftrace). It then creates
	  a producer and consumer that will run for 10 seconds and sleep for
	  10 seconds. Each interval it will print out the number of events
	  it recorded and give a rough estimate of how long each iteration took.

	  It does not disable interrupts or raise its priority, so it may be
	  affected by processes that are running.

	  If unsure, say N.

config RING_BUFFER_STARTUP_TEST
	bool "Ring buffer startup self test"
	depends on RING_BUFFER
	help
	 Run a simple self test on the ring buffer on boot up. Late in the
	 kernel boot sequence, a test will start that kicks off
	 a thread per CPU. Each thread will write various size events
	 into the ring buffer. Another thread is created to send IPIs
	 to each of the threads, where the IPI handler will also write
	 to the ring buffer, to test/stress the nesting ability.
	 If any anomalies are discovered, a warning will be displayed
	 and all ring buffers will be disabled.

	 The test runs for 10 seconds. This will slow your boot time
	 by at least 10 more seconds.

	 At the end of the test, statistics and more checks are done.
	 It will output the stats of each per-CPU buffer: what
	 was written, the sizes, what was read, what was lost, and
	 other similar details.

	 If unsure, say N.

endif # FTRACE

endif # TRACING_SUPPORT