#
# Architectures that offer a FUNCTION_TRACER implementation should
#  select HAVE_FUNCTION_TRACER:
#

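# As an illustration, an architecture advertises its support from its own
# Kconfig, along the lines of the following sketch (the arch symbol and the
# exact option list here are hypothetical):
#
#	config FOO_ARCH
#		bool
#		select HAVE_FUNCTION_TRACER
#		select HAVE_FUNCTION_GRAPH_TRACER
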
config USER_STACKTRACE_SUPPORT
	bool

config NOP_TRACER
	bool

config HAVE_FTRACE_NMI_ENTER
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_FUNCTION_TRACER
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_FUNCTION_GRAPH_TRACER
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_FUNCTION_GRAPH_FP_TEST
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_FUNCTION_TRACE_MCOUNT_TEST
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_DYNAMIC_FTRACE
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_DYNAMIC_FTRACE_WITH_REGS
	bool

config HAVE_FTRACE_MCOUNT_RECORD
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_SYSCALL_TRACEPOINTS
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_FENTRY
	bool
	help
	  Arch supports the gcc options -pg with -mfentry

config HAVE_C_RECORDMCOUNT
	bool
	help
	  C version of recordmcount available?

config TRACER_MAX_TRACE
	bool

config TRACE_CLOCK
	bool

config RING_BUFFER
	bool
	select TRACE_CLOCK
	select IRQ_WORK

config FTRACE_NMI_ENTER
	bool
	depends on HAVE_FTRACE_NMI_ENTER
	default y

config EVENT_TRACING
	select CONTEXT_SWITCH_TRACER
	bool

config GPU_TRACEPOINTS
	bool

config CONTEXT_SWITCH_TRACER
	bool

config RING_BUFFER_ALLOW_SWAP
	bool
	help
	 Allow the use of ring_buffer_swap_cpu.
	 Adds a very slight overhead to tracing when enabled.

# All tracer options should select GENERIC_TRACER. Options that are enabled
# by all tracers (the context switch and event tracers) select TRACING
# instead. This allows those options to appear when no other tracer is
# selected, while keeping them hidden when something else selects them. We
# need the two options GENERIC_TRACER and TRACING to avoid circular
# dependencies while still hiding the automatic options.

config TRACING
	bool
	select DEBUG_FS
	select RING_BUFFER
	select STACKTRACE if STACKTRACE_SUPPORT
	select TRACEPOINTS
	select NOP_TRACER
	select BINARY_PRINTF
	select EVENT_TRACING
	select TRACE_CLOCK

config GENERIC_TRACER
	bool
	select TRACING

#
# Minimum requirements an architecture has to meet for us to
# be able to offer generic tracing facilities:
#
config TRACING_SUPPORT
	bool
	# PPC32 has no irqflags tracing support, but it can use most of the
	# tracers anyway, as they were tested to build and work. Note that new
	# exceptions to this list aren't welcome; better to implement the
	# irqflags tracing for your architecture.
	depends on TRACE_IRQFLAGS_SUPPORT || PPC32
	depends on STACKTRACE_SUPPORT
	default y

if TRACING_SUPPORT

menuconfig FTRACE
	bool "Tracers"
	default y if DEBUG_KERNEL
	help
	  Enable the kernel tracing infrastructure.

if FTRACE

config FUNCTION_TRACER
	bool "Kernel Function Tracer"
	depends on HAVE_FUNCTION_TRACER
	select KALLSYMS
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	help
	  Enable the kernel to trace every kernel function. This is done
	  by using a compiler feature to insert a small, 5-byte No-Operation
	  instruction at the beginning of every kernel function; this NOP
	  sequence is then dynamically patched into a tracer call when
	  tracing is enabled by the administrator. If tracing is disabled at
	  run time (the boot-up default), the overhead of the instructions is
	  very small and not measurable even in micro-benchmarks.

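# FUNCTION_TRACER example (sketch): when built in, the function tracer is
# typically enabled at run time through the tracing directory in debugfs
# (assuming debugfs is mounted at /sys/kernel/debug):
#
#	echo function > /sys/kernel/debug/tracing/current_tracer
#	cat /sys/kernel/debug/tracing/trace
#	echo nop > /sys/kernel/debug/tracing/current_tracer
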
config FUNCTION_GRAPH_TRACER
	bool "Kernel Function Graph Tracer"
	depends on HAVE_FUNCTION_GRAPH_TRACER
	depends on FUNCTION_TRACER
	depends on !X86_32 || !CC_OPTIMIZE_FOR_SIZE
	default y
	help
	  Enable the kernel to trace a function at both its entry
	  and its return.
	  Its first purpose is to measure the duration of functions and
	  to draw a call graph for each thread, with some information
	  such as the return value. This is done by saving the current
	  return address in a stack of calls on the current task structure.

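# FUNCTION_GRAPH_TRACER example (sketch): selecting the graph tracer at run
# time, again assuming debugfs is mounted at /sys/kernel/debug:
#
#	echo function_graph > /sys/kernel/debug/tracing/current_tracer
#	cat /sys/kernel/debug/tracing/trace
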
config IRQSOFF_TRACER
	bool "Interrupts-off Latency Tracer"
	default n
	depends on TRACE_IRQFLAGS_SUPPORT
	depends on !ARCH_USES_GETTIMEOFFSET
	select TRACE_IRQFLAGS
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	select RING_BUFFER_ALLOW_SWAP
	select TRACER_SNAPSHOT
	select TRACER_SNAPSHOT_PER_CPU_SWAP
	help
	  This option measures the time spent in irqs-off critical
	  sections, with microsecond accuracy.

	  The default measurement method is a maximum search, which is
	  disabled by default and can be (re-)started at run time
	  via:

	      echo 0 > /sys/kernel/debug/tracing/tracing_max_latency

	  (Note that kernel size and overhead increase with this option
	  enabled. This option and the preempt-off timing option can be
	  used together or separately.)

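# IRQSOFF_TRACER example (sketch): measuring the worst-case irqs-off latency
# at run time (paths assume debugfs mounted at /sys/kernel/debug):
#
#	echo irqsoff > /sys/kernel/debug/tracing/current_tracer
#	echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
#	... run a workload ...
#	cat /sys/kernel/debug/tracing/tracing_max_latency
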
config PREEMPT_TRACER
	bool "Preemption-off Latency Tracer"
	default n
	depends on !ARCH_USES_GETTIMEOFFSET
	depends on PREEMPT
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	select RING_BUFFER_ALLOW_SWAP
	select TRACER_SNAPSHOT
	select TRACER_SNAPSHOT_PER_CPU_SWAP
	help
	  This option measures the time spent in preemption-off critical
	  sections, with microsecond accuracy.

	  The default measurement method is a maximum search, which is
	  disabled by default and can be (re-)started at run time
	  via:

	      echo 0 > /sys/kernel/debug/tracing/tracing_max_latency

	  (Note that kernel size and overhead increase with this option
	  enabled. This option and the irqs-off timing option can be
	  used together or separately.)

config SCHED_TRACER
	bool "Scheduling Latency Tracer"
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	select TRACER_MAX_TRACE
	select TRACER_SNAPSHOT
	help
	  This tracer tracks the latency of the highest priority task
	  to be scheduled in, starting from the point it has woken up.

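# SCHED_TRACER example (sketch): the scheduling latency tracer is exposed as
# the "wakeup" tracer (with "wakeup_rt" for RT tasks on most kernels);
# assuming debugfs is mounted at /sys/kernel/debug:
#
#	echo wakeup > /sys/kernel/debug/tracing/current_tracer
#	cat /sys/kernel/debug/tracing/tracing_max_latency
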
config ENABLE_DEFAULT_TRACERS
	bool "Trace process context switches and events"
	depends on !GENERIC_TRACER
	select TRACING
	help
	  This tracer hooks into various trace points in the kernel,
	  allowing the user to pick and choose which trace point they
	  want to trace. It also includes the sched_switch tracer plugin.

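# Event tracing example (sketch): individual trace events can be switched on
# and off through the events directory; sched_switch is used here only as an
# illustration (paths assume debugfs at /sys/kernel/debug):
#
#	echo 1 > /sys/kernel/debug/tracing/events/sched/sched_switch/enable
#	cat /sys/kernel/debug/tracing/trace
#	echo 0 > /sys/kernel/debug/tracing/events/sched/sched_switch/enable
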
config FTRACE_SYSCALLS
	bool "Trace syscalls"
	depends on HAVE_SYSCALL_TRACEPOINTS
	select GENERIC_TRACER
	select KALLSYMS
	help
	  Basic tracer to catch the syscall entry and exit events.

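# FTRACE_SYSCALLS example (sketch): with syscall tracing built in, the
# syscall events typically appear under the events/syscalls directory and
# can be enabled as a group:
#
#	echo 1 > /sys/kernel/debug/tracing/events/syscalls/enable
#	cat /sys/kernel/debug/tracing/trace_pipe
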
config TRACER_SNAPSHOT
	bool "Create a snapshot trace buffer"
	select TRACER_MAX_TRACE
	help
	  Allow tracing users to take a snapshot of the current buffer
	  using the ftrace interface, e.g.:

	      echo 1 > /sys/kernel/debug/tracing/snapshot
	      cat snapshot

config TRACER_SNAPSHOT_PER_CPU_SWAP
	bool "Allow snapshot to swap per CPU"
	depends on TRACER_SNAPSHOT
	select RING_BUFFER_ALLOW_SWAP
	help
	  Allow doing a snapshot of a single CPU buffer instead of a
	  full swap (all buffers). If this is set, then the following is
	  allowed:

	      echo 1 > /sys/kernel/debug/tracing/per_cpu/cpu2/snapshot

	  After this, only the tracing buffer for CPU 2 is swapped with
	  the main tracing buffer, and the other CPU buffers remain the same.

	  When this is enabled, it adds a little more overhead to the
	  trace recording, as it needs to add some checks to synchronize
	  recording with swaps. But this does not affect the performance
	  of the overall system. This is enabled by default when the preempt
	  or irq latency tracers are enabled, as those need to swap as well
	  and already add that overhead (plus a lot more).

config TRACE_BRANCH_PROFILING
	bool
	select GENERIC_TRACER

choice
	prompt "Branch Profiling"
	default BRANCH_PROFILE_NONE
	help
	 Branch profiling is a software profiler. It will add hooks
	 into the C conditionals to test which path a branch takes.

	 The likely/unlikely profiler only looks at the conditions that
	 are annotated with a likely or unlikely macro.

	 The "all branch" profiler will profile every if-statement in the
	 kernel. This profiler will also enable the likely/unlikely
	 profiler.

	 Either of the above profilers adds a bit of overhead to the system.
	 If unsure, choose "No branch profiling".

config BRANCH_PROFILE_NONE
	bool "No branch profiling"
	help
	  No branch profiling. Branch profiling adds a bit of overhead.
	  Only enable it if you want to analyse the branching behavior.
	  Otherwise keep it disabled.

config PROFILE_ANNOTATED_BRANCHES
	bool "Trace likely/unlikely profiler"
	select TRACE_BRANCH_PROFILING
	help
	  This tracer profiles all likely and unlikely macros
	  in the kernel. It will display the results in:

	  /sys/kernel/debug/tracing/trace_stat/branch_annotated

	  Note: this will add a significant overhead; only turn this
	  on if you need to profile the system's use of these macros.

config PROFILE_ALL_BRANCHES
	bool "Profile all if conditionals"
	select TRACE_BRANCH_PROFILING
	help
	  This tracer profiles all branch conditions. Every if ()
	  taken in the kernel is recorded, whether it hit or missed.
	  The results will be displayed in:

	  /sys/kernel/debug/tracing/trace_stat/branch_all

	  This option also enables the likely/unlikely profiler.

	  This configuration, when enabled, will impose a great overhead
	  on the system. This should only be enabled when the system
	  is to be analyzed in much detail.
endchoice

config TRACING_BRANCHES
	bool
	help
	  Selected by tracers that will trace the likely and unlikely
	  conditions. This prevents the tracers themselves from being
	  profiled. Profiling the tracing infrastructure can only happen
	  when the likelys and unlikelys are not being traced.

config BRANCH_TRACER
	bool "Trace likely/unlikely instances"
	depends on TRACE_BRANCH_PROFILING
	select TRACING_BRANCHES
	help
	  This traces the events of likely and unlikely condition
	  calls in the kernel.  The difference between this and the
	  "Trace likely/unlikely profiler" is that this is not a
	  histogram of the callers, but actually places the calling
	  events into a running trace buffer to see when and where the
	  events happened, as well as their results.

	  Say N if unsure.

config STACK_TRACER
	bool "Trace max stack"
	depends on HAVE_FUNCTION_TRACER
	select FUNCTION_TRACER
	select STACKTRACE
	select KALLSYMS
	help
	  This special tracer records the maximum stack footprint of the
	  kernel and displays it in /sys/kernel/debug/tracing/stack_trace.

	  This tracer works by hooking into every function call that the
	  kernel executes, and keeping a maximum stack depth value and
	  stack-trace saved.  If this is configured with DYNAMIC_FTRACE
	  then it will not have any overhead while the stack tracer
	  is disabled.

	  To enable the stack tracer on bootup, pass in 'stacktrace'
	  on the kernel command line.

	  The stack tracer can also be enabled or disabled via the
	  sysctl kernel.stack_tracer_enabled.

	  Say N if unsure.

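# STACK_TRACER example (sketch): inspecting the deepest stack usage recorded
# so far (assuming debugfs is mounted at /sys/kernel/debug):
#
#	echo 1 > /proc/sys/kernel/stack_tracer_enabled
#	cat /sys/kernel/debug/tracing/stack_max_size
#	cat /sys/kernel/debug/tracing/stack_trace
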
config BLK_DEV_IO_TRACE
	bool "Support for tracing block IO actions"
	depends on SYSFS
	depends on BLOCK
	select RELAY
	select DEBUG_FS
	select TRACEPOINTS
	select GENERIC_TRACER
	select STACKTRACE
	help
	  Say Y here if you want to be able to trace the block layer actions
	  on a given queue. Tracing allows you to see any traffic happening
	  on a block device queue. For more information (and the userspace
	  support tools needed), fetch the blktrace tools from:

	  git://git.kernel.dk/blktrace.git

	  Tracing is also possible using the ftrace interface, e.g.:

	    echo 1 > /sys/block/sda/sda1/trace/enable
	    echo blk > /sys/kernel/debug/tracing/current_tracer
	    cat /sys/kernel/debug/tracing/trace_pipe

	  If unsure, say N.

config KPROBE_EVENT
	depends on KPROBES
	depends on HAVE_REGS_AND_STACK_ACCESS_API
	bool "Enable kprobes-based dynamic events"
	select TRACING
	select PROBE_EVENTS
	default y
	help
	  This allows the user to add tracing events (similar to tracepoints)
	  on the fly via the ftrace interface. See
	  Documentation/trace/kprobetrace.txt for more details.

	  Those events can be inserted wherever kprobes can probe, and record
	  various register and memory values.

	  This option is also required by the perf-probe subcommand of perf
	  tools. If you want to use perf tools, this option is strongly
	  recommended.

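# KPROBE_EVENT example (sketch): defining and enabling a kprobe event on the
# fly; the event name "myprobe" and the probed symbol are purely
# illustrative, see Documentation/trace/kprobetrace.txt for the full syntax:
#
#	echo 'p:myprobe do_sys_open' >> /sys/kernel/debug/tracing/kprobe_events
#	echo 1 > /sys/kernel/debug/tracing/events/kprobes/myprobe/enable
#	cat /sys/kernel/debug/tracing/trace_pipe
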
config UPROBE_EVENT
	bool "Enable uprobes-based dynamic events"
	depends on ARCH_SUPPORTS_UPROBES
	depends on MMU
	select UPROBES
	select PROBE_EVENTS
	select TRACING
	default n
	help
	  This allows the user to add tracing events on top of userspace
	  dynamic events (similar to tracepoints) on the fly via the trace
	  events interface. Those events can be inserted wherever uprobes
	  can probe, and record various registers.
	  This option is required if you plan to use the perf-probe
	  subcommand of perf tools on user space applications.

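# UPROBE_EVENT example (sketch): a uprobe event is declared as PATH:OFFSET;
# the event name, the binary path and the offset below are hypothetical
# placeholders used only for illustration:
#
#	echo 'p:myuprobe /bin/bash:0x4245c0' >> /sys/kernel/debug/tracing/uprobe_events
#	echo 1 > /sys/kernel/debug/tracing/events/uprobes/myuprobe/enable
#	cat /sys/kernel/debug/tracing/trace_pipe
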
config PROBE_EVENTS
	def_bool n

config DYNAMIC_FTRACE
	bool "enable/disable function tracing dynamically"
	depends on FUNCTION_TRACER
	depends on HAVE_DYNAMIC_FTRACE
	default y
	help
	  This option will modify all the calls to function tracing
	  dynamically (will patch them out of the binary image and
	  replace them with a No-Op instruction) on boot up. During
	  compile time, a table is made of all the locations that ftrace
	  can function trace, and this table is linked into the kernel
	  image. When this is enabled, functions can be individually
	  enabled, and the functions not enabled will not affect
	  performance of the system.

	  See the files in /sys/kernel/debug/tracing:
	    available_filter_functions
	    set_ftrace_filter
	    set_ftrace_notrace

	  This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but
	  otherwise has native performance as long as no tracing is active.

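# DYNAMIC_FTRACE example (sketch): restricting the function tracer to a
# subset of functions via the filter files listed above (the glob pattern is
# illustrative):
#
#	echo 'sched_*' > /sys/kernel/debug/tracing/set_ftrace_filter
#	echo function > /sys/kernel/debug/tracing/current_tracer
#	cat /sys/kernel/debug/tracing/trace
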
config DYNAMIC_FTRACE_WITH_REGS
	def_bool y
	depends on DYNAMIC_FTRACE
	depends on HAVE_DYNAMIC_FTRACE_WITH_REGS

config FUNCTION_PROFILER
	bool "Kernel function profiler"
	depends on FUNCTION_TRACER
	default n
	help
	  This option enables the kernel function profiler. A file is created
	  in debugfs called function_profile_enabled which defaults to zero.
	  When a 1 is echoed into this file profiling begins, and when a
	  zero is entered, profiling stops. A "functions" file is created in
	  the trace_stat directory; this file shows the list of functions that
	  have been hit and their counters.

	  If in doubt, say N.

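# FUNCTION_PROFILER example (sketch): turning the profiler on and reading
# its per-CPU statistics; the function0 file is assumed to correspond to
# CPU 0 (paths assume debugfs at /sys/kernel/debug):
#
#	echo 1 > /sys/kernel/debug/tracing/function_profile_enabled
#	cat /sys/kernel/debug/tracing/trace_stat/function0
#	echo 0 > /sys/kernel/debug/tracing/function_profile_enabled
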
config FTRACE_MCOUNT_RECORD
	def_bool y
	depends on DYNAMIC_FTRACE
	depends on HAVE_FTRACE_MCOUNT_RECORD

config FTRACE_SELFTEST
	bool

config FTRACE_STARTUP_TEST
	bool "Perform a startup test on ftrace"
	depends on GENERIC_TRACER
	select FTRACE_SELFTEST
	help
	  This option performs a series of startup tests on ftrace. On bootup
	  a series of tests are made to verify that the tracer is
	  functioning properly. It will do tests on all the configured
	  tracers of ftrace.

config EVENT_TRACE_TEST_SYSCALLS
	bool "Run selftest on syscall events"
	depends on FTRACE_STARTUP_TEST
	help
	 This option will also enable testing every syscall event.
	 It enables each event, runs various loads with the event enabled,
	 and then disables it. This adds a bit more time to kernel boot-up
	 since it does this for every system call defined.

	 TBD - enable a way to actually call the syscalls as we test their
	       events

config MMIOTRACE
	bool "Memory mapped IO tracing"
	depends on HAVE_MMIOTRACE_SUPPORT && PCI
	select GENERIC_TRACER
	help
	  Mmiotrace traces Memory Mapped I/O access and is meant for
	  debugging and reverse engineering. It is called from the ioremap
	  implementation and works via page faults. Tracing is disabled by
	  default and can be enabled at run-time.

	  See Documentation/trace/mmiotrace.txt.
	  If you are not helping to develop drivers, say N.

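# MMIOTRACE example (sketch): run-time usage along the lines described in
# Documentation/trace/mmiotrace.txt (the output file name is arbitrary):
#
#	echo mmiotrace > /sys/kernel/debug/tracing/current_tracer
#	cat /sys/kernel/debug/tracing/trace_pipe > mydump.txt &
#	... load the driver under inspection ...
#	echo nop > /sys/kernel/debug/tracing/current_tracer
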
config MMIOTRACE_TEST
	tristate "Test module for mmiotrace"
	depends on MMIOTRACE && m
	help
	  This is a dumb module for testing mmiotrace. It is very dangerous
	  as it will write garbage to IO memory starting at a given address.
	  However, it should be safe to use on e.g. an unused portion of VRAM.

	  Say N, unless you absolutely know what you are doing.

config RING_BUFFER_BENCHMARK
	tristate "Ring buffer benchmark stress tester"
	depends on RING_BUFFER
	help
	  This option creates a test to stress the ring buffer and benchmark it.
	  It creates its own ring buffer such that it will not interfere with
	  any other users of the ring buffer (such as ftrace). It then creates
	  a producer and consumer that will run for 10 seconds and sleep for
	  10 seconds. Each interval it will print out the number of events
	  it recorded and give a rough estimate of how long each iteration took.

	  It does not disable interrupts or raise its priority, so it may be
	  affected by processes that are running.

	  If unsure, say N.

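# RING_BUFFER_BENCHMARK example (sketch): when built as a module, the
# benchmark is typically started by loading the module and stopped by
# removing it; the module name assumed here follows the usual kernel/trace
# naming:
#
#	modprobe ring_buffer_benchmark
#	dmesg | tail
#	rmmod ring_buffer_benchmark
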
config RING_BUFFER_STARTUP_TEST
	bool "Ring buffer startup self test"
	depends on RING_BUFFER
	help
	  Run a simple self test on the ring buffer on boot up. Late in the
	  kernel boot sequence, the test will start and kick off
	  a thread per CPU. Each thread will write events of various sizes
	  into the ring buffer. Another thread is created to send IPIs
	  to each of the threads, where the IPI handler will also write
	  to the ring buffer, to test/stress the nesting ability.
	  If any anomalies are discovered, a warning will be displayed
	  and all ring buffers will be disabled.

	  The test runs for 10 seconds. This will slow your boot time
	  by at least 10 more seconds.

	  At the end of the test, statistics and more checks are done.
	  It will output the stats of each per-CPU buffer: what
	  was written, the sizes, what was read, what was lost, and
	  other similar details.

	  If unsure, say N.

endif # FTRACE

endif # TRACING_SUPPORT