# SPDX-License-Identifier: GPL-2.0-only
#
# Architectures that offer a FUNCTION_TRACER implementation should
#  select HAVE_FUNCTION_TRACER:
#

config USER_STACKTRACE_SUPPORT
	bool

config NOP_TRACER
	bool

config HAVE_FUNCTION_TRACER
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_FUNCTION_GRAPH_TRACER
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_DYNAMIC_FTRACE
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_DYNAMIC_FTRACE_WITH_REGS
	bool

config HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
	bool

config HAVE_DYNAMIC_FTRACE_WITH_ARGS
	bool
	help
	 If this is set, then arguments and stack can be found from
	 the pt_regs passed into the function callback regs parameter
	 by default, even without setting the REGS flag in the ftrace_ops.
	 This allows for use of regs_get_kernel_argument() and
	 kernel_stack_pointer().

config HAVE_FTRACE_MCOUNT_RECORD
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_SYSCALL_TRACEPOINTS
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_FENTRY
	bool
	help
	  Arch supports the gcc options -pg with -mfentry

config HAVE_NOP_MCOUNT
	bool
	help
	  Arch supports the gcc options -pg with -mrecord-mcount and -nop-mcount

config HAVE_OBJTOOL_MCOUNT
	bool
	help
	  Arch supports objtool --mcount

config HAVE_C_RECORDMCOUNT
	bool
	help
	  C version of recordmcount available?

config TRACER_MAX_TRACE
	bool

config TRACE_CLOCK
	bool

config RING_BUFFER
	bool
	select TRACE_CLOCK
	select IRQ_WORK

config EVENT_TRACING
	select CONTEXT_SWITCH_TRACER
	select GLOB
	bool

config CONTEXT_SWITCH_TRACER
	bool

config RING_BUFFER_ALLOW_SWAP
	bool
	help
	 Allow the use of ring_buffer_swap_cpu.
	 Adds a very slight overhead to tracing when enabled.

config TRACE_MMIO_ACCESS
	bool "Register read/write tracing"
	depends on TRACING && ARCH_HAVE_TRACE_MMIO_ACCESS
	help
	  Create tracepoints for MMIO read/write operations. These trace events
	  can be used for logging all MMIO read/write operations.

config PREEMPTIRQ_TRACEPOINTS
	bool
	depends on TRACE_PREEMPT_TOGGLE || TRACE_IRQFLAGS
	select TRACING
	default y
	help
	  Create preempt/irq toggle tracepoints if needed, so that other parts
	  of the kernel can use them to generate or add hooks to them.

# All tracer options should select GENERIC_TRACER. For those options that are
# enabled by all tracers (context switch and event tracer) they select TRACING.
# This allows those options to appear when no other tracer is selected. But the
# options do not appear when something else selects it. We need the two options
# GENERIC_TRACER and TRACING to avoid circular dependencies to accomplish the
# hiding of the automatic options.

config TRACING
	bool
	select RING_BUFFER
	select STACKTRACE if STACKTRACE_SUPPORT
	select TRACEPOINTS
	select NOP_TRACER
	select BINARY_PRINTF
	select EVENT_TRACING
	select TRACE_CLOCK

config GENERIC_TRACER
	bool
	select TRACING

#
# Minimum requirements an architecture has to meet for us to
# be able to offer generic tracing facilities:
#
config TRACING_SUPPORT
	bool
	depends on TRACE_IRQFLAGS_SUPPORT
	depends on STACKTRACE_SUPPORT
	default y

menuconfig FTRACE
	bool "Tracers"
	depends on TRACING_SUPPORT
	default y if DEBUG_KERNEL
	help
	  Enable the kernel tracing infrastructure.

if FTRACE

config BOOTTIME_TRACING
	bool "Boot-time Tracing support"
	depends on TRACING
	select BOOT_CONFIG
	help
	  Enable developer to setup ftrace subsystem via supplemental
	  kernel cmdline at boot time for debugging (tracing) driver
	  initialization and boot process.

config FUNCTION_TRACER
	bool "Kernel Function Tracer"
	depends on HAVE_FUNCTION_TRACER
	select KALLSYMS
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	select GLOB
	select TASKS_RCU if PREEMPTION
	select TASKS_RUDE_RCU
	help
	  Enable the kernel to trace every kernel function. This is done
	  by using a compiler feature to insert a small, 5-byte No-Operation
	  instruction at the beginning of every kernel function, which NOP
	  sequence is then dynamically patched into a tracer call when
	  tracing is enabled by the administrator. If it's runtime disabled
	  (the bootup default), then the overhead of the instructions is very
	  small and not measurable even in micro-benchmarks.

config FUNCTION_GRAPH_TRACER
	bool "Kernel Function Graph Tracer"
	depends on HAVE_FUNCTION_GRAPH_TRACER
	depends on FUNCTION_TRACER
	depends on !X86_32 || !CC_OPTIMIZE_FOR_SIZE
	default y
	help
	  Enable the kernel to trace a function at both its return
	  and its entry.
	  Its first purpose is to trace the duration of functions and
	  draw a call graph for each thread with some information like
	  the return value. This is done by setting the current return
	  address on the current task structure into a stack of calls.

config DYNAMIC_FTRACE
	bool "enable/disable function tracing dynamically"
	depends on FUNCTION_TRACER
	depends on HAVE_DYNAMIC_FTRACE
	default y
	help
	  This option will modify all the calls to function tracing
	  dynamically (will patch them out of the binary image and
	  replace them with a No-Op instruction) on boot up. During
	  compile time, a table is made of all the locations that ftrace
	  can function trace, and this table is linked into the kernel
	  image. When this is enabled, functions can be individually
	  enabled, and the functions not enabled will not affect
	  performance of the system.

	  See the files in /sys/kernel/debug/tracing:
	    available_filter_functions
	    set_ftrace_filter
	    set_ftrace_notrace

	  This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but
	  otherwise has native performance as long as no tracing is active.

config DYNAMIC_FTRACE_WITH_REGS
	def_bool y
	depends on DYNAMIC_FTRACE
	depends on HAVE_DYNAMIC_FTRACE_WITH_REGS

config DYNAMIC_FTRACE_WITH_DIRECT_CALLS
	def_bool y
	depends on DYNAMIC_FTRACE_WITH_REGS
	depends on HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS

config DYNAMIC_FTRACE_WITH_ARGS
	def_bool y
	depends on DYNAMIC_FTRACE
	depends on HAVE_DYNAMIC_FTRACE_WITH_ARGS

config FUNCTION_PROFILER
	bool "Kernel function profiler"
	depends on FUNCTION_TRACER
	default n
	help
	  This option enables the kernel function profiler. A file is created
	  in debugfs called function_profile_enabled which defaults to zero.
	  When a 1 is echoed into this file profiling begins, and when a
	  zero is entered, profiling stops. A "functions" file is created in
	  the trace_stat directory; this file shows the list of functions that
	  have been hit and their counters.

	  If in doubt, say N.

config STACK_TRACER
	bool "Trace max stack"
	depends on HAVE_FUNCTION_TRACER
	select FUNCTION_TRACER
	select STACKTRACE
	select KALLSYMS
	help
	  This special tracer records the maximum stack footprint of the
	  kernel and displays it in /sys/kernel/debug/tracing/stack_trace.

	  This tracer works by hooking into every function call that the
	  kernel executes, and keeping a maximum stack depth value and
	  stack-trace saved.  If this is configured with DYNAMIC_FTRACE
	  then it will not have any overhead while the stack tracer
	  is disabled.

	  To enable the stack tracer on bootup, pass in 'stacktrace'
	  on the kernel command line.

	  The stack tracer can also be enabled or disabled via the
	  sysctl kernel.stack_tracer_enabled

	  Say N if unsure.

config TRACE_PREEMPT_TOGGLE
	bool
	help
	  Enables hooks which will be called when preemption is first disabled,
	  and last enabled.

config IRQSOFF_TRACER
	bool "Interrupts-off Latency Tracer"
	default n
	depends on TRACE_IRQFLAGS_SUPPORT
	select TRACE_IRQFLAGS
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	select RING_BUFFER_ALLOW_SWAP
	select TRACER_SNAPSHOT
	select TRACER_SNAPSHOT_PER_CPU_SWAP
	help
	  This option measures the time spent in irqs-off critical
	  sections, with microsecond accuracy.

	  The default measurement method is a maximum search, which is
	  disabled by default and can be runtime (re-)started
	  via:

	      echo 0 > /sys/kernel/debug/tracing/tracing_max_latency

	  (Note that kernel size and overhead increase with this option
	  enabled. This option and the preempt-off timing option can be
	  used together or separately.)

config PREEMPT_TRACER
	bool "Preemption-off Latency Tracer"
	default n
	depends on PREEMPTION
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	select RING_BUFFER_ALLOW_SWAP
	select TRACER_SNAPSHOT
	select TRACER_SNAPSHOT_PER_CPU_SWAP
	select TRACE_PREEMPT_TOGGLE
	help
	  This option measures the time spent in preemption-off critical
	  sections, with microsecond accuracy.

	  The default measurement method is a maximum search, which is
	  disabled by default and can be runtime (re-)started
	  via:

	      echo 0 > /sys/kernel/debug/tracing/tracing_max_latency

	  (Note that kernel size and overhead increase with this option
	  enabled. This option and the irqs-off timing option can be
	  used together or separately.)

config SCHED_TRACER
	bool "Scheduling Latency Tracer"
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	select TRACER_MAX_TRACE
	select TRACER_SNAPSHOT
	help
	  This tracer tracks the latency of the highest priority task
	  to be scheduled in, starting from the point it has woken up.

config HWLAT_TRACER
	bool "Tracer to detect hardware latencies (like SMIs)"
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	help
	 This tracer, when enabled will create one or more kernel threads,
	 depending on what the cpumask file is set to, which each thread
	 spinning in a loop looking for interruptions caused by
	 something other than the kernel. For example, if a
	 System Management Interrupt (SMI) takes a noticeable amount of
	 time, this tracer will detect it. This is useful for testing
	 if a system is reliable for Real Time tasks.

	 Some files are created in the tracing directory when this
	 is enabled:

	   hwlat_detector/width   - time in usecs for how long to spin for
	   hwlat_detector/window  - time in usecs between the start of each
				     iteration

	 A kernel thread is created that will spin with interrupts disabled
	 for "width" microseconds in every "window" cycle. It will not spin
	 for "window - width" microseconds, where the system can
	 continue to operate.

	 The output will appear in the trace and trace_pipe files.

	 When the tracer is not running, it has no effect on the system,
	 but when it is running, it can cause the system to be
	 periodically non responsive. Do not run this tracer on a
	 production system.

	 To enable this tracer, echo in "hwlat" into the current_tracer
	 file. Every time a latency is greater than tracing_thresh, it will
	 be recorded into the ring buffer.

config OSNOISE_TRACER
	bool "OS Noise tracer"
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	help
	  In the context of high-performance computing (HPC), the Operating
	  System Noise (osnoise) refers to the interference experienced by an
	  application due to activities inside the operating system. In the
	  context of Linux, NMIs, IRQs, SoftIRQs, and any other system thread
	  can cause noise to the system. Moreover, hardware-related jobs can
	  also cause noise, for example, via SMIs.

	  The osnoise tracer leverages the hwlat_detector by running a similar
	  loop with preemption, SoftIRQs and IRQs enabled, thus allowing all
	  the sources of osnoise during its execution. The osnoise tracer takes
	  note of the entry and exit point of any source of interferences,
	  increasing a per-cpu interference counter. It saves an interference
	  counter for each source of interference. The interference counter for
	  NMI, IRQs, SoftIRQs, and threads is increased anytime the tool
	  observes these interferences' entry events. When a noise happens
	  without any interference from the operating system level, the
	  hardware noise counter increases, pointing to a hardware-related
	  noise. In this way, osnoise can account for any source of
	  interference. At the end of the period, the osnoise tracer prints
	  the sum of all noise, the max single noise, the percentage of CPU
	  available for the thread, and the counters for the noise sources.

	  In addition to the tracer, a set of tracepoints were added to
	  facilitate the identification of the osnoise source.

	  The output will appear in the trace and trace_pipe files.

	  To enable this tracer, echo in "osnoise" into the current_tracer
	  file.

config TIMERLAT_TRACER
	bool "Timerlat tracer"
	select OSNOISE_TRACER
	select GENERIC_TRACER
	help
	  The timerlat tracer aims to help the preemptive kernel developers
	  to find sources of wakeup latencies of real-time threads.

	  The tracer creates a per-cpu kernel thread with real-time priority.
	  The tracer thread sets a periodic timer to wakeup itself, and goes
	  to sleep waiting for the timer to fire. At the wakeup, the thread
	  then computes a wakeup latency value as the difference between
	  the current time and the absolute time that the timer was set
	  to expire.

	  The tracer prints two lines at every activation. The first is the
	  timer latency observed at the hardirq context before the
	  activation of the thread. The second is the timer latency observed
	  by the thread, which is the same level that cyclictest reports. The
	  ACTIVATION ID field serves to relate the irq execution to its
	  respective thread execution.

	  The tracer is built on top of osnoise tracer, and the osnoise:
	  events can be used to trace the source of interference from NMI,
	  IRQs and other threads. It also enables the capture of the
	  stacktrace at the IRQ context, which helps to identify the code
	  path that can cause thread delay.

config MMIOTRACE
	bool "Memory mapped IO tracing"
	depends on HAVE_MMIOTRACE_SUPPORT && PCI
	select GENERIC_TRACER
	help
	  Mmiotrace traces Memory Mapped I/O access and is meant for
	  debugging and reverse engineering. It is called from the ioremap
	  implementation and works via page faults. Tracing is disabled by
	  default and can be enabled at run-time.

	  See Documentation/trace/mmiotrace.rst.
	  If you are not helping to develop drivers, say N.

config ENABLE_DEFAULT_TRACERS
	bool "Trace process context switches and events"
	depends on !GENERIC_TRACER
	select TRACING
	help
	  This tracer hooks to various trace points in the kernel,
	  allowing the user to pick and choose which trace point they
	  want to trace. It also includes the sched_switch tracer plugin.

config FTRACE_SYSCALLS
	bool "Trace syscalls"
	depends on HAVE_SYSCALL_TRACEPOINTS
	select GENERIC_TRACER
	select KALLSYMS
	help
	  Basic tracer to catch the syscall entry and exit events.

config TRACER_SNAPSHOT
	bool "Create a snapshot trace buffer"
	select TRACER_MAX_TRACE
	help
	  Allow tracing users to take snapshot of the current buffer using the
	  ftrace interface, e.g.:

	      echo 1 > /sys/kernel/debug/tracing/snapshot
	      cat snapshot

config TRACER_SNAPSHOT_PER_CPU_SWAP
	bool "Allow snapshot to swap per CPU"
	depends on TRACER_SNAPSHOT
	select RING_BUFFER_ALLOW_SWAP
	help
	  Allow doing a snapshot of a single CPU buffer instead of a
	  full swap (all buffers). If this is set, then the following is
	  allowed:

	      echo 1 > /sys/kernel/debug/tracing/per_cpu/cpu2/snapshot

	  After which, only the tracing buffer for CPU 2 was swapped with
	  the main tracing buffer, and the other CPU buffers remain the same.

	  When this is enabled, this adds a little more overhead to the
	  trace recording, as it needs to add some checks to synchronize
	  recording with swaps. But this does not affect the performance
	  of the overall system. This is enabled by default when the preempt
	  or irq latency tracers are enabled, as those need to swap as well
	  and already adds the overhead (plus a lot more).

config TRACE_BRANCH_PROFILING
	bool
	select GENERIC_TRACER

choice
	prompt "Branch Profiling"
	default BRANCH_PROFILE_NONE
	help
	 The branch profiling is a software profiler. It will add hooks
	 into the C conditionals to test which path a branch takes.

	 The likely/unlikely profiler only looks at the conditions that
	 are annotated with a likely or unlikely macro.

	 The "all branch" profiler will profile every if-statement in the
	 kernel. This profiler will also enable the likely/unlikely
	 profiler.

	 Either of the above profilers adds a bit of overhead to the system.
	 If unsure, choose "No branch profiling".

config BRANCH_PROFILE_NONE
	bool "No branch profiling"
	help
	  No branch profiling. Branch profiling adds a bit of overhead.
	  Only enable it if you want to analyse the branching behavior.
	  Otherwise keep it disabled.

config PROFILE_ANNOTATED_BRANCHES
	bool "Trace likely/unlikely profiler"
	select TRACE_BRANCH_PROFILING
	help
	  This tracer profiles all likely and unlikely macros
	  in the kernel. It will display the results in:

	  /sys/kernel/debug/tracing/trace_stat/branch_annotated

	  Note: this will add a significant overhead; only turn this
	  on if you need to profile the system's use of these macros.

config PROFILE_ALL_BRANCHES
	bool "Profile all if conditionals" if !FORTIFY_SOURCE
	select TRACE_BRANCH_PROFILING
	help
	  This tracer profiles all branch conditions. Every if ()
	  taken in the kernel is recorded whether it hit or miss.
	  The results will be displayed in:

	  /sys/kernel/debug/tracing/trace_stat/branch_all

	  This option also enables the likely/unlikely profiler.

	  This configuration, when enabled, will impose a great overhead
	  on the system. This should only be enabled when the system
	  is to be analyzed in much detail.
endchoice

config TRACING_BRANCHES
	bool
	help
	  Selected by tracers that will trace the likely and unlikely
	  conditions. This prevents the tracers themselves from being
	  profiled. Profiling the tracing infrastructure can only happen
	  when the likelys and unlikelys are not being traced.

config BRANCH_TRACER
	bool "Trace likely/unlikely instances"
	depends on TRACE_BRANCH_PROFILING
	select TRACING_BRANCHES
	help
	  This traces the events of likely and unlikely condition
	  calls in the kernel.  The difference between this and the
	  "Trace likely/unlikely profiler" is that this is not a
	  histogram of the callers, but actually places the calling
	  events into a running trace buffer to see when and where the
	  events happened, as well as their results.

	  Say N if unsure.

config BLK_DEV_IO_TRACE
	bool "Support for tracing block IO actions"
	depends on SYSFS
	depends on BLOCK
	select RELAY
	select DEBUG_FS
	select TRACEPOINTS
	select GENERIC_TRACER
	select STACKTRACE
	help
	  Say Y here if you want to be able to trace the block layer actions
	  on a given queue. Tracing allows you to see any traffic happening
	  on a block device queue. For more information (and the userspace
	  support tools needed), fetch the blktrace tools from:

	  git://git.kernel.dk/blktrace.git

	  Tracing also is possible using the ftrace interface, e.g.:

	    echo 1 > /sys/block/sda/sda1/trace/enable
	    echo blk > /sys/kernel/debug/tracing/current_tracer
	    cat /sys/kernel/debug/tracing/trace_pipe

	  If unsure, say N.

config KPROBE_EVENTS
	depends on KPROBES
	depends on HAVE_REGS_AND_STACK_ACCESS_API
	bool "Enable kprobes-based dynamic events"
	select TRACING
	select PROBE_EVENTS
	select DYNAMIC_EVENTS
	default y
	help
	  This allows the user to add tracing events (similar to tracepoints)
	  on the fly via the ftrace interface. See
	  Documentation/trace/kprobetrace.rst for more details.

	  Those events can be inserted wherever kprobes can probe, and record
	  various register and memory values.

	  This option is also required by perf-probe subcommand of perf tools.
	  If you want to use perf tools, this option is strongly recommended.

config KPROBE_EVENTS_ON_NOTRACE
	bool "Do NOT protect notrace function from kprobe events"
	depends on KPROBE_EVENTS
	depends on DYNAMIC_FTRACE
	default n
	help
	  This is only for the developers who want to debug ftrace itself
	  using kprobe events.

	  If kprobes can use ftrace instead of breakpoint, ftrace related
	  functions are protected from kprobe-events to prevent an infinite
	  recursion or any unexpected execution path which leads to a kernel
	  crash.

	  This option disables such protection and allows you to put kprobe
	  events on ftrace functions for debugging ftrace by itself.
	  Note that this might let you shoot yourself in the foot.

	  If unsure, say N.

config UPROBE_EVENTS
	bool "Enable uprobes-based dynamic events"
	depends on ARCH_SUPPORTS_UPROBES
	depends on MMU
	depends on PERF_EVENTS
	select UPROBES
	select PROBE_EVENTS
	select DYNAMIC_EVENTS
	select TRACING
	default y
	help
	  This allows the user to add tracing events on top of userspace
	  dynamic events (similar to tracepoints) on the fly via the trace
	  events interface. Those events can be inserted wherever uprobes
	  can probe, and record various registers.
	  This option is required if you plan to use perf-probe subcommand
	  of perf tools on user space applications.

config BPF_EVENTS
	depends on BPF_SYSCALL
	depends on (KPROBE_EVENTS || UPROBE_EVENTS) && PERF_EVENTS
	bool
	default y
	help
	  This allows the user to attach BPF programs to kprobe, uprobe, and
	  tracepoint events.

config DYNAMIC_EVENTS
	def_bool n

config PROBE_EVENTS
	def_bool n

config BPF_KPROBE_OVERRIDE
	bool "Enable BPF programs to override a kprobed function"
	depends on BPF_EVENTS
	depends on FUNCTION_ERROR_INJECTION
	default n
	help
	 Allows BPF to override the execution of a probed function and
	 set a different return value.  This is used for error injection.

config FTRACE_MCOUNT_RECORD
	def_bool y
	depends on DYNAMIC_FTRACE
	depends on HAVE_FTRACE_MCOUNT_RECORD

config FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
	bool
	depends on FTRACE_MCOUNT_RECORD

config FTRACE_MCOUNT_USE_CC
	def_bool y
	depends on $(cc-option,-mrecord-mcount)
	depends on !FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
	depends on FTRACE_MCOUNT_RECORD

config FTRACE_MCOUNT_USE_OBJTOOL
	def_bool y
	depends on HAVE_OBJTOOL_MCOUNT
	depends on !FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
	depends on !FTRACE_MCOUNT_USE_CC
	depends on FTRACE_MCOUNT_RECORD

config FTRACE_MCOUNT_USE_RECORDMCOUNT
	def_bool y
	depends on !FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
	depends on !FTRACE_MCOUNT_USE_CC
	depends on !FTRACE_MCOUNT_USE_OBJTOOL
	depends on FTRACE_MCOUNT_RECORD

config TRACING_MAP
	bool
	depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
	help
	  tracing_map is a special-purpose lock-free map for tracing,
	  separated out as a stand-alone facility in order to allow it
	  to be shared between multiple tracers.  It isn't meant to be
	  generally used outside of that context, and is normally
	  selected by tracers that use it.

config SYNTH_EVENTS
	bool "Synthetic trace events"
	select TRACING
	select DYNAMIC_EVENTS
	default n
	help
	  Synthetic events are user-defined trace events that can be
	  used to combine data from other trace events or in fact any
	  data source.  Synthetic events can be generated indirectly
	  via the trace() action of histogram triggers or directly
	  by way of an in-kernel API.

	  See Documentation/trace/events.rst or
	  Documentation/trace/histogram.rst for details and examples.

	  If in doubt, say N.

config HIST_TRIGGERS
	bool "Histogram triggers"
	depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
	select TRACING_MAP
	select TRACING
	select DYNAMIC_EVENTS
	select SYNTH_EVENTS
	default n
	help
	  Hist triggers allow one or more arbitrary trace event fields
	  to be aggregated into hash tables and dumped to stdout by
	  reading a debugfs/tracefs file.  They're useful for
	  gathering quick and dirty (though precise) summaries of
	  event activity as an initial guide for further investigation
	  using more advanced tools.

	  Inter-event tracing of quantities such as latencies is also
	  supported using hist triggers under this option.

	  See Documentation/trace/histogram.rst.
	  If in doubt, say N.

config TRACE_EVENT_INJECT
	bool "Trace event injection"
	depends on TRACING
	help
	  Allow user-space to inject a specific trace event into the ring
	  buffer. This is mainly used for testing purpose.

	  If unsure, say N.

config TRACEPOINT_BENCHMARK
	bool "Add tracepoint that benchmarks tracepoints"
	help
	 This option creates the tracepoint "benchmark:benchmark_event".
	 When the tracepoint is enabled, it kicks off a kernel thread that
	 goes into an infinite loop (calling cond_resched() to let other tasks
	 run), and calls the tracepoint. Each iteration will record the time
	 it took to write to the tracepoint and the next iteration that
	 data will be passed to the tracepoint itself. That is, the tracepoint
	 will report the time it took to do the previous tracepoint.
	 The string written to the tracepoint is a static string of 128 bytes
	 to keep the time the same. The initial string is simply a write of
	 "START". The second string records the cold cache time of the first
	 write which is not added to the rest of the calculations.

	 As it is a tight loop, it benchmarks as hot cache. That's fine because
	 we care most about hot paths that are probably in cache already.

	 An example of the output:

	      START
	      first=3672 [COLD CACHED]
	      last=632 first=3672 max=632 min=632 avg=316 std=446 std^2=199712
	      last=278 first=3672 max=632 min=278 avg=303 std=316 std^2=100337
	      last=277 first=3672 max=632 min=277 avg=296 std=258 std^2=67064
	      last=273 first=3672 max=632 min=273 avg=292 std=224 std^2=50411
	      last=273 first=3672 max=632 min=273 avg=288 std=200 std^2=40389
	      last=281 first=3672 max=632 min=273 avg=287 std=183 std^2=33666

config RING_BUFFER_BENCHMARK
	tristate "Ring buffer benchmark stress tester"
	depends on RING_BUFFER
	help
	  This option creates a test to stress the ring buffer and benchmark it.
	  It creates its own ring buffer such that it will not interfere with
	  any other users of the ring buffer (such as ftrace). It then creates
	  a producer and consumer that will run for 10 seconds and sleep for
	  10 seconds. Each interval it will print out the number of events
	  it recorded and give a rough estimate of how long each iteration took.

	  It does not disable interrupts or raise its priority, so it may be
	  affected by processes that are running.

	  If unsure, say N.

config TRACE_EVAL_MAP_FILE
	bool "Show eval mappings for trace events"
	depends on TRACING
	help
	The "print fmt" of the trace events will show the enum/sizeof names
	instead of their values. This can cause problems for user space tools
	that use this string to parse the raw data as user space does not know
	how to convert the string to its value.

	To fix this, there's a special macro in the kernel that can be used
	to convert an enum/sizeof into its value. If this macro is used, then
	the print fmt strings will be converted to their values.

	If something does not get converted properly, this option can be
	used to show what enums/sizeof the kernel tried to convert.

	This option is for debugging the conversions. A file is created
	in the tracing directory called "eval_map" that will show the
	names matched with their values and what trace event system they
	belong to.

	Normally, the mapping of the strings to values will be freed after
	boot up or module load. With this option, they will not be freed, as
	they are needed for the "eval_map" file. Enabling this option will
	increase the memory footprint of the running kernel.

	If unsure, say N.

config FTRACE_RECORD_RECURSION
	bool "Record functions that recurse in function tracing"
	depends on FUNCTION_TRACER
	help
	  All callbacks that attach to the function tracing have some sort
	  of protection against recursion. Even though the protection exists,
	  it adds overhead. This option will create a file in the tracefs
	  file system called "recursed_functions" that will list the functions
	  that triggered a recursion.

	  This will add more overhead to cases that have recursion.

	  If unsure, say N

config FTRACE_RECORD_RECURSION_SIZE
	int "Max number of recursed functions to record"
	default 128
	depends on FTRACE_RECORD_RECURSION
	help
	  This defines the limit of number of functions that can be
	  listed in the "recursed_functions" file, that lists all
	  the functions that caused a recursion to happen.
	  This file can be reset, but the limit can not change in
	  size at runtime.

config RING_BUFFER_RECORD_RECURSION
	bool "Record functions that recurse in the ring buffer"
	depends on FTRACE_RECORD_RECURSION
	# default y, because it is coupled with FTRACE_RECORD_RECURSION
	default y
	help
	  The ring buffer has its own internal recursion. Although when
	  recursion happens it won't cause harm because of the protection,
	  but it does cause an unwanted overhead. Enabling this option will
	  place where recursion was detected into the ftrace "recursed_functions"
	  file.

	  This will add more overhead to cases that have recursion.

config GCOV_PROFILE_FTRACE
	bool "Enable GCOV profiling on ftrace subsystem"
	depends on GCOV_KERNEL
	help
	  Enable GCOV profiling on ftrace subsystem for checking
	  which functions/lines are tested.

	  If unsure, say N.

	  Note that on a kernel compiled with this config, ftrace will
	  run significantly slower.

892config FTRACE_SELFTEST
893	bool
894
# Boot-time self-tests covering every configured tracer.
config FTRACE_STARTUP_TEST
	bool "Perform a startup test on ftrace"
	depends on GENERIC_TRACER
	select FTRACE_SELFTEST
	help
	  This option performs a series of startup tests on ftrace. On bootup
	  a series of tests are made to verify that the tracer is
	  functioning properly. It will do tests on all the configured
	  tracers of ftrace.
904
config EVENT_TRACE_STARTUP_TEST
	bool "Run selftest on trace events"
	depends on FTRACE_STARTUP_TEST
	default y
	help
	  This option performs a test on all trace events in the system.
	  It basically just enables each event and runs some code that
	  will trigger events (not necessarily the event it enables).
	  This may take some time to run as there are a lot of events.
914
# Extends EVENT_TRACE_STARTUP_TEST to also exercise every syscall event.
config EVENT_TRACE_TEST_SYSCALLS
	bool "Run selftest on syscall events"
	depends on EVENT_TRACE_STARTUP_TEST
	help
	 This option will also enable testing every syscall event.
	 It only enables the event and disables it and runs various loads
	 with the event enabled. This adds a bit more time for kernel boot
	 up since it runs this on every system call defined.

	 TBD - enable a way to actually call the syscalls as we test their
	       events
926
config RING_BUFFER_STARTUP_TEST
	bool "Ring buffer startup self test"
	depends on RING_BUFFER
	help
	 Run a simple self test on the ring buffer on boot up. Late in the
	 kernel boot sequence, the test will start and kick off
	 a thread per cpu. Each thread will write various size events
	 into the ring buffer. Another thread is created to send IPIs
	 to each of the threads, where the IPI handler will also write
	 to the ring buffer, to test/stress the nesting ability.
	 If any anomalies are discovered, a warning will be displayed
	 and all ring buffers will be disabled.

	 The test runs for 10 seconds. This will slow your boot time
	 by at least 10 more seconds.

	 At the end of the test, statistics and more checks are done.
	 It will output the stats of each per cpu buffer. What
	 was written, the sizes, what was read, what was lost, and
	 other similar details.

	 If unsure, say N
949
config RING_BUFFER_VALIDATE_TIME_DELTAS
	bool "Verify ring buffer time stamp deltas"
	depends on RING_BUFFER
	help
	  This will audit the time stamps on the ring buffer sub
	  buffer to make sure that all the time deltas for the
	  events on a sub buffer match the current time stamp.
	  This audit is performed for every event that is not
	  interrupted, or interrupting another event. A check
	  is also made when traversing sub buffers to make sure
	  that all the deltas on the previous sub buffer do not
	  add up to be greater than the current time stamp.

	  NOTE: This adds significant overhead to recording of events,
	  and should only be used to test the logic of the ring buffer.
	  Do not use it on production systems.

	  Only say Y if you understand what this does, and you
	  still want it enabled. Otherwise say N
969
# Deliberately dangerous test module (writes garbage to IO memory);
# module-only (depends on m) so it can never be built into the kernel.
config MMIOTRACE_TEST
	tristate "Test module for mmiotrace"
	depends on MMIOTRACE && m
	help
	  This is a dumb module for testing mmiotrace. It is very dangerous
	  as it will write garbage to IO memory starting at a given address.
	  However, it should be safe to use on e.g. unused portion of VRAM.

	  Say N, unless you absolutely know what you are doing.
979
# Synthetic latency generator for exercising the preempt/irq-off latency
# tracers; module-only (depends on m).
config PREEMPTIRQ_DELAY_TEST
	tristate "Test module to create a preempt / IRQ disable delay thread to test latency tracers"
	depends on m
	help
	  Select this option to build a test module that can help test latency
	  tracers by executing a preempt or irq disable section with a user
	  configurable delay. The module busy waits for the duration of the
	  critical section.

	  For example, the following invocation generates a burst of three
	  irq-disabled critical sections for 500us:
	  modprobe preemptirq_delay_test test_mode=irq delay=500 burst_size=3

	  What's more, if you want to attach the test on the cpu which the latency
	  tracer is running on, specify cpu_affinity=cpu_num at the end of the
	  command.

	  If unsure, say N
998
config SYNTH_EVENT_GEN_TEST
	tristate "Test module for in-kernel synthetic event generation"
	depends on SYNTH_EVENTS
	help
	  This option creates a test module to check the base
	  functionality of in-kernel synthetic event definition and
	  generation.

	  To test, insert the module, and then check the trace buffer
	  for the generated sample events.

	  If unsure, say N.
1011
config KPROBE_EVENT_GEN_TEST
	tristate "Test module for in-kernel kprobe event generation"
	depends on KPROBE_EVENTS
	help
	  This option creates a test module to check the base
	  functionality of in-kernel kprobe event definition.

	  To test, insert the module, and then check the trace buffer
	  for the generated kprobe events.

	  If unsure, say N.
1023
config HIST_TRIGGERS_DEBUG
	bool "Hist trigger debug support"
	depends on HIST_TRIGGERS
	help
	  Add "hist_debug" file for each event, which when read will
	  dump out a bunch of internal details about the hist triggers
	  defined on that event.

	  The hist_debug file serves a couple of purposes:

	    - Helps developers verify that nothing is broken.

	    - Provides educational information to support the details
	      of the hist trigger internals as described by
	      Documentation/trace/histogram-design.rst.

	  The hist_debug output only covers the data structures
	  related to the histogram definitions themselves and doesn't
	  display the internals of map buckets or variable values of
	  running histograms.

	  If unsure, say N.
1046
1047endif # FTRACE
1048