#
# Architectures that offer an FUNCTION_TRACER implementation should
#  select HAVE_FUNCTION_TRACER:
#

config USER_STACKTRACE_SUPPORT
	bool

config NOP_TRACER
	bool

config HAVE_FTRACE_NMI_ENTER
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_FUNCTION_TRACER
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_FUNCTION_GRAPH_TRACER
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_FUNCTION_GRAPH_FP_TEST
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_DYNAMIC_FTRACE
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_DYNAMIC_FTRACE_WITH_REGS
	bool

config HAVE_FTRACE_MCOUNT_RECORD
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_SYSCALL_TRACEPOINTS
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_FENTRY
	bool
	help
	  Arch supports the gcc options -pg with -mfentry

config HAVE_C_RECORDMCOUNT
	bool
	help
	  C version of recordmcount available?

config TRACER_MAX_TRACE
	bool

config TRACE_CLOCK
	bool

config RING_BUFFER
	bool
	select TRACE_CLOCK
	select IRQ_WORK

config FTRACE_NMI_ENTER
	bool
	depends on HAVE_FTRACE_NMI_ENTER
	default y

config EVENT_TRACING
	select CONTEXT_SWITCH_TRACER
	bool

config GPU_TRACEPOINTS
	bool

config CONTEXT_SWITCH_TRACER
	bool

config RING_BUFFER_ALLOW_SWAP
	bool
	help
	  Allow the use of ring_buffer_swap_cpu.
	  Adds a very slight overhead to tracing when enabled.

# All tracer options should select GENERIC_TRACER. For those options that are
# enabled by all tracers (context switch and event tracer) they select TRACING.
# This allows those options to appear when no other tracer is selected. But the
# options do not appear when something else selects it. We need the two options
# GENERIC_TRACER and TRACING to avoid circular dependencies to accomplish the
# hiding of the automatic options.

config TRACING
	bool
	select DEBUG_FS
	select RING_BUFFER
	select STACKTRACE if STACKTRACE_SUPPORT
	select TRACEPOINTS
	select NOP_TRACER
	select BINARY_PRINTF
	select EVENT_TRACING
	select TRACE_CLOCK

config GENERIC_TRACER
	bool
	select TRACING

#
# Minimum requirements an architecture has to meet for us to
# be able to offer generic tracing facilities:
#
config TRACING_SUPPORT
	bool
	# PPC32 has no irqflags tracing support, but it can use most of the
	# tracers anyway, they were tested to build and work. Note that new
	# exceptions to this list aren't welcomed, better implement the
	# irqflags tracing for your architecture.
	depends on TRACE_IRQFLAGS_SUPPORT || PPC32
	depends on STACKTRACE_SUPPORT
	default y

if TRACING_SUPPORT

menuconfig FTRACE
	bool "Tracers"
	default y if DEBUG_KERNEL
	help
	  Enable the kernel tracing infrastructure.

if FTRACE

config FUNCTION_TRACER
	bool "Kernel Function Tracer"
	depends on HAVE_FUNCTION_TRACER
	select KALLSYMS
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	help
	  Enable the kernel to trace every kernel function. This is done
	  by using a compiler feature to insert a small, 5-byte No-Operation
	  instruction at the beginning of every kernel function, which NOP
	  sequence is then dynamically patched into a tracer call when
	  tracing is enabled by the administrator. If it's runtime disabled
	  (the bootup default), then the overhead of the instructions is very
	  small and not measurable even in micro-benchmarks.
config FUNCTION_GRAPH_TRACER
	bool "Kernel Function Graph Tracer"
	depends on HAVE_FUNCTION_GRAPH_TRACER
	depends on FUNCTION_TRACER
	depends on !X86_32 || !CC_OPTIMIZE_FOR_SIZE
	default y
	help
	  Enable the kernel to trace a function at both its return
	  and its entry.
	  Its first purpose is to trace the duration of functions and
	  draw a call graph for each thread with some information like
	  the return value. This is done by setting the current return
	  address on the current task structure into a stack of calls.


config PREEMPTIRQ_EVENTS
	bool "Enable trace events for preempt and irq disable/enable"
	select TRACE_IRQFLAGS
	depends on DEBUG_PREEMPT || !PROVE_LOCKING
	default n
	help
	  Enable tracing of disable and enable events for preemption and irqs.
	  For tracing preempt disable/enable events, DEBUG_PREEMPT must be
	  enabled. For tracing irq disable/enable events, PROVE_LOCKING must
	  be disabled.

config IRQSOFF_TRACER
	bool "Interrupts-off Latency Tracer"
	default n
	depends on TRACE_IRQFLAGS_SUPPORT
	depends on !ARCH_USES_GETTIMEOFFSET
	select TRACE_IRQFLAGS
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	select RING_BUFFER_ALLOW_SWAP
	select TRACER_SNAPSHOT
	select TRACER_SNAPSHOT_PER_CPU_SWAP
	help
	  This option measures the time spent in irqs-off critical
	  sections, with microsecond accuracy.

	  The default measurement method is a maximum search, which is
	  disabled by default and can be runtime (re-)started
	  via:

	      echo 0 > /sys/kernel/debug/tracing/tracing_max_latency

	  (Note that kernel size and overhead increase with this option
	  enabled. This option and the preempt-off timing option can be
	  used together or separately.)
config PREEMPT_TRACER
	bool "Preemption-off Latency Tracer"
	default n
	depends on !ARCH_USES_GETTIMEOFFSET
	depends on PREEMPT
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	select RING_BUFFER_ALLOW_SWAP
	select TRACER_SNAPSHOT
	select TRACER_SNAPSHOT_PER_CPU_SWAP
	help
	  This option measures the time spent in preemption-off critical
	  sections, with microsecond accuracy.

	  The default measurement method is a maximum search, which is
	  disabled by default and can be runtime (re-)started
	  via:

	      echo 0 > /sys/kernel/debug/tracing/tracing_max_latency

	  (Note that kernel size and overhead increase with this option
	  enabled. This option and the irqs-off timing option can be
	  used together or separately.)

config SCHED_TRACER
	bool "Scheduling Latency Tracer"
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	select TRACER_MAX_TRACE
	select TRACER_SNAPSHOT
	help
	  This tracer tracks the latency of the highest priority task
	  to be scheduled in, starting from the point it has woken up.

config ENABLE_DEFAULT_TRACERS
	bool "Trace process context switches and events"
	depends on !GENERIC_TRACER
	select TRACING
	help
	  This tracer hooks to various trace points in the kernel,
	  allowing the user to pick and choose which trace point they
	  want to trace. It also includes the sched_switch tracer plugin.

config FTRACE_SYSCALLS
	bool "Trace syscalls"
	depends on HAVE_SYSCALL_TRACEPOINTS
	select GENERIC_TRACER
	select KALLSYMS
	help
	  Basic tracer to catch the syscall entry and exit events.
config TRACER_SNAPSHOT
	bool "Create a snapshot trace buffer"
	select TRACER_MAX_TRACE
	help
	  Allow tracing users to take snapshot of the current buffer using the
	  ftrace interface, e.g.:

	      echo 1 > /sys/kernel/debug/tracing/snapshot
	      cat snapshot

config TRACER_SNAPSHOT_PER_CPU_SWAP
	bool "Allow snapshot to swap per CPU"
	depends on TRACER_SNAPSHOT
	select RING_BUFFER_ALLOW_SWAP
	help
	  Allow doing a snapshot of a single CPU buffer instead of a
	  full swap (all buffers). If this is set, then the following is
	  allowed:

	      echo 1 > /sys/kernel/debug/tracing/per_cpu/cpu2/snapshot

	  After which, only the tracing buffer for CPU 2 was swapped with
	  the main tracing buffer, and the other CPU buffers remain the same.

	  When this is enabled, this adds a little more overhead to the
	  trace recording, as it needs to add some checks to synchronize
	  recording with swaps. But this does not affect the performance
	  of the overall system. This is enabled by default when the preempt
	  or irq latency tracers are enabled, as those need to swap as well
	  and already adds the overhead (plus a lot more).

config TRACE_BRANCH_PROFILING
	bool
	select GENERIC_TRACER

choice
	prompt "Branch Profiling"
	default BRANCH_PROFILE_NONE
	help
	  The branch profiling is a software profiler. It will add hooks
	  into the C conditionals to test which path a branch takes.

	  The likely/unlikely profiler only looks at the conditions that
	  are annotated with a likely or unlikely macro.

	  The "all branch" profiler will profile every if-statement in the
	  kernel. This profiler will also enable the likely/unlikely
	  profiler.

	  Either of the above profilers adds a bit of overhead to the system.
	  If unsure, choose "No branch profiling".

config BRANCH_PROFILE_NONE
	bool "No branch profiling"
	help
	  No branch profiling. Branch profiling adds a bit of overhead.
	  Only enable it if you want to analyse the branching behavior.
	  Otherwise keep it disabled.

config PROFILE_ANNOTATED_BRANCHES
	bool "Trace likely/unlikely profiler"
	select TRACE_BRANCH_PROFILING
	help
	  This tracer profiles all likely and unlikely macros
	  in the kernel. It will display the results in:

	  /sys/kernel/debug/tracing/trace_stat/branch_annotated

	  Note: this will add a significant overhead; only turn this
	  on if you need to profile the system's use of these macros.

config PROFILE_ALL_BRANCHES
	bool "Profile all if conditionals"
	select TRACE_BRANCH_PROFILING
	help
	  This tracer profiles all branch conditions. Every if ()
	  taken in the kernel is recorded whether it hit or miss.
	  The results will be displayed in:

	  /sys/kernel/debug/tracing/trace_stat/branch_all

	  This option also enables the likely/unlikely profiler.

	  This configuration, when enabled, will impose a great overhead
	  on the system. This should only be enabled when the system
	  is to be analyzed in much detail.
endchoice

config TRACING_BRANCHES
	bool
	help
	  Selected by tracers that will trace the likely and unlikely
	  conditions. This prevents the tracers themselves from being
	  profiled. Profiling the tracing infrastructure can only happen
	  when the likelys and unlikelys are not being traced.

config BRANCH_TRACER
	bool "Trace likely/unlikely instances"
	depends on TRACE_BRANCH_PROFILING
	select TRACING_BRANCHES
	help
	  This traces the events of likely and unlikely condition
	  calls in the kernel. The difference between this and the
	  "Trace likely/unlikely profiler" is that this is not a
	  histogram of the callers, but actually places the calling
	  events into a running trace buffer to see when and where the
	  events happened, as well as their results.

	  Say N if unsure.
config STACK_TRACER
	bool "Trace max stack"
	depends on HAVE_FUNCTION_TRACER
	select FUNCTION_TRACER
	select STACKTRACE
	select KALLSYMS
	help
	  This special tracer records the maximum stack footprint of the
	  kernel and displays it in /sys/kernel/debug/tracing/stack_trace.

	  This tracer works by hooking into every function call that the
	  kernel executes, and keeping a maximum stack depth value and
	  stack-trace saved. If this is configured with DYNAMIC_FTRACE
	  then it will not have any overhead while the stack tracer
	  is disabled.

	  To enable the stack tracer on bootup, pass in 'stacktrace'
	  on the kernel command line.

	  The stack tracer can also be enabled or disabled via the
	  sysctl kernel.stack_tracer_enabled

	  Say N if unsure.

config BLK_DEV_IO_TRACE
	bool "Support for tracing block IO actions"
	depends on SYSFS
	depends on BLOCK
	select RELAY
	select DEBUG_FS
	select TRACEPOINTS
	select GENERIC_TRACER
	select STACKTRACE
	help
	  Say Y here if you want to be able to trace the block layer actions
	  on a given queue. Tracing allows you to see any traffic happening
	  on a block device queue. For more information (and the userspace
	  support tools needed), fetch the blktrace tools from:

	  git://git.kernel.dk/blktrace.git

	  Tracing also is possible using the ftrace interface, e.g.:

	    echo 1 > /sys/block/sda/sda1/trace/enable
	    echo blk > /sys/kernel/debug/tracing/current_tracer
	    cat /sys/kernel/debug/tracing/trace_pipe

	  If unsure, say N.

config KPROBE_EVENT
	depends on KPROBES
	depends on HAVE_REGS_AND_STACK_ACCESS_API
	bool "Enable kprobes-based dynamic events"
	select TRACING
	select PROBE_EVENTS
	default y
	help
	  This allows the user to add tracing events (similar to tracepoints)
	  on the fly via the ftrace interface. See
	  Documentation/trace/kprobetrace.txt for more details.

	  Those events can be inserted wherever kprobes can probe, and record
	  various register and memory values.

	  This option is also required by perf-probe subcommand of perf tools.
	  If you want to use perf tools, this option is strongly recommended.

config UPROBE_EVENT
	bool "Enable uprobes-based dynamic events"
	depends on ARCH_SUPPORTS_UPROBES
	depends on MMU
	depends on PERF_EVENTS
	select UPROBES
	select PROBE_EVENTS
	select TRACING
	default n
	help
	  This allows the user to add tracing events on top of userspace
	  dynamic events (similar to tracepoints) on the fly via the trace
	  events interface. Those events can be inserted wherever uprobes
	  can probe, and record various registers.
	  This option is required if you plan to use perf-probe subcommand
	  of perf tools on user space applications.

config BPF_EVENTS
	depends on BPF_SYSCALL
	depends on (KPROBE_EVENT || UPROBE_EVENT) && PERF_EVENTS
	bool
	default y
	help
	  This allows the user to attach BPF programs to kprobe events.

config PROBE_EVENTS
	def_bool n

config DYNAMIC_FTRACE
	bool "enable/disable function tracing dynamically"
	depends on FUNCTION_TRACER
	depends on HAVE_DYNAMIC_FTRACE
	default y
	help
	  This option will modify all the calls to function tracing
	  dynamically (will patch them out of the binary image and
	  replace them with a No-Op instruction) on boot up. During
	  compile time, a table is made of all the locations that ftrace
	  can function trace, and this table is linked into the kernel
	  image. When this is enabled, functions can be individually
	  enabled, and the functions not enabled will not affect
	  performance of the system.

	  See the files in /sys/kernel/debug/tracing:
	    available_filter_functions
	    set_ftrace_filter
	    set_ftrace_notrace

	  This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but
	  otherwise has native performance as long as no tracing is active.

config DYNAMIC_FTRACE_WITH_REGS
	def_bool y
	depends on DYNAMIC_FTRACE
	depends on HAVE_DYNAMIC_FTRACE_WITH_REGS

config FUNCTION_PROFILER
	bool "Kernel function profiler"
	depends on FUNCTION_TRACER
	default n
	help
	  This option enables the kernel function profiler. A file is created
	  in debugfs called function_profile_enabled which defaults to zero.
	  When a 1 is echoed into this file profiling begins, and when a
	  zero is entered, profiling stops. A "functions" file is created in
	  the trace_stats directory; this file shows the list of functions that
	  have been hit and their counters.

	  If in doubt, say N.

config FTRACE_MCOUNT_RECORD
	def_bool y
	depends on DYNAMIC_FTRACE
	depends on HAVE_FTRACE_MCOUNT_RECORD

config FTRACE_SELFTEST
	bool

config FTRACE_STARTUP_TEST
	bool "Perform a startup test on ftrace"
	depends on GENERIC_TRACER
	select FTRACE_SELFTEST
	help
	  This option performs a series of startup tests on ftrace. On bootup
	  a series of tests are made to verify that the tracer is
	  functioning properly. It will do tests on all the configured
	  tracers of ftrace.

config EVENT_TRACE_TEST_SYSCALLS
	bool "Run selftest on syscall events"
	depends on FTRACE_STARTUP_TEST
	help
	  This option will also enable testing every syscall event.
	  It only enables the event and disables it and runs various loads
	  with the event enabled. This adds a bit more time for kernel boot
	  up since it runs this on every system call defined.

	  TBD - enable a way to actually call the syscalls as we test their
	        events

config MMIOTRACE
	bool "Memory mapped IO tracing"
	depends on HAVE_MMIOTRACE_SUPPORT && PCI
	select GENERIC_TRACER
	help
	  Mmiotrace traces Memory Mapped I/O access and is meant for
	  debugging and reverse engineering. It is called from the ioremap
	  implementation and works via page faults. Tracing is disabled by
	  default and can be enabled at run-time.

	  See Documentation/trace/mmiotrace.txt.
	  If you are not helping to develop drivers, say N.

config MMIOTRACE_TEST
	tristate "Test module for mmiotrace"
	depends on MMIOTRACE && m
	help
	  This is a dumb module for testing mmiotrace. It is very dangerous
	  as it will write garbage to IO memory starting at a given address.
	  However, it should be safe to use on e.g. unused portion of VRAM.

	  Say N, unless you absolutely know what you are doing.

config TRACEPOINT_BENCHMARK
	bool "Add tracepoint that benchmarks tracepoints"
	help
	  This option creates the tracepoint "benchmark:benchmark_event".
	  When the tracepoint is enabled, it kicks off a kernel thread that
	  goes into an infinite loop (calling cond_sched() to let other tasks
	  run), and calls the tracepoint. Each iteration will record the time
	  it took to write to the tracepoint and the next iteration that
	  data will be passed to the tracepoint itself. That is, the tracepoint
	  will report the time it took to do the previous tracepoint.
	  The string written to the tracepoint is a static string of 128 bytes
	  to keep the time the same. The initial string is simply a write of
	  "START". The second string records the cold cache time of the first
	  write which is not added to the rest of the calculations.

	  As it is a tight loop, it benchmarks as hot cache. That's fine because
	  we care most about hot paths that are probably in cache already.

	  An example of the output:

	     START
	     first=3672 [COLD CACHED]
	     last=632 first=3672 max=632 min=632 avg=316 std=446 std^2=199712
	     last=278 first=3672 max=632 min=278 avg=303 std=316 std^2=100337
	     last=277 first=3672 max=632 min=277 avg=296 std=258 std^2=67064
	     last=273 first=3672 max=632 min=273 avg=292 std=224 std^2=50411
	     last=273 first=3672 max=632 min=273 avg=288 std=200 std^2=40389
	     last=281 first=3672 max=632 min=273 avg=287 std=183 std^2=33666


config RING_BUFFER_BENCHMARK
	tristate "Ring buffer benchmark stress tester"
	depends on RING_BUFFER
	help
	  This option creates a test to stress the ring buffer and benchmark it.
	  It creates its own ring buffer such that it will not interfere with
	  any other users of the ring buffer (such as ftrace). It then creates
	  a producer and consumer that will run for 10 seconds and sleep for
	  10 seconds. Each interval it will print out the number of events
	  it recorded and give a rough estimate of how long each iteration took.

	  It does not disable interrupts or raise its priority, so it may be
	  affected by processes that are running.

	  If unsure, say N.

config RING_BUFFER_STARTUP_TEST
	bool "Ring buffer startup self test"
	depends on RING_BUFFER
	help
	  Run a simple self test on the ring buffer on boot up. Late in the
	  kernel boot sequence, the test will start that kicks off
	  a thread per cpu. Each thread will write various size events
	  into the ring buffer. Another thread is created to send IPIs
	  to each of the threads, where the IPI handler will also write
	  to the ring buffer, to test/stress the nesting ability.
	  If any anomalies are discovered, a warning will be displayed
	  and all ring buffers will be disabled.

	  The test runs for 10 seconds. This will slow your boot time
	  by at least 10 more seconds.

	  At the end of the test, statics and more checks are done.
	  It will output the stats of each per cpu buffer. What
	  was written, the sizes, what was read, what was lost, and
	  other similar details.

	  If unsure, say N

config TRACE_ENUM_MAP_FILE
	bool "Show enum mappings for trace events"
	depends on TRACING
	help
	  The "print fmt" of the trace events will show the enum names instead
	  of their values. This can cause problems for user space tools that
	  use this string to parse the raw data as user space does not know
	  how to convert the string to its value.

	  To fix this, there's a special macro in the kernel that can be used
	  to convert the enum into its value. If this macro is used, then the
	  print fmt strings will have the enums converted to their values.

	  If something does not get converted properly, this option can be
	  used to show what enums the kernel tried to convert.

	  This option is for debugging the enum conversions. A file is created
	  in the tracing directory called "enum_map" that will show the enum
	  names matched with their values and what trace event system they
	  belong too.

	  Normally, the mapping of the strings to values will be freed after
	  boot up or module load. With this option, they will not be freed, as
	  they are needed for the "enum_map" file. Enabling this option will
	  increase the memory footprint of the running kernel.

	  If unsure, say N

config TRACING_EVENTS_GPIO
	bool "Trace gpio events"
	depends on GPIOLIB
	default y
	help
	  Enable tracing events for gpio subsystem

endif # FTRACE

endif # TRACING_SUPPORT