#
# Architectures that offer a FUNCTION_TRACER implementation should
#  select HAVE_FUNCTION_TRACER:
#

config USER_STACKTRACE_SUPPORT
	bool

config NOP_TRACER
	bool

config HAVE_FTRACE_NMI_ENTER
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_FUNCTION_TRACER
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_FUNCTION_GRAPH_TRACER
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_DYNAMIC_FTRACE
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_DYNAMIC_FTRACE_WITH_REGS
	bool

config HAVE_FTRACE_MCOUNT_RECORD
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_SYSCALL_TRACEPOINTS
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_FENTRY
	bool
	help
	  Arch supports the gcc options -pg with -mfentry

config HAVE_C_RECORDMCOUNT
	bool
	help
	  C version of recordmcount available?

config TRACER_MAX_TRACE
	bool

config TRACE_CLOCK
	bool

config RING_BUFFER
	bool
	select TRACE_CLOCK
	select IRQ_WORK

config FTRACE_NMI_ENTER
	bool
	depends on HAVE_FTRACE_NMI_ENTER
	default y

config EVENT_TRACING
	select CONTEXT_SWITCH_TRACER
	select GLOB
	bool

config GPU_TRACEPOINTS
	bool

config CONTEXT_SWITCH_TRACER
	bool

config RING_BUFFER_ALLOW_SWAP
	bool
	help
	  Allow the use of ring_buffer_swap_cpu.
	  Adds a very slight overhead to tracing when enabled.

# All tracer options should select GENERIC_TRACER. For those options that are
# enabled by all tracers (context switch and event tracer) they select TRACING.
# This allows those options to appear when no other tracer is selected. But the
# options do not appear when something else selects it. We need the two options
# GENERIC_TRACER and TRACING to avoid circular dependencies to accomplish the
# hiding of the automatic options.

config TRACING
	bool
	select DEBUG_FS
	select RING_BUFFER
	select STACKTRACE if STACKTRACE_SUPPORT
	select TRACEPOINTS
	select NOP_TRACER
	select BINARY_PRINTF
	select EVENT_TRACING
	select TRACE_CLOCK

config GENERIC_TRACER
	bool
	select TRACING

#
# Minimum requirements an architecture has to meet for us to
# be able to offer generic tracing facilities:
#
config TRACING_SUPPORT
	bool
	# PPC32 has no irqflags tracing support, but it can use most of the
	# tracers anyway, they were tested to build and work. Note that new
	# exceptions to this list aren't welcomed, better implement the
	# irqflags tracing for your architecture.
	depends on TRACE_IRQFLAGS_SUPPORT || PPC32
	depends on STACKTRACE_SUPPORT
	default y

if TRACING_SUPPORT

menuconfig FTRACE
	bool "Tracers"
	default y if DEBUG_KERNEL
	help
	  Enable the kernel tracing infrastructure.

if FTRACE

config FUNCTION_TRACER
	bool "Kernel Function Tracer"
	depends on HAVE_FUNCTION_TRACER
	select KALLSYMS
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	select GLOB
	select TASKS_RCU if PREEMPT
	help
	  Enable the kernel to trace every kernel function. This is done
	  by using a compiler feature to insert a small, 5-byte No-Operation
	  instruction at the beginning of every kernel function, which NOP
	  sequence is then dynamically patched into a tracer call when
	  tracing is enabled by the administrator. If it's runtime disabled
	  (the bootup default), then the overhead of the instructions is very
	  small and not measurable even in micro-benchmarks.

config FUNCTION_GRAPH_TRACER
	bool "Kernel Function Graph Tracer"
	depends on HAVE_FUNCTION_GRAPH_TRACER
	depends on FUNCTION_TRACER
	depends on !X86_32 || !CC_OPTIMIZE_FOR_SIZE
	default y
	help
	  Enable the kernel to trace a function at both its return
	  and its entry.
	  Its first purpose is to trace the duration of functions and
	  draw a call graph for each thread with some information like
	  the return value. This is done by setting the current return
	  address on the current task structure into a stack of calls.


config PREEMPTIRQ_EVENTS
	bool "Enable trace events for preempt and irq disable/enable"
	select TRACE_IRQFLAGS
	depends on DEBUG_PREEMPT || !PROVE_LOCKING
	default n
	help
	  Enable tracing of disable and enable events for preemption and irqs.
	  For tracing preempt disable/enable events, DEBUG_PREEMPT must be
	  enabled. For tracing irq disable/enable events, PROVE_LOCKING must
	  be disabled.

config IRQSOFF_TRACER
	bool "Interrupts-off Latency Tracer"
	default n
	depends on TRACE_IRQFLAGS_SUPPORT
	depends on !ARCH_USES_GETTIMEOFFSET
	select TRACE_IRQFLAGS
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	select RING_BUFFER_ALLOW_SWAP
	select TRACER_SNAPSHOT
	select TRACER_SNAPSHOT_PER_CPU_SWAP
	help
	  This option measures the time spent in irqs-off critical
	  sections, with microsecond accuracy.

	  The default measurement method is a maximum search, which is
	  disabled by default and can be runtime (re-)started
	  via:

	      echo 0 > /sys/kernel/debug/tracing/tracing_max_latency

	  (Note that kernel size and overhead increase with this option
	  enabled. This option and the preempt-off timing option can be
	  used together or separately.)

config PREEMPT_TRACER
	bool "Preemption-off Latency Tracer"
	default n
	depends on !ARCH_USES_GETTIMEOFFSET
	depends on PREEMPT
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	select RING_BUFFER_ALLOW_SWAP
	select TRACER_SNAPSHOT
	select TRACER_SNAPSHOT_PER_CPU_SWAP
	help
	  This option measures the time spent in preemption-off critical
	  sections, with microsecond accuracy.

	  The default measurement method is a maximum search, which is
	  disabled by default and can be runtime (re-)started
	  via:

	      echo 0 > /sys/kernel/debug/tracing/tracing_max_latency

	  (Note that kernel size and overhead increase with this option
	  enabled. This option and the irqs-off timing option can be
	  used together or separately.)

config SCHED_TRACER
	bool "Scheduling Latency Tracer"
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	select TRACER_MAX_TRACE
	select TRACER_SNAPSHOT
	help
	  This tracer tracks the latency of the highest priority task
	  to be scheduled in, starting from the point it has woken up.

config HWLAT_TRACER
	bool "Tracer to detect hardware latencies (like SMIs)"
	select GENERIC_TRACER
	help
	  This tracer, when enabled will create one or more kernel threads,
	  depending on what the cpumask file is set to, which each thread
	  spinning in a loop looking for interruptions caused by
	  something other than the kernel. For example, if a
	  System Management Interrupt (SMI) takes a noticeable amount of
	  time, this tracer will detect it. This is useful for testing
	  if a system is reliable for Real Time tasks.

	  Some files are created in the tracing directory when this
	  is enabled:

	    hwlat_detector/width   - time in usecs for how long to spin for
	    hwlat_detector/window  - time in usecs between the start of each
				     iteration

	  A kernel thread is created that will spin with interrupts disabled
	  for "width" microseconds in every "window" cycle. It will not spin
	  for "window - width" microseconds, where the system can
	  continue to operate.

	  The output will appear in the trace and trace_pipe files.

	  When the tracer is not running, it has no effect on the system,
	  but when it is running, it can cause the system to be
	  periodically non responsive. Do not run this tracer on a
	  production system.

	  To enable this tracer, echo in "hwlat" into the current_tracer
	  file. Every time a latency is greater than tracing_thresh, it will
	  be recorded into the ring buffer.

config ENABLE_DEFAULT_TRACERS
	bool "Trace process context switches and events"
	depends on !GENERIC_TRACER
	select TRACING
	help
	  This tracer hooks to various trace points in the kernel,
	  allowing the user to pick and choose which trace point they
	  want to trace. It also includes the sched_switch tracer plugin.

config FTRACE_SYSCALLS
	bool "Trace syscalls"
	depends on HAVE_SYSCALL_TRACEPOINTS
	select GENERIC_TRACER
	select KALLSYMS
	help
	  Basic tracer to catch the syscall entry and exit events.

config TRACER_SNAPSHOT
	bool "Create a snapshot trace buffer"
	select TRACER_MAX_TRACE
	help
	  Allow tracing users to take snapshot of the current buffer using the
	  ftrace interface, e.g.:

	      echo 1 > /sys/kernel/debug/tracing/snapshot
	      cat snapshot

config TRACER_SNAPSHOT_PER_CPU_SWAP
	bool "Allow snapshot to swap per CPU"
	depends on TRACER_SNAPSHOT
	select RING_BUFFER_ALLOW_SWAP
	help
	  Allow doing a snapshot of a single CPU buffer instead of a
	  full swap (all buffers). If this is set, then the following is
	  allowed:

	      echo 1 > /sys/kernel/debug/tracing/per_cpu/cpu2/snapshot

	  After which, only the tracing buffer for CPU 2 was swapped with
	  the main tracing buffer, and the other CPU buffers remain the same.

	  When this is enabled, this adds a little more overhead to the
	  trace recording, as it needs to add some checks to synchronize
	  recording with swaps. But this does not affect the performance
	  of the overall system. This is enabled by default when the preempt
	  or irq latency tracers are enabled, as those need to swap as well
	  and already adds the overhead (plus a lot more).

config TRACE_BRANCH_PROFILING
	bool
	select GENERIC_TRACER

choice
	prompt "Branch Profiling"
	default BRANCH_PROFILE_NONE
	help
	  The branch profiling is a software profiler. It will add hooks
	  into the C conditionals to test which path a branch takes.

	  The likely/unlikely profiler only looks at the conditions that
	  are annotated with a likely or unlikely macro.

	  The "all branch" profiler will profile every if-statement in the
	  kernel. This profiler will also enable the likely/unlikely
	  profiler.

	  Either of the above profilers adds a bit of overhead to the system.
	  If unsure, choose "No branch profiling".

config BRANCH_PROFILE_NONE
	bool "No branch profiling"
	help
	  No branch profiling. Branch profiling adds a bit of overhead.
	  Only enable it if you want to analyse the branching behavior.
	  Otherwise keep it disabled.

config PROFILE_ANNOTATED_BRANCHES
	bool "Trace likely/unlikely profiler"
	select TRACE_BRANCH_PROFILING
	help
	  This tracer profiles all likely and unlikely macros
	  in the kernel. It will display the results in:

	  /sys/kernel/debug/tracing/trace_stat/branch_annotated

	  Note: this will add a significant overhead; only turn this
	  on if you need to profile the system's use of these macros.

config PROFILE_ALL_BRANCHES
	bool "Profile all if conditionals" if !FORTIFY_SOURCE
	select TRACE_BRANCH_PROFILING
	help
	  This tracer profiles all branch conditions. Every if ()
	  taken in the kernel is recorded whether it hit or miss.
	  The results will be displayed in:

	  /sys/kernel/debug/tracing/trace_stat/branch_all

	  This option also enables the likely/unlikely profiler.

	  This configuration, when enabled, will impose a great overhead
	  on the system. This should only be enabled when the system
	  is to be analyzed in much detail.
endchoice

config TRACING_BRANCHES
	bool
	help
	  Selected by tracers that will trace the likely and unlikely
	  conditions. This prevents the tracers themselves from being
	  profiled. Profiling the tracing infrastructure can only happen
	  when the likelys and unlikelys are not being traced.

config BRANCH_TRACER
	bool "Trace likely/unlikely instances"
	depends on TRACE_BRANCH_PROFILING
	select TRACING_BRANCHES
	help
	  This traces the events of likely and unlikely condition
	  calls in the kernel. The difference between this and the
	  "Trace likely/unlikely profiler" is that this is not a
	  histogram of the callers, but actually places the calling
	  events into a running trace buffer to see when and where the
	  events happened, as well as their results.

	  Say N if unsure.

config STACK_TRACER
	bool "Trace max stack"
	depends on HAVE_FUNCTION_TRACER
	select FUNCTION_TRACER
	select STACKTRACE
	select KALLSYMS
	help
	  This special tracer records the maximum stack footprint of the
	  kernel and displays it in /sys/kernel/debug/tracing/stack_trace.

	  This tracer works by hooking into every function call that the
	  kernel executes, and keeping a maximum stack depth value and
	  stack-trace saved. If this is configured with DYNAMIC_FTRACE
	  then it will not have any overhead while the stack tracer
	  is disabled.

	  To enable the stack tracer on bootup, pass in 'stacktrace'
	  on the kernel command line.

	  The stack tracer can also be enabled or disabled via the
	  sysctl kernel.stack_tracer_enabled

	  Say N if unsure.

config BLK_DEV_IO_TRACE
	bool "Support for tracing block IO actions"
	depends on SYSFS
	depends on BLOCK
	select RELAY
	select DEBUG_FS
	select TRACEPOINTS
	select GENERIC_TRACER
	select STACKTRACE
	help
	  Say Y here if you want to be able to trace the block layer actions
	  on a given queue. Tracing allows you to see any traffic happening
	  on a block device queue. For more information (and the userspace
	  support tools needed), fetch the blktrace tools from:

	  git://git.kernel.dk/blktrace.git

	  Tracing also is possible using the ftrace interface, e.g.:

	    echo 1 > /sys/block/sda/sda1/trace/enable
	    echo blk > /sys/kernel/debug/tracing/current_tracer
	    cat /sys/kernel/debug/tracing/trace_pipe

	  If unsure, say N.

config KPROBE_EVENTS
	depends on KPROBES
	depends on HAVE_REGS_AND_STACK_ACCESS_API
	bool "Enable kprobes-based dynamic events"
	select TRACING
	select PROBE_EVENTS
	default y
	help
	  This allows the user to add tracing events (similar to tracepoints)
	  on the fly via the ftrace interface. See
	  Documentation/trace/kprobetrace.txt for more details.

	  Those events can be inserted wherever kprobes can probe, and record
	  various register and memory values.

	  This option is also required by perf-probe subcommand of perf tools.
	  If you want to use perf tools, this option is strongly recommended.

config UPROBE_EVENTS
	bool "Enable uprobes-based dynamic events"
	depends on ARCH_SUPPORTS_UPROBES
	depends on MMU
	depends on PERF_EVENTS
	select UPROBES
	select PROBE_EVENTS
	select TRACING
	default y
	help
	  This allows the user to add tracing events on top of userspace
	  dynamic events (similar to tracepoints) on the fly via the trace
	  events interface. Those events can be inserted wherever uprobes
	  can probe, and record various registers.
	  This option is required if you plan to use perf-probe subcommand
	  of perf tools on user space applications.

config BPF_EVENTS
	depends on BPF_SYSCALL
	depends on (KPROBE_EVENTS || UPROBE_EVENTS) && PERF_EVENTS
	bool
	default y
	help
	  This allows the user to attach BPF programs to kprobe events.

config PROBE_EVENTS
	def_bool n

config DYNAMIC_FTRACE
	bool "enable/disable function tracing dynamically"
	depends on FUNCTION_TRACER
	depends on HAVE_DYNAMIC_FTRACE
	default y
	help
	  This option will modify all the calls to function tracing
	  dynamically (will patch them out of the binary image and
	  replace them with a No-Op instruction) on boot up. During
	  compile time, a table is made of all the locations that ftrace
	  can function trace, and this table is linked into the kernel
	  image. When this is enabled, functions can be individually
	  enabled, and the functions not enabled will not affect
	  performance of the system.

	  See the files in /sys/kernel/debug/tracing:

	    available_filter_functions
	    set_ftrace_filter
	    set_ftrace_notrace

	  This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but
	  otherwise has native performance as long as no tracing is active.

config DYNAMIC_FTRACE_WITH_REGS
	def_bool y
	depends on DYNAMIC_FTRACE
	depends on HAVE_DYNAMIC_FTRACE_WITH_REGS

config FUNCTION_PROFILER
	bool "Kernel function profiler"
	depends on FUNCTION_TRACER
	default n
	help
	  This option enables the kernel function profiler. A file is created
	  in debugfs called function_profile_enabled which defaults to zero.
	  When a 1 is echoed into this file profiling begins, and when a
	  zero is entered, profiling stops. A "functions" file is created in
	  the trace_stats directory; this file shows the list of functions that
	  have been hit and their counters.

	  If in doubt, say N.

config FTRACE_MCOUNT_RECORD
	def_bool y
	depends on DYNAMIC_FTRACE
	depends on HAVE_FTRACE_MCOUNT_RECORD

config FTRACE_SELFTEST
	bool

config FTRACE_STARTUP_TEST
	bool "Perform a startup test on ftrace"
	depends on GENERIC_TRACER
	select FTRACE_SELFTEST
	help
	  This option performs a series of startup tests on ftrace. On bootup
	  a series of tests are made to verify that the tracer is
	  functioning properly. It will do tests on all the configured
	  tracers of ftrace.

config EVENT_TRACE_TEST_SYSCALLS
	bool "Run selftest on syscall events"
	depends on FTRACE_STARTUP_TEST
	help
	  This option will also enable testing every syscall event.
	  It only enables the event and disables it and runs various loads
	  with the event enabled. This adds a bit more time for kernel boot
	  up since it runs this on every system call defined.

	  TBD - enable a way to actually call the syscalls as we test their
		events

config MMIOTRACE
	bool "Memory mapped IO tracing"
	depends on HAVE_MMIOTRACE_SUPPORT && PCI
	select GENERIC_TRACER
	help
	  Mmiotrace traces Memory Mapped I/O access and is meant for
	  debugging and reverse engineering. It is called from the ioremap
	  implementation and works via page faults. Tracing is disabled by
	  default and can be enabled at run-time.

	  See Documentation/trace/mmiotrace.txt.
	  If you are not helping to develop drivers, say N.

config TRACING_MAP
	bool
	depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
	help
	  tracing_map is a special-purpose lock-free map for tracing,
	  separated out as a stand-alone facility in order to allow it
	  to be shared between multiple tracers. It isn't meant to be
	  generally used outside of that context, and is normally
	  selected by tracers that use it.

config HIST_TRIGGERS
	bool "Histogram triggers"
	depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
	select TRACING_MAP
	select TRACING
	default n
	help
	  Hist triggers allow one or more arbitrary trace event fields
	  to be aggregated into hash tables and dumped to stdout by
	  reading a debugfs/tracefs file. They're useful for
	  gathering quick and dirty (though precise) summaries of
	  event activity as an initial guide for further investigation
	  using more advanced tools.

	  See Documentation/trace/events.txt.
	  If in doubt, say N.

config MMIOTRACE_TEST
	tristate "Test module for mmiotrace"
	depends on MMIOTRACE && m
	help
	  This is a dumb module for testing mmiotrace. It is very dangerous
	  as it will write garbage to IO memory starting at a given address.
	  However, it should be safe to use on e.g. unused portion of VRAM.

	  Say N, unless you absolutely know what you are doing.

config TRACEPOINT_BENCHMARK
	bool "Add tracepoint that benchmarks tracepoints"
	help
	  This option creates the tracepoint "benchmark:benchmark_event".
	  When the tracepoint is enabled, it kicks off a kernel thread that
	  goes into an infinite loop (calling cond_resched() to let other tasks
	  run), and calls the tracepoint. Each iteration will record the time
	  it took to write to the tracepoint and the next iteration that
	  data will be passed to the tracepoint itself. That is, the tracepoint
	  will report the time it took to do the previous tracepoint.
	  The string written to the tracepoint is a static string of 128 bytes
	  to keep the time the same. The initial string is simply a write of
	  "START". The second string records the cold cache time of the first
	  write which is not added to the rest of the calculations.

	  As it is a tight loop, it benchmarks as hot cache. That's fine because
	  we care most about hot paths that are probably in cache already.

	  An example of the output:

	       START
	       first=3672 [COLD CACHED]
	       last=632 first=3672 max=632 min=632 avg=316 std=446 std^2=199712
	       last=278 first=3672 max=632 min=278 avg=303 std=316 std^2=100337
	       last=277 first=3672 max=632 min=277 avg=296 std=258 std^2=67064
	       last=273 first=3672 max=632 min=273 avg=292 std=224 std^2=50411
	       last=273 first=3672 max=632 min=273 avg=288 std=200 std^2=40389
	       last=281 first=3672 max=632 min=273 avg=287 std=183 std^2=33666


config RING_BUFFER_BENCHMARK
	tristate "Ring buffer benchmark stress tester"
	depends on RING_BUFFER
	help
	  This option creates a test to stress the ring buffer and benchmark it.
	  It creates its own ring buffer such that it will not interfere with
	  any other users of the ring buffer (such as ftrace). It then creates
	  a producer and consumer that will run for 10 seconds and sleep for
	  10 seconds. Each interval it will print out the number of events
	  it recorded and give a rough estimate of how long each iteration took.

	  It does not disable interrupts or raise its priority, so it may be
	  affected by processes that are running.

	  If unsure, say N.

config RING_BUFFER_STARTUP_TEST
	bool "Ring buffer startup self test"
	depends on RING_BUFFER
	help
	  Run a simple self test on the ring buffer on boot up. Late in the
	  kernel boot sequence, the test will start that kicks off
	  a thread per cpu. Each thread will write various size events
	  into the ring buffer. Another thread is created to send IPIs
	  to each of the threads, where the IPI handler will also write
	  to the ring buffer, to test/stress the nesting ability.
	  If any anomalies are discovered, a warning will be displayed
	  and all ring buffers will be disabled.

	  The test runs for 10 seconds. This will slow your boot time
	  by at least 10 more seconds.

	  At the end of the test, statistics and more checks are done.
	  It will output the stats of each per cpu buffer. What
	  was written, the sizes, what was read, what was lost, and
	  other similar details.

	  If unsure, say N

config TRACE_EVAL_MAP_FILE
	bool "Show eval mappings for trace events"
	depends on TRACING
	help
	  The "print fmt" of the trace events will show the enum/sizeof names
	  instead of their values. This can cause problems for user space tools
	  that use this string to parse the raw data as user space does not know
	  how to convert the string to its value.

	  To fix this, there's a special macro in the kernel that can be used
	  to convert an enum/sizeof into its value. If this macro is used, then
	  the print fmt strings will be converted to their values.

	  If something does not get converted properly, this option can be
	  used to show what enums/sizeof the kernel tried to convert.

	  This option is for debugging the conversions. A file is created
	  in the tracing directory called "eval_map" that will show the
	  names matched with their values and what trace event system they
	  belong to.

	  Normally, the mapping of the strings to values will be freed after
	  boot up or module load. With this option, they will not be freed, as
	  they are needed for the "eval_map" file. Enabling this option will
	  increase the memory footprint of the running kernel.

	  If unsure, say N

config TRACING_EVENTS_GPIO
	bool "Trace gpio events"
	depends on GPIOLIB
	default y
	help
	  Enable tracing events for gpio subsystem

endif # FTRACE

endif # TRACING_SUPPORT