Searched refs:disabled (Results 1 – 19 of 19) sorted by relevance
/kernel/trace/
D | trace_sched_wakeup.c |
     72  long disabled;  in func_prolog_preempt_disable() local
     86  disabled = atomic_inc_return(&(*data)->disabled);  in func_prolog_preempt_disable()
     87  if (unlikely(disabled != 1))  in func_prolog_preempt_disable()
     93  atomic_dec(&(*data)->disabled);  in func_prolog_preempt_disable()
    138  atomic_dec(&data->disabled);  in wakeup_graph_entry()
    156  atomic_dec(&data->disabled);  in wakeup_graph_return()
    228  atomic_dec(&data->disabled);  in wakeup_tracer_call()
    436  long disabled;  in probe_wakeup_sched_switch() local
    459  disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);  in probe_wakeup_sched_switch()
    460  if (likely(disabled != 1))  in probe_wakeup_sched_switch()
    [all …]
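Note: the hits above (and in the tracer files that follow) all show one recurring idiom: each per-CPU buffer carries an atomic_t "disabled" counter, and a tracer callback records an event only when atomic_inc_return() reports it is the sole writer on that CPU, backing out otherwise. The following kernel-style sketch condenses that guard from func_prolog_preempt_disable(); the my_tracer_enter()/my_tracer_exit() helper names are hypothetical, and this is illustrative, not a drop-in replacement.

	/* Returns 1 when it is safe to record an event on this CPU, 0 when
	 * the callback is nested (recursion) or tracing is administratively
	 * disabled. "data" is the per-CPU trace data carrying the atomic_t
	 * disabled member (see the trace.h hit below). */
	static int my_tracer_enter(struct trace_array_cpu *data)
	{
		long disabled = atomic_inc_return(&data->disabled);

		if (unlikely(disabled != 1)) {
			atomic_dec(&data->disabled);	/* undo our increment */
			return 0;			/* skip this event */
		}
		return 1;
	}

	static void my_tracer_exit(struct trace_array_cpu *data)
	{
		atomic_dec(&data->disabled);		/* allow the next event */
	}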
D | trace_irqsoff.c |
    103  long disabled;  in func_prolog_dec() local
    126  disabled = atomic_inc_return(&(*data)->disabled);  in func_prolog_dec()
    128  if (likely(disabled == 1))  in func_prolog_dec()
    131  atomic_dec(&(*data)->disabled);  in func_prolog_dec()
    155  atomic_dec(&data->disabled);  in irqsoff_tracer_call()
    203  atomic_dec(&data->disabled);  in irqsoff_graph_entry()
    222  atomic_dec(&data->disabled);  in irqsoff_graph_return()
    388  if (unlikely(!data) || atomic_read(&data->disabled))  in start_critical_timing()
    391  atomic_inc(&data->disabled);  in start_critical_timing()
    401  atomic_dec(&data->disabled);  in start_critical_timing()
    [all …]
D | trace_functions.c |
    193  if (!atomic_read(&data->disabled))  in function_trace_call()
    225  long disabled;  in function_stack_trace_call() local
    239  disabled = atomic_inc_return(&data->disabled);  in function_stack_trace_call()
    241  if (likely(disabled == 1)) {  in function_stack_trace_call()
    247  atomic_dec(&data->disabled);  in function_stack_trace_call()
    305  if (atomic_read(&data->disabled))  in function_no_repeats_trace_call()
    339  long disabled;  in function_stack_no_repeats_trace_call() local
    353  disabled = atomic_inc_return(&data->disabled);  in function_stack_no_repeats_trace_call()
    355  if (likely(disabled == 1)) {  in function_stack_no_repeats_trace_call()
    368  atomic_dec(&data->disabled);  in function_stack_no_repeats_trace_call()
D | trace_functions_graph.c |
    132  long disabled;  in trace_graph_entry() local
    174  disabled = atomic_inc_return(&data->disabled);  in trace_graph_entry()
    175  if (likely(disabled == 1)) {  in trace_graph_entry()
    182  atomic_dec(&data->disabled);  in trace_graph_entry()
    241  long disabled;  in trace_graph_return() local
    254  disabled = atomic_inc_return(&data->disabled);  in trace_graph_return()
    255  if (likely(disabled == 1)) {  in trace_graph_return()
    259  atomic_dec(&data->disabled);  in trace_graph_return()
D | trace_kdb.c |
    127  atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);  in kdb_ftdump()
    142  atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);  in kdb_ftdump()
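Note: trace_kdb.c shows the counter's other use. Instead of the inc_return == 1 recursion check, kdb_ftdump() raises every CPU's counter so the fast-path atomic_read() checks (see function_trace_call() above) reject new events while the debugger walks the buffer. A hedged sketch of that quiesce/resume bracket, with names taken from the hits above:

	int cpu;

	/* Quiesce: any non-zero counter makes the write paths bail out. */
	for_each_tracing_cpu(cpu)
		atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);

	/* ... walk and print the ring buffer without racing new writers ... */

	/* Resume: drop the counters back to their previous values. */
	for_each_tracing_cpu(cpu)
		atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);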
D | Kconfig |
    170  tracing is enabled by the administrator. If it's runtime disabled
    254  is disabled.
    259  The stack tracer can also be enabled or disabled via the
    267  Enables hooks which will be called when preemption is first disabled,
    285  disabled by default and can be runtime (re-)started
    309  disabled by default and can be runtime (re-)started
    348  A kernel thread is created that will spin with interrupts disabled
    434  implementation and works via page faults. Tracing is disabled by
    514  Otherwise keep it disabled.
    931  and all ring buffers will be disabled.
    [all …]
D | trace_branch.c | 59 if (atomic_read(&data->disabled)) in probe_likely_condition()
D | trace_events_synth.c |
    1642  trace_state->disabled = true;  in __synth_event_trace_init()
    1967  if (trace_state->disabled)  in __synth_event_add_val()
D | trace_events.c |
    4013  long disabled;  in function_test_events_call() local
    4019  disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));  in function_test_events_call()
    4021  if (disabled != 1)  in function_test_events_call()
D | trace.h | 161 atomic_t disabled; member
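Note: the single trace.h hit is the declaration all the tracer callbacks above are poking at; in the real tree it sits inside the per-CPU struct trace_array_cpu that tr->array_buffer.data points to. A trimmed, illustrative sketch (surrounding members elided):

	struct trace_array_cpu {
		atomic_t	disabled;	/* non-zero: drop events on this CPU */
		/* ... per-CPU buffer pointers, latency bookkeeping, etc. ... */
	};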
D | trace.c |
     5207  atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);  in tracing_set_cpumask()
     5215  atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);  in tracing_set_cpumask()
    10012  atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);  in ftrace_dump()
    10088  atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);  in ftrace_dump()
D | ftrace.c |
    402  atomic_t disabled;  member
    728  if (atomic_inc_return(&stat->disabled) != 1)  in ftrace_profile_alloc()
    750  atomic_dec(&stat->disabled);  in ftrace_profile_alloc()
/kernel/events/
D | hw_breakpoint.c |
    474  to->disabled = from->disabled;  in hw_breakpoint_copy_attr()
    531  if (!bp->attr.disabled)  in modify_user_hw_breakpoint()
D | core.c |
    1897  event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF :  in perf_event__state_init()
    3262  if (!bp->attr.disabled)  in perf_event_modify_breakpoint()
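Note: the two perf hits are the kernel side of a user-visible contract: an event created with perf_event_attr.disabled = 1 starts in PERF_EVENT_STATE_OFF (core.c line 1897 above) and counts nothing until it is explicitly enabled; the watchdog_hld.c hit below uses the same initialize-disabled idiom in-kernel. A minimal userspace sketch of that flow, error handling trimmed:

	#include <linux/perf_event.h>
	#include <sys/ioctl.h>
	#include <sys/syscall.h>
	#include <unistd.h>
	#include <string.h>
	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		struct perf_event_attr attr;
		uint64_t count = 0;
		int fd;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = PERF_TYPE_HARDWARE;
		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
		attr.disabled = 1;		/* start in PERF_EVENT_STATE_OFF */
		attr.exclude_kernel = 1;

		/* Count this thread's user-space instructions on any CPU. */
		fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
		if (fd < 0)
			return 1;

		ioctl(fd, PERF_EVENT_IOC_RESET, 0);
		ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);	/* leave the OFF state */
		/* ... region of interest ... */
		ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

		if (read(fd, &count, sizeof(count)) == sizeof(count))
			printf("instructions: %llu\n", (unsigned long long)count);
		close(fd);
		return 0;
	}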
/kernel/
D | watchdog_hld.c | 106 .disabled = 1,
D | Kconfig.preempt | 117 SCHED_CORE is default disabled. When it is enabled and unused,
/kernel/dma/
D | Kconfig |
    158  Memory Allocator. If the size of 0 is selected, CMA is disabled by
    170  If 0 percent is selected, CMA is disabled by default, but it can be
/kernel/rcu/
D | Kconfig.debug | 19 false-positive splats, we keep it default disabled but once all
D | Kconfig | 225 This option is disabled by default on PREEMPT_RT=y kernels which