
Searched refs:disabled (Results 1 – 19 of 19) sorted by relevance

/kernel/trace/
trace_sched_wakeup.c
72 long disabled; in func_prolog_preempt_disable() local
86 disabled = atomic_inc_return(&(*data)->disabled); in func_prolog_preempt_disable()
87 if (unlikely(disabled != 1)) in func_prolog_preempt_disable()
93 atomic_dec(&(*data)->disabled); in func_prolog_preempt_disable()
138 atomic_dec(&data->disabled); in wakeup_graph_entry()
156 atomic_dec(&data->disabled); in wakeup_graph_return()
228 atomic_dec(&data->disabled); in wakeup_tracer_call()
437 long disabled; in probe_wakeup_sched_switch() local
460 disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled); in probe_wakeup_sched_switch()
461 if (likely(disabled != 1)) in probe_wakeup_sched_switch()
[all …]
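
The hits above all follow one idiom: atomic_inc_return() on the per-CPU disabled counter, trace only when the result is exactly 1 (the buffer is neither administratively disabled nor already in use on this CPU), then atomic_dec() on every exit path. Below is a minimal userspace C11 model of that guard; cpu_data and tracer_callback() are hypothetical stand-ins for the kernel's per-CPU trace_array_cpu and the tracer callback.

/* Userspace C11 model of the guard in func_prolog_preempt_disable():
 * an inc_return result of 1 means this context is the sole user of the
 * per-CPU buffer; anything else means tracing is administratively
 * disabled or we re-entered, so the event is skipped. The increment is
 * balanced on every path, exactly as in the hits above. */
#include <stdatomic.h>
#include <stdio.h>

struct cpu_data {
	atomic_long disabled;           /* models trace_array_cpu::disabled */
};

static void tracer_callback(struct cpu_data *data)
{
	/* models: disabled = atomic_inc_return(&(*data)->disabled); */
	long disabled = atomic_fetch_add(&data->disabled, 1) + 1;

	if (disabled == 1)              /* the kernel wraps this in (un)likely() */
		printf("event recorded\n");

	atomic_fetch_sub(&data->disabled, 1);   /* atomic_dec() on exit */
}

int main(void)
{
	struct cpu_data data = { .disabled = 0 };

	tracer_callback(&data);                 /* records: counter went 0 -> 1 */

	atomic_fetch_add(&data.disabled, 1);    /* someone disabled this CPU */
	tracer_callback(&data);                 /* skipped: counter went 1 -> 2 */
	return 0;
}
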
trace_irqsoff.c
103 long disabled; in func_prolog_dec() local
126 disabled = atomic_inc_return(&(*data)->disabled); in func_prolog_dec()
128 if (likely(disabled == 1)) in func_prolog_dec()
131 atomic_dec(&(*data)->disabled); in func_prolog_dec()
155 atomic_dec(&data->disabled); in irqsoff_tracer_call()
203 atomic_dec(&data->disabled); in irqsoff_graph_entry()
222 atomic_dec(&data->disabled); in irqsoff_graph_return()
388 if (unlikely(!data) || atomic_read(&data->disabled)) in start_critical_timing()
391 atomic_inc(&data->disabled); in start_critical_timing()
401 atomic_dec(&data->disabled); in start_critical_timing()
[all …]
trace_functions.c
192 if (!atomic_read(&data->disabled)) in function_trace_call()
223 long disabled; in function_stack_trace_call() local
237 disabled = atomic_inc_return(&data->disabled); in function_stack_trace_call()
239 if (likely(disabled == 1)) { in function_stack_trace_call()
245 atomic_dec(&data->disabled); in function_stack_trace_call()
301 if (atomic_read(&data->disabled)) in function_no_repeats_trace_call()
334 long disabled; in function_stack_no_repeats_trace_call() local
348 disabled = atomic_inc_return(&data->disabled); in function_stack_no_repeats_trace_call()
350 if (likely(disabled == 1)) { in function_stack_no_repeats_trace_call()
363 atomic_dec(&data->disabled); in function_stack_no_repeats_trace_call()
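
trace_functions.c shows both gating styles side by side: function_trace_call() and function_no_repeats_trace_call() only observe the flag with atomic_read(), which is cheap; the _stack_ variants do extra work (a stack snapshot) and therefore claim the counter with atomic_inc_return() so that work cannot recurse into itself. A userspace C11 sketch of the contrast, with atomic_long standing in for the kernel's atomic_t:

/* read_only_gate() mirrors function_trace_call(): just observe the flag.
 * claim_gate()/release_gate() mirror function_stack_trace_call(): take
 * ownership of the counter for the duration of the extra work. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_long disabled;

static bool read_only_gate(void)
{
	return atomic_load(&disabled) == 0;      /* !atomic_read(&data->disabled) */
}

static bool claim_gate(void)
{
	return atomic_fetch_add(&disabled, 1) + 1 == 1;  /* inc_return == 1 */
}

static void release_gate(void)
{
	atomic_fetch_sub(&disabled, 1);          /* atomic_dec(&data->disabled) */
}

int main(void)
{
	if (read_only_gate())
		printf("plain event recorded\n");

	if (claim_gate())
		printf("event plus stack trace recorded\n");
	release_gate();
	return 0;
}
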
trace_functions_graph.c
132 long disabled; in trace_graph_entry() local
174 disabled = atomic_inc_return(&data->disabled); in trace_graph_entry()
175 if (likely(disabled == 1)) { in trace_graph_entry()
182 atomic_dec(&data->disabled); in trace_graph_entry()
241 long disabled; in trace_graph_return() local
254 disabled = atomic_inc_return(&data->disabled); in trace_graph_return()
255 if (likely(disabled == 1)) { in trace_graph_return()
259 atomic_dec(&data->disabled); in trace_graph_return()
trace_kdb.c
127 atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled); in kdb_ftdump()
142 atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled); in kdb_ftdump()
Kconfig
201 tracing is enabled by the administrator. If it's runtime disabled
301 is disabled.
306 The stack tracer can also be enabled or disabled via the
314 Enables hooks which will be called when preemption is first disabled,
332 disabled by default and can be runtime (re-)started
356 disabled by default and can be runtime (re-)started
395 A kernel thread is created that will spin with interrupts disabled
481 implementation and works via page faults. Tracing is disabled by
561 Otherwise keep it disabled.
1008 and all ring buffers will be disabled.
[all …]
trace_branch.c
59 if (atomic_read(&data->disabled)) in probe_likely_condition()
trace_events_synth.c
1662 trace_state->disabled = true; in __synth_event_trace_init()
1987 if (trace_state->disabled) in __synth_event_add_val()
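
trace_events_synth.c uses a plain bool rather than a counter: __synth_event_trace_init() latches trace_state->disabled when the synthetic event is not enabled, and every later __synth_event_add_val() in the batch returns without touching the ring buffer. A hedged kernel-side sketch of the batched API that carries that state struct, assuming the in-kernel synthetic event generation interface described in Documentation/trace/events.rst; my_synth_file is a hypothetical trace_event_file pointer for an already-created synthetic event:

/* Sketch only: if the event is disabled, synth_event_trace_start() sets
 * trace_state.disabled and the add calls below become no-ops. */
#include <linux/types.h>
#include <linux/trace_events.h>

static int emit_sample(struct trace_event_file *my_synth_file, u64 a, u64 b)
{
	struct synth_event_trace_state trace_state;
	int ret;

	ret = synth_event_trace_start(my_synth_file, &trace_state);
	if (ret)
		return ret;

	synth_event_add_next_val(a, &trace_state);  /* skipped when disabled */
	synth_event_add_next_val(b, &trace_state);

	return synth_event_trace_end(&trace_state);
}
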
trace_events.c
4028 long disabled; in function_test_events_call() local
4034 disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu)); in function_test_events_call()
4036 if (disabled != 1) in function_test_events_call()
trace.h
163 atomic_t disabled; member
trace.c
5220 atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled); in tracing_set_cpumask()
5228 atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled); in tracing_set_cpumask()
10100 atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled); in ftrace_dump()
10176 atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled); in ftrace_dump()
ftrace.c
399 atomic_t disabled; member
725 if (atomic_inc_return(&stat->disabled) != 1) in ftrace_profile_alloc()
747 atomic_dec(&stat->disabled); in ftrace_profile_alloc()
/kernel/events/
hw_breakpoint.c
785 to->disabled = from->disabled; in hw_breakpoint_copy_attr()
842 if (!bp->attr.disabled) in modify_user_hw_breakpoint()
core.c
1811 event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF : in perf_event__state_init()
3210 if (!bp->attr.disabled) in perf_event_modify_breakpoint()
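
The /kernel/events/ hits concern a different disabled entirely: the user-visible perf_event_attr.disabled flag, which perf_event__state_init() maps to PERF_EVENT_STATE_OFF and which the breakpoint paths copy and honor on modification. A runnable userspace example that opens a hardware counter disabled and enables it only around the measured region (perf_event_open(2) has no glibc wrapper, hence the raw syscall):

/* Create an instruction counter born in PERF_EVENT_STATE_OFF via
 * attr.disabled = 1, then toggle it with the enable/disable ioctls. */
#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.disabled = 1;                   /* start off, as in core.c:1811 */
	attr.exclude_kernel = 1;

	fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);     /* state -> ACTIVE */
	/* ... measured region ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("instructions: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}
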
/kernel/
watchdog_hld.c
106 .disabled = 1,
Kconfig.preempt
132 SCHED_CORE is default disabled. When it is enabled and unused,
/kernel/dma/
Kconfig
153 Memory Allocator. If the size of 0 is selected, CMA is disabled by
165 If 0 percent is selected, CMA is disabled by default, but it can be
/kernel/rcu/
Kconfig.debug
19 false-positive splats, we keep it default disabled but once all
Kconfig
233 This option is disabled by default on PREEMPT_RT=y kernels which