/kernel/trace/
trace_irqsoff.c
    45: preempt_trace(int pc)  (argument in preempt_trace())
    47: return ((trace_type & TRACER_PREEMPT_OFF) && pc);  (in preempt_trace())
    50: # define preempt_trace(pc) (0)  (argument)
   181: int pc;  (local in irqsoff_graph_entry())
   198: pc = preempt_count();  (in irqsoff_graph_entry())
   199: ret = __trace_graph_entry(tr, trace, flags, pc);  (in irqsoff_graph_entry())
   210: int pc;  (local in irqsoff_graph_return())
   217: pc = preempt_count();  (in irqsoff_graph_return())
   218: __trace_graph_return(tr, trace, flags, pc);  (in irqsoff_graph_return())
   270: unsigned long flags, int pc)  (argument in __trace_function())
   [all …]

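The preempt_trace() rows above show a common kernel idiom: under CONFIG_PREEMPT_TRACER the predicate is a real function that requires both the preempt-off tracer mode and a non-zero preempt count, and without it the name collapses to a constant macro so call sites need no #ifdef. Below is a minimal userspace sketch of that pattern; the TRACER_PREEMPT_OFF value and all scaffolding are assumptions for illustration, not the kernel's definitions.

    #include <stdio.h>

    #define TRACER_PREEMPT_OFF 0x2   /* flag value assumed for the sketch */

    #ifdef CONFIG_PREEMPT_TRACER
    static int trace_type = TRACER_PREEMPT_OFF;

    /* Real predicate: report a hit only when the tracer wants preempt-off
     * events and preemption was actually disabled (pc != 0). */
    static int preempt_trace(int pc)
    {
        return (trace_type & TRACER_PREEMPT_OFF) && pc;
    }
    #else
    /* Config off: callers compile unchanged, the whole test folds to 0. */
    # define preempt_trace(pc) (0)
    #endif

    int main(void)
    {
        printf("pc=0 -> %d\n", preempt_trace(0));
        printf("pc=1 -> %d\n", preempt_trace(1));
        return 0;
    }

Compiled plain, both calls print 0; compiled with -DCONFIG_PREEMPT_TRACER, the second call prints 1.
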
trace_sched_wakeup.c
    70: int *pc)  (argument in func_prolog_preempt_disable())
    78: *pc = preempt_count();  (in func_prolog_preempt_disable())
   120: int pc, ret = 0;  (local in wakeup_graph_entry())
   134: if (!func_prolog_preempt_disable(tr, &data, &pc))  (in wakeup_graph_entry())
   138: ret = __trace_graph_entry(tr, trace, flags, pc);  (in wakeup_graph_entry())
   150: int pc;  (local in wakeup_graph_return())
   154: if (!func_prolog_preempt_disable(tr, &data, &pc))  (in wakeup_graph_return())
   158: __trace_graph_return(tr, trace, flags, pc);  (in wakeup_graph_return())
   220: int pc;  (local in wakeup_tracer_call())
   222: if (!func_prolog_preempt_disable(tr, &data, &pc))  (in wakeup_tracer_call())
   [all …]

trace_mmiotrace.c
   186: rw->value, rw->pc, 0);  (in mmio_print_rw())
   193: rw->value, rw->pc, 0);  (in mmio_print_rw())
   202: (rw->value >> 0) & 0xff, rw->pc, 0);  (in mmio_print_rw())
   303: int pc = preempt_count();  (local in __trace_mmiotrace_rw())
   306: sizeof(*entry), 0, pc);  (in __trace_mmiotrace_rw())
   315: trace_buffer_unlock_commit(tr, buffer, event, 0, pc);  (in __trace_mmiotrace_rw())
   333: int pc = preempt_count();  (local in __trace_mmiotrace_map())
   336: sizeof(*entry), 0, pc);  (in __trace_mmiotrace_map())
   345: trace_buffer_unlock_commit(tr, buffer, event, 0, pc);  (in __trace_mmiotrace_map())

trace_functions.c
   133: int pc;  (local in function_trace_call())
   138: pc = preempt_count();  (in function_trace_call())
   149: trace_function(tr, ip, parent_ip, flags, pc);  (in function_trace_call())
   184: int pc;  (local in function_stack_trace_call())
   199: pc = preempt_count();  (in function_stack_trace_call())
   200: trace_function(tr, ip, parent_ip, flags, pc);  (in function_stack_trace_call())
   201: __trace_stack(tr, flags, STACK_SKIP, pc);  (in function_stack_trace_call())
   405: int pc;  (local in trace_stack())
   408: pc = preempt_count();  (in trace_stack())
   410: __trace_stack(tr, flags, FTRACE_STACK_SKIP, pc);  (in trace_stack())

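A detail worth noting in function_stack_trace_call() above: preempt_count() is sampled once into pc (line 199) and the same snapshot feeds both trace_function() and __trace_stack() (lines 200 and 201), so the function record and its stack record describe the same context. A hedged userspace sketch of that snapshot-once idiom follows; every name in it is an invented stand-in, not the kernel API.

    #include <stdio.h>

    /* Stand-in for preempt_count(): any per-CPU context counter works. */
    static int fake_preempt_count(void) { return 0x100; }

    /* Two independent consumers of the same snapshot. */
    static void record_function(unsigned long ip, int pc)
    {
        printf("func event: ip=0x%lx pc=0x%x\n", ip, (unsigned int)pc);
    }

    static void record_stack(int skip, int pc)
    {
        printf("stack event: skip=%d pc=0x%x\n", skip, (unsigned int)pc);
    }

    static void trace_hit(unsigned long ip)
    {
        /* Sample once; both records must describe the same moment. */
        int pc = fake_preempt_count();

        record_function(ip, pc);
        record_stack(3, pc);    /* 3 plays the role of STACK_SKIP */
    }

    int main(void)
    {
        trace_hit(0xdeadbeefUL);
        return 0;
    }

Re-reading the counter before each sink could race with an interrupt arriving in between, leaving the two records disagreeing about the context of the hit.
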
trace_functions_graph.c
   100: int pc)  (argument in __trace_graph_entry())
   108: sizeof(*entry), flags, pc);  (in __trace_graph_entry())
   135: int pc;  (local in trace_graph_entry())
   177: pc = preempt_count();  (in trace_graph_entry())
   178: ret = __trace_graph_entry(tr, trace, flags, pc);  (in trace_graph_entry())
   191: unsigned long ip, unsigned long flags, int pc)  (argument in __trace_graph_function())
   205: __trace_graph_entry(tr, &ent, flags, pc);  (in __trace_graph_function())
   206: __trace_graph_return(tr, &ret, flags, pc);  (in __trace_graph_function())
   212: unsigned long flags, int pc)  (argument in trace_graph_function())
   214: __trace_graph_function(tr, ip, flags, pc);  (in trace_graph_function())
   [all …]

trace.h
   705: int pc);
   730: unsigned long flags, int pc);
   734: unsigned long flags, int pc);
   791: int pc);
   794: int skip, int pc)  (argument in __trace_stack())
   929: unsigned long flags, int pc);
   932: unsigned long flags, int pc);
  1358: unsigned long flags, int pc,
  1364: unsigned long flags, int pc)  (argument in trace_buffer_unlock_commit())
  1366: trace_buffer_unlock_commit_regs(tr, buffer, event, flags, pc, NULL);  (in trace_buffer_unlock_commit())
   [all …]

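Lines 1364 and 1366 above show trace_buffer_unlock_commit() as a static inline that simply forwards to trace_buffer_unlock_commit_regs() with a NULL pt_regs, keeping the common no-registers path terse. A small sketch of that thin-wrapper layering, with invented types standing in for the tracing structures:

    #include <stddef.h>
    #include <stdio.h>

    struct regs { unsigned long ip; };   /* stand-in for pt_regs */

    /* The full-featured primitive takes an optional register snapshot. */
    static void commit_regs(int event, unsigned long flags, int pc,
                            struct regs *regs)
    {
        printf("commit event=%d flags=0x%lx pc=%d regs=%s\n",
               event, flags, pc, regs ? "yes" : "none");
    }

    /* Thin inline wrapper: the common case passes NULL for regs. */
    static inline void commit(int event, unsigned long flags, int pc)
    {
        commit_regs(event, flags, pc, NULL);
    }

    int main(void)
    {
        struct regs r = { .ip = 0x1234 };

        commit(1, 0, 0);              /* common path, no registers */
        commit_regs(2, 0, 0, &r);     /* caller that has registers */
        return 0;
    }
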
trace_syscalls.c
   318: int pc;  (local in ftrace_syscall_enter())
   341: pc = preempt_count();  (in ftrace_syscall_enter())
   345: sys_data->enter_event->event.type, size, irq_flags, pc);  (in ftrace_syscall_enter())
   355: irq_flags, pc);  (in ftrace_syscall_enter())
   367: int pc;  (local in ftrace_syscall_exit())
   387: pc = preempt_count();  (in ftrace_syscall_exit())
   392: irq_flags, pc);  (in ftrace_syscall_exit())
   401: irq_flags, pc);  (in ftrace_syscall_exit())

trace.c
   164: unsigned long flags, int pc);
   737: int skip, int pc, struct pt_regs *regs);
   741: int skip, int pc, struct pt_regs *regs);
   746: int skip, int pc, struct pt_regs *regs)  (argument in __ftrace_trace_stack())
   752: int skip, int pc, struct pt_regs *regs)  (argument in ftrace_trace_stack())
   760: int type, unsigned long flags, int pc)  (argument in trace_event_setup())
   764: tracing_generic_entry_update(ent, type, flags, pc);  (in trace_event_setup())
   771: unsigned long flags, int pc)  (argument in __trace_buffer_lock_reserve())
   777: trace_event_setup(event, type, flags, pc);  (in __trace_buffer_lock_reserve())
   840: int pc;  (local in __trace_puts())
   [all …]

trace_branch.c
    40: int pc;  (local in probe_likely_condition())
    62: pc = preempt_count();  (in probe_likely_condition())
    65: sizeof(*entry), flags, pc);  (in probe_likely_condition())

trace_hwlat.c
   111: int pc;  (local in trace_hwlat_sample())
   113: pc = preempt_count();  (in trace_hwlat_sample())
   117: flags, pc);  (in trace_hwlat_sample())

trace_entries.h
   285: __field_desc( unsigned long, rw, pc )
   292: (unsigned long)__entry->phys, __entry->value, __entry->pc,

trace_kprobe.c
  1180: int size, dsize, pc;  (local in NOKPROBE_SYMBOL())
  1190: pc = preempt_count();  (in NOKPROBE_SYMBOL())
  1197: size, irq_flags, pc);  (in NOKPROBE_SYMBOL())
  1206: entry, irq_flags, pc, regs);  (in NOKPROBE_SYMBOL())
  1228: int size, pc, dsize;  (local in __kretprobe_trace_func())
  1238: pc = preempt_count();  (in __kretprobe_trace_func())
  1245: size, irq_flags, pc);  (in __kretprobe_trace_func())
  1255: entry, irq_flags, pc, regs);  (in __kretprobe_trace_func())

blktrace.c
    73: int pc = 0;  (local in trace_note())
    80: pc = preempt_count();  (in trace_note())
    83: 0, pc);  (in trace_note())
   108: trace_buffer_unlock_commit(blk_tr, buffer, event, 0, pc);  (in trace_note())
   224: int cpu, pc = 0;  (local in __blk_add_trace())
   253: pc = preempt_count();  (in __blk_add_trace())
   256: 0, pc);  (in __blk_add_trace())
   302: trace_buffer_unlock_commit(blk_tr, buffer, event, 0, pc);  (in __blk_add_trace())

trace_event_perf.c
   424: int pc = preempt_count();  (local in perf_trace_buf_update())
   428: tracing_generic_entry_update(entry, type, flags, pc);  (in perf_trace_buf_update())

trace_events.c
   257: fbuffer->pc = preempt_count();  (in trace_event_buffer_reserve())
   265: fbuffer->pc--;  (in trace_event_buffer_reserve())
   271: fbuffer->flags, fbuffer->pc);  (in trace_event_buffer_reserve())
  3367: int pc;  (local in function_test_events_call())
  3369: pc = preempt_count();  (in function_test_events_call())
  3381: flags, pc);  (in function_test_events_call())
  3389: entry, flags, pc);  (in function_test_events_call())

ring_buffer.c
  2695: unsigned long pc = preempt_count();  (local in trace_recursive_lock())
  2698: if (!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))  (in trace_recursive_lock())
  2701: bit = pc & NMI_MASK ? RB_CTX_NMI :  (in trace_recursive_lock())
  2702: pc & HARDIRQ_MASK ? RB_CTX_IRQ : RB_CTX_SOFTIRQ;  (in trace_recursive_lock())

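trace_recursive_lock() (lines 2695..2702 above) derives the current execution context purely from preempt count bits: any of NMI_MASK, HARDIRQ_MASK or SOFTIRQ_OFFSET set means interrupt context, and the ternary chain picks the highest-priority one. The sketch below reconstructs that decoding as a runnable program; the mask values follow the generic preempt_count layout but are assumptions here:

    #include <stdio.h>

    /* preempt_count bit layout (values assumed for the sketch). */
    #define SOFTIRQ_OFFSET  0x00000100UL
    #define HARDIRQ_MASK    0x000f0000UL
    #define NMI_MASK        0x00100000UL

    enum ctx { RB_CTX_NMI, RB_CTX_IRQ, RB_CTX_SOFTIRQ, RB_CTX_NORMAL };

    static enum ctx classify(unsigned long pc)
    {
        /* No interrupt bits set: ordinary process context. */
        if (!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
            return RB_CTX_NORMAL;

        /* Highest-priority context wins: NMI, then hardirq, then softirq. */
        return pc & NMI_MASK     ? RB_CTX_NMI :
               pc & HARDIRQ_MASK ? RB_CTX_IRQ : RB_CTX_SOFTIRQ;
    }

    int main(void)
    {
        static const char *name[] = { "nmi", "irq", "softirq", "normal" };

        printf("%s\n", name[classify(0)]);                 /* normal  */
        printf("%s\n", name[classify(SOFTIRQ_OFFSET)]);    /* softirq */
        printf("%s\n", name[classify(0x00010000UL)]);      /* irq     */
        printf("%s\n", name[classify(NMI_MASK)]);          /* nmi     */
        return 0;
    }

The real function goes on to use the computed bit for per-context recursion detection; the sketch stops at the classification itself.
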
/kernel/
profile.c
    36: u32 pc, hits;  (member)
   259: if (hits[i].pc)  (in profile_flip_buffers())
   260: hits[i].pc = 0;  (in profile_flip_buffers())
   263: atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);  (in profile_flip_buffers())
   264: hits[i].hits = hits[i].pc = 0;  (in profile_flip_buffers())
   287: unsigned long primary, secondary, flags, pc = (unsigned long)__pc;  (local in do_profile_hits())
   291: pc = min((pc - (unsigned long)_stext) >> prof_shift, prof_len - 1);  (in do_profile_hits())
   292: i = primary = (pc & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;  (in do_profile_hits())
   293: secondary = (~(pc << 1) & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;  (in do_profile_hits())
   308: if (hits[i + j].pc == pc) {  (in do_profile_hits())
   [all …]

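do_profile_hits() (lines 287..308 above) folds a raw program counter into a bounded bucket index (offset from _stext, scaled by prof_shift, clamped to prof_len - 1), then derives primary and secondary hash groups so colliding PCs get a second set of slots to land in. A runnable sketch of that arithmetic, with small assumed constants standing in for the kernel's values:

    #include <stdio.h>

    #define PROF_SHIFT        2      /* granularity: 4-byte buckets (assumed) */
    #define PROF_LEN          1024   /* number of profile buckets (assumed)   */
    #define NR_PROFILE_GRP    64     /* hash groups per flip buffer (assumed) */
    #define PROFILE_GRPSHIFT  3      /* slots per group = 1 << 3              */

    static unsigned long stext = 0x400000UL;   /* stand-in for _stext */

    static unsigned long min_ul(unsigned long a, unsigned long b)
    {
        return a < b ? a : b;
    }

    int main(void)
    {
        unsigned long pc = 0x401234UL;   /* sampled program counter */
        unsigned long primary, secondary;

        /* Scale the text offset into a bucket index, clamped in range. */
        pc = min_ul((pc - stext) >> PROF_SHIFT, PROF_LEN - 1);

        /* Two candidate groups: a primary hash and a cheap secondary,
         * giving colliding PCs an alternate set of slots. */
        primary   = (pc & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
        secondary = (~(pc << 1) & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;

        printf("bucket=%lu primary=%lu secondary=%lu\n",
               pc, primary, secondary);
        return 0;
    }
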
seccomp.c
   176: int pc;  (local in seccomp_check_filter())
   177: for (pc = 0; pc < flen; pc++) {  (in seccomp_check_filter())
   178: struct sock_filter *ftest = &filter[pc];  (in seccomp_check_filter())

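In seccomp_check_filter() (lines 176..178 above), pc is not a hardware program counter at all; it is the index of the classic-BPF instruction being validated. Below is a sketch of that per-instruction walk. The struct layout mirrors sock_filter from <linux/filter.h>, but the allow/deny rule is a made-up placeholder, not the real seccomp check:

    #include <stdint.h>
    #include <stdio.h>

    /* Same shape as struct sock_filter in <linux/filter.h>. */
    struct sock_filter {
        uint16_t code;
        uint8_t  jt;
        uint8_t  jf;
        uint32_t k;
    };

    #define BPF_LD  0x00   /* classic BPF load instruction class   */
    #define BPF_RET 0x06   /* classic BPF return instruction class */

    /* Walk every instruction; pc here is just an array index.
     * The whitelist below is illustrative, not the seccomp rule set. */
    static int check_filter(struct sock_filter *filter, int flen)
    {
        int pc;

        for (pc = 0; pc < flen; pc++) {
            struct sock_filter *ftest = &filter[pc];

            switch (ftest->code & 0x07) {   /* low bits = class */
            case BPF_LD:
            case BPF_RET:
                break;                      /* allowed */
            default:
                return -1;                  /* reject the program */
            }
        }
        return 0;
    }

    int main(void)
    {
        struct sock_filter prog[] = {
            { 0x20, 0, 0, 0 },   /* BPF_LD | BPF_W | BPF_ABS */
            { 0x06, 0, 0, 0 },   /* BPF_RET                  */
        };

        printf("filter %s\n",
               check_filter(prog, 2) == 0 ? "accepted" : "rejected");
        return 0;
    }
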
/kernel/sched/
idle.c
   293: bool cpu_in_idle(unsigned long pc)  (argument in cpu_in_idle())
   295: return pc >= (unsigned long)__cpuidle_text_start &&  (in cpu_in_idle())
   296: pc < (unsigned long)__cpuidle_text_end;  (in cpu_in_idle())

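cpu_in_idle() is just a half-open range test: a PC counts as idle code when it lies in [__cpuidle_text_start, __cpuidle_text_end). A sketch of the same check, with stand-in section bounds in place of the linker symbols:

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-ins for the __cpuidle_text_start/__cpuidle_text_end linker
     * symbols; a real kernel gets these from the section layout. */
    static unsigned long cpuidle_text_start = 0x1000;
    static unsigned long cpuidle_text_end   = 0x2000;

    /* Half-open interval test: start inclusive, end exclusive. */
    static bool cpu_in_idle(unsigned long pc)
    {
        return pc >= cpuidle_text_start && pc < cpuidle_text_end;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               cpu_in_idle(0x0fff),    /* 0: below the section */
               cpu_in_idle(0x1800),    /* 1: inside            */
               cpu_in_idle(0x2000));   /* 0: end is exclusive  */
        return 0;
    }
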