/kernel/trace/ |
D | trace_functions_graph.c
  |    24  int depth;  (member)
  |   195  .depth = 0,  in __trace_graph_function()
  |   199  .depth = 0,  in __trace_graph_function()
  |   651  cpu_data->depth = call->depth - 1;  in print_graph_entry_leaf()
  |   654  if (call->depth < FTRACE_RETFUNC_DEPTH &&  in print_graph_entry_leaf()
  |   655  !WARN_ON_ONCE(call->depth < 0))  in print_graph_entry_leaf()
  |   656  cpu_data->enter_funcs[call->depth] = 0;  in print_graph_entry_leaf()
  |   663  for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)  in print_graph_entry_leaf()
  |   689  cpu_data->depth = call->depth;  in print_graph_entry_nested()
  |   692  if (call->depth < FTRACE_RETFUNC_DEPTH &&  in print_graph_entry_nested()
  |  [all …]
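The hits above show call->depth driving the tracer's output indentation: each nesting level is TRACE_GRAPH_INDENT columns wide (line 663). A minimal userspace model of that loop, with stdout standing in for the trace_seq buffer; the value 2 for TRACE_GRAPH_INDENT matches the kernel's definition:

```c
#include <stdio.h>

#define TRACE_GRAPH_INDENT 2   /* columns per nesting level, as in the kernel */

/* Model of the indentation loop in print_graph_entry_leaf()/..._nested(). */
static void print_indented(int depth, const char *func)
{
	int i;

	for (i = 0; i < depth * TRACE_GRAPH_INDENT; i++)
		putchar(' ');
	printf("%s()\n", func);
}

int main(void)
{
	print_indented(0, "do_sys_open");
	print_indented(1, "getname");
	print_indented(2, "kmem_cache_alloc");
	return 0;
}
```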
|
D | trace_entries.h
  |    82  __field_packed( int, graph_ent, depth )
  |    85  F_printk("--> %ps (%d)", (void *)__entry->func, __entry->depth)
  |    96  __field_packed( int, ret, depth )
  |   103  (void *)__entry->func, __entry->depth,
  |   105  __entry->depth)
|
D | trace_events_user.c
  |   511  u32 depth = 0, saved_offset = *offset;  in user_event_parse_field() (local)
  |   556  depth++;  in user_event_parse_field()
  |   561  switch (depth++) {  in user_event_parse_field()
  |   580  if (depth < FIELD_DEPTH_SIZE || !name)  in user_event_parse_field()
  |   583  if (depth == FIELD_DEPTH_SIZE)  in user_event_parse_field()
  |   723  int pos = 0, depth = 0;  in user_event_set_print_fmt() (local)
  |   729  if (depth != 0)  in user_event_set_print_fmt()
  |   735  depth++;  in user_event_set_print_fmt()
  |  1131  int depth = 0;  in user_event_show() (local)
  |  1138  if (depth == 0)  in user_event_show()
  |  [all …]
|
D | fgraph.c
  |   139  trace.depth = ++current->curr_ret_depth;  in function_graph_enter()
  |   205  trace->depth = current->curr_ret_depth--;  in ftrace_pop_return_trace()
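These two lines are the whole depth lifecycle in the function-graph core: entry stores ++current->curr_ret_depth into the trace record, and exit reads the counter back while decrementing it. A toy model of that pairing; the struct below is a stand-in for the relevant field of task_struct:

```c
#include <stdio.h>

/* Stand-in for the per-task counter kept in current->curr_ret_depth. */
struct task {
	int curr_ret_depth;
};

/* Entry side: function_graph_enter() records the new depth. */
static int graph_enter(struct task *t)
{
	return ++t->curr_ret_depth;   /* trace.depth = ++current->curr_ret_depth */
}

/* Exit side: ftrace_pop_return_trace() reads and unwinds it. */
static int graph_exit(struct task *t)
{
	return t->curr_ret_depth--;   /* trace->depth = current->curr_ret_depth-- */
}

int main(void)
{
	struct task t = { .curr_ret_depth = -1 };  /* -1 = not in any traced call */

	printf("enter depth %d\n", graph_enter(&t));  /* 0 */
	printf("enter depth %d\n", graph_enter(&t));  /* 1 */
	printf("exit  depth %d\n", graph_exit(&t));   /* 1 */
	printf("exit  depth %d\n", graph_exit(&t));   /* 0 */
	return 0;
}
```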
|
D | trace.h
  |   896  trace_recursion_set_depth(trace->depth);  in ftrace_graph_addr()
  |   918  trace->depth == trace_recursion_depth())  in ftrace_graph_addr_finish()
  |   965  (trace->depth < 0) ||  in ftrace_graph_ignore_func()
  |   966  (fgraph_max_depth && trace->depth >= fgraph_max_depth);  in ftrace_graph_ignore_func()
|
D | blktrace.c
  |   959  unsigned int depth, bool explicit)  in blk_add_trace_unplug() (argument)
  |   966  __be64 rpdu = cpu_to_be64(depth);  in blk_add_trace_unplug()
|
/kernel/locking/ |
D | lockdep.c
  |   781  int i, depth = READ_ONCE(p->lockdep_depth);  in lockdep_print_held_locks() (local)
  |   783  if (!depth)  in lockdep_print_held_locks()
  |   786  printk("%d lock%s held by %s/%d:\n", depth,  in lockdep_print_held_locks()
  |   787  depth > 1 ? "s" : "", p->comm, task_pid_nr(p));  in lockdep_print_held_locks()
  |   794  for (i = 0; i < depth; i++) {  in lockdep_print_held_locks()
  |  1043  for (i = chain->base; i < chain->base + chain->depth; i++)  in check_lock_chain_key()
  |  1533  int depth = 0;  in get_lock_depth() (local)
  |  1538  depth++;  in get_lock_depth()
  |  1540  return depth;  in get_lock_depth()
  |  1858  print_circular_bug_entry(struct lock_list *target, int depth)  in print_circular_bug_entry() (argument)
  |  [all …]
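get_lock_depth() (lines 1533-1540) measures how far a lock_list entry sits from the root of a dependency path by walking ->parent links. A self-contained model of just that walk; the struct is cut down to the one field the function uses:

```c
#include <stdio.h>
#include <stddef.h>

/* Cut-down stand-in for lockdep's struct lock_list: only the parent
 * back-pointer that get_lock_depth() follows. */
struct lock_list {
	struct lock_list *parent;
};

/* Model of get_lock_depth(): count the hops back to the root entry. */
static int get_lock_depth(struct lock_list *child)
{
	int depth = 0;

	while (child->parent) {
		child = child->parent;
		depth++;
	}
	return depth;
}

int main(void)
{
	struct lock_list root = { .parent = NULL };
	struct lock_list mid  = { .parent = &root };
	struct lock_list leaf = { .parent = &mid };

	printf("depth of leaf: %d\n", get_lock_depth(&leaf));  /* 2 */
	return 0;
}
```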
|
D | rtmutex.c
  |   634  int ret = 0, depth = 0;  in rt_mutex_adjust_prio_chain() (local)
  |   651  if (++depth > max_lock_depth) {  in rt_mutex_adjust_prio_chain()
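Line 651 is the guard that keeps the priority-inheritance chain walk bounded: once the walk exceeds max_lock_depth (1024 by default, tunable via /proc/sys/kernel/max_lock_depth) it gives up with -EDEADLK. A sketch of that guard; chain_link is a hypothetical stand-in for the task/waiter links the real walk follows:

```c
#include <stdio.h>
#include <stddef.h>
#include <errno.h>

static int max_lock_depth = 1024;  /* kernel default */

/* Hypothetical PI-chain link: task blocked on a lock whose owner is
 * blocked on the next lock, and so on. */
struct chain_link {
	struct chain_link *next;
};

/* Model of the guard in rt_mutex_adjust_prio_chain(): give up once the
 * walk exceeds max_lock_depth (the kernel also warns once here). */
static int walk_pi_chain(struct chain_link *link)
{
	int depth = 0;

	for (; link; link = link->next) {
		if (++depth > max_lock_depth)
			return -EDEADLK;
		/* ... re-evaluate priorities for this link ... */
	}
	return 0;
}

int main(void)
{
	struct chain_link a = { .next = NULL };
	struct chain_link b = { .next = &a };

	printf("walk: %d\n", walk_pi_chain(&b));  /* 0: chain is short */
	return 0;
}
```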
|
D | lockdep_proc.c
  |   171  for (i = 0; i < chain->depth; i++) {  in lc_show()
|
/kernel/ |
D | umh.c
  |   283  void __usermodehelper_set_disable_depth(enum umh_disable_depth depth)  in __usermodehelper_set_disable_depth() (argument)
  |   286  usermodehelper_disabled = depth;  in __usermodehelper_set_disable_depth()
  |   297  int __usermodehelper_disable(enum umh_disable_depth depth)  in __usermodehelper_disable() (argument)
  |   301  if (!depth)  in __usermodehelper_disable()
  |   305  usermodehelper_disabled = depth;  in __usermodehelper_disable()
|
D | resource.c
  |   114  int depth;  in r_show() (local)
  |   116  for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent)  in r_show()
  |   128  depth * 2, "",  in r_show()
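r_show() derives an indentation depth for /proc/iomem and /proc/ioports by walking ->parent up to MAX_IORES_LEVEL (line 116), then prints depth * 2 leading spaces via the "%*s" trick (line 128). A userspace model with a cut-down struct resource:

```c
#include <stdio.h>
#include <stddef.h>

#define MAX_IORES_LEVEL 5   /* matches the cap used by r_show() */

/* Minimal stand-in for struct resource: tree linkage, name, range. */
struct resource {
	struct resource *parent;
	const char *name;
	unsigned long start, end;
};

/* Model of r_show(): depth comes from the parent chain, indentation is
 * two spaces per level, as in /proc/iomem. */
static void r_show(struct resource *root, struct resource *r)
{
	struct resource *p;
	int depth;

	for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent)
		if (p->parent == root)
			break;

	printf("%*s%04lx-%04lx : %s\n",
	       depth * 2, "", r->start, r->end, r->name ? r->name : "<BAD>");
}

int main(void)
{
	struct resource root = { NULL, "root", 0, 0xffff };
	struct resource bus  = { &root, "PCI Bus", 0x1000, 0x1fff };
	struct resource bar  = { &bus, "e1000", 0x1000, 0x10ff };

	r_show(&root, &bus);   /* no indent */
	r_show(&root, &bar);   /* indented one level */
	return 0;
}
```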
|
/kernel/irq/ |
D | pm.c
  |    21  desc->depth++;  in irq_pm_check_wakeup()
  |   175  desc->depth++;  in resume_irq()
|
D | debug.h
  |    19  irq, desc, desc->depth, desc->irq_count, desc->irqs_unhandled);  in print_irq_desc()
|
D | chip.c
  |   261  desc->depth = 0;  in irq_startup()
  |   310  desc->depth = 1;  in irq_shutdown()
  |  1030  desc->depth = 1;  in __irq_do_set_handler()
  |  1113  WARN_ON_ONCE(!desc->depth && (set & _IRQ_NOAUTOEN));  in irq_modify_status()
|
D | manage.c
  |   684  if (!desc->depth++)  in __disable_irq()
  |   779  switch (desc->depth) {  in __enable_irq()
  |   801  desc->depth--;  in __enable_irq()
  |  1780  desc->depth = 1;  in __setup_irq()
  |  2084  if (WARN_ON(desc->depth == 0))  in free_nmi()
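Together with the chip.c and pm.c hits above and the spurious.c hit below, these lines show that desc->depth is the nesting counter behind disable_irq()/enable_irq(): only the 0 -> 1 transition actually masks the line (line 684), and only the 1 -> 0 transition unmasks it (the switch at line 779). A userspace model of that pairing; irq_desc here is reduced to the two fields the logic touches:

```c
#include <stdio.h>
#include <stdbool.h>

/* Toy irq descriptor: just the pieces the depth logic uses. */
struct irq_desc {
	unsigned int depth;   /* nested disable count */
	bool masked;
};

/* Model of __disable_irq(): only the first disable masks the line. */
static void __disable_irq(struct irq_desc *desc)
{
	if (!desc->depth++)
		desc->masked = true;
}

/* Model of __enable_irq(): only the last enable unmasks; depth 0 means
 * an unbalanced enable, which the kernel WARNs about. */
static void __enable_irq(struct irq_desc *desc)
{
	switch (desc->depth) {
	case 0:
		fprintf(stderr, "Unbalanced enable\n");
		return;
	case 1:
		desc->masked = false;
		/* fall through to the decrement */
	default:
		desc->depth--;
	}
}

int main(void)
{
	struct irq_desc desc = { .depth = 1, .masked = true };  /* starts disabled */

	__enable_irq(&desc);   /* 1 -> 0: unmask */
	__disable_irq(&desc);  /* 0 -> 1: mask */
	__disable_irq(&desc);  /* 1 -> 2: already masked */
	__enable_irq(&desc);   /* 2 -> 1: stays masked */
	printf("depth=%u masked=%d\n", desc.depth, desc.masked);
	return 0;
}
```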
|
D | spurious.c
  |   425  desc->depth++;  in note_interrupt()
|
D | debugfs.c
  |   177  seq_printf(m, "ddepth: %u\n", desc->depth);  in irq_debug_show()
|
D | irqdesc.c
  |   119  desc->depth = 1;  in desc_set_defaults()
  |   557  .depth = 1,
|
/kernel/rcu/ |
D | tree_exp.h
  |   737  int depth = rcu_preempt_depth();  in rcu_exp_handler() (local)
  |   748  if (!depth) {  in rcu_exp_handler()
  |   772  if (depth > 0) {  in rcu_exp_handler()
|
/kernel/bpf/ |
D | btf.c
  |   386  u8 depth;  (member)
  |  1072  if ((indent - show->state.depth) >= indents)  in __btf_show_indent()
  |  1073  return indent - show->state.depth;  in __btf_show_indent()
  |  1089  if (show->state.depth == 0)  in btf_show_delim()
  |  1119  show->state.depth == 0) { \
  |  1125  if (show->state.depth > show->state.depth_to_show) \
  |  1126  show->state.depth_to_show = show->state.depth; \
  |  1136  if (show->state.depth > show->state.depth_to_show) \
  |  1137  show->state.depth_to_show = show->state.depth; \
  |  1216  if (show->state.depth == 0) {  in btf_show_obj_safe()
  |  [all …]
|
D | verifier.c
  |   4474  int depth = 0, frame = 0, i, subprog_end;  in check_max_stack_depth_subprog() (local)
  |   4501  if (idx && subprog[idx].has_tail_call && depth >= 256) {  in check_max_stack_depth_subprog()
  |   4504  depth);  in check_max_stack_depth_subprog()
  |   4510  depth += round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);  in check_max_stack_depth_subprog()
  |   4511  if (depth > MAX_BPF_STACK) {  in check_max_stack_depth_subprog()
  |   4513  frame + 1, depth);  in check_max_stack_depth_subprog()
  |   4574  depth -= round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);  in check_max_stack_depth_subprog()
  |  14141  int i, depth;  in fixup_call_args() (local)
  |  14176  depth = get_callee_stack_depth(env, insn, i);  in fixup_call_args()
  |  14177  if (depth < 0)  in fixup_call_args()
  |  [all …]
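check_max_stack_depth_subprog() accumulates stack usage along a call chain, rounding each frame up to a 32-byte slot (line 4510), and rejects the program once the total passes MAX_BPF_STACK (512 bytes, line 4511); line 4501 additionally refuses tail calls once the accumulated depth reaches 256. A simplified model that takes the call chain as a plain array of frame sizes instead of walking the instruction stream:

```c
#include <stdio.h>
#include <errno.h>

#define MAX_BPF_STACK 512   /* bytes, as in the kernel */

static unsigned int round_up_32(unsigned int v)
{
	return (v + 31u) & ~31u;
}

/* Simplified model of check_max_stack_depth_subprog(): sum the rounded
 * frame sizes and enforce the same accumulated bound. */
static int check_stack_depth(const unsigned int *frame_stack, int frames)
{
	unsigned int depth = 0;
	int i;

	for (i = 0; i < frames; i++) {
		/* every frame costs at least one 32-byte slot */
		depth += round_up_32(frame_stack[i] ? frame_stack[i] : 1);
		if (depth > MAX_BPF_STACK) {
			fprintf(stderr,
				"combined stack of %d calls is %u, too large\n",
				i + 1, depth);
			return -EACCES;
		}
	}
	return 0;
}

int main(void)
{
	unsigned int chain[] = { 200, 200, 200 };  /* rounds to 224 B per frame */

	printf("verdict: %d\n", check_stack_depth(chain, 3));  /* -EACCES */
	return 0;
}
```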
|
/kernel/events/ |
D | uprobes.c
  |  1772  n_utask->depth++;  in dup_utask()
  |  1854  utask->depth--;  in cleanup_return_instances()
  |  1873  if (utask->depth >= MAX_URETPROBE_DEPTH) {  in prepare_uretprobe()
  |  1916  utask->depth++;  in prepare_uretprobe()
  |  2159  utask->depth--;  in handle_trampoline()
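Line 1873 caps uretprobe nesting: once utask->depth reaches MAX_URETPROBE_DEPTH, prepare_uretprobe() refuses to chain another return instance, keeping a deeply recursive probed function from growing the return stack without bound; handle_trampoline() and cleanup_return_instances() unwind the counter. A toy model of the push/pop pair:

```c
#include <stdio.h>
#include <stdbool.h>

#define MAX_URETPROBE_DEPTH 64   /* kernel limit on nested return probes */

/* Toy per-task state: only the depth counter from struct uprobe_task. */
struct uprobe_task {
	unsigned int depth;
};

/* Model of the guard in prepare_uretprobe(). */
static bool push_return_instance(struct uprobe_task *utask)
{
	if (utask->depth >= MAX_URETPROBE_DEPTH) {
		fprintf(stderr, "uretprobe nesting limit reached, skipping\n");
		return false;
	}
	utask->depth++;
	return true;
}

/* Model of the unwind in handle_trampoline()/cleanup_return_instances(). */
static void pop_return_instance(struct uprobe_task *utask)
{
	utask->depth--;
}

int main(void)
{
	struct uprobe_task utask = { .depth = 0 };
	int pushed = 0;

	while (push_return_instance(&utask))
		pushed++;
	printf("pushed %d instances\n", pushed);   /* 64 */
	pop_return_instance(&utask);
	printf("depth now %u\n", utask.depth);     /* 63 */
	return 0;
}
```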
|
/kernel/cgroup/ |
D | cgroup.c
  |  3649  int depth = READ_ONCE(cgrp->max_depth);  in cgroup_max_depth_show() (local)
  |  3651  if (depth == INT_MAX)  in cgroup_max_depth_show()
  |  3654  seq_printf(seq, "%d\n", depth);  in cgroup_max_depth_show()
  |  3664  int depth;  in cgroup_max_depth_write() (local)
  |  3668  depth = INT_MAX;  in cgroup_max_depth_write()
  |  3670  ret = kstrtoint(buf, 0, &depth);  in cgroup_max_depth_write()
  |  3675  if (depth < 0)  in cgroup_max_depth_write()
  |  3682  cgrp->max_depth = depth;  in cgroup_max_depth_write()
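The write side (lines 3664-3682) accepts either the literal "max", stored as INT_MAX (line 3668), or a non-negative integer via kstrtoint(); the show side prints INT_MAX back as "max". A userspace sketch of that round-trip, with strtol() standing in for kstrtoint():

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <errno.h>

/* Model of cgroup_max_depth_write(): "max" maps to INT_MAX, anything
 * else must parse as a non-negative int. */
static int max_depth_write(const char *buf, int *out)
{
	if (!strcmp(buf, "max")) {
		*out = INT_MAX;
		return 0;
	}

	char *end;
	long v = strtol(buf, &end, 0);   /* kernel uses kstrtoint() here */
	if (*buf == '\0' || *end != '\0' || v > INT_MAX || v < INT_MIN)
		return -EINVAL;
	if (v < 0)
		return -ERANGE;
	*out = (int)v;
	return 0;
}

/* Model of cgroup_max_depth_show(): INT_MAX reads back as "max". */
static void max_depth_show(int depth)
{
	if (depth == INT_MAX)
		puts("max");
	else
		printf("%d\n", depth);
}

int main(void)
{
	int depth;

	if (!max_depth_write("max", &depth))
		max_depth_show(depth);   /* max */
	if (!max_depth_write("3", &depth))
		max_depth_show(depth);   /* 3 */
	printf("bad input: %d\n", max_depth_write("-1", &depth));  /* -ERANGE */
	return 0;
}
```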
|
/kernel/sched/ |
D | fair.c
  |    481  se_depth = (*se)->depth;  in find_matching_se()
  |    482  pse_depth = (*pse)->depth;  in find_matching_se()
  |   7934  int se_depth = se->depth;  in pick_next_task_fair()
  |   7935  int pse_depth = pse->depth;  in pick_next_task_fair()
  |  11945  int sea_depth = sea->depth;  in cfs_prio_less()
  |  11946  int seb_depth = seb->depth;  in cfs_prio_less()
  |  12388  se->depth = 0;  in init_tg_cfs_entry()
  |  12391  se->depth = parent->depth + 1;  in init_tg_cfs_entry()
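se->depth, assigned in init_tg_cfs_entry() as parent->depth + 1 (lines 12388-12391), lets find_matching_se() bring two sched entities to the same level of the group hierarchy before comparing them: the deeper one climbs first, then both climb in lockstep until they meet. A model of that walk; in the kernel the lockstep loop compares cfs_rq membership rather than raw parent pointers, so the parent check below is a simplification:

```c
#include <stdio.h>
#include <stddef.h>

/* Cut-down sched_entity: hierarchy depth plus parent linkage. */
struct sched_entity {
	int depth;                      /* 0 at the root, parent->depth + 1 below */
	struct sched_entity *parent;
	const char *name;
};

/* Model of find_matching_se(): equalize depths, then climb in lockstep
 * until both entities hang off the same parent. */
static void find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
	while ((*se)->depth > (*pse)->depth)
		*se = (*se)->parent;
	while ((*pse)->depth > (*se)->depth)
		*pse = (*pse)->parent;

	while ((*se)->parent != (*pse)->parent) {
		*se = (*se)->parent;
		*pse = (*pse)->parent;
	}
}

int main(void)
{
	struct sched_entity root_a = { 0, NULL, "A" };
	struct sched_entity root_b = { 0, NULL, "B" };
	struct sched_entity a1 = { 1, &root_a, "A/1" };
	struct sched_entity a2 = { 2, &a1, "A/1/2" };
	struct sched_entity *se = &a2, *pse = &root_b;

	find_matching_se(&se, &pse);
	printf("compare %s vs %s\n", se->name, pse->name);  /* A vs B */
	return 0;
}
```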
|
D | sched.h
  |  1984  p->se.depth = tg->se[cpu] ? tg->se[cpu]->depth + 1 : 0;  in set_task_rq()
|