/kernel/livepatch/ |
D | patch.c |
      28  struct klp_func *func;  in klp_find_ops() local
      31  func = list_first_entry(&ops->func_stack, struct klp_func,  in klp_find_ops()
      33  if (func->old_func == old_func)  in klp_find_ops()
      46  struct klp_func *func;  in klp_ftrace_handler() local
      57  func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,  in klp_ftrace_handler()
      65  if (WARN_ON_ONCE(!func))  in klp_ftrace_handler()
      81  if (unlikely(func->transition)) {  in klp_ftrace_handler()
     101  func = list_entry_rcu(func->stack_node.next,  in klp_ftrace_handler()
     104  if (&func->stack_node == &ops->func_stack)  in klp_ftrace_handler()
     113  if (func->nop)  in klp_ftrace_handler()
    [all …]
|
D | core.c |
      87  struct klp_func *func;  in klp_find_func() local
      89  klp_for_each_func(obj, func) {  in klp_find_func()
      90  if ((strcmp(old_func->old_name, func->old_name) == 0) &&  in klp_find_func()
      91  (old_func->old_sympos == func->old_sympos)) {  in klp_find_func()
      92  return func;  in klp_find_func()
     420  struct klp_func *func);
     447  static void klp_free_func_nop(struct klp_func *func)  in klp_free_func_nop() argument
     449  kfree(func->old_name);  in klp_free_func_nop()
     450  kfree(func);  in klp_free_func_nop()
     456  struct klp_func *func;  in klp_alloc_func_nop() local
    [all …]
|
D | transition.c |
      72  struct klp_func *func;  in klp_complete_transition() local
     102  klp_for_each_func(obj, func)  in klp_complete_transition()
     103  func->transition = false;  in klp_complete_transition()
     193  static int klp_check_stack_func(struct klp_func *func, unsigned long *entries,  in klp_check_stack_func() argument
     208  func_addr = (unsigned long)func->new_func;  in klp_check_stack_func()
     209  func_size = func->new_size;  in klp_check_stack_func()
     215  ops = klp_find_ops(func->old_func);  in klp_check_stack_func()
     219  func_addr = (unsigned long)func->old_func;  in klp_check_stack_func()
     220  func_size = func->old_size;  in klp_check_stack_func()
     225  prev = list_next_entry(func, stack_node);  in klp_check_stack_func()
    [all …]
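
The klp_func/klp_object lists walked above are populated by livepatch modules. A minimal sketch of the producer side, modeled on samples/livepatch/livepatch-sample.c; the patched symbol name and replacement body are illustrative, and klp_enable_patch() alone is the v5.1+ entry point (older kernels also required klp_register_patch()):

    #include <linux/kernel.h>
    #include <linux/module.h>
    #include <linux/livepatch.h>

    /* illustrative replacement for a (hypothetical) vmlinux function */
    static int livepatch_fixed_func(void)
    {
            return 0;
    }

    static struct klp_func funcs[] = {
            {
                    .old_name = "buggy_func",       /* symbol being replaced */
                    .new_func = livepatch_fixed_func,
            }, { }
    };

    static struct klp_object objs[] = {
            {
                    /* NULL .name means the object is vmlinux itself */
                    .funcs = funcs,
            }, { }
    };

    static struct klp_patch patch = {
            .mod  = THIS_MODULE,
            .objs = objs,
    };

    static int livepatch_init(void)
    {
            return klp_enable_patch(&patch);
    }
    module_init(livepatch_init);
    MODULE_LICENSE("GPL");
    MODULE_INFO(livepatch, "Y");

Once such a patch is enabled, klp_ftrace_handler() (patch.c above) redirects calls from the old function to the replacement by walking ops->func_stack.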
|
/kernel/ |
D | smp.c |
     143  smp_call_func_t func, void *info)  in generic_exec_single() argument
     154  func(info);  in generic_exec_single()
     165  csd->func = func;  in generic_exec_single()
     235  csd->func);  in flush_smp_call_function_queue()
     239  smp_call_func_t func = csd->func;  in flush_smp_call_function_queue() local
     244  func(info);  in flush_smp_call_function_queue()
     248  func(info);  in flush_smp_call_function_queue()
     269  int smp_call_function_single(int cpu, smp_call_func_t func, void *info,  in smp_call_function_single() argument
     308  err = generic_exec_single(cpu, csd, func, info);  in smp_call_function_single()
     348  err = generic_exec_single(cpu, csd, csd->func, csd->info);  in smp_call_function_single_async()
    [all …]
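
smp_call_function_single(), seen above, runs a smp_call_func_t on one chosen CPU, over an IPI when the target is remote. A minimal caller sketch under the signature shown (names are illustrative):

    #include <linux/smp.h>

    static void read_cpu_id(void *info)
    {
            int *out = info;

            /* runs with interrupts disabled on the target CPU */
            *out = smp_processor_id();
    }

    static int query_cpu(int cpu)
    {
            int id = -1;
            int err;

            /* wait=1: do not return until the remote call has completed */
            err = smp_call_function_single(cpu, read_cpu_id, &id, 1);
            return err ? err : id;
    }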
|
D | up.c |
      12  int smp_call_function_single(int cpu, void (*func) (void *info), void *info,  in smp_call_function_single()
      20  func(info);  in smp_call_function_single()
      32  csd->func(csd->info);  in smp_call_function_single_async()
      38  void on_each_cpu(smp_call_func_t func, void *info, int wait)  in on_each_cpu() argument
      43  func(info);  in on_each_cpu()
      55  smp_call_func_t func, void *info, bool wait)  in on_each_cpu_mask() argument
      61  func(info);  in on_each_cpu_mask()
      72  smp_call_func_t func, void *info, bool wait,  in on_each_cpu_cond_mask() argument
      80  func(info);  in on_each_cpu_cond_mask()
      88  smp_call_func_t func, void *info, bool wait,  in on_each_cpu_cond() argument
    [all …]
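
up.c supplies the uniprocessor stubs for the same API: each "cross-CPU" helper just runs func(info) locally with interrupts disabled, so callers can use on_each_cpu() unconditionally. A sketch of such a caller (names illustrative):

    #include <linux/smp.h>
    #include <linux/atomic.h>

    static void bump_counter(void *info)
    {
            atomic_t *counter = info;

            atomic_inc(counter);    /* executed once on every online CPU */
    }

    static int count_cpus(void)
    {
            atomic_t seen = ATOMIC_INIT(0);

            /* wait=1: return only after every CPU has run bump_counter() */
            on_each_cpu(bump_counter, &seen, 1);
            return atomic_read(&seen);
    }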
|
D | tracepoint.c |
     124  for (i = 0; funcs[i].func; i++)  in debug_print_probes()
     125  printk(KERN_DEBUG "Probe %d : %p\n", i, funcs[i].func);  in debug_print_probes()
     136  if (WARN_ON(!tp_func->func))  in func_add()
     143  for (nr_probes = 0; old[nr_probes].func; nr_probes++) {  in func_add()
     147  if (old[nr_probes].func == tp_func->func &&  in func_add()
     170  new[nr_probes + 1].func = NULL;  in func_add()
     189  if (tp_func->func) {  in func_remove()
     190  for (nr_probes = 0; old[nr_probes].func; nr_probes++) {  in func_remove()
     191  if (old[nr_probes].func == tp_func->func &&  in func_remove()
     213  for (i = 0; old[i].func; i++)  in func_remove()
    [all …]
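
func_add()/func_remove() above maintain the NULL-terminated struct tracepoint_func array that a tracepoint fires in order. Probes attach through tracepoint_probe_register(), usually via the generated register_trace_##name() wrappers. A hedged sketch against sched_switch; the probe arguments must match that tracepoint's TP_PROTO for the kernel version at hand (the bool preempt form shown is the ~v4.4-v5.17 one):

    #include <linux/tracepoint.h>
    #include <trace/events/sched.h>

    static void my_switch_probe(void *data, bool preempt,
                                struct task_struct *prev,
                                struct task_struct *next)
    {
            /* runs on every context switch while registered */
    }

    static int attach_probe(void)
    {
            return register_trace_sched_switch(my_switch_probe, NULL);
    }

    static void detach_probe(void)
    {
            unregister_trace_sched_switch(my_switch_probe, NULL);
            tracepoint_synchronize_unregister();    /* wait out in-flight probes */
    }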
|
D | async.c |
      72  async_func_t func;  member
     120  entry->func, task_pid_nr(current));  in async_run_entry_fn()
     123  entry->func(entry->data, entry->cookie);  in async_run_entry_fn()
     129  entry->func,  in async_run_entry_fn()
     165  async_cookie_t async_schedule_node_domain(async_func_t func, void *data,  in async_schedule_node_domain() argument
     186  func(data, newcookie);  in async_schedule_node_domain()
     192  entry->func = func;  in async_schedule_node_domain()
     231  async_cookie_t async_schedule_node(async_func_t func, void *data, int node)  in async_schedule_node() argument
     233  return async_schedule_node_domain(func, data, node, &async_dfl_domain);  in async_schedule_node()
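
The async_func_t member above is the unit of work in the async subsystem: async_schedule() queues it to run in a worker thread and returns a cookie that later waits can key on. A minimal sketch (names illustrative):

    #include <linux/async.h>

    static void probe_hardware(void *data, async_cookie_t cookie)
    {
            /* slow device initialization, run asynchronously at boot */
    }

    static void start_probe(void *dev)
    {
            async_schedule(probe_hardware, dev);

            /* ... other setup proceeds concurrently ... */

            async_synchronize_full();       /* wait for all async work */
    }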
|
D | task_work.c |
      56  task_work_cancel(struct task_struct *task, task_work_func_t func)  in task_work_cancel() argument
      72  if (work->func != func)  in task_work_cancel()
     113  work->func(work);  in task_work_run()
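
task_work queues a callback that the target task itself runs on its way back to user space; task_work_cancel() above removes a pending entry by matching func. A sketch of the producer side (the third task_work_add() argument is a boolean notify flag in kernels of this vintage; later kernels take an enum task_work_notify_mode such as TWA_RESUME):

    #include <linux/sched.h>
    #include <linux/slab.h>
    #include <linux/task_work.h>

    struct deferred_msg {
            struct callback_head work;
            int payload;
    };

    static void deferred_msg_fn(struct callback_head *head)
    {
            struct deferred_msg *m = container_of(head, struct deferred_msg, work);

            /* runs in the task's own context, before it returns to user space */
            kfree(m);
    }

    static int defer_to_task(struct task_struct *task, struct deferred_msg *m)
    {
            init_task_work(&m->work, deferred_msg_fn);
            return task_work_add(task, &m->work, true);     /* true: kick the task */
    }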
|
D | resource.c |
     398  int (*func)(struct resource *, void *))  in __walk_iomem_res_desc()
     405  ret = (*func)(&res, arg);  in __walk_iomem_res_desc()
     432  u64 end, void *arg, int (*func)(struct resource *, void *))  in walk_iomem_res_desc()
     434  return __walk_iomem_res_desc(start, end, flags, desc, false, arg, func);  in walk_iomem_res_desc()
     446  int (*func)(struct resource *, void *))  in walk_system_ram_res()
     451  arg, func);  in walk_system_ram_res()
     459  int (*func)(struct resource *, void *))  in walk_mem_res()
     464  arg, func);  in walk_mem_res()
     476  void *arg, int (*func)(unsigned long, unsigned long, void *))  in walk_system_ram_range()
     493  ret = (*func)(pfn, end_pfn - pfn, arg);  in walk_system_ram_range()
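
walk_system_ram_range() above calls func once per contiguous block of System RAM, passing a starting pfn and a page count, and stops early if the callback returns non-zero. A minimal accumulator sketch (names illustrative):

    #include <linux/ioport.h>
    #include <linux/memblock.h>     /* for max_pfn */

    static int count_ram(unsigned long start_pfn, unsigned long nr_pages, void *arg)
    {
            unsigned long *total = arg;

            *total += nr_pages;
            return 0;       /* non-zero would abort the walk */
    }

    static unsigned long ram_pages_seen(void)
    {
            unsigned long total = 0;

            walk_system_ram_range(0, max_pfn, &total, count_ram);
            return total;
    }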
|
/kernel/bpf/ |
D | helpers.c |
      33  .func = bpf_map_lookup_elem,
      49  .func = bpf_map_update_elem,
      66  .func = bpf_map_delete_elem,
      80  .func = bpf_map_push_elem,
      95  .func = bpf_map_pop_elem,
     108  .func = bpf_map_pop_elem,
     116  .func = bpf_user_rnd_u32,
     127  .func = bpf_get_smp_processor_id,
     138  .func = bpf_get_numa_node_id,
     150  .func = bpf_ktime_get_ns,
    [all …]
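
Each .func above plugs a helper implementation into a struct bpf_func_proto, which also declares the return and argument types the verifier will enforce. The map-lookup helper from this file looks like this upstream (field values can shift between kernel versions):

    #include <linux/bpf.h>

    BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
    {
            WARN_ON_ONCE(!rcu_read_lock_held());
            return (unsigned long) map->ops->map_lookup_elem(map, key);
    }

    const struct bpf_func_proto bpf_map_lookup_elem_proto = {
            .func           = bpf_map_lookup_elem,
            .gpl_only       = false,
            .pkt_access     = true,
            .ret_type       = RET_PTR_TO_MAP_VALUE_OR_NULL,
            .arg1_type      = ARG_CONST_MAP_PTR,
            .arg2_type      = ARG_PTR_TO_MAP_KEY,
    };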
|
D | verifier.c |
     425  static struct bpf_func_state *func(struct bpf_verifier_env *env,  in func() function
    1645  struct bpf_func_state *func;  in mark_all_scalars_precise() local
    1654  func = st->frame[i];  in mark_all_scalars_precise()
    1656  reg = &func->regs[j];  in mark_all_scalars_precise()
    1661  for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) {  in mark_all_scalars_precise()
    1662  if (func->stack[j].slot_type[0] != STACK_SPILL)  in mark_all_scalars_precise()
    1664  reg = &func->stack[j].spilled_ptr;  in mark_all_scalars_precise()
    1678  struct bpf_func_state *func;  in __mark_chain_precision() local
    1690  func = st->frame[st->curframe];  in __mark_chain_precision()
    1692  reg = &func->regs[regno];  in __mark_chain_precision()
    [all …]
|
/kernel/trace/ |
D | trace_functions.c |
      46  ops->func = function_trace_call;  in allocate_ftrace_ops()
      85  ftrace_func_t func;  in function_trace_init() local
      98  func = function_stack_trace_call;  in function_trace_init()
     100  func = function_trace_call;  in function_trace_init()
     102  ftrace_init_array_ops(tr, func);  in function_trace_init()
     251  tr->ops->func = function_stack_trace_call;  in func_set_flag()
     254  tr->ops->func = function_trace_call;  in func_set_flag()
     589  .func = ftrace_traceon_count,
     596  .func = ftrace_traceoff_count,
     603  .func = ftrace_stacktrace_count,
    [all …]
|
D | fgraph.c |
      59  ftrace_push_return_trace(unsigned long ret, unsigned long func,  in ftrace_push_return_trace() argument
      88  current->ret_stack[index].func = func;  in ftrace_push_return_trace()
      99  int function_graph_enter(unsigned long ret, unsigned long func,  in function_graph_enter() argument
     104  trace.func = func;  in function_graph_enter()
     107  if (ftrace_push_return_trace(ret, func, frame_pointer, retp))  in function_graph_enter()
     160  (void *)current->ret_stack[index].func,  in ftrace_pop_return_trace()
     168  trace->func = current->ret_stack[index].func;  in ftrace_pop_return_trace()
     313  .func = ftrace_stub,
     422  if (!ftrace_ops_test(&global_ops, trace->func, NULL))  in ftrace_graph_entry_test()
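
function_graph_enter() above pushes ret and func onto the task's ret_stack so the matching return can be traced by ftrace_pop_return_trace(). Since the fgraph split (~v5.0) a graph client registers an entry/return callback pair; a hedged sketch (these signatures match that era and have since grown an fgraph_ops argument):

    #include <linux/ftrace.h>

    static int my_graph_entry(struct ftrace_graph_ent *trace)
    {
            /* return 0 to skip tracing this function's exit */
            return 1;
    }

    static void my_graph_return(struct ftrace_graph_ret *trace)
    {
            /* trace->func and the timing fields describe the completed call */
    }

    static struct fgraph_ops my_gops = {
            .entryfunc = my_graph_entry,
            .retfunc   = my_graph_return,
    };

    /* register_ftrace_graph(&my_gops); ... unregister_ftrace_graph(&my_gops); */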
|
D | ftrace.c |
      84  .func = ftrace_stub,
     187  ftrace_func_t func;  in update_ftrace_function() local
     199  func = ftrace_stub;  in update_ftrace_function()
     208  func = ftrace_ops_get_list_func(ftrace_ops_list);  in update_ftrace_function()
     213  func = ftrace_ops_list_func;  in update_ftrace_function()
     219  if (ftrace_trace_function == func)  in update_ftrace_function()
     226  if (func == ftrace_ops_list_func) {  in update_ftrace_function()
     227  ftrace_trace_function = func;  in update_ftrace_function()
     261  ftrace_trace_function = func;  in update_ftrace_function()
     336  ops->saved_func = ops->func;  in __register_ftrace_function()
    [all …]
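
update_ftrace_function() above decides what the global ftrace_trace_function points at: a lone ops->func when a single ftrace_ops is registered, or the list iterator when there are several. Registering such an ops from a module looks roughly like this (the pt_regs-based callback signature is the pre-v5.11 one; later kernels pass struct ftrace_regs, and the RECURSION_SAFE flag was likewise reworked):

    #include <linux/ftrace.h>

    static void my_trace_call(unsigned long ip, unsigned long parent_ip,
                              struct ftrace_ops *ops, struct pt_regs *regs)
    {
            /* runs at the entry of every function ftrace instruments */
    }

    static struct ftrace_ops my_ops = {
            .func  = my_trace_call,
            .flags = FTRACE_OPS_FL_RECURSION_SAFE,
    };

    /* register_ftrace_function(&my_ops); ... unregister_ftrace_function(&my_ops); */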
|
D | trace_event_perf.c |
     251  char *func = NULL;  in perf_kprobe_init() local
     255  func = kzalloc(KSYM_NAME_LEN, GFP_KERNEL);  in perf_kprobe_init()
     256  if (!func)  in perf_kprobe_init()
     259  func, u64_to_user_ptr(p_event->attr.kprobe_func),  in perf_kprobe_init()
     266  if (func[0] == '\0') {  in perf_kprobe_init()
     267  kfree(func);  in perf_kprobe_init()
     268  func = NULL;  in perf_kprobe_init()
     273  func, (void *)(unsigned long)(p_event->attr.kprobe_addr),  in perf_kprobe_init()
     286  kfree(func);  in perf_kprobe_init()
     481  ops->func = perf_ftrace_function_call;  in perf_ftrace_function_register()
|
D | trace_events_trigger.c |
      70  data->ops->func(data, rec, event);  in event_triggers_call()
      80  data->ops->func(data, rec, event);  in event_triggers_call()
     108  data->ops->func(data, NULL, NULL);  in event_triggers_post_call()
     228  ret = p->func(p, file, buff, command, next);  in trigger_process_regex()
     995  .func = traceon_trigger,
    1002  .func = traceon_count_trigger,
    1009  .func = traceoff_trigger,
    1016  .func = traceoff_count_trigger,
    1041  .func = event_trigger_callback,
    1052  .func = event_trigger_callback,
    [all …]
|
D | trace_entries.h |
      83  __field_desc( unsigned long, graph_ent, func )
      87  F_printk("--> %ps (%d)", (void *)__entry->func, __entry->depth),
      99  __field_desc( unsigned long, ret, func )
     107  (void *)__entry->func, __entry->depth,
     328  __array( char, func, TRACE_FUNC_SIZE+1 )
     336  __entry->func, __entry->file, __entry->correct,
|
D | trace_selftest.c |
     152  .func = trace_selftest_test_probe1_func,
     157  .func = trace_selftest_test_probe2_func,
     162  .func = trace_selftest_test_probe3_func,
     259  dyn_ops->func = trace_selftest_test_dyn_func;  in trace_selftest_ops()
     327  int (*func)(void))  in trace_selftest_startup_dynamic_tracing()
     342  func();  in trace_selftest_startup_dynamic_tracing()
     376  func();  in trace_selftest_startup_dynamic_tracing()
     450  .func = trace_selftest_test_recursion_func,
     454  .func = trace_selftest_test_recursion_safe_func,
     536  # define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })  argument
    [all …]
|
D | bpf_trace.c |
     133  .func = bpf_override_return,
     158  .func = bpf_probe_read,
     194  .func = bpf_probe_write_user,
     328  .func = bpf_trace_printk,
     385  .func = bpf_perf_event_read,
     410  .func = bpf_perf_event_read_value,
     496  .func = bpf_perf_event_output,
     558  .func = bpf_get_current_task,
     579  .func = bpf_current_task_under_cgroup,
     613  .func = bpf_probe_read_str,
    [all …]
|
D | trace_functions_graph.c |
     147  if (ftrace_graph_notrace_addr(trace->func)) {  in trace_graph_entry()
     195  .func = ip,  in __trace_graph_function()
     199  .func = ip,  in __trace_graph_function()
     480  curr->graph_ent.func != next->ret.func)  in get_return_for_leaf()
     668  trace_seq_printf(s, "%ps();\n", (void *)call->func);  in print_graph_entry_leaf()
     670  print_graph_irq(iter, graph_ret->func, TRACE_GRAPH_RET,  in print_graph_entry_leaf()
     696  cpu_data->enter_funcs[call->depth] = call->func;  in print_graph_entry_nested()
     706  trace_seq_printf(s, "%ps() {\n", (void *)call->func);  in print_graph_entry_nested()
     874  if (check_irq_entry(iter, flags, call->func, call->depth))  in print_graph_entry()
     877  print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);  in print_graph_entry()
    [all …]
|
D | trace_branch.c |
      77  strncpy(entry->func, f->data.func, TRACE_FUNC_SIZE);  in probe_likely_condition()
      79  entry->func[TRACE_FUNC_SIZE] = 0;  in probe_likely_condition()
     149  field->func,  in trace_branch_print()
     286  seq_printf(m, "%-30.30s %-20.20s %d\n", p->func, f, p->line);  in branch_stat_show()
|
D | trace_uprobe.c |
     106  unsigned long func, struct pt_regs *regs);
     944  unsigned long func, struct pt_regs *regs,  in __uprobe_trace_func() argument
     972  entry->vaddr[0] = func;  in __uprobe_trace_func()
    1002  static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,  in uretprobe_trace_func() argument
    1010  __uprobe_trace_func(tu, func, regs, ucb, dsize, link->file);  in uretprobe_trace_func()
    1341  unsigned long func, struct pt_regs *regs,  in __uprobe_perf_func() argument
    1371  entry->vaddr[0] = func;  in __uprobe_perf_func()
    1405  static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,  in uretprobe_perf_func() argument
    1409  __uprobe_perf_func(tu, func, regs, ucb, dsize);  in uretprobe_perf_func()
    1508  unsigned long func, struct pt_regs *regs)  in uretprobe_dispatcher() argument
    [all …]
|
/kernel/sched/ |
D | cpufreq.c |
      33  void (*func)(struct update_util_data *data, u64 time,  in cpufreq_add_update_util_hook()
      36  if (WARN_ON(!data || !func))  in cpufreq_add_update_util_hook()
      42  data->func = func;  in cpufreq_add_update_util_hook()
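
cpufreq_add_update_util_hook() above is how a cpufreq governor hooks into the scheduler: data must point at per-CPU storage that stays valid while hooked, and func is then called from scheduler paths whenever that CPU's utilization is updated. A sketch of the attach side, following the pattern schedutil uses (names illustrative):

    #include <linux/percpu.h>
    #include <linux/sched/cpufreq.h>

    struct my_gov_cpu {
            struct update_util_data update_util;    /* must outlive the hook */
    };

    static DEFINE_PER_CPU(struct my_gov_cpu, my_gov_data);

    static void my_update_hook(struct update_util_data *data, u64 time,
                               unsigned int flags)
    {
            struct my_gov_cpu *gc =
                    container_of(data, struct my_gov_cpu, update_util);

            /* scheduler context: decide whether to request a new frequency */
    }

    static void my_gov_start(int cpu)
    {
            cpufreq_add_update_util_hook(cpu,
                            &per_cpu(my_gov_data, cpu).update_util,
                            my_update_hook);
    }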
|
/kernel/rcu/ |
D | srcutiny.c |
     138  rhp->func(rhp);  in srcu_drive_gp()
     159  rcu_callback_t func)  in call_srcu() argument
     163  rhp->func = func;  in call_srcu()
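
call_srcu() above stores func in the rcu_head, and srcu_drive_gp() invokes rhp->func(rhp) once an SRCU grace period has elapsed, i.e. once no srcu_read_lock() section that could still see the object remains. Typical deferred-free usage (names illustrative):

    #include <linux/slab.h>
    #include <linux/srcu.h>

    DEFINE_STATIC_SRCU(my_srcu);

    struct my_obj {
            struct rcu_head rcu;
            /* ... payload ... */
    };

    static void my_obj_free(struct rcu_head *rhp)
    {
            kfree(container_of(rhp, struct my_obj, rcu));
    }

    /* called after the object is unreachable for new readers */
    static void my_obj_retire(struct my_obj *obj)
    {
            call_srcu(&my_srcu, &obj->rcu, my_obj_free);
    }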
|
D | tiny.c |
     133  void call_rcu(struct rcu_head *head, rcu_callback_t func)  in call_rcu() argument
     138  head->func = func;  in call_rcu()
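
Plain call_rcu() follows the same shape: head->func is saved and runs after a grace period, the canonical way to free an object RCU readers may still be traversing. A minimal sketch (update-side locking around the list is assumed and omitted):

    #include <linux/rcupdate.h>
    #include <linux/rculist.h>
    #include <linux/slab.h>

    struct foo {
            struct list_head list;
            struct rcu_head  rcu;
    };

    static void foo_reclaim(struct rcu_head *head)
    {
            kfree(container_of(head, struct foo, rcu));
    }

    static void foo_remove(struct foo *f)
    {
            list_del_rcu(&f->list);         /* readers may still hold f */
            call_rcu(&f->rcu, foo_reclaim); /* freed after they finish */
    }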
|