/kernel/livepatch/
D | patch.c |
    28   struct klp_func *func;  in klp_find_ops() local
    31   func = list_first_entry(&ops->func_stack, struct klp_func,  in klp_find_ops()
    33   if (func->old_func == old_func)  in klp_find_ops()
    46   struct klp_func *func;  in klp_ftrace_handler() local
    57   func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,  in klp_ftrace_handler()
    65   if (WARN_ON_ONCE(!func))  in klp_ftrace_handler()
    81   if (unlikely(func->transition)) {  in klp_ftrace_handler()
    101  func = list_entry_rcu(func->stack_node.next,  in klp_ftrace_handler()
    104  if (&func->stack_node == &ops->func_stack)  in klp_ftrace_handler()
    113  if (func->nop)  in klp_ftrace_handler()
    [all …]

D | core.c |
    88   struct klp_func *func;  in klp_find_func() local
    90   klp_for_each_func(obj, func) {  in klp_find_func()
    91   if ((strcmp(old_func->old_name, func->old_name) == 0) &&  in klp_find_func()
    92   (old_func->old_sympos == func->old_sympos)) {  in klp_find_func()
    93   return func;  in klp_find_func()
    442  struct klp_func *func);
    469  static void klp_free_func_nop(struct klp_func *func)  in klp_free_func_nop() argument
    471  kfree(func->old_name);  in klp_free_func_nop()
    472  kfree(func);  in klp_free_func_nop()
    478  struct klp_func *func;  in klp_alloc_func_nop() local
    [all …]

D | transition.c |
    72   struct klp_func *func;  in klp_complete_transition() local
    102  klp_for_each_func(obj, func)  in klp_complete_transition()
    103  func->transition = false;  in klp_complete_transition()
    193  static int klp_check_stack_func(struct klp_func *func, unsigned long *entries,  in klp_check_stack_func() argument
    208  func_addr = (unsigned long)func->new_func;  in klp_check_stack_func()
    209  func_size = func->new_size;  in klp_check_stack_func()
    215  ops = klp_find_ops(func->old_func);  in klp_check_stack_func()
    219  func_addr = (unsigned long)func->old_func;  in klp_check_stack_func()
    220  func_size = func->old_size;  in klp_check_stack_func()
    225  prev = list_next_entry(func, stack_node);  in klp_check_stack_func()
    [all …]

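The three livepatch files above all operate on struct klp_func entries: klp_find_func() matches them by old_name/old_sympos, klp_ftrace_handler() redirects execution to new_func, and klp_check_stack_func() verifies no task is still running the old code. A minimal sketch of the patch module that populates those structures, modeled on samples/livepatch/livepatch-sample.c (the replacement function here is illustrative):

    #include <linux/module.h>
    #include <linux/kernel.h>
    #include <linux/livepatch.h>
    #include <linux/seq_file.h>

    /* Replacement body for the patched symbol. */
    static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
    {
            seq_printf(m, "%s\n", "this has been live patched");
            return 0;
    }

    static struct klp_func funcs[] = {
            {
                    .old_name = "cmdline_proc_show",      /* symbol to replace */
                    .new_func = livepatch_cmdline_proc_show,
            }, { }
    };

    static struct klp_object objs[] = {
            {
                    /* a NULL name means the symbol lives in vmlinux */
                    .funcs = funcs,
            }, { }
    };

    static struct klp_patch patch = {
            .mod  = THIS_MODULE,
            .objs = objs,
    };

    static int livepatch_init(void)
    {
            return klp_enable_patch(&patch);
    }

    static void livepatch_exit(void)
    {
    }

    module_init(livepatch_init);
    module_exit(livepatch_exit);
    MODULE_LICENSE("GPL");
    MODULE_INFO(livepatch, "Y");
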
/kernel/
D | tracepoint.c |
    175  for (i = 0; funcs[i].func; i++)  in debug_print_probes()
    176  printk(KERN_DEBUG "Probe %d : %p\n", i, funcs[i].func);  in debug_print_probes()
    188  if (WARN_ON(!tp_func->func))  in func_add()
    195  for (nr_probes = 0; old[nr_probes].func; nr_probes++) {  in func_add()
    199  if (old[nr_probes].func == tp_func->func &&  in func_add()
    202  if (old[nr_probes].func == tp_stub_func)  in func_add()
    216  for (nr_probes = 0; old[nr_probes].func; nr_probes++) {  in func_add()
    217  if (old[nr_probes].func == tp_stub_func)  in func_add()
    242  new[nr_probes + 1].func = NULL;  in func_add()
    261  if (tp_func->func) {  in func_remove()
    [all …]

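func_add() and func_remove() above maintain the NULL-terminated probe array attached to a tracepoint. A sketch of the caller side, attaching a probe to sched_switch via the generated register_trace_* wrapper; note the probe's argument list must match the tracepoint definition for the kernel version in use (this is roughly the v5.12-era form, with the registration data pointer passed first):

    #include <linux/module.h>
    #include <trace/events/sched.h>

    /* Probe signature = (void *data, <tracepoint args>) for this era. */
    static void my_probe(void *data, bool preempt,
                         struct task_struct *prev, struct task_struct *next)
    {
            /* data is the private pointer passed at registration (NULL below) */
    }

    static int __init probe_init(void)
    {
            return register_trace_sched_switch(my_probe, NULL);
    }

    static void __exit probe_exit(void)
    {
            unregister_trace_sched_switch(my_probe, NULL);
            /* wait for in-flight probe invocations to drain */
            tracepoint_synchronize_unregister();
    }

    module_init(probe_init);
    module_exit(probe_exit);
    MODULE_LICENSE("GPL");
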
D | smp.c |
    121  __this_cpu_write(cur_csd_func, csd->func);  in csd_lock_record()
    178  cpu, csd->func, csd->info);  in csd_lock_wait_toolong()
    283  smp_call_func_t func = csd->func;  in generic_exec_single() local
    294  func(info);  in generic_exec_single()
    364  csd->func);  in flush_smp_call_function_queue()
    386  smp_call_func_t func = csd->func;  in flush_smp_call_function_queue() local
    396  func(info);  in flush_smp_call_function_queue()
    422  smp_call_func_t func = csd->func;  in flush_smp_call_function_queue() local
    427  func(info);  in flush_smp_call_function_queue()
    468  int smp_call_function_single(int cpu, smp_call_func_t func, void *info,  in smp_call_function_single() argument
    [all …]

D | up.c |
    12   int smp_call_function_single(int cpu, void (*func) (void *info), void *info,  in smp_call_function_single()
    21   func(info);  in smp_call_function_single()
    33   csd->func(csd->info);  in smp_call_function_single_async()
    39   void on_each_cpu(smp_call_func_t func, void *info, int wait)  in on_each_cpu() argument
    44   func(info);  in on_each_cpu()
    56   smp_call_func_t func, void *info, bool wait)  in on_each_cpu_mask() argument
    62   func(info);  in on_each_cpu_mask()
    72   void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,  in on_each_cpu_cond_mask() argument
    80   func(info);  in on_each_cpu_cond_mask()
    87   void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,  in on_each_cpu_cond() argument
    [all …]

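smp.c and up.c are the SMP and uniprocessor implementations of the same cross-CPU call API: on SMP the function travels through a call_single_data (csd) entry and an IPI, while up.c simply runs func(info) locally with interrupts disabled. A sketch of a caller (function and variable names here are illustrative):

    #include <linux/smp.h>

    static void read_my_state(void *info)
    {
            /* runs on the target CPU, in IPI (hard-interrupt) context on SMP,
             * so it must be short and must not sleep */
            unsigned int *out = info;
            *out = smp_processor_id();
    }

    static int sample(void)
    {
            unsigned int val = 0;

            /* wait=1: block until CPU 1 has finished running read_my_state();
             * the caller must ensure the target CPU is online */
            return smp_call_function_single(1, read_my_state, &val, 1);
    }
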
D | async.c |
    72   async_func_t func;  member
    120  entry->func, task_pid_nr(current));  in async_run_entry_fn()
    123  entry->func(entry->data, entry->cookie);  in async_run_entry_fn()
    129  entry->func,  in async_run_entry_fn()
    148  static async_cookie_t __async_schedule_node_domain(async_func_t func,  in __async_schedule_node_domain() argument
    159  entry->func = func;  in __async_schedule_node_domain()
    198  async_cookie_t async_schedule_node_domain(async_func_t func, void *data,  in async_schedule_node_domain() argument
    219  func(data, newcookie);  in async_schedule_node_domain()
    223  return __async_schedule_node_domain(func, data, node, domain, entry);  in async_schedule_node_domain()
    240  async_cookie_t async_schedule_node(async_func_t func, void *data, int node)  in async_schedule_node() argument
    [all …]

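async_run_entry_fn() above invokes entry->func(entry->data, entry->cookie), so an async_func_t takes its private data plus the ordering cookie. A sketch of scheduling and synchronizing async work (my_init_work and kick_off are hypothetical names):

    #include <linux/async.h>

    static void my_init_work(void *data, async_cookie_t cookie)
    {
            /* slow, parallelizable initialization runs here, possibly on a
             * worker thread concurrently with other async entries */
    }

    static void kick_off(void *dev)
    {
            async_cookie_t cookie = async_schedule(my_init_work, dev);

            /* wait until all work scheduled *before* this cookie has run */
            async_synchronize_cookie(cookie);
    }
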
D | static_call.c |
    123  void __static_call_update(struct static_call_key *key, void *tramp, void *func)  in __static_call_update() argument
    131  if (key->func == func)  in __static_call_update()
    134  key->func = func;  in __static_call_update()
    136  arch_static_call_transform(NULL, tramp, func, false);  in __static_call_update()
    198  arch_static_call_transform(site_addr, NULL, func,  in __static_call_update()
    275  arch_static_call_transform(site_addr, NULL, key->func,  in __static_call_init()
    517  int (*func)(int);  member
    533  if (scd->func)  in test_static_call_init()
    534  static_call_update(sc_selftest, scd->func);  in test_static_call_init()

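__static_call_update() above is the engine behind the static_call API: instead of loading a function pointer at each call, the call instruction itself is patched to target the selected function. A sketch of the user-facing side (handler names are illustrative):

    #include <linux/static_call.h>

    static int default_handler(int x)
    {
            return x;
    }

    static int fast_handler(int x)
    {
            return x + 1;
    }

    /* declares the key and trampoline, initially targeting default_handler */
    DEFINE_STATIC_CALL(my_handler, default_handler);

    static int use_it(int v)
    {
            /* compiles to a direct call; no function-pointer indirection */
            return static_call(my_handler)(v);
    }

    static void switch_impl(void)
    {
            /* rewrites the call site(s); key->func changes as seen above */
            static_call_update(my_handler, fast_handler);
    }
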
D | task_work.c |
    104  return cb->func == data;  in task_work_func_match()
    119  task_work_cancel(struct task_struct *task, task_work_func_t func)  in task_work_cancel() argument
    121  return task_work_cancel_match(task, task_work_func_match, func);  in task_work_cancel()
    165  work->func(work);  in task_work_run()

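task_work_run() above calls work->func(work), so the callback recovers its context with container_of(); task_work_cancel() matches pending items by that same func pointer. A sketch of queuing work to run when a task next returns to user space (struct my_ctx and the function names are hypothetical):

    #include <linux/task_work.h>
    #include <linux/sched.h>

    struct my_ctx {
            struct callback_head work;   /* embedded so func can container_of */
            int value;
    };

    static void my_twork(struct callback_head *head)
    {
            struct my_ctx *ctx = container_of(head, struct my_ctx, work);

            /* runs in the context of the target task itself */
            (void)ctx;
    }

    static int queue_it(struct task_struct *task, struct my_ctx *ctx)
    {
            init_task_work(&ctx->work, my_twork);
            /* TWA_RESUME: run on the task's next return to user mode */
            return task_work_add(task, &ctx->work, TWA_RESUME);
    }
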
/kernel/bpf/
D | helpers.c |
    37   .func = bpf_map_lookup_elem,
    53   .func = bpf_map_update_elem,
    70   .func = bpf_map_delete_elem,
    84   .func = bpf_map_push_elem,
    99   .func = bpf_map_pop_elem,
    112  .func = bpf_map_peek_elem,
    120  .func = bpf_user_rnd_u32,
    131  .func = bpf_get_smp_processor_id,
    142  .func = bpf_get_numa_node_id,
    154  .func = bpf_ktime_get_ns,
    [all …]

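Each .func initializer above sits inside a struct bpf_func_proto, which binds a helper's C implementation to the type contract the BPF verifier enforces on its arguments and return value. A sketch of a hypothetical helper (bpf_my_helper is not a real helper), modeled on the map helpers in this file:

    #include <linux/bpf.h>
    #include <linux/filter.h>

    /* BPF_CALL_2 expands to the u64-calling-convention wrapper the BPF
     * interpreter/JIT expects, plus a typed inner function. */
    BPF_CALL_2(bpf_my_helper, struct bpf_map *, map, void *, key)
    {
            return 0;   /* helper body */
    }

    static const struct bpf_func_proto bpf_my_helper_proto = {
            .func      = bpf_my_helper,
            .gpl_only  = false,
            .ret_type  = RET_INTEGER,
            .arg1_type = ARG_CONST_MAP_PTR,   /* verifier: must be a map */
            .arg2_type = ARG_PTR_TO_MAP_KEY,  /* verifier: key-sized pointer */
    };
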
D | trampoline.c |
    106  void *ip = tr->func.addr;  in unregister_fentry()
    109  if (tr->func.ftrace_managed)  in unregister_fentry()
    118  void *ip = tr->func.addr;  in modify_fentry()
    121  if (tr->func.ftrace_managed)  in modify_fentry()
    131  void *ip = tr->func.addr;  in register_fentry()
    137  tr->func.ftrace_managed = ret;  in register_fentry()
    139  if (tr->func.ftrace_managed)  in register_fentry()
    334  &tr->func.model, flags, tprogs,  in bpf_trampoline_update()
    335  tr->func.addr);  in bpf_trampoline_update()
    406  err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_JUMP, NULL,  in bpf_trampoline_link_prog()
    [all …]

/kernel/trace/
D | trace_functions.c |
    50   ops->func = function_trace_call;  in ftrace_allocate_ftrace_ops()
    91   ftrace_func_t func;  in function_trace_init() local
    104  func = function_stack_trace_call;  in function_trace_init()
    106  func = function_trace_call;  in function_trace_init()
    108  ftrace_init_array_ops(tr, func);  in function_trace_init()
    257  tr->ops->func = function_stack_trace_call;  in func_set_flag()
    260  tr->ops->func = function_trace_call;  in func_set_flag()
    595  .func = ftrace_traceon_count,
    602  .func = ftrace_traceoff_count,
    609  .func = ftrace_stacktrace_count,
    [all …]

D | fgraph.c |
    59   ftrace_push_return_trace(unsigned long ret, unsigned long func,  in ftrace_push_return_trace() argument
    88   current->ret_stack[index].func = func;  in ftrace_push_return_trace()
    113  int function_graph_enter(unsigned long ret, unsigned long func,  in function_graph_enter() argument
    127  trace.func = func;  in function_graph_enter()
    130  if (ftrace_push_return_trace(ret, func, frame_pointer, retp))  in function_graph_enter()
    183  (void *)current->ret_stack[index].func,  in ftrace_pop_return_trace()
    191  trace->func = current->ret_stack[index].func;  in ftrace_pop_return_trace()
    336  .func = ftrace_stub,
    449  if (!ftrace_ops_test(&global_ops, trace->func, NULL))  in ftrace_graph_entry_test()

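function_graph_enter() above invokes the registered entry callback and pushes a frame onto current->ret_stack; ftrace_pop_return_trace() later pops it to drive the return callback. A sketch of registering such a pair, in the fgraph_ops form used by v5.x kernels (newer kernels add an fgraph_ops argument to both callbacks; handler names here are illustrative):

    #include <linux/ftrace.h>

    static int my_entry(struct ftrace_graph_ent *trace)
    {
            /* return nonzero to trace this function, 0 to skip it */
            return 1;
    }

    static void my_return(struct ftrace_graph_ret *trace)
    {
            /* trace->func is the address of the function now returning */
    }

    static struct fgraph_ops my_gops = {
            .entryfunc = my_entry,
            .retfunc   = my_return,
    };

    /* enable: register_ftrace_graph(&my_gops);
     * disable: unregister_ftrace_graph(&my_gops); */
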
D | bpf_trace.c |
    141  .func = bpf_override_return,
    167  .func = bpf_probe_read_user,
    204  .func = bpf_probe_read_user_str,
    230  .func = bpf_probe_read_kernel,
    265  .func = bpf_probe_read_kernel_str,
    285  .func = bpf_probe_read_compat,
    304  .func = bpf_probe_read_compat_str,
    341  .func = bpf_probe_write_user,
    545  .func = bpf_trace_printk,
    752  .func = bpf_seq_printf,
    [all …]

D | ftrace.c |
    82   .func = ftrace_stub,
    181  ftrace_func_t func;  in update_ftrace_function() local
    193  func = ftrace_stub;  in update_ftrace_function()
    202  func = ftrace_ops_get_list_func(ftrace_ops_list);  in update_ftrace_function()
    207  func = ftrace_ops_list_func;  in update_ftrace_function()
    213  if (ftrace_trace_function == func)  in update_ftrace_function()
    220  if (func == ftrace_ops_list_func) {  in update_ftrace_function()
    221  ftrace_trace_function = func;  in update_ftrace_function()
    255  ftrace_trace_function = func;  in update_ftrace_function()
    332  ops->saved_func = ops->func;  in __register_ftrace_function()
    [all …]

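update_ftrace_function() above picks which callback every traced function jumps to: a single ops' func directly, or ftrace_ops_list_func when multiple ftrace_ops are registered. A sketch of the registration side; note the callback's fourth argument is struct ftrace_regs * from v5.11 on (struct pt_regs * in older kernels), and my_trace_call is a hypothetical name:

    #include <linux/ftrace.h>

    static void my_trace_call(unsigned long ip, unsigned long parent_ip,
                              struct ftrace_ops *op, struct ftrace_regs *fregs)
    {
            /* called on entry to every traced function; keep it fast and
             * recursion-safe (ftrace provides recursion protection flags) */
    }

    static struct ftrace_ops my_ops = {
            .func = my_trace_call,
    };

    static int __init my_tracer_init(void)
    {
            /* hooks my_ops into ftrace_ops_list; update_ftrace_function()
             * then rewrites ftrace_trace_function accordingly */
            return register_ftrace_function(&my_ops);
    }
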
D | trace_event_perf.c |
    251  char *func = NULL;  in perf_kprobe_init() local
    255  func = kzalloc(KSYM_NAME_LEN, GFP_KERNEL);  in perf_kprobe_init()
    256  if (!func)  in perf_kprobe_init()
    259  func, u64_to_user_ptr(p_event->attr.kprobe_func),  in perf_kprobe_init()
    266  if (func[0] == '\0') {  in perf_kprobe_init()
    267  kfree(func);  in perf_kprobe_init()
    268  func = NULL;  in perf_kprobe_init()
    273  func, (void *)(unsigned long)(p_event->attr.kprobe_addr),  in perf_kprobe_init()
    286  kfree(func);  in perf_kprobe_init()
    481  ops->func = perf_ftrace_function_call;  in perf_ftrace_function_register()

D | trace_events_trigger.c |
    70    data->ops->func(data, rec, event);  in event_triggers_call()
    80    data->ops->func(data, rec, event);  in event_triggers_call()
    108   data->ops->func(data, NULL, NULL);  in event_triggers_post_call()
    235   ret = p->func(p, file, buff, command, next);  in trigger_process_regex()
    1048  .func = traceon_trigger,
    1055  .func = traceon_count_trigger,
    1062  .func = traceoff_trigger,
    1069  .func = traceoff_count_trigger,
    1094  .func = event_trigger_callback,
    1105  .func = event_trigger_callback,
    [all …]

D | trace_entries.h |
    81   __field_packed( unsigned long, graph_ent, func )
    85   F_printk("--> %ps (%d)", (void *)__entry->func, __entry->depth)
    95   __field_packed( unsigned long, ret, func )
    103  (void *)__entry->func, __entry->depth,
    302  __array( char, func, TRACE_FUNC_SIZE+1 )
    310  __entry->func, __entry->file, __entry->correct,

D | trace_selftest.c |
    152  .func = trace_selftest_test_probe1_func,
    157  .func = trace_selftest_test_probe2_func,
    162  .func = trace_selftest_test_probe3_func,
    259  dyn_ops->func = trace_selftest_test_dyn_func;  in trace_selftest_ops()
    327  int (*func)(void))  in trace_selftest_startup_dynamic_tracing()
    342  func();  in trace_selftest_startup_dynamic_tracing()
    376  func();  in trace_selftest_startup_dynamic_tracing()
    450  .func = trace_selftest_test_recursion_func,
    454  .func = trace_selftest_test_recursion_safe_func,
    541  # define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })  argument
    [all …]

D | trace_functions_graph.c |
    147  if (ftrace_graph_notrace_addr(trace->func)) {  in trace_graph_entry()
    195  .func = ip,  in __trace_graph_function()
    199  .func = ip,  in __trace_graph_function()
    480  curr->graph_ent.func != next->ret.func)  in get_return_for_leaf()
    668  trace_seq_printf(s, "%ps();\n", (void *)call->func);  in print_graph_entry_leaf()
    670  print_graph_irq(iter, graph_ret->func, TRACE_GRAPH_RET,  in print_graph_entry_leaf()
    696  cpu_data->enter_funcs[call->depth] = call->func;  in print_graph_entry_nested()
    706  trace_seq_printf(s, "%ps() {\n", (void *)call->func);  in print_graph_entry_nested()
    874  if (check_irq_entry(iter, flags, call->func, call->depth))  in print_graph_entry()
    877  print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);  in print_graph_entry()
    [all …]

D | trace_branch.c |
    77   strncpy(entry->func, f->data.func, TRACE_FUNC_SIZE);  in probe_likely_condition()
    79   entry->func[TRACE_FUNC_SIZE] = 0;  in probe_likely_condition()
    149  field->func,  in trace_branch_print()
    286  seq_printf(m, "%-30.30s %-20.20s %d\n", p->func, f, p->line);  in branch_stat_show()

/kernel/rcu/
D | tiny.c |
    85   unsigned long offset = (unsigned long)head->func;  in rcu_reclaim_tiny()
    96   f = head->func;  in rcu_reclaim_tiny()
    97   WRITE_ONCE(head->func, (rcu_callback_t)0L);  in rcu_reclaim_tiny()
    160  void call_rcu(struct rcu_head *head, rcu_callback_t func)  in call_rcu() argument
    165  head->func = func;  in call_rcu()

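rcu_reclaim_tiny() above invokes head->func after a grace period has elapsed (the `offset` test distinguishes real callbacks from kfree_rcu()-style offsets encoded in the func field). A sketch of the caller side of call_rcu(), with struct my_node and the function names as illustrative placeholders:

    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct my_node {
            int value;
            struct rcu_head rcu;   /* embedded so the callback can container_of */
    };

    static void my_node_free(struct rcu_head *head)
    {
            struct my_node *n = container_of(head, struct my_node, rcu);

            /* runs after all pre-existing RCU readers are done with n */
            kfree(n);
    }

    static void retire(struct my_node *n)
    {
            /* call only after n has been unlinked from every RCU-visible
             * structure, so no new readers can find it */
            call_rcu(&n->rcu, my_node_free);
    }
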
D | tasks.h |
    152  static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,  in call_rcu_tasks_generic() argument
    159  rhp->func = func;  in call_rcu_tasks_generic()
    234  list->func(list);  in rcu_tasks_kthread()
    509  void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func);
    530  void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)  in call_rcu_tasks() argument
    532  call_rcu_tasks_generic(rhp, func, &rcu_tasks);  in call_rcu_tasks()
    656  void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func);
    678  void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func)  in call_rcu_tasks_rude() argument
    680  call_rcu_tasks_generic(rhp, func, &rcu_tasks_rude);  in call_rcu_tasks_rude()
    787  void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
    [all …]

D | srcutiny.c |
    142  rhp->func(rhp);  in srcu_drive_gp()
    179  rcu_callback_t func)  in call_srcu() argument
    183  rhp->func = func;  in call_srcu()

/kernel/sched/
D | cpufreq.c |
    34   void (*func)(struct update_util_data *data, u64 time,  in cpufreq_add_update_util_hook()
    37   if (WARN_ON(!data || !func))  in cpufreq_add_update_util_hook()
    43   data->func = func;  in cpufreq_add_update_util_hook()

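cpufreq_add_update_util_hook() above installs a per-CPU callback that the scheduler invokes when CPU utilization changes; cpufreq governors (schedutil, ondemand) are its real users. A sketch of a caller, with my_uud/my_update_util as hypothetical names; the callback runs from scheduler paths with the runqueue lock held, so it must not sleep:

    #include <linux/sched/cpufreq.h>
    #include <linux/percpu-defs.h>

    static DEFINE_PER_CPU(struct update_util_data, my_uud);

    static void my_update_util(struct update_util_data *data, u64 time,
                               unsigned int flags)
    {
            /* react to a utilization update, e.g. queue a frequency change;
             * never sleep here */
    }

    static void hook_cpu(int cpu)
    {
            cpufreq_add_update_util_hook(cpu, per_cpu_ptr(&my_uud, cpu),
                                         my_update_util);
            /* later: cpufreq_remove_update_util_hook(cpu) to detach */
    }
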