Identifier search results for "func" under kernel/. Each entry lists a file, its matching lines with their original line numbers, and, where the indexer resolved it, the enclosing function; long match lists are truncated as "[all …]".

/kernel/bpf/
D | bpf_lsm.c |
      33  #define LSM_HOOK(RET, DEFAULT, NAME, ...) BTF_ID(func, bpf_lsm_##NAME)
      45  BTF_ID(func, bpf_lsm_sk_alloc_security)   in BTF_SET_END()
      46  BTF_ID(func, bpf_lsm_sk_free_security)   in BTF_SET_END()
      54  BTF_ID(func, bpf_lsm_sock_graft)   in BTF_SET_END()
      55  BTF_ID(func, bpf_lsm_inet_csk_clone)   in BTF_SET_END()
      56  BTF_ID(func, bpf_lsm_inet_conn_established)   in BTF_SET_END()
      66  BTF_ID(func, bpf_lsm_socket_post_create)   in BTF_SET_END()
      67  BTF_ID(func, bpf_lsm_socket_socketpair)   in BTF_SET_END()
     130  .func = bpf_bprm_opts_set,
     151  .func = bpf_ima_inode_hash,
          [all …]
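The LSM_HOOK macro at line 33 stamps out one bpf_lsm_* BTF ID per LSM hook; those IDs are what BPF LSM programs attach to. As a consumer-side illustration, here is a minimal libbpf-style program hooking file_open (the hook choice, program name, and return policy are illustrative, not taken from this file):

    /* SPDX-License-Identifier: GPL-2.0 */
    /* Minimal BPF LSM program, compiled with clang -target bpf against
     * a generated vmlinux.h. Attaches to the bpf_lsm_file_open hook
     * enumerated in the set above. */
    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>

    SEC("lsm/file_open")
    int BPF_PROG(check_file_open, struct file *file)
    {
            return 0;       /* 0 allows the open; a negative errno denies it */
    }

    char LICENSE[] SEC("license") = "GPL";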
D | cpumask.c |
     413  BTF_ID_FLAGS(func, bpf_cpumask_create, KF_ACQUIRE | KF_RET_NULL)
     414  BTF_ID_FLAGS(func, bpf_cpumask_release, KF_RELEASE)
     415  BTF_ID_FLAGS(func, bpf_cpumask_acquire, KF_ACQUIRE | KF_TRUSTED_ARGS)
     416  BTF_ID_FLAGS(func, bpf_cpumask_first, KF_RCU)
     417  BTF_ID_FLAGS(func, bpf_cpumask_first_zero, KF_RCU)
     418  BTF_ID_FLAGS(func, bpf_cpumask_first_and, KF_RCU)
     419  BTF_ID_FLAGS(func, bpf_cpumask_set_cpu, KF_RCU)
     420  BTF_ID_FLAGS(func, bpf_cpumask_clear_cpu, KF_RCU)
     421  BTF_ID_FLAGS(func, bpf_cpumask_test_cpu, KF_RCU)
     422  BTF_ID_FLAGS(func, bpf_cpumask_test_and_set_cpu, KF_RCU)
          [all …]
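Each BTF_ID_FLAGS() line declares one kfunc plus the reference-semantics flags the verifier enforces (KF_ACQUIRE/KF_RELEASE must pair; KF_RCU arguments must be RCU-protected). A sketch of how such a set is built and registered, assuming a 6.x-era tree; the object type, kfunc names, and set name are made up, and newer trees spell the set macros BTF_KFUNCS_START/END:

    #include <linux/bpf.h>
    #include <linux/btf.h>
    #include <linux/btf_ids.h>
    #include <linux/slab.h>

    struct my_obj { int refs; };

    /* Hypothetical kfunc bodies (simplified; real ones manage a refcount). */
    struct my_obj *my_obj_create(void)
    {
            return kzalloc(sizeof(struct my_obj), GFP_ATOMIC);
    }

    void my_obj_release(struct my_obj *obj)
    {
            kfree(obj);
    }

    /* Flags describe ownership rules to the verifier. */
    BTF_SET8_START(my_kfunc_ids)
    BTF_ID_FLAGS(func, my_obj_create, KF_ACQUIRE | KF_RET_NULL)
    BTF_ID_FLAGS(func, my_obj_release, KF_RELEASE)
    BTF_SET8_END(my_kfunc_ids)

    static const struct btf_kfunc_id_set my_kfunc_set = {
            .owner = THIS_MODULE,
            .set   = &my_kfunc_ids,
    };

    static int __init my_kfuncs_init(void)
    {
            return register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
                                             &my_kfunc_set);
    }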
D | helpers.c |
      45  .func = bpf_map_lookup_elem,
      62  .func = bpf_map_update_elem,
      80  .func = bpf_map_delete_elem,
      94  .func = bpf_map_push_elem,
     109  .func = bpf_map_pop_elem,
     122  .func = bpf_map_peek_elem,
     136  .func = bpf_map_lookup_percpu_elem,
     146  .func = bpf_user_rnd_u32,
     157  .func = bpf_get_smp_processor_id,
     168  .func = bpf_get_numa_node_id,
          [all …]
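Each .func assignment here is the implementation slot of a struct bpf_func_proto, which also tells the verifier the helper's argument and return types. The first hit, paraphrased from this file (the surrounding lines are reconstructed from memory, so treat the exact field values as a sketch rather than a quote):

    BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
    {
            WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
            return (unsigned long) map->ops->map_lookup_elem(map, key);
    }

    const struct bpf_func_proto bpf_map_lookup_elem_proto = {
            .func           = bpf_map_lookup_elem, /* line 45 above */
            .gpl_only       = false,
            .pkt_access     = true,
            .ret_type       = RET_PTR_TO_MAP_VALUE_OR_NULL,
            .arg1_type      = ARG_CONST_MAP_PTR,
            .arg2_type      = ARG_PTR_TO_MAP_KEY,
    };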
D | trampoline.c |
     176  void *ip = tr->func.addr;   in unregister_fentry()
     179  if (tr->func.ftrace_managed)   in unregister_fentry()
     190  void *ip = tr->func.addr;   in modify_fentry()
     193  if (tr->func.ftrace_managed) {   in modify_fentry()
     207  void *ip = tr->func.addr;   in register_fentry()
     215  tr->func.ftrace_managed = true;   in register_fentry()
     218  if (tr->func.ftrace_managed) {   in register_fentry()
     442  &tr->func.model, tr->flags, tlinks,   in bpf_trampoline_update()
     443  tr->func.addr);   in bpf_trampoline_update()
     464  tr->fops->func = NULL;   in bpf_trampoline_update()
          [all …]
/kernel/livepatch/
D | patch.c |
      28  struct klp_func *func;   in klp_find_ops() local
      31  func = list_first_entry(&ops->func_stack, struct klp_func,   in klp_find_ops()
      33  if (func->old_func == old_func)   in klp_find_ops()
      46  struct klp_func *func;   in klp_ftrace_handler() local
      62  func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,   in klp_ftrace_handler()
      70  if (WARN_ON_ONCE(!func))   in klp_ftrace_handler()
      86  if (unlikely(func->transition)) {   in klp_ftrace_handler()
     106  func = list_entry_rcu(func->stack_node.next,   in klp_ftrace_handler()
     109  if (&func->stack_node == &ops->func_stack)   in klp_ftrace_handler()
     118  if (func->nop)   in klp_ftrace_handler()
          [all …]
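The handler body (lines 46-118) resolves the newest klp_func registered for the patched symbol and, unless it is a nop or a transition is in flight, redirects execution to the replacement. A heavily condensed sketch of that redirect; recursion locking, preemption handling, and the transition walk at lines 86-118 are omitted, so this is illustrative rather than the real handler:

    static void notrace klp_ftrace_handler_sketch(unsigned long ip,
                                                  unsigned long parent_ip,
                                                  struct ftrace_ops *fops,
                                                  struct ftrace_regs *fregs)
    {
            struct klp_ops *ops = container_of(fops, struct klp_ops, fops);
            struct klp_func *func;

            /* The newest patch for this symbol sits at the head of func_stack. */
            func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
                                          stack_node);
            if (WARN_ON_ONCE(!func))
                    return;

            if (!func->nop)
                    /* Resume execution in the replacement function. */
                    klp_arch_set_pc(fregs, (unsigned long)func->new_func);
    }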
D | core.c |
      90  struct klp_func *func;   in klp_find_func() local
      92  klp_for_each_func(obj, func) {   in klp_find_func()
      93  if ((strcmp(old_func->old_name, func->old_name) == 0) &&   in klp_find_func()
      94  (old_func->old_sympos == func->old_sympos)) {   in klp_find_func()
      95  return func;   in klp_find_func()
     480  struct klp_func *func);
     507  static void klp_free_func_nop(struct klp_func *func)   in klp_free_func_nop() argument
     509  kfree(func->old_name);   in klp_free_func_nop()
     510  kfree(func);   in klp_free_func_nop()
     516  struct klp_func *func;   in klp_alloc_func_nop() local
          [all …]
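klp_find_func() matches patched functions by the (old_name, old_sympos) pair, which are exactly the fields a patch module fills in. For context, the canonical shape of such a module, after samples/livepatch/livepatch-sample.c; the patched symbol and replacement body here are illustrative:

    #include <linux/module.h>
    #include <linux/livepatch.h>
    #include <linux/seq_file.h>

    /* Replacement body for the (illustrative) patched symbol. */
    static int livepatch_meminfo_proc_show(struct seq_file *m, void *v)
    {
            seq_puts(m, "patched\n");
            return 0;
    }

    static struct klp_func funcs[] = {
            {
                    .old_name = "meminfo_proc_show",
                    .new_func = livepatch_meminfo_proc_show,
                    /* .old_sympos selects among duplicate symbols; 0 = unique */
            }, { }
    };

    static struct klp_object objs[] = {
            {
                    /* NULL .name means "patch vmlinux itself" */
                    .funcs = funcs,
            }, { }
    };

    static struct klp_patch patch = {
            .mod  = THIS_MODULE,
            .objs = objs,
    };

    static int __init patch_init(void)
    {
            return klp_enable_patch(&patch);
    }
    module_init(patch_init);
    MODULE_LICENSE("GPL");
    MODULE_INFO(livepatch, "Y");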
D | transition.c |
      93  struct klp_func *func;   in klp_complete_transition() local
     123  klp_for_each_func(obj, func)   in klp_complete_transition()
     124  func->transition = false;   in klp_complete_transition()
     214  static int klp_check_stack_func(struct klp_func *func, unsigned long *entries,   in klp_check_stack_func() argument
     226  func_addr = (unsigned long)func->new_func;   in klp_check_stack_func()
     227  func_size = func->new_size;   in klp_check_stack_func()
     233  ops = klp_find_ops(func->old_func);   in klp_check_stack_func()
     237  func_addr = (unsigned long)func->old_func;   in klp_check_stack_func()
     238  func_size = func->old_size;   in klp_check_stack_func()
     243  prev = list_next_entry(func, stack_node);   in klp_check_stack_func()
          [all …]
/kernel/
D | tracepoint.c |
     175  for (i = 0; funcs[i].func; i++)   in debug_print_probes()
     176  printk(KERN_DEBUG "Probe %d : %p\n", i, funcs[i].func);   in debug_print_probes()
     188  if (WARN_ON(!tp_func->func))   in func_add()
     195  for (iter_probes = 0; old[iter_probes].func; iter_probes++) {   in func_add()
     196  if (old[iter_probes].func == tp_stub_func)   in func_add()
     198  if (old[iter_probes].func == tp_func->func &&   in func_add()
     210  for (iter_probes = 0; old[iter_probes].func; iter_probes++) {   in func_add()
     211  if (old[iter_probes].func == tp_stub_func)   in func_add()
     226  new[nr_probes].func = NULL;   in func_add()
     245  if (tp_func->func) {   in func_remove()
          [all …]
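The loops at lines 175, 195, and 210 all rely on one convention: a probe array carries no length, it ends at the first slot whose .func is NULL (line 226 writes that sentinel), and tp_stub_func marks probes removed when the array could not be reallocated. A simplified sketch of the sentinel convention; real probes receive tracepoint-specific arguments after the data cookie:

    struct tracepoint_func {
            void *func;     /* probe to call */
            void *data;     /* opaque cookie handed to the probe */
            int prio;
    };

    static void call_probes(struct tracepoint_func *funcs)
    {
            int i;

            /* No explicit count: iterate until the NULL-func sentinel. */
            for (i = 0; funcs[i].func; i++) {
                    void (*fn)(void *data) = funcs[i].func;

                    fn(funcs[i].data);
            }
    }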
D | smp.c |
     130  csd_do_func(smp_call_func_t func, void *info, struct __call_single_data *csd)   in csd_do_func() argument
     132  trace_csd_function_entry(func, csd);   in csd_do_func()
     133  func(info);   in csd_do_func()
     134  trace_csd_function_exit(func, csd);   in csd_do_func()
     186  __this_cpu_write(cur_csd_func, csd->func);   in __csd_lock_record()
     253  cpu, csd->func, csd->info);   in csd_lock_wait_toolong()
     360  smp_call_func_t func;   in __smp_call_single_queue() local
     363  func = CSD_TYPE(csd) == CSD_TYPE_TTWU ?   in __smp_call_single_queue()
     364  sched_ttwu_pending : csd->func;   in __smp_call_single_queue()
     366  trace_csd_queue_cpu(cpu, _RET_IP_, func, csd);   in __smp_call_single_queue()
          [all …]
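csd_do_func() is the common invocation point: whatever was queued in csd->func runs on the target CPU with the csd's info pointer. From a caller's point of view the usual entry is smp_call_function_single(); a minimal usage sketch with illustrative names:

    #include <linux/smp.h>

    /* Runs on the remote CPU, in IPI context with interrupts disabled,
     * so it must be short and must not sleep. */
    static void read_remote_cpu(void *info)
    {
            *(int *)info = smp_processor_id();
    }

    static int query_cpu(int cpu)
    {
            int seen = -1;
            int ret;

            /* wait=1: do not return until the remote function has run. */
            ret = smp_call_function_single(cpu, read_remote_cpu, &seen, 1);
            return ret ? ret : seen;
    }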
D | async.c |
      72  async_func_t func;   member
     124  entry->func, task_pid_nr(current));   in async_run_entry_fn()
     127  entry->func(entry->data, entry->cookie);   in async_run_entry_fn()
     130  (long long)entry->cookie, entry->func,   in async_run_entry_fn()
     148  static async_cookie_t __async_schedule_node_domain(async_func_t func,   in __async_schedule_node_domain() argument
     159  entry->func = func;   in __async_schedule_node_domain()
     198  async_cookie_t async_schedule_node_domain(async_func_t func, void *data,   in async_schedule_node_domain() argument
     219  func(data, newcookie);   in async_schedule_node_domain()
     223  return __async_schedule_node_domain(func, data, node, domain, entry);   in async_schedule_node_domain()
     240  async_cookie_t async_schedule_node(async_func_t func, void *data, int node)   in async_schedule_node() argument
          [all …]
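entry->func(entry->data, entry->cookie) at line 127 is the worker-side call; line 219 is the synchronous fallback taken when no entry could be allocated. Caller-side usage looks like this (the device type and callback are illustrative):

    #include <linux/async.h>

    struct my_dev { int id; };

    /* async_func_t signature: (data, cookie); runs in a worker thread. */
    static void my_dev_probe_async(void *data, async_cookie_t cookie)
    {
            struct my_dev *dev = data;

            /* ... slow initialization happens off the caller's thread ... */
            (void)dev;
    }

    static void kick_probe(struct my_dev *dev)
    {
            async_schedule(my_dev_probe_async, dev);

            /* Later, e.g. before module unload: wait for all async work. */
            async_synchronize_full();
    }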
D | up.c |
      12  int smp_call_function_single(int cpu, void (*func) (void *info), void *info,   in smp_call_function_single()
      21  func(info);   in smp_call_function_single()
      33  csd->func(csd->info);   in smp_call_function_single_async()
      43  void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,   in on_each_cpu_cond_mask() argument
      51  func(info);   in on_each_cpu_cond_mask()
      58  int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)   in smp_call_on_cpu()
      67  ret = func(par);   in smp_call_on_cpu()
D | static_call_inline.c |
     134  void __static_call_update(struct static_call_key *key, void *tramp, void *func)   in __static_call_update() argument
     142  if (key->func == func)   in __static_call_update()
     145  key->func = func;   in __static_call_update()
     147  arch_static_call_transform(NULL, tramp, func, false);   in __static_call_update()
     209  arch_static_call_transform(site_addr, NULL, func,   in __static_call_update()
     286  arch_static_call_transform(site_addr, NULL, key->func,   in __static_call_init()
     530  int (*func)(int);   member
     546  if (scd->func)   in test_static_call_init()
     547  static_call_update(sc_selftest, scd->func);   in test_static_call_init()
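__static_call_update() is the slow path behind the static_call_update() macro: it rewrites the trampoline and, with inline static calls, every recorded call site (lines 147 and 209). Typical usage, with made-up implementations:

    #include <linux/static_call.h>

    static int generic_impl(int x) { return x + 1; }
    static int accel_impl(int x)   { return x + 2; }

    /* Key + trampoline; call sites start out targeting generic_impl. */
    DEFINE_STATIC_CALL(my_op, generic_impl);

    static int use_it(int x)
    {
            /* Compiles to a direct call: no indirect branch, no retpoline. */
            return static_call(my_op)(x);
    }

    static void go_fast(void)
    {
            /* Patches the trampoline and all inline sites at runtime. */
            static_call_update(my_op, accel_impl);
    }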
D | task_work.c |
     119  return cb->func == data;   in task_work_func_match()
     134  task_work_cancel_func(struct task_struct *task, task_work_func_t func)   in task_work_cancel_func() argument
     136  return task_work_cancel_match(task, task_work_func_match, func);   in task_work_cancel_func()
     204  work->func(work);   in task_work_run()
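work->func(work) at line 204 runs each queued callback in the target task's own context. The producer side pairs init_task_work() with task_work_add(); a sketch, with an illustrative wrapper struct and payload:

    #include <linux/task_work.h>
    #include <linux/sched.h>
    #include <linux/slab.h>

    struct my_work {
            struct callback_head cb;
            int payload;
    };

    /* Runs as the target task, typically on its way back to user mode. */
    static void my_work_fn(struct callback_head *cb)
    {
            struct my_work *w = container_of(cb, struct my_work, cb);

            /* ... act on w->payload as "current" ... */
            kfree(w);
    }

    static int queue_to_task(struct task_struct *task, int payload)
    {
            struct my_work *w = kzalloc(sizeof(*w), GFP_KERNEL);
            int ret;

            if (!w)
                    return -ENOMEM;
            w->payload = payload;
            init_task_work(&w->cb, my_work_fn);
            /* TWA_RESUME: run at the next return to user mode. */
            ret = task_work_add(task, &w->cb, TWA_RESUME);
            if (ret)
                    kfree(w);       /* task is exiting; callback won't run */
            return ret;
    }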
/kernel/trace/
D | trace_functions.c |
      65  ops->func = function_trace_call;   in ftrace_allocate_ftrace_ops()
     134  ftrace_func_t func;   in function_trace_init() local
     143  func = select_trace_function(func_flags.val);   in function_trace_init()
     144  if (!func)   in function_trace_init()
     150  ftrace_init_array_ops(tr, func);   in function_trace_init()
     371  { TRACER_OPT(func-no-repeats, TRACE_FUNC_OPT_NO_REPEATS) },
     398  ftrace_func_t func;   in func_set_flag() local
     410  func = select_trace_function(new_flags);   in func_set_flag()
     411  if (!func)   in func_set_flag()
     415  if (tr->ops->func == func)   in func_set_flag()
          [all …]
D | bpf_trace.c |
     162  .func = bpf_override_return,
     188  .func = bpf_probe_read_user,
     225  .func = bpf_probe_read_user_str,
     240  .func = bpf_probe_read_kernel,
     275  .func = bpf_probe_read_kernel_str,
     295  .func = bpf_probe_read_compat,
     314  .func = bpf_probe_read_compat_str,
     349  .func = bpf_probe_write_user,
     396  .func = bpf_trace_printk,
     451  .func = bpf_trace_vprintk,
          [all …]
D | ftrace.c |
      88  .func = ftrace_stub,
     137  .func = ftrace_ops_list_func,
     154  .func = ftrace_ops_nop_func,
     209  ftrace_func_t func;   in update_ftrace_function() local
     221  func = ftrace_stub;   in update_ftrace_function()
     230  func = ftrace_ops_get_list_func(ftrace_ops_list);   in update_ftrace_function()
     235  func = ftrace_ops_list_func;   in update_ftrace_function()
     241  if (ftrace_trace_function == func)   in update_ftrace_function()
     248  if (func == ftrace_ops_list_func) {   in update_ftrace_function()
     249  ftrace_trace_function = func;   in update_ftrace_function()
          [all …]
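update_ftrace_function() decides what ftrace_trace_function points at: a single ops->func when exactly one tracer is registered, or ftrace_ops_list_func to fan out to the whole list. Registering your own callback looks roughly like this; the filtered symbol is illustrative, and the fourth callback argument is struct ftrace_regs on roughly v5.11 and later (struct pt_regs before that):

    #include <linux/ftrace.h>
    #include <linux/string.h>

    /* Called for every traced function passing the filter; must be
     * fast and re-entrancy safe. */
    static void my_callback(unsigned long ip, unsigned long parent_ip,
                            struct ftrace_ops *op, struct ftrace_regs *fregs)
    {
    }

    static struct ftrace_ops my_ops = {
            .func = my_callback,
    };

    static int __init my_tracer_init(void)
    {
            int ret;

            /* Restrict the callback to a single symbol. */
            ret = ftrace_set_filter(&my_ops, "do_exit", strlen("do_exit"), 0);
            if (ret)
                    return ret;
            return register_ftrace_function(&my_ops);
    }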
D | fgraph.c |
      69  ftrace_push_return_trace(unsigned long ret, unsigned long func,   in ftrace_push_return_trace() argument
      98  current->ret_stack[index].func = func;   in ftrace_push_return_trace()
     123  int function_graph_enter(unsigned long ret, unsigned long func,   in function_graph_enter() argument
     139  trace.func = func;   in function_graph_enter()
     142  if (ftrace_push_return_trace(ret, func, frame_pointer, retp))   in function_graph_enter()
     195  (void *)current->ret_stack[index].func,   in ftrace_pop_return_trace()
     203  trace->func = current->ret_stack[index].func;   in ftrace_pop_return_trace()
     372  .func = ftrace_graph_func,
     486  if (!ftrace_ops_test(&global_ops, trace->func, NULL))   in ftrace_graph_entry_test()
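ftrace_push_return_trace() records (ret, func) on the task's shadow ret_stack at function entry, and ftrace_pop_return_trace() consumes it at exit; that is the machinery behind register_ftrace_graph(). A consumer sketch, assuming the ~v5.x callback signatures (later kernels pass an extra fgraph_ops argument to both callbacks):

    #include <linux/ftrace.h>

    /* Return nonzero to also trace this function's exit. */
    static int my_graph_entry(struct ftrace_graph_ent *trace)
    {
            return 1;
    }

    static void my_graph_return(struct ftrace_graph_ret *trace)
    {
            /* trace->func is the address pushed at entry (line 98 above). */
    }

    static struct fgraph_ops my_gops = {
            .entryfunc = my_graph_entry,
            .retfunc   = my_graph_return,
    };

    static int __init my_graph_init(void)
    {
            return register_ftrace_graph(&my_gops);
    }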
D | trace_event_perf.c |
     250  char *func = NULL;   in perf_kprobe_init() local
     254  func = strndup_user(u64_to_user_ptr(p_event->attr.kprobe_func),   in perf_kprobe_init()
     256  if (IS_ERR(func)) {   in perf_kprobe_init()
     257  ret = PTR_ERR(func);   in perf_kprobe_init()
     261  if (func[0] == '\0') {   in perf_kprobe_init()
     262  kfree(func);   in perf_kprobe_init()
     263  func = NULL;   in perf_kprobe_init()
     268  func, (void *)(unsigned long)(p_event->attr.kprobe_addr),   in perf_kprobe_init()
     281  kfree(func);   in perf_kprobe_init()
     485  ops->func = perf_ftrace_function_call;   in perf_ftrace_function_register()
D | trace_entries.h |
      81  __field_packed( unsigned long, graph_ent, func )
      85  F_printk("--> %ps (%d)", (void *)__entry->func, __entry->depth)
      97  __field_packed( unsigned long, ret, func )
     106  (void *)__entry->func, __entry->depth,
     119  __field_packed( unsigned long, ret, func )
     127  (void *)__entry->func, __entry->depth,
     328  __array( char, func, TRACE_FUNC_SIZE+1 )
     336  __entry->func, __entry->file, __entry->correct,
D | trace_functions_graph.c |
     152  if (ftrace_graph_notrace_addr(trace->func)) {   in trace_graph_entry()
     200  .func = ip,   in __trace_graph_function()
     204  .func = ip,   in __trace_graph_function()
     484  curr->graph_ent.func != next->ret.func)   in get_return_for_leaf()
     633  bool leaf, void *func, bool hex_format)   in print_graph_retval() argument
     656  func, retval);   in print_graph_retval()
     659  func, err_code);   in print_graph_retval()
     663  func, retval);   in print_graph_retval()
     666  func, err_code);   in print_graph_retval()
     727  print_graph_retval(s, graph_ret->retval, true, (void *)call->func,   in print_graph_entry_leaf()
          [all …]
D | trace_selftest.c |
     152  .func = trace_selftest_test_probe1_func,
     156  .func = trace_selftest_test_probe2_func,
     160  .func = trace_selftest_test_probe3_func,
     256  dyn_ops->func = trace_selftest_test_dyn_func;   in trace_selftest_ops()
     358  int (*func)(void))   in trace_selftest_startup_dynamic_tracing()
     373  func();   in trace_selftest_startup_dynamic_tracing()
     407  func();   in trace_selftest_startup_dynamic_tracing()
     481  .func = trace_selftest_test_recursion_func,
     486  .func = trace_selftest_test_recursion_safe_func,
     572  # define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })   argument
          [all …]
/kernel/rcu/
D | tiny.c |
      88  unsigned long offset = (unsigned long)head->func;   in rcu_reclaim_tiny()
      99  f = head->func;   in rcu_reclaim_tiny()
     101  WRITE_ONCE(head->func, (rcu_callback_t)0L);   in rcu_reclaim_tiny()
     171  void call_rcu(struct rcu_head *head, rcu_callback_t func)   in call_rcu() argument
     178  pr_err("%s(): Double-freed CB %p->%pS()!!! ", __func__, head, head->func);   in call_rcu()
     182  if (!__is_kvfree_rcu_offset((unsigned long)head->func))   in call_rcu()
     183  WRITE_ONCE(head->func, tiny_rcu_leak_callback);   in call_rcu()
     187  head->func = func;   in call_rcu()
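rcu_reclaim_tiny() treats head->func two ways: as a real callback pointer invoked after the grace period, or, for kfree_rcu()/kvfree_rcu(), as a small integer encoding the offset of the rcu_head inside the enclosing object (the __is_kvfree_rcu_offset() test at line 182 distinguishes the two). Both caller patterns, with an illustrative struct:

    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct foo {
            int payload;
            struct rcu_head rcu;
    };

    static void foo_free_cb(struct rcu_head *head)
    {
            /* head->func held this function's address. */
            kfree(container_of(head, struct foo, rcu));
    }

    static void retire(struct foo *f)
    {
            call_rcu(&f->rcu, foo_free_cb);
    }

    static void retire_simple(struct foo *f)
    {
            /* Stores offsetof(struct foo, rcu) in head->func instead of a
             * pointer; the reclaim path kfree()s the enclosing object. */
            kfree_rcu(f, rcu);
    }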
/kernel/debug/kdb/
D | kdb_main.c |
     766  if (kp->func == kdb_exec_defcmd) {   in kdb_defcmd()
     789  mp->func = kdb_exec_defcmd;   in kdb_defcmd()
    1097  result = (*tp->func)(argc-1, (const char **)argv);   in kdb_parse()
    2642  cmd->name, cmd->func, cmd->help);   in kdb_register()
    2680  .func = kdb_md,
    2687  .func = kdb_md,
    2693  .func = kdb_md,
    2699  .func = kdb_md,
    2705  .func = kdb_mm,
    2711  .func = kdb_go,
          [all …]
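The .func slots in these kdbtab entries are what kdb_parse() dispatches to at line 1097. Registering a custom command, assuming the table-based kdb_register() this file reflects; the command name and body are made up, and older kernels used a multi-argument kdb_register() instead:

    #include <linux/kdb.h>

    static int kdb_hello(int argc, const char **argv)
    {
            kdb_printf("hello from kdb\n");
            return 0;
    }

    static kdbtab_t hello_cmd = {
            .name  = "hello",
            .func  = kdb_hello,
            .usage = "",
            .help  = "print a greeting",
    };

    static int __init hello_kdb_init(void)
    {
            kdb_register(&hello_cmd);
            return 0;
    }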
D | kdb_bp.c |
     527  .func = kdb_bp,
     533  .func = kdb_bp,
     539  .func = kdb_bc,
     545  .func = kdb_bc,
     551  .func = kdb_bc,
     557  .func = kdb_ss,
     567  .func = kdb_bp,
/kernel/sched/
D | cpufreq.c |
      31  void (*func)(struct update_util_data *data, u64 time,   in cpufreq_add_update_util_hook()
      34  if (WARN_ON(!data || !func))   in cpufreq_add_update_util_hook()
      40  data->func = func;   in cpufreq_add_update_util_hook()
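cpufreq_add_update_util_hook() publishes data->func plus a per-CPU pointer so the scheduler can invoke it on every utilization update. A consumer sketch in the style of a cpufreq governor; the hook body and names are illustrative:

    #include <linux/percpu-defs.h>
    #include <linux/sched/cpufreq.h>

    static DEFINE_PER_CPU(struct update_util_data, my_uud);

    /* Invoked from scheduler hot paths with the runqueue lock held;
     * must not sleep or take heavy locks. */
    static void my_util_hook(struct update_util_data *data, u64 time,
                             unsigned int flags)
    {
            /* e.g. decide whether to kick a frequency re-evaluation */
    }

    static void attach_hook(int cpu)
    {
            cpufreq_add_update_util_hook(cpu, &per_cpu(my_uud, cpu),
                                         my_util_hook);
            /* Paired with cpufreq_remove_update_util_hook(cpu) on teardown. */
    }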