Searched refs:func (Results 1 – 25 of 76) sorted by relevance

/kernel/bpf/
bpf_lsm.c
33 #define LSM_HOOK(RET, DEFAULT, NAME, ...) BTF_ID(func, bpf_lsm_##NAME)
45 BTF_ID(func, bpf_lsm_sk_alloc_security) in BTF_SET_END()
46 BTF_ID(func, bpf_lsm_sk_free_security) in BTF_SET_END()
54 BTF_ID(func, bpf_lsm_sock_graft) in BTF_SET_END()
55 BTF_ID(func, bpf_lsm_inet_csk_clone) in BTF_SET_END()
56 BTF_ID(func, bpf_lsm_inet_conn_established) in BTF_SET_END()
66 BTF_ID(func, bpf_lsm_socket_post_create) in BTF_SET_END()
67 BTF_ID(func, bpf_lsm_socket_socketpair) in BTF_SET_END()
130 .func = bpf_bprm_opts_set,
151 .func = bpf_ima_inode_hash,
[all …]
helpers.c
42 .func = bpf_map_lookup_elem,
59 .func = bpf_map_update_elem,
77 .func = bpf_map_delete_elem,
91 .func = bpf_map_push_elem,
106 .func = bpf_map_pop_elem,
119 .func = bpf_map_peek_elem,
133 .func = bpf_map_lookup_percpu_elem,
143 .func = bpf_user_rnd_u32,
154 .func = bpf_get_smp_processor_id,
165 .func = bpf_get_numa_node_id,
[all …]
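
Each .func line above is a designated initializer in a struct bpf_func_proto, which is how helpers.c binds a helper's C implementation to the argument and return types the verifier enforces. A minimal sketch of that pattern, modeled on the bpf_map_lookup_elem entry (the exact flag fields vary by helper and kernel version, and the helper implementation itself is assumed to be in scope):

#include <linux/bpf.h>

/* Sketch of the proto pattern from kernel/bpf/helpers.c; treat the
 * flag values as illustrative rather than authoritative. */
const struct bpf_func_proto bpf_map_lookup_elem_proto = {
	.func		= bpf_map_lookup_elem,		/* in-kernel implementation */
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,	/* verifier-checked return */
	.arg1_type	= ARG_CONST_MAP_PTR,		/* R1 must be a map pointer */
	.arg2_type	= ARG_PTR_TO_MAP_KEY,		/* R2 must point at a key */
};
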
trampoline.c
181 mod = __module_text_address((unsigned long) tr->func.addr); in bpf_trampoline_module_get()
197 void *ip = tr->func.addr; in unregister_fentry()
200 if (tr->func.ftrace_managed) in unregister_fentry()
213 void *ip = tr->func.addr; in modify_fentry()
216 if (tr->func.ftrace_managed) { in modify_fentry()
230 void *ip = tr->func.addr; in register_fentry()
238 tr->func.ftrace_managed = true; in register_fentry()
244 if (tr->func.ftrace_managed) { in register_fentry()
471 &tr->func.model, tr->flags, tlinks, in bpf_trampoline_update()
472 tr->func.addr); in bpf_trampoline_update()
[all …]
ringbuf.c
449 .func = bpf_ringbuf_reserve,
491 .func = bpf_ringbuf_submit,
504 .func = bpf_ringbuf_discard,
530 .func = bpf_ringbuf_output,
559 .func = bpf_ringbuf_query,
597 .func = bpf_ringbuf_reserve_dynptr,
618 .func = bpf_ringbuf_submit_dynptr,
637 .func = bpf_ringbuf_discard_dynptr,
770 .func = bpf_user_ringbuf_drain,
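
The protos above are the kernel side of the BPF ring buffer; a BPF program consumes them through the reserve/submit pair. A libbpf-style sketch, assuming a hypothetical map named rb and an illustrative event layout (neither is from this listing):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

struct event {
	int pid;				/* illustrative payload */
};

struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 256 * 1024);	/* power-of-2 multiple of page size */
} rb SEC(".maps");

SEC("tracepoint/syscalls/sys_enter_execve")
int handle_exec(void *ctx)
{
	struct event *e;

	e = bpf_ringbuf_reserve(&rb, sizeof(*e), 0);
	if (!e)					/* reservation fails when the buffer is full */
		return 0;
	e->pid = bpf_get_current_pid_tgid() >> 32;
	bpf_ringbuf_submit(e, 0);		/* or bpf_ringbuf_discard(e, 0) to drop it */
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
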
/kernel/livepatch/
patch.c
28 struct klp_func *func; in klp_find_ops() local
31 func = list_first_entry(&ops->func_stack, struct klp_func, in klp_find_ops()
33 if (func->old_func == old_func) in klp_find_ops()
46 struct klp_func *func; in klp_ftrace_handler() local
62 func = list_first_or_null_rcu(&ops->func_stack, struct klp_func, in klp_ftrace_handler()
70 if (WARN_ON_ONCE(!func)) in klp_ftrace_handler()
86 if (unlikely(func->transition)) { in klp_ftrace_handler()
106 func = list_entry_rcu(func->stack_node.next, in klp_ftrace_handler()
109 if (&func->stack_node == &ops->func_stack) in klp_ftrace_handler()
118 if (func->nop) in klp_ftrace_handler()
[all …]
core.c
89 struct klp_func *func; in klp_find_func() local
91 klp_for_each_func(obj, func) { in klp_find_func()
92 if ((strcmp(old_func->old_name, func->old_name) == 0) && in klp_find_func()
93 (old_func->old_sympos == func->old_sympos)) { in klp_find_func()
94 return func; in klp_find_func()
458 struct klp_func *func);
485 static void klp_free_func_nop(struct klp_func *func) in klp_free_func_nop() argument
487 kfree(func->old_name); in klp_free_func_nop()
488 kfree(func); in klp_free_func_nop()
494 struct klp_func *func; in klp_alloc_func_nop() local
[all …]
transition.c
71 struct klp_func *func; in klp_complete_transition() local
101 klp_for_each_func(obj, func) in klp_complete_transition()
102 func->transition = false; in klp_complete_transition()
192 static int klp_check_stack_func(struct klp_func *func, unsigned long *entries, in klp_check_stack_func() argument
207 func_addr = (unsigned long)func->new_func; in klp_check_stack_func()
208 func_size = func->new_size; in klp_check_stack_func()
214 ops = klp_find_ops(func->old_func); in klp_check_stack_func()
218 func_addr = (unsigned long)func->old_func; in klp_check_stack_func()
219 func_size = func->old_size; in klp_check_stack_func()
224 prev = list_next_entry(func, stack_node); in klp_check_stack_func()
[all …]
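
The klp_func entries that klp_ftrace_handler() and klp_check_stack_func() walk above are supplied by the patch module itself. A skeleton modeled on samples/livepatch/livepatch-sample.c (the patched symbol and replacement body are illustrative):

#include <linux/module.h>
#include <linux/livepatch.h>
#include <linux/seq_file.h>

static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "patched!\n");		/* replacement body */
	return 0;
}

static struct klp_func funcs[] = {
	{
		.old_name = "cmdline_proc_show",	/* symbol to redirect */
		.new_func = livepatch_cmdline_proc_show,
	}, { }					/* NULL-terminated */
};

static struct klp_object objs[] = {
	{
		/* a NULL .name means the symbol lives in vmlinux */
		.funcs = funcs,
	}, { }
};

static struct klp_patch patch = {
	.mod = THIS_MODULE,
	.objs = objs,
};

static int livepatch_init(void)
{
	return klp_enable_patch(&patch);	/* starts the transition above */
}
module_init(livepatch_init);

MODULE_LICENSE("GPL");
MODULE_INFO(livepatch, "Y");
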
/kernel/
tracepoint.c
175 for (i = 0; funcs[i].func; i++) in debug_print_probes()
176 printk(KERN_DEBUG "Probe %d : %p\n", i, funcs[i].func); in debug_print_probes()
188 if (WARN_ON(!tp_func->func)) in func_add()
195 for (iter_probes = 0; old[iter_probes].func; iter_probes++) { in func_add()
196 if (old[iter_probes].func == tp_stub_func) in func_add()
198 if (old[iter_probes].func == tp_func->func && in func_add()
210 for (iter_probes = 0; old[iter_probes].func; iter_probes++) { in func_add()
211 if (old[iter_probes].func == tp_stub_func) in func_add()
226 new[nr_probes].func = NULL; in func_add()
245 if (tp_func->func) { in func_remove()
[all …]
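
func_add() and func_remove() above maintain the NULL-terminated probe array attached to each tracepoint. A module can at least enumerate the tracepoints this machinery manages; a small sketch using for_each_kernel_tracepoint():

#include <linux/module.h>
#include <linux/tracepoint.h>

/* Invoked once per kernel tracepoint. */
static void note_tracepoint(struct tracepoint *tp, void *priv)
{
	unsigned int *count = priv;

	pr_info("tracepoint %u: %s\n", (*count)++, tp->name);
}

static int __init tp_list_init(void)
{
	unsigned int count = 0;

	for_each_kernel_tracepoint(note_tracepoint, &count);
	return 0;
}
module_init(tp_list_init);

MODULE_LICENSE("GPL");
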
async.c
72 async_func_t func; member
124 entry->func, task_pid_nr(current)); in async_run_entry_fn()
127 entry->func(entry->data, entry->cookie); in async_run_entry_fn()
130 (long long)entry->cookie, entry->func, in async_run_entry_fn()
148 static async_cookie_t __async_schedule_node_domain(async_func_t func, in __async_schedule_node_domain() argument
159 entry->func = func; in __async_schedule_node_domain()
198 async_cookie_t async_schedule_node_domain(async_func_t func, void *data, in async_schedule_node_domain() argument
219 func(data, newcookie); in async_schedule_node_domain()
223 return __async_schedule_node_domain(func, data, node, domain, entry); in async_schedule_node_domain()
240 async_cookie_t async_schedule_node(async_func_t func, void *data, int node) in async_schedule_node() argument
[all …]
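
async_schedule_node_domain() above is the core entry point; most callers reach it through the async_schedule() wrapper. A usage sketch with an illustrative probe function:

#include <linux/async.h>

/* async_func_t: runs in a worker thread with the data pointer and cookie. */
static void slow_probe(void *data, async_cookie_t cookie)
{
	/* long-running device initialization would go here */
}

static void kick_off_probe(void *dev)
{
	async_schedule(slow_probe, dev);

	/* ... other init work proceeds concurrently ... */

	async_synchronize_full();	/* wait for all outstanding async work */
}
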
up.c
12 int smp_call_function_single(int cpu, void (*func) (void *info), void *info, in smp_call_function_single()
21 func(info); in smp_call_function_single()
33 csd->func(csd->info); in smp_call_function_single_async()
43 void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func, in on_each_cpu_cond_mask() argument
51 func(info); in on_each_cpu_cond_mask()
58 int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys) in smp_call_on_cpu()
67 ret = func(par); in smp_call_on_cpu()
smp.c
225 __this_cpu_write(cur_csd_func, csd->func); in __csd_lock_record()
366 cpu, csd->func, csd->info); in csd_lock_wait_toolong()
520 smp_call_func_t func = csd->func; in generic_exec_single() local
531 func(info); in generic_exec_single()
609 csd->func); in __flush_smp_call_function_queue()
631 smp_call_func_t func = csd->func; in __flush_smp_call_function_queue() local
641 func(info); in __flush_smp_call_function_queue()
671 smp_call_func_t func = csd->func; in __flush_smp_call_function_queue() local
676 func(info); in __flush_smp_call_function_queue()
738 int smp_call_function_single(int cpu, smp_call_func_t func, void *info, in smp_call_function_single() argument
[all …]
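
up.c and smp.c implement the same contract from opposite ends: csd->func runs on the target CPU on SMP, or simply inline on UP. A typical caller, with an illustrative payload:

#include <linux/smp.h>

struct cpu_query {		/* illustrative payload */
	u64 val;
};

/* smp_call_func_t: runs on the target CPU with IRQs disabled; must not sleep. */
static void read_on_cpu(void *info)
{
	struct cpu_query *q = info;

	q->val = 0;		/* sample the per-CPU resource here */
}

static int query_cpu(int cpu)
{
	struct cpu_query q = { };

	/* wait=1 blocks until read_on_cpu() has completed on 'cpu' */
	return smp_call_function_single(cpu, read_on_cpu, &q, 1);
}
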
static_call_inline.c
123 void __static_call_update(struct static_call_key *key, void *tramp, void *func) in __static_call_update() argument
131 if (key->func == func) in __static_call_update()
134 key->func = func; in __static_call_update()
136 arch_static_call_transform(NULL, tramp, func, false); in __static_call_update()
198 arch_static_call_transform(site_addr, NULL, func, in __static_call_update()
275 arch_static_call_transform(site_addr, NULL, key->func, in __static_call_init()
517 int (*func)(int); member
533 if (scd->func) in test_static_call_init()
534 static_call_update(sc_selftest, scd->func); in test_static_call_init()
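
__static_call_update() above re-patches every call site when key->func changes; the client API hides that behind three macros. A sketch with illustrative names:

#include <linux/static_call.h>

static int default_hook(int arg)
{
	return arg;
}

/* Key plus default target; call sites compile to direct calls. */
DEFINE_STATIC_CALL(my_hook, default_hook);

static int fast_path(int x)
{
	return static_call(my_hook)(x);		/* patched call, no indirect branch */
}

static int faster_hook(int arg)
{
	return arg + 1;
}

static void switch_hook(void)
{
	static_call_update(my_hook, &faster_hook);	/* re-patches all sites */
}
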
task_work.c
118 return cb->func == data; in task_work_func_match()
133 task_work_cancel(struct task_struct *task, task_work_func_t func) in task_work_cancel() argument
135 return task_work_cancel_match(task, task_work_func_match, func); in task_work_cancel()
179 work->func(work); in task_work_run()
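
task_work_cancel() above matches queued work by its func pointer. The add/run side, with an illustrative container embedding the callback_head:

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/task_work.h>

struct pending_close {			/* illustrative container */
	struct callback_head cb;
	int fd;
};

/* Runs in the target task's own context, e.g. on return to userspace. */
static void do_pending_close(struct callback_head *cb)
{
	struct pending_close *pc = container_of(cb, struct pending_close, cb);

	/* ... act on pc->fd in task context ... */
	kfree(pc);
}

static int queue_close(struct task_struct *task, struct pending_close *pc)
{
	init_task_work(&pc->cb, do_pending_close);
	return task_work_add(task, &pc->cb, TWA_RESUME);	/* -ESRCH if exiting */
}
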
/kernel/rcu/
tiny.c
88 unsigned long offset = (unsigned long)head->func; in rcu_reclaim_tiny()
99 f = head->func; in rcu_reclaim_tiny()
100 WRITE_ONCE(head->func, (rcu_callback_t)0L); in rcu_reclaim_tiny()
170 void call_rcu(struct rcu_head *head, rcu_callback_t func) in call_rcu() argument
177 pr_err("%s(): Double-freed CB %p->%pS()!!! ", __func__, head, head->func); in call_rcu()
181 if (!__is_kvfree_rcu_offset((unsigned long)head->func)) in call_rcu()
182 WRITE_ONCE(head->func, tiny_rcu_leak_callback); in call_rcu()
186 head->func = func; in call_rcu()
249 void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func) in kvfree_call_rcu() argument
252 void *ptr = (void *) head - (unsigned long) func; in kvfree_call_rcu()
[all …]
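
head->func does double duty above: a real callback for call_rcu(), or a small object offset for kvfree_call_rcu(), which is what __is_kvfree_rcu_offset() distinguishes. The plain callback form:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	int value;
	struct rcu_head rcu;		/* storage for the deferred callback */
};

/* rcu_callback_t: runs after a grace period, from softirq context. */
static void foo_reclaim(struct rcu_head *head)
{
	kfree(container_of(head, struct foo, rcu));
}

static void foo_release(struct foo *f)
{
	call_rcu(&f->rcu, foo_reclaim);
	/* kfree_rcu(f, rcu) is the shorthand that stores an offset,
	 * not a function pointer, in ->func. */
}
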
/kernel/trace/
trace_functions.c
65 ops->func = function_trace_call; in ftrace_allocate_ftrace_ops()
134 ftrace_func_t func; in function_trace_init() local
143 func = select_trace_function(func_flags.val); in function_trace_init()
144 if (!func) in function_trace_init()
150 ftrace_init_array_ops(tr, func); in function_trace_init()
371 { TRACER_OPT(func-no-repeats, TRACE_FUNC_OPT_NO_REPEATS) },
398 ftrace_func_t func; in func_set_flag() local
410 func = select_trace_function(new_flags); in func_set_flag()
411 if (!func) in func_set_flag()
415 if (tr->ops->func == func) in func_set_flag()
[all …]
fgraph.c
68 ftrace_push_return_trace(unsigned long ret, unsigned long func, in ftrace_push_return_trace() argument
97 current->ret_stack[index].func = func; in ftrace_push_return_trace()
122 int function_graph_enter(unsigned long ret, unsigned long func, in function_graph_enter() argument
138 trace.func = func; in function_graph_enter()
141 if (ftrace_push_return_trace(ret, func, frame_pointer, retp)) in function_graph_enter()
194 (void *)current->ret_stack[index].func, in ftrace_pop_return_trace()
202 trace->func = current->ret_stack[index].func; in ftrace_pop_return_trace()
347 .func = ftrace_graph_func,
461 if (!ftrace_ops_test(&global_ops, trace->func, NULL)) in ftrace_graph_entry_test()
bpf_trace.c
154 .func = bpf_override_return,
180 .func = bpf_probe_read_user,
217 .func = bpf_probe_read_user_str,
243 .func = bpf_probe_read_kernel,
278 .func = bpf_probe_read_kernel_str,
298 .func = bpf_probe_read_compat,
317 .func = bpf_probe_read_compat_str,
352 .func = bpf_probe_write_user,
399 .func = bpf_trace_printk,
454 .func = bpf_trace_vprintk,
[all …]
ftrace.c
84 .func = ftrace_stub,
178 ftrace_func_t func; in update_ftrace_function() local
190 func = ftrace_stub; in update_ftrace_function()
199 func = ftrace_ops_get_list_func(ftrace_ops_list); in update_ftrace_function()
204 func = ftrace_ops_list_func; in update_ftrace_function()
210 if (ftrace_trace_function == func) in update_ftrace_function()
217 if (func == ftrace_ops_list_func) { in update_ftrace_function()
218 ftrace_trace_function = func; in update_ftrace_function()
252 ftrace_trace_function = func; in update_ftrace_function()
329 ops->saved_func = ops->func; in __register_ftrace_function()
[all …]
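
update_ftrace_function() above decides what ftrace_trace_function points at; registered ftrace_ops supply the callbacks it dispatches. A registration sketch (callback signature as in ~v5.11+ kernels; the filtered symbol is illustrative):

#include <linux/ftrace.h>
#include <linux/string.h>

/* ftrace_func_t: called at the entry of every filtered function. */
static void my_trace_callback(unsigned long ip, unsigned long parent_ip,
			      struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	/* must be reentrancy-safe: it runs with tracing live */
}

static struct ftrace_ops my_ops = {
	.func = my_trace_callback,
};

static int attach_tracer(void)
{
	static char sym[] = "do_sys_openat2";	/* illustrative target */
	int ret;

	ret = ftrace_set_filter(&my_ops, sym, strlen(sym), 0);
	if (ret)
		return ret;
	return register_ftrace_function(&my_ops);
}
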
trace_event_perf.c
250 char *func = NULL; in perf_kprobe_init() local
254 func = kzalloc(KSYM_NAME_LEN, GFP_KERNEL); in perf_kprobe_init()
255 if (!func) in perf_kprobe_init()
258 func, u64_to_user_ptr(p_event->attr.kprobe_func), in perf_kprobe_init()
265 if (func[0] == '\0') { in perf_kprobe_init()
266 kfree(func); in perf_kprobe_init()
267 func = NULL; in perf_kprobe_init()
272 func, (void *)(unsigned long)(p_event->attr.kprobe_addr), in perf_kprobe_init()
285 kfree(func); in perf_kprobe_init()
489 ops->func = perf_ftrace_function_call; in perf_ftrace_function_register()
trace_entries.h
81 __field_packed( unsigned long, graph_ent, func )
85 F_printk("--> %ps (%d)", (void *)__entry->func, __entry->depth)
95 __field_packed( unsigned long, ret, func )
103 (void *)__entry->func, __entry->depth,
302 __array( char, func, TRACE_FUNC_SIZE+1 )
310 __entry->func, __entry->file, __entry->correct,
trace_selftest.c
152 .func = trace_selftest_test_probe1_func,
156 .func = trace_selftest_test_probe2_func,
160 .func = trace_selftest_test_probe3_func,
256 dyn_ops->func = trace_selftest_test_dyn_func; in trace_selftest_ops()
358 int (*func)(void)) in trace_selftest_startup_dynamic_tracing()
373 func(); in trace_selftest_startup_dynamic_tracing()
407 func(); in trace_selftest_startup_dynamic_tracing()
481 .func = trace_selftest_test_recursion_func,
486 .func = trace_selftest_test_recursion_safe_func,
572 # define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; }) argument
[all …]
trace_functions_graph.c
146 if (ftrace_graph_notrace_addr(trace->func)) { in trace_graph_entry()
194 .func = ip, in __trace_graph_function()
198 .func = ip, in __trace_graph_function()
478 curr->graph_ent.func != next->ret.func) in get_return_for_leaf()
666 trace_seq_printf(s, "%ps();\n", (void *)call->func); in print_graph_entry_leaf()
668 print_graph_irq(iter, graph_ret->func, TRACE_GRAPH_RET, in print_graph_entry_leaf()
694 cpu_data->enter_funcs[call->depth] = call->func; in print_graph_entry_nested()
704 trace_seq_printf(s, "%ps() {\n", (void *)call->func); in print_graph_entry_nested()
872 if (check_irq_entry(iter, flags, call->func, call->depth)) in print_graph_entry()
875 print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags); in print_graph_entry()
[all …]
/kernel/debug/kdb/
kdb_main.c
766 if (kp->func == kdb_exec_defcmd) { in kdb_defcmd()
789 mp->func = kdb_exec_defcmd; in kdb_defcmd()
1097 result = (*tp->func)(argc-1, (const char **)argv); in kdb_parse()
2642 cmd->name, cmd->func, cmd->help); in kdb_register()
2680 .func = kdb_md,
2687 .func = kdb_md,
2693 .func = kdb_md,
2699 .func = kdb_md,
2705 .func = kdb_mm,
2711 .func = kdb_go,
[all …]
kdb_bp.c
527 .func = kdb_bp,
533 .func = kdb_bp,
539 .func = kdb_bc,
545 .func = kdb_bc,
551 .func = kdb_bc,
557 .func = kdb_ss,
567 .func = kdb_bp,
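
Both kdb files hand these tables to kdb_register(), which since v5.14 takes the kdbtab_t directly. An illustrative extra command:

#include <linux/kdb.h>

static int kdb_hello(int argc, const char **argv)
{
	kdb_printf("hello from kdb\n");
	return 0;			/* 0 = success, KDB_* codes otherwise */
}

static kdbtab_t hello_cmd = {
	.name = "hello",		/* what the user types at the prompt */
	.func = kdb_hello,
	.usage = "hello",
	.help = "Print a greeting",
};

static void register_hello(void)
{
	kdb_register(&hello_cmd);
}
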
/kernel/sched/
cpufreq.c
31 void (*func)(struct update_util_data *data, u64 time, in cpufreq_add_update_util_hook()
34 if (WARN_ON(!data || !func)) in cpufreq_add_update_util_hook()
40 data->func = func; in cpufreq_add_update_util_hook()
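
The hook stored in data->func above is invoked from scheduler context on every utilization update, so it must never sleep. An installation sketch with an illustrative per-CPU governor type:

#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/sched/cpufreq.h>

struct my_gov_cpu {				/* illustrative governor state */
	struct update_util_data update_util;	/* must outlive the hook */
};

static DEFINE_PER_CPU(struct my_gov_cpu, my_gov_data);

/* Runs with the runqueue lock held: wait-free work only. */
static void my_update_util(struct update_util_data *data, u64 time,
			   unsigned int flags)
{
	struct my_gov_cpu *gc = container_of(data, struct my_gov_cpu,
					     update_util);

	(void)gc;	/* ... decide whether to request a frequency change ... */
}

static void my_gov_start(int cpu)
{
	cpufreq_add_update_util_hook(cpu, &per_cpu(my_gov_data, cpu).update_util,
				     my_update_util);
}

static void my_gov_stop(int cpu)
{
	cpufreq_remove_update_util_hook(cpu);
	synchronize_rcu();		/* in-flight callers may still run the hook */
}
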
