Searched refs:ops (Results 1 – 25 of 57) sorted by relevance

/kernel/trace/
trace_functions.c
39 struct ftrace_ops *ops; in allocate_ftrace_ops() local
41 ops = kzalloc(sizeof(*ops), GFP_KERNEL); in allocate_ftrace_ops()
42 if (!ops) in allocate_ftrace_ops()
46 ops->func = function_trace_call; in allocate_ftrace_ops()
47 ops->flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_PID; in allocate_ftrace_ops()
49 tr->ops = ops; in allocate_ftrace_ops()
50 ops->private = tr; in allocate_ftrace_ops()
71 ftrace_create_filter_files(tr->ops, parent); in ftrace_create_function_files()
78 ftrace_destroy_filter_files(tr->ops); in ftrace_destroy_function_files()
79 kfree(tr->ops); in ftrace_destroy_function_files()
[all …]
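
The allocate_ftrace_ops() hits above show a recurring shape for these ops structures: allocate zeroed, install the callback and flags, then cross-link ops and owner through tr->ops and ops->private. Below is a minimal, self-contained sketch of that shape in plain C; every name in it (demo_ops, tracer, allocate_demo_ops) is invented for illustration and is not kernel API.

#include <stdlib.h>

struct tracer;                              /* owner, defined below */

struct demo_ops {
	void (*func)(struct tracer *tr);    /* callback fired on each event */
	unsigned long flags;
	void *private;                      /* back-pointer to the owner */
};

struct tracer {
	struct demo_ops *ops;
};

static void demo_trace_call(struct tracer *tr)
{
	(void)tr;                           /* a real callback would record something here */
}

/* Allocate zeroed, install the callback, then link ops and owner both ways,
 * mirroring allocate_ftrace_ops() above. */
static int allocate_demo_ops(struct tracer *tr)
{
	struct demo_ops *ops = calloc(1, sizeof(*ops));

	if (!ops)
		return -1;

	ops->func = demo_trace_call;
	tr->ops = ops;
	ops->private = tr;
	return 0;
}
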
ftrace.c
98 static bool ftrace_pids_enabled(struct ftrace_ops *ops) in ftrace_pids_enabled() argument
102 if (!(ops->flags & FTRACE_OPS_FL_PID) || !ops->private) in ftrace_pids_enabled()
105 tr = ops->private; in ftrace_pids_enabled()
110 static void ftrace_update_trampoline(struct ftrace_ops *ops);
133 static inline void ftrace_ops_init(struct ftrace_ops *ops) in ftrace_ops_init() argument
136 if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) { in ftrace_ops_init()
137 mutex_init(&ops->local_hash.regex_lock); in ftrace_ops_init()
138 ops->func_hash = &ops->local_hash; in ftrace_ops_init()
139 ops->flags |= FTRACE_OPS_FL_INITIALIZED; in ftrace_ops_init()
172 static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops) in ftrace_ops_get_list_func() argument
[all …]
trace_dynevent.c
21 int dyn_event_register(struct dyn_event_operations *ops) in dyn_event_register() argument
23 if (!ops || !ops->create || !ops->show || !ops->is_busy || in dyn_event_register()
24 !ops->free || !ops->match) in dyn_event_register()
27 INIT_LIST_HEAD(&ops->list); in dyn_event_register()
29 list_add_tail(&ops->list, &dyn_event_ops_list); in dyn_event_register()
63 if (type && type != pos->ops) in dyn_event_release()
65 if (!pos->ops->match(system, event, in dyn_event_release()
69 ret = pos->ops->free(pos); in dyn_event_release()
80 struct dyn_event_operations *ops; in create_dyn_event() local
87 list_for_each_entry(ops, &dyn_event_ops_list, list) { in create_dyn_event()
[all …]
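
dyn_event_register() above refuses an ops table unless every mandatory callback is non-NULL, and only then appends it to the global dyn_event_ops_list. A self-contained sketch of that validate-then-register step follows; the type, the list, and the callbacks are hypothetical stand-ins, and a plain singly linked list replaces the kernel's list_add_tail() under a mutex.

#include <errno.h>
#include <stddef.h>

struct demo_dyn_ops {
	int  (*create)(const char *raw_command);
	int  (*show)(void *seq_file);
	int  (*free)(void *event);
	struct demo_dyn_ops *next;          /* registration list linkage */
};

static struct demo_dyn_ops *demo_ops_list;

/* Reject the whole table if any mandatory hook is missing, mirroring the
 * !ops->create || !ops->show || ... check in dyn_event_register(). */
static int demo_dyn_event_register(struct demo_dyn_ops *ops)
{
	if (!ops || !ops->create || !ops->show || !ops->free)
		return -EINVAL;

	ops->next = demo_ops_list;          /* push onto the global list */
	demo_ops_list = ops;
	return 0;
}
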
ftrace_internal.h
34 int ftrace_startup(struct ftrace_ops *ops, int command);
35 int ftrace_shutdown(struct ftrace_ops *ops, int command);
36 int ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs);
40 int __register_ftrace_function(struct ftrace_ops *ops);
41 int __unregister_ftrace_function(struct ftrace_ops *ops);
43 # define ftrace_startup(ops, command) \ argument
45 int ___ret = __register_ftrace_function(ops); \
47 (ops)->flags |= FTRACE_OPS_FL_ENABLED; \
50 # define ftrace_shutdown(ops, command) \ argument
52 int ___ret = __unregister_ftrace_function(ops); \
[all …]
trace_events_trigger.c
70 data->ops->func(data, rec, event); in event_triggers_call()
80 data->ops->func(data, rec, event); in event_triggers_call()
108 data->ops->func(data, NULL, NULL); in event_triggers_post_call()
163 data->ops->print(m, data->ops, data); in trigger_show()
411 int event_trigger_init(struct event_trigger_ops *ops, in event_trigger_init() argument
429 event_trigger_free(struct event_trigger_ops *ops, in event_trigger_free() argument
483 if (data->ops->free) in clear_event_triggers()
484 data->ops->free(data->ops, data); in clear_event_triggers()
534 static int register_trigger(char *glob, struct event_trigger_ops *ops, in register_trigger() argument
550 if (data->ops->init) { in register_trigger()
[all …]
trace_dynevent.h
51 int dyn_event_register(struct dyn_event_operations *ops);
63 struct dyn_event_operations *ops; member
69 int dyn_event_init(struct dyn_event *ev, struct dyn_event_operations *ops) in dyn_event_init() argument
71 if (!ev || !ops) in dyn_event_init()
75 ev->ops = ops; in dyn_event_init()
83 if (!ev || !ev->ops) in dyn_event_add()
trace_event_perf.c
435 struct ftrace_ops *ops, struct pt_regs *pt_regs) in perf_ftrace_function_call() argument
443 if ((unsigned long)ops->private != smp_processor_id()) in perf_ftrace_function_call()
446 event = container_of(ops, struct perf_event, ftrace_ops); in perf_ftrace_function_call()
478 struct ftrace_ops *ops = &event->ftrace_ops; in perf_ftrace_function_register() local
480 ops->flags = FTRACE_OPS_FL_RCU; in perf_ftrace_function_register()
481 ops->func = perf_ftrace_function_call; in perf_ftrace_function_register()
482 ops->private = (void *)(unsigned long)nr_cpu_ids; in perf_ftrace_function_register()
484 return register_ftrace_function(ops); in perf_ftrace_function_register()
489 struct ftrace_ops *ops = &event->ftrace_ops; in perf_ftrace_function_unregister() local
490 int ret = unregister_ftrace_function(ops); in perf_ftrace_function_unregister()
[all …]
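
perf_ftrace_function_call() only receives a struct ftrace_ops pointer, yet it recovers the owning perf_event with container_of(ops, struct perf_event, ftrace_ops): the ops structure is embedded in the event, so subtracting the member offset gets back to the container. The sketch below demonstrates the same recovery with made-up types; container_of() is written out explicitly since it is just offsetof() arithmetic.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_ops {
	void (*func)(struct demo_ops *ops);
};

struct demo_event {
	int id;
	struct demo_ops ops;                /* embedded by value, not pointed to */
};

/* The callback gets only the ops pointer but can step back to the
 * enclosing event because the offset of 'ops' within it is known. */
static void demo_callback(struct demo_ops *ops)
{
	struct demo_event *event = container_of(ops, struct demo_event, ops);

	printf("callback for event %d\n", event->id);
}

int main(void)
{
	struct demo_event ev = { .id = 42, .ops = { .func = demo_callback } };

	ev.ops.func(&ev.ops);               /* prints "callback for event 42" */
	return 0;
}
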
tracing_map.c
368 if (elt->map->ops && elt->map->ops->elt_clear) in tracing_map_elt_clear()
369 elt->map->ops->elt_clear(elt); in tracing_map_elt_clear()
391 if (elt->map->ops && elt->map->ops->elt_free) in tracing_map_elt_free()
392 elt->map->ops->elt_free(elt); in tracing_map_elt_free()
437 if (map->ops && map->ops->elt_alloc) { in tracing_map_elt_alloc()
438 err = map->ops->elt_alloc(elt); in tracing_map_elt_alloc()
457 if (map->ops && map->ops->elt_init) in get_free_elt()
458 map->ops->elt_init(elt); in get_free_elt()
762 const struct tracing_map_ops *ops, in tracing_map_create() argument
781 map->ops = ops; in tracing_map_create()
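
tracing_map treats every hook in its ops table as optional: each call site checks both that the ops pointer exists and that the individual slot is filled before dispatching. A compact sketch of that guarded dispatch, with invented names:

#include <stddef.h>

struct demo_elt { int payload; };

struct demo_map_ops {
	int  (*elt_alloc)(struct demo_elt *elt);   /* optional hook */
	void (*elt_init)(struct demo_elt *elt);    /* optional hook */
	void (*elt_free)(struct demo_elt *elt);    /* optional hook */
};

struct demo_map {
	const struct demo_map_ops *ops;            /* may itself be NULL */
};

/* Invoke a hook only if the ops table and that particular slot both exist,
 * as tracing_map_elt_alloc() and get_free_elt() do above. */
static int demo_elt_setup(struct demo_map *map, struct demo_elt *elt)
{
	int err = 0;

	if (map->ops && map->ops->elt_alloc)
		err = map->ops->elt_alloc(elt);

	if (!err && map->ops && map->ops->elt_init)
		map->ops->elt_init(elt);

	return err;
}
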
trace.h
313 struct ftrace_ops *ops; member
1088 struct ftrace_probe_ops *ops,
1090 int (*init)(struct ftrace_probe_ops *ops,
1094 void (*free)(struct ftrace_probe_ops *ops,
1099 struct ftrace_probe_ops *ops,
1118 struct ftrace_probe_ops *ops, void *data);
1121 struct ftrace_probe_ops *ops);
1127 void ftrace_create_filter_files(struct ftrace_ops *ops,
1129 void ftrace_destroy_filter_files(struct ftrace_ops *ops);
1149 #define ftrace_create_filter_files(ops, parent) do { } while (0) argument
[all …]
trace_sched_switch.c
90 static void tracing_start_sched_switch(int ops) in tracing_start_sched_switch() argument
95 switch (ops) { in tracing_start_sched_switch()
110 static void tracing_stop_sched_switch(int ops) in tracing_stop_sched_switch() argument
114 switch (ops) { in tracing_stop_sched_switch()
trace_events_filter.c
43 static const char * ops[] = { OPS }; variable
1199 for (op = 0; ops[op]; op++) { in parse_pred()
1201 if (strncmp(str + i, ops[op], strlen(ops[op])) == 0) in parse_pred()
1205 if (!ops[op]) { in parse_pred()
1210 i += strlen(ops[op]); in parse_pred()
1902 struct ftrace_ops *ops; member
1928 static int ftrace_function_set_regexp(struct ftrace_ops *ops, int filter, in ftrace_function_set_regexp() argument
1934 ret = ftrace_set_filter(ops, re, len, reset); in ftrace_function_set_regexp()
1936 ret = ftrace_set_notrace(ops, re, len, reset); in ftrace_function_set_regexp()
1960 ret = ftrace_function_set_regexp(data->ops, filter, *reset, in __ftrace_function_set_filter()
[all …]
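
In trace_events_filter.c the name ops means something different: a table of operator strings that parse_pred() scans with strncmp(), stopping at a terminating NULL entry and advancing the input cursor by the length of whatever matched. A standalone version of that scan follows; the operator set here is arbitrary, and in such a table longer operators must precede operators that are their prefixes.

#include <stdio.h>
#include <string.h>

/* NULL-terminated operator table; "<=" comes before "<" so the longer
 * match wins. */
static const char *demo_ops[] = { "<=", ">=", "!=", "==", "<", ">", NULL };

/* Return the table index of the operator found at str[*pos], or -1,
 * advancing *pos past the matched text on success. */
static int demo_parse_op(const char *str, size_t *pos)
{
	int op;

	for (op = 0; demo_ops[op]; op++)
		if (strncmp(str + *pos, demo_ops[op], strlen(demo_ops[op])) == 0)
			break;

	if (!demo_ops[op])
		return -1;

	*pos += strlen(demo_ops[op]);
	return op;
}

int main(void)
{
	size_t pos = 5;                     /* cursor sits on the operator */
	int op = demo_parse_op("count>=10", &pos);

	printf("op index %d, cursor now %zu\n", op, pos);   /* prints 1 and 7 */
	return 0;
}
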
/kernel/livepatch/
patch.c
27 struct klp_ops *ops; in klp_find_ops() local
30 list_for_each_entry(ops, &klp_ops, node) { in klp_find_ops()
31 func = list_first_entry(&ops->func_stack, struct klp_func, in klp_find_ops()
34 return ops; in klp_find_ops()
45 struct klp_ops *ops; in klp_ftrace_handler() local
49 ops = container_of(fops, struct klp_ops, fops); in klp_ftrace_handler()
57 func = list_first_or_null_rcu(&ops->func_stack, struct klp_func, in klp_ftrace_handler()
104 if (&func->stack_node == &ops->func_stack) in klp_ftrace_handler()
137 struct klp_ops *ops; in klp_unpatch_func() local
144 ops = klp_find_ops(func->old_func); in klp_unpatch_func()
[all …]
/kernel/dma/
mapping.c
154 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_get_sgtable_attrs() local
156 if (dma_is_direct(ops)) in dma_get_sgtable_attrs()
159 if (!ops->get_sgtable) in dma_get_sgtable_attrs()
161 return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, attrs); in dma_get_sgtable_attrs()
234 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_can_mmap() local
236 if (dma_is_direct(ops)) { in dma_can_mmap()
242 return ops->mmap != NULL; in dma_can_mmap()
263 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_mmap_attrs() local
265 if (dma_is_direct(ops)) in dma_mmap_attrs()
268 if (!ops->mmap) in dma_mmap_attrs()
[all …]
coherent.c
354 rmem->ops = &rmem_dma_ops; in rmem_dma_setup()
362 const struct reserved_mem_ops *ops; in dma_init_reserved_memory() local
368 ops = dma_reserved_default_memory->ops; in dma_init_reserved_memory()
374 ret = ops->device_init(dma_reserved_default_memory, NULL); in dma_init_reserved_memory()
/kernel/irq/
msi.c
142 struct msi_domain_ops *ops = info->ops; in msi_domain_alloc() local
143 irq_hw_number_t hwirq = ops->get_hwirq(info, arg); in msi_domain_alloc()
156 ret = ops->msi_init(domain, info, virq + i, hwirq + i, arg); in msi_domain_alloc()
158 if (ops->msi_free) { in msi_domain_alloc()
160 ops->msi_free(domain, info, virq + i); in msi_domain_alloc()
176 if (info->ops->msi_free) { in msi_domain_free()
178 info->ops->msi_free(domain, info, virq + i); in msi_domain_free()
247 struct msi_domain_ops *ops = info->ops; in msi_domain_update_dom_ops() local
249 if (ops == NULL) { in msi_domain_update_dom_ops()
250 info->ops = &msi_domain_ops_default; in msi_domain_update_dom_ops()
[all …]
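
msi_domain_update_dom_ops() shows the defaulting side of the pattern: when the caller supplies no ops at all, the whole default table is substituted. The sketch below reproduces that substitution and also back-fills individual NULL slots with defaults; the per-slot part is a common completion of this idiom rather than something visible in the excerpt, and all names are invented.

#include <stddef.h>

struct demo_info;

struct demo_domain_ops {
	unsigned int (*get_hwirq)(struct demo_info *info);
	int          (*msi_init)(struct demo_info *info, unsigned int virq);
};

struct demo_info {
	struct demo_domain_ops *ops;        /* NULL means "use all defaults" */
};

static unsigned int demo_default_get_hwirq(struct demo_info *info)
{
	(void)info;
	return 0;
}

static int demo_default_msi_init(struct demo_info *info, unsigned int virq)
{
	(void)info;
	(void)virq;
	return 0;
}

static struct demo_domain_ops demo_ops_default = {
	.get_hwirq = demo_default_get_hwirq,
	.msi_init  = demo_default_msi_init,
};

/* Substitute the whole default table when none was given; otherwise
 * back-fill any slot the caller left NULL. */
static void demo_update_dom_ops(struct demo_info *info)
{
	struct demo_domain_ops *ops = info->ops;

	if (ops == NULL) {
		info->ops = &demo_ops_default;
		return;
	}
	if (ops->get_hwirq == NULL)
		ops->get_hwirq = demo_ops_default.get_hwirq;
	if (ops->msi_init == NULL)
		ops->msi_init = demo_ops_default.msi_init;
}
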
irqdomain.c
94 fwid->fwnode.ops = &irqchip_fwnode_ops; in __irq_domain_alloc_fwnode()
132 const struct irq_domain_ops *ops, in __irq_domain_add() argument
218 domain->ops = ops; in __irq_domain_add()
321 const struct irq_domain_ops *ops, in irq_domain_add_simple() argument
326 domain = __irq_domain_add(of_node_to_fwnode(of_node), size, size, 0, ops, host_data); in irq_domain_add_simple()
365 const struct irq_domain_ops *ops, in irq_domain_add_legacy() argument
371 first_hwirq + size, 0, ops, host_data); in irq_domain_add_legacy()
402 if (h->ops->select && fwspec->param_count) in irq_find_matching_fwspec()
403 rc = h->ops->select(h, fwspec, bus_token); in irq_find_matching_fwspec()
404 else if (h->ops->match) in irq_find_matching_fwspec()
[all …]
/kernel/time/
posix-clock.c
48 if (clk->ops.read) in posix_clock_read()
49 err = clk->ops.read(clk, fp->f_flags, buf, count); in posix_clock_read()
64 if (clk->ops.poll) in posix_clock_poll()
65 result = clk->ops.poll(clk, fp, wait); in posix_clock_poll()
81 if (clk->ops.ioctl) in posix_clock_ioctl()
82 err = clk->ops.ioctl(clk, cmd, arg); in posix_clock_ioctl()
99 if (clk->ops.ioctl) in posix_clock_compat_ioctl()
100 err = clk->ops.ioctl(clk, cmd, arg); in posix_clock_compat_ioctl()
120 if (clk->ops.open) in posix_clock_open()
121 err = clk->ops.open(clk, fp->f_mode); in posix_clock_open()
[all …]
/kernel/bpf/
offload.c
36 const struct bpf_prog_offload_ops *ops; member
135 ret = offload->offdev->ops->prepare(prog); in bpf_prog_offload_verifier_prep()
152 ret = offload->offdev->ops->insn_hook(env, insn_idx, in bpf_prog_offload_verify_insn()
167 if (offload->offdev->ops->finalize) in bpf_prog_offload_finalize()
168 ret = offload->offdev->ops->finalize(env); in bpf_prog_offload_finalize()
181 const struct bpf_prog_offload_ops *ops; in bpf_prog_offload_replace_insn() local
188 ops = offload->offdev->ops; in bpf_prog_offload_replace_insn()
189 if (!offload->opt_failed && ops->replace_insn) in bpf_prog_offload_replace_insn()
190 ret = ops->replace_insn(env, off, insn); in bpf_prog_offload_replace_insn()
205 if (!offload->opt_failed && offload->offdev->ops->remove_insns) in bpf_prog_offload_remove_insns()
[all …]
map_in_map.c
44 if (inner_map->ops == &array_map_ops) in bpf_map_meta_alloc()
61 inner_map_meta->ops = inner_map->ops; in bpf_map_meta_alloc()
62 if (inner_map->ops == &array_map_ops) { in bpf_map_meta_alloc()
syscall.c
102 const struct bpf_map_ops *ops; in find_and_alloc_map() local
110 ops = bpf_map_types[type]; in find_and_alloc_map()
111 if (!ops) in find_and_alloc_map()
114 if (ops->map_alloc_check) { in find_and_alloc_map()
115 err = ops->map_alloc_check(attr); in find_and_alloc_map()
120 ops = &bpf_map_offload_ops; in find_and_alloc_map()
121 map = ops->map_alloc(attr); in find_and_alloc_map()
124 map->ops = ops; in find_and_alloc_map()
310 map->ops->map_free(map); in bpf_map_free_deferred()
317 if (map->ops->map_release_uref) in bpf_map_put_uref()
[all …]
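
find_and_alloc_map() picks the ops table out of an array indexed by map type, runs the optional map_alloc_check() hook, allocates through ops->map_alloc(), and stores the table in map->ops so that every later operation (as in helpers.c just below) dispatches through it without consulting the type again. A condensed sketch of that table-then-dispatch flow; the table size, types, and error handling are invented.

#include <stddef.h>

struct demo_map;

struct demo_map_ops {
	int               (*map_alloc_check)(int value_size);   /* optional */
	struct demo_map * (*map_alloc)(int value_size);
	void *            (*map_lookup_elem)(struct demo_map *map, const void *key);
};

struct demo_map {
	const struct demo_map_ops *ops;
};

/* One slot per map type, filled in at registration time elsewhere;
 * unsupported types stay NULL. */
static const struct demo_map_ops *demo_map_types[16];

static struct demo_map *demo_find_and_alloc_map(unsigned int type, int value_size)
{
	const struct demo_map_ops *ops;
	struct demo_map *map;

	if (type >= 16 || !(ops = demo_map_types[type]))
		return NULL;                    /* the kernel version returns an error code here */

	if (ops->map_alloc_check && ops->map_alloc_check(value_size))
		return NULL;

	map = ops->map_alloc(value_size);
	if (map)
		map->ops = ops;                 /* later calls dispatch only via ops */
	return map;
}

/* After creation the type is irrelevant; only map->ops is consulted. */
static void *demo_map_lookup(struct demo_map *map, const void *key)
{
	return map->ops->map_lookup_elem(map, key);
}
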
helpers.c
29 return (unsigned long) map->ops->map_lookup_elem(map, key); in BPF_CALL_2()
45 return map->ops->map_update_elem(map, key, value, flags); in BPF_CALL_4()
62 return map->ops->map_delete_elem(map, key); in BPF_CALL_2()
76 return map->ops->map_push_elem(map, value, flags); in BPF_CALL_3()
91 return map->ops->map_pop_elem(map, value); in BPF_CALL_2()
104 return map->ops->map_peek_elem(map, value); in BPF_CALL_2()
/kernel/power/
hibernate.c
79 void hibernation_set_ops(const struct platform_hibernation_ops *ops) in hibernation_set_ops() argument
81 if (ops && !(ops->begin && ops->end && ops->pre_snapshot in hibernation_set_ops()
82 && ops->prepare && ops->finish && ops->enter && ops->pre_restore in hibernation_set_ops()
83 && ops->restore_cleanup && ops->leave)) { in hibernation_set_ops()
88 hibernation_ops = ops; in hibernation_set_ops()
89 if (ops) in hibernation_set_ops()
suspend.c
77 void s2idle_set_ops(const struct platform_s2idle_ops *ops) in s2idle_set_ops() argument
80 s2idle_ops = ops; in s2idle_set_ops()
203 void suspend_set_ops(const struct platform_suspend_ops *ops) in suspend_set_ops() argument
207 suspend_ops = ops; in suspend_set_ops()
/kernel/
params.c
137 !(params[i].ops->flags & KERNEL_PARAM_OPS_FL_NOARG)) in parse_one()
140 params[i].ops->set); in parse_one()
143 err = params[i].ops->set(val, &params[i]); in parse_one()
449 arr->elemsize, arr->ops->set, kp->level, in param_array_set()
465 ret = arr->ops->get(buffer + off, &p); in param_array_get()
479 if (arr->ops->free) in param_array_free()
481 arr->ops->free(arr->elem + arr->elemsize * i); in param_array_free()
544 if (!attribute->param->ops->get) in param_attr_show()
548 count = attribute->param->ops->get(buf, attribute->param); in param_attr_show()
561 if (!attribute->param->ops->set) in param_attr_store()
[all …]
/kernel/locking/
lockdep_internals.h
230 unsigned long ops = 0; in debug_class_ops_read() local
234 ops += per_cpu(lockdep_stats.lock_class_ops[idx], cpu); in debug_class_ops_read()
235 return ops; in debug_class_ops_read()
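
Here ops is just the running total in debug_class_ops_read(), which folds per-CPU counters into one value: each CPU increments only its own slot, so reads sum over all possible CPUs. A user-space approximation of that fold, with a fixed CPU count standing in for the kernel's per-CPU machinery:

#include <stdio.h>

#define DEMO_NR_CPUS 4

/* One counter slot per CPU; writers touch only their own slot. */
static unsigned long demo_class_ops[DEMO_NR_CPUS];

/* Readers fold all slots into a single total, like debug_class_ops_read(). */
static unsigned long demo_class_ops_read(void)
{
	unsigned long ops = 0;
	int cpu;

	for (cpu = 0; cpu < DEMO_NR_CPUS; cpu++)
		ops += demo_class_ops[cpu];
	return ops;
}

int main(void)
{
	demo_class_ops[0] = 3;
	demo_class_ops[2] = 7;
	printf("%lu\n", demo_class_ops_read());    /* prints 10 */
	return 0;
}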
