/kernel/dma/
  mapping.c
    110  const struct dma_map_ops *ops)  in dma_go_direct() (argument)
    112  if (likely(!ops))  in dma_go_direct()
    129  const struct dma_map_ops *ops)  in dma_alloc_direct() (argument)
    131  return dma_go_direct(dev, dev->coherent_dma_mask, ops);  in dma_alloc_direct()
    135  const struct dma_map_ops *ops)  in dma_map_direct() (argument)
    137  return dma_go_direct(dev, *dev->dma_mask, ops);  in dma_map_direct()
    144  const struct dma_map_ops *ops = get_dma_ops(dev);  in dma_map_page_attrs() (local)
    152  if (dma_map_direct(dev, ops))  in dma_map_page_attrs()
    155  addr = ops->map_page(dev, page, offset, size, dir, attrs);  in dma_map_page_attrs()
    165  const struct dma_map_ops *ops = get_dma_ops(dev);  in dma_unmap_page_attrs() (local)
    [all …]
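The mapping.c hits show the dispatch shape used throughout this directory: a device either has no dma_map_ops attached, in which case the direct-mapping path is taken, or the call is forwarded through the table's ->map_page hook. The sketch below is a minimal userspace illustration of that shape, not the kernel implementation; apart from the names visible in the hits (get_dma_ops, dma_map_direct, dma_map_page_attrs, map_page), all types and values are invented for the example.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef unsigned long dma_addr_t;

/* Reduced stand-in for the kernel's struct dma_map_ops. */
struct dma_map_ops {
	dma_addr_t (*map_page)(void *dev, void *page, size_t offset, size_t size);
};

struct device {
	const struct dma_map_ops *dma_ops;	/* NULL selects the direct path */
};

static const struct dma_map_ops *get_dma_ops(const struct device *dev)
{
	return dev->dma_ops;
}

/* Mirrors dma_map_direct()/dma_go_direct(): no ops table means direct mapping. */
static bool dma_map_direct(const struct device *dev, const struct dma_map_ops *ops)
{
	(void)dev;
	return ops == NULL;
}

static dma_addr_t dma_map_page_attrs(struct device *dev, void *page,
				     size_t offset, size_t size)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_map_direct(dev, ops))
		return (dma_addr_t)page + offset;	/* stand-in for the direct mapping */

	return ops->map_page(dev, page, offset, size);	/* dispatch through the ops table */
}

/* A trivial "IOMMU" backend so both branches can be exercised. */
static dma_addr_t dummy_map_page(void *dev, void *page, size_t offset, size_t size)
{
	(void)dev; (void)page; (void)size;
	return 0x1000 + offset;	/* pretend a bus address was allocated */
}

int main(void)
{
	char buf[64];
	const struct dma_map_ops iommu_ops = { .map_page = dummy_map_page };
	struct device direct_dev = { .dma_ops = NULL };
	struct device iommu_dev  = { .dma_ops = &iommu_ops };

	printf("direct: %#lx\n", dma_map_page_attrs(&direct_dev, buf, 8, 16));
	printf("ops:    %#lx\n", dma_map_page_attrs(&iommu_dev, buf, 8, 16));
	return 0;
}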
  ops_helpers.c
    64  const struct dma_map_ops *ops = get_dma_ops(dev);  in dma_common_alloc_pages() (local)
    73  *dma_handle = ops->map_page(dev, page, 0, size, dir,  in dma_common_alloc_pages()
    87  const struct dma_map_ops *ops = get_dma_ops(dev);  in dma_common_free_pages() (local)
    89  if (ops->unmap_page)  in dma_common_free_pages()
    90  ops->unmap_page(dev, dma_handle, size, dir,  in dma_common_free_pages()
  coherent.c
    380  rmem->ops = &rmem_dma_ops;  in rmem_dma_setup()
    388  const struct reserved_mem_ops *ops;  in dma_init_reserved_memory() (local)
    394  ops = dma_reserved_default_memory->ops;  in dma_init_reserved_memory()
    400  ret = ops->device_init(dma_reserved_default_memory, NULL);  in dma_init_reserved_memory()
/kernel/trace/
  trace_functions.c
    39  struct ftrace_ops *ops;  in ftrace_allocate_ftrace_ops() (local)
    45  ops = kzalloc(sizeof(*ops), GFP_KERNEL);  in ftrace_allocate_ftrace_ops()
    46  if (!ops)  in ftrace_allocate_ftrace_ops()
    50  ops->func = function_trace_call;  in ftrace_allocate_ftrace_ops()
    51  ops->flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_PID;  in ftrace_allocate_ftrace_ops()
    53  tr->ops = ops;  in ftrace_allocate_ftrace_ops()
    54  ops->private = tr;  in ftrace_allocate_ftrace_ops()
    61  kfree(tr->ops);  in ftrace_free_ftrace_ops()
    62  tr->ops = NULL;  in ftrace_free_ftrace_ops()
    75  if (!tr->ops)  in ftrace_create_function_files()
    [all …]
  ftrace.c
    96  static bool ftrace_pids_enabled(struct ftrace_ops *ops)  in ftrace_pids_enabled() (argument)
    100  if (!(ops->flags & FTRACE_OPS_FL_PID) || !ops->private)  in ftrace_pids_enabled()
    103  tr = ops->private;  in ftrace_pids_enabled()
    108  static void ftrace_update_trampoline(struct ftrace_ops *ops);
    131  static inline void ftrace_ops_init(struct ftrace_ops *ops)  in ftrace_ops_init() (argument)
    134  if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {  in ftrace_ops_init()
    135  mutex_init(&ops->local_hash.regex_lock);  in ftrace_ops_init()
    136  ops->func_hash = &ops->local_hash;  in ftrace_ops_init()
    137  ops->flags |= FTRACE_OPS_FL_INITIALIZED;  in ftrace_ops_init()
    166  static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)  in ftrace_ops_get_list_func() (argument)
    [all …]
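The ftrace_ops_init() hits above show a one-time, flag-guarded initialization: the same ftrace_ops may be handed in repeatedly, and the INITIALIZED flag ensures the lock and hash pointer are only set up once. A minimal userspace sketch of that pattern follows; the MY_* names and the pthread mutex are substitutions for the kernel's FTRACE_OPS_FL_* flags and struct mutex.

#include <pthread.h>
#include <stddef.h>

enum {
	MY_OPS_FL_INITIALIZED = 1 << 0,	/* stand-in for FTRACE_OPS_FL_INITIALIZED */
};

struct my_hash {
	pthread_mutex_t regex_lock;
};

struct my_ops {
	unsigned int flags;
	struct my_hash local_hash;
	struct my_hash *func_hash;
};

/* Idempotent lazy init in the style of ftrace_ops_init(). */
void my_ops_init(struct my_ops *ops)
{
	if (!(ops->flags & MY_OPS_FL_INITIALIZED)) {
		pthread_mutex_init(&ops->local_hash.regex_lock, NULL);
		ops->func_hash = &ops->local_hash;
		ops->flags |= MY_OPS_FL_INITIALIZED;
	}
}

int main(void)
{
	struct my_ops ops = { 0 };

	my_ops_init(&ops);
	my_ops_init(&ops);	/* second call is a no-op thanks to the flag */
	return 0;
}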
  ftrace_internal.h
    12  int ftrace_startup(struct ftrace_ops *ops, int command);
    13  int ftrace_shutdown(struct ftrace_ops *ops, int command);
    14  int ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs);
    18  int __register_ftrace_function(struct ftrace_ops *ops);
    19  int __unregister_ftrace_function(struct ftrace_ops *ops);
    21  # define ftrace_startup(ops, command) \  (argument)
    23  int ___ret = __register_ftrace_function(ops); \
    25  (ops)->flags |= FTRACE_OPS_FL_ENABLED; \
    28  # define ftrace_shutdown(ops, command) \  (argument)
    30  int ___ret = __unregister_ftrace_function(ops); \
    [all …]
  trace_dynevent.c
    21  int dyn_event_register(struct dyn_event_operations *ops)  in dyn_event_register() (argument)
    23  if (!ops || !ops->create || !ops->show || !ops->is_busy ||  in dyn_event_register()
    24  !ops->free || !ops->match)  in dyn_event_register()
    27  INIT_LIST_HEAD(&ops->list);  in dyn_event_register()
    29  list_add_tail(&ops->list, &dyn_event_ops_list);  in dyn_event_register()
    63  if (type && type != pos->ops)  in dyn_event_release()
    65  if (!pos->ops->match(system, event,  in dyn_event_release()
    69  ret = pos->ops->free(pos);  in dyn_event_release()
    81  struct dyn_event_operations *ops;  in create_dyn_event() (local)
    88  list_for_each_entry(ops, &dyn_event_ops_list, list) {  in create_dyn_event()
    [all …]
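dyn_event_register() illustrates the defensive side of the ops pattern: an operations structure is rejected unless every required callback is filled in, and only then is it linked onto the global list that create_dyn_event() later walks. The sketch below reproduces that registration step with a plain singly linked list instead of the kernel's list_head; the trimmed struct layout and the callback signatures are invented for the example.

#include <errno.h>
#include <stdbool.h>

/* Reduced stand-in for struct dyn_event_operations: every hook is mandatory. */
struct dyn_event_operations {
	int  (*create)(const char *raw_command);
	int  (*show)(void *seq, void *ev);
	bool (*is_busy)(void *ev);
	int  (*free)(void *ev);
	bool (*match)(const char *system, const char *event, void *ev);
	struct dyn_event_operations *next;	/* simple registration list */
};

static struct dyn_event_operations *dyn_event_ops_list;

/*
 * Registration in the style of dyn_event_register(): refuse an ops table
 * that is missing any required callback, then add it to the global list
 * that the command parser iterates over.
 */
int dyn_event_register(struct dyn_event_operations *ops)
{
	if (!ops || !ops->create || !ops->show || !ops->is_busy ||
	    !ops->free || !ops->match)
		return -EINVAL;

	ops->next = dyn_event_ops_list;
	dyn_event_ops_list = ops;
	return 0;
}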
  trace_events_trigger.c
    70  data->ops->func(data, rec, event);  in event_triggers_call()
    80  data->ops->func(data, rec, event);  in event_triggers_call()
    108  data->ops->func(data, NULL, NULL);  in event_triggers_post_call()
    164  data->ops->print(m, data->ops, data);  in trigger_show()
    418  int event_trigger_init(struct event_trigger_ops *ops,  in event_trigger_init() (argument)
    436  event_trigger_free(struct event_trigger_ops *ops,  in event_trigger_free() (argument)
    490  if (data->ops->free)  in clear_event_triggers()
    491  data->ops->free(data->ops, data);  in clear_event_triggers()
    541  static int register_trigger(char *glob, struct event_trigger_ops *ops,  in register_trigger() (argument)
    557  if (data->ops->init) {  in register_trigger()
    [all …]
  trace_dynevent.h
    51  int dyn_event_register(struct dyn_event_operations *ops);
    63  struct dyn_event_operations *ops;  (member)
    69  int dyn_event_init(struct dyn_event *ev, struct dyn_event_operations *ops)  in dyn_event_init() (argument)
    71  if (!ev || !ops)  in dyn_event_init()
    75  ev->ops = ops;  in dyn_event_init()
    83  if (!ev || !ev->ops)  in dyn_event_add()
  trace_event_perf.c
    435  struct ftrace_ops *ops, struct pt_regs *pt_regs)  in perf_ftrace_function_call() (argument)
    443  if ((unsigned long)ops->private != smp_processor_id())  in perf_ftrace_function_call()
    446  event = container_of(ops, struct perf_event, ftrace_ops);  in perf_ftrace_function_call()
    478  struct ftrace_ops *ops = &event->ftrace_ops;  in perf_ftrace_function_register() (local)
    480  ops->flags = FTRACE_OPS_FL_RCU;  in perf_ftrace_function_register()
    481  ops->func = perf_ftrace_function_call;  in perf_ftrace_function_register()
    482  ops->private = (void *)(unsigned long)nr_cpu_ids;  in perf_ftrace_function_register()
    484  return register_ftrace_function(ops);  in perf_ftrace_function_register()
    489  struct ftrace_ops *ops = &event->ftrace_ops;  in perf_ftrace_function_unregister() (local)
    490  int ret = unregister_ftrace_function(ops);  in perf_ftrace_function_unregister()
    [all …]
  trace.h
    362  struct ftrace_ops *ops;  (member)
    1183  struct ftrace_probe_ops *ops,
    1185  int (*init)(struct ftrace_probe_ops *ops,
    1189  void (*free)(struct ftrace_probe_ops *ops,
    1194  struct ftrace_probe_ops *ops,
    1213  struct ftrace_probe_ops *ops, void *data);
    1216  struct ftrace_probe_ops *ops);
    1222  void ftrace_create_filter_files(struct ftrace_ops *ops,
    1224  void ftrace_destroy_filter_files(struct ftrace_ops *ops);
    1226  extern int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
    [all …]
  tracing_map.c
    371  if (elt->map->ops && elt->map->ops->elt_clear)  in tracing_map_elt_clear()
    372  elt->map->ops->elt_clear(elt);  in tracing_map_elt_clear()
    394  if (elt->map->ops && elt->map->ops->elt_free)  in tracing_map_elt_free()
    395  elt->map->ops->elt_free(elt);  in tracing_map_elt_free()
    440  if (map->ops && map->ops->elt_alloc) {  in tracing_map_elt_alloc()
    441  err = map->ops->elt_alloc(elt);  in tracing_map_elt_alloc()
    460  if (map->ops && map->ops->elt_init)  in get_free_elt()
    461  map->ops->elt_init(elt);  in get_free_elt()
    770  const struct tracing_map_ops *ops,  in tracing_map_create() (argument)
    789  map->ops = ops;  in tracing_map_create()
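Unlike dyn_event_register() above, tracing_map treats every hook as optional: both the ops pointer and each callback may be NULL, so call sites test before dispatching and fall back to doing nothing. Below is a minimal sketch of that optional-hook style; the wrapper function name and the trimmed struct layouts are invented for illustration.

struct tracing_map_elt;

/* Reduced stand-in for struct tracing_map_ops: every hook is optional. */
struct tracing_map_ops {
	int  (*elt_alloc)(struct tracing_map_elt *elt);
	void (*elt_free)(struct tracing_map_elt *elt);
	void (*elt_clear)(struct tracing_map_elt *elt);
	void (*elt_init)(struct tracing_map_elt *elt);
};

struct tracing_map {
	const struct tracing_map_ops *ops;	/* the whole table may be NULL */
};

struct tracing_map_elt {
	struct tracing_map *map;
};

/* Optional-hook dispatch as seen in tracing_map_elt_alloc()/get_free_elt(). */
int tracing_map_elt_alloc_hook(struct tracing_map_elt *elt)
{
	struct tracing_map *map = elt->map;

	if (map->ops && map->ops->elt_alloc)
		return map->ops->elt_alloc(elt);

	return 0;	/* no hook registered: nothing to do */
}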
  trace_sched_switch.c
    90  static void tracing_start_sched_switch(int ops)  in tracing_start_sched_switch() (argument)
    97  switch (ops) {  in tracing_start_sched_switch()
    112  static void tracing_stop_sched_switch(int ops)  in tracing_stop_sched_switch() (argument)
    116  switch (ops) {  in tracing_stop_sched_switch()
/kernel/livepatch/
  patch.c
    27  struct klp_ops *ops;  in klp_find_ops() (local)
    30  list_for_each_entry(ops, &klp_ops, node) {  in klp_find_ops()
    31  func = list_first_entry(&ops->func_stack, struct klp_func,  in klp_find_ops()
    34  return ops;  in klp_find_ops()
    45  struct klp_ops *ops;  in klp_ftrace_handler() (local)
    49  ops = container_of(fops, struct klp_ops, fops);  in klp_ftrace_handler()
    57  func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,  in klp_ftrace_handler()
    104  if (&func->stack_node == &ops->func_stack)  in klp_ftrace_handler()
    137  struct klp_ops *ops;  in klp_unpatch_func() (local)
    144  ops = klp_find_ops(func->old_func);  in klp_unpatch_func()
    [all …]
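klp_ftrace_handler() is handed only the embedded ftrace_ops it was registered with and uses container_of() to get back to the surrounding klp_ops. The standalone sketch below shows that embed-and-recover idiom; the container_of() definition matches the kernel macro for this use, while the reduced struct contents and the klp_handler() name are made up.

#include <stddef.h>
#include <stdio.h>

/* Minimal container_of(), equivalent to the kernel macro for this use. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ftrace_ops {
	void (*func)(void);	/* reduced: the real struct carries flags, hashes, ... */
};

/* As in livepatch: the ftrace_ops is embedded in the larger klp_ops object. */
struct klp_ops {
	int patched_func_count;	/* illustrative extra state */
	struct ftrace_ops fops;
};

/*
 * The ftrace core only knows about the embedded fops member it was
 * registered with; container_of() recovers the surrounding klp_ops,
 * exactly the step shown in the klp_ftrace_handler() hit above.
 */
void klp_handler(struct ftrace_ops *fops)
{
	struct klp_ops *ops = container_of(fops, struct klp_ops, fops);

	printf("patched funcs: %d\n", ops->patched_func_count);
}

int main(void)
{
	struct klp_ops ops = { .patched_func_count = 3 };

	klp_handler(&ops.fops);	/* caller only holds a pointer to the member */
	return 0;
}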
/kernel/irq/
  msi.c
    142  struct msi_domain_ops *ops = info->ops;  in msi_domain_alloc() (local)
    143  irq_hw_number_t hwirq = ops->get_hwirq(info, arg);  in msi_domain_alloc()
    156  ret = ops->msi_init(domain, info, virq + i, hwirq + i, arg);  in msi_domain_alloc()
    158  if (ops->msi_free) {  in msi_domain_alloc()
    160  ops->msi_free(domain, info, virq + i);  in msi_domain_alloc()
    176  if (info->ops->msi_free) {  in msi_domain_free()
    178  info->ops->msi_free(domain, info, virq + i);  in msi_domain_free()
    243  struct msi_domain_ops *ops = info->ops;  in msi_domain_update_dom_ops() (local)
    245  if (ops == NULL) {  in msi_domain_update_dom_ops()
    246  info->ops = &msi_domain_ops_default;  in msi_domain_update_dom_ops()
    [all …]
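msi_domain_update_dom_ops() shows a third variation: when the caller supplies no ops at all, a default table is substituted so the allocation path can call hooks such as get_hwirq() and msi_init() unconditionally. A cut-down sketch of that default-substitution step follows; the two-hook table and the default implementations are invented, and the real function also patches individual NULL callbacks, which the hits above only hint at.

#include <stddef.h>

struct msi_domain_info;

/* Reduced msi_domain_ops: only two hooks, to keep the sketch short. */
struct msi_domain_ops {
	unsigned long (*get_hwirq)(struct msi_domain_info *info);
	int           (*msi_init)(struct msi_domain_info *info, unsigned int virq);
};

struct msi_domain_info {
	struct msi_domain_ops *ops;
};

static unsigned long default_get_hwirq(struct msi_domain_info *info)
{
	(void)info;
	return 0;
}

static int default_msi_init(struct msi_domain_info *info, unsigned int virq)
{
	(void)info; (void)virq;
	return 0;
}

static struct msi_domain_ops msi_domain_ops_default = {
	.get_hwirq = default_get_hwirq,
	.msi_init  = default_msi_init,
};

/*
 * Default-substitution pattern from msi_domain_update_dom_ops(): callers may
 * leave the whole ops pointer NULL and get the default table; otherwise the
 * table they provided is used as-is.
 */
void msi_domain_update_dom_ops(struct msi_domain_info *info)
{
	if (info->ops == NULL)
		info->ops = &msi_domain_ops_default;
}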
  irqdomain.c
    124  const struct irq_domain_ops *ops,  in __irq_domain_create() (argument)
    196  domain->ops = ops;  in __irq_domain_create()
    231  const struct irq_domain_ops *ops,  in __irq_domain_add() (argument)
    237  ops, host_data);  in __irq_domain_add()
    333  const struct irq_domain_ops *ops,  in irq_domain_add_simple() (argument)
    338  domain = __irq_domain_add(of_node_to_fwnode(of_node), size, size, 0, ops, host_data);  in irq_domain_add_simple()
    377  const struct irq_domain_ops *ops,  in irq_domain_add_legacy() (argument)
    383  first_hwirq + size, 0, ops, host_data);  in irq_domain_add_legacy()
    414  if (h->ops->select && fwspec->param_count)  in irq_find_matching_fwspec()
    415  rc = h->ops->select(h, fwspec, bus_token);  in irq_find_matching_fwspec()
    [all …]
/kernel/time/
  posix-clock.c
    48  if (clk->ops.read)  in posix_clock_read()
    49  err = clk->ops.read(clk, fp->f_flags, buf, count);  in posix_clock_read()
    64  if (clk->ops.poll)  in posix_clock_poll()
    65  result = clk->ops.poll(clk, fp, wait);  in posix_clock_poll()
    81  if (clk->ops.ioctl)  in posix_clock_ioctl()
    82  err = clk->ops.ioctl(clk, cmd, arg);  in posix_clock_ioctl()
    99  if (clk->ops.ioctl)  in posix_clock_compat_ioctl()
    100  err = clk->ops.ioctl(clk, cmd, arg);  in posix_clock_compat_ioctl()
    120  if (clk->ops.open)  in posix_clock_open()
    121  err = clk->ops.open(clk, fp->f_mode);  in posix_clock_open()
    [all …]
/kernel/bpf/
  offload.c
    36  const struct bpf_prog_offload_ops *ops;  (member)
    135  ret = offload->offdev->ops->prepare(prog);  in bpf_prog_offload_verifier_prep()
    152  ret = offload->offdev->ops->insn_hook(env, insn_idx,  in bpf_prog_offload_verify_insn()
    167  if (offload->offdev->ops->finalize)  in bpf_prog_offload_finalize()
    168  ret = offload->offdev->ops->finalize(env);  in bpf_prog_offload_finalize()
    181  const struct bpf_prog_offload_ops *ops;  in bpf_prog_offload_replace_insn() (local)
    188  ops = offload->offdev->ops;  in bpf_prog_offload_replace_insn()
    189  if (!offload->opt_failed && ops->replace_insn)  in bpf_prog_offload_replace_insn()
    190  ret = ops->replace_insn(env, off, insn);  in bpf_prog_offload_replace_insn()
    205  if (!offload->opt_failed && offload->offdev->ops->remove_insns)  in bpf_prog_offload_remove_insns()
    [all …]
  map_in_map.c
    26  if (!inner_map->ops->map_meta_equal) {  in bpf_map_meta_alloc()
    38  if (inner_map->ops == &array_map_ops)  in bpf_map_meta_alloc()
    55  inner_map_meta->ops = inner_map->ops;  in bpf_map_meta_alloc()
    56  if (inner_map->ops == &array_map_ops) {  in bpf_map_meta_alloc()
    94  if (inner_map_meta->ops->map_meta_equal(inner_map_meta, inner_map))  in bpf_map_fd_get_ptr()
  syscall.c
    106  const struct bpf_map_ops *ops;  in find_and_alloc_map() (local)
    114  ops = bpf_map_types[type];  in find_and_alloc_map()
    115  if (!ops)  in find_and_alloc_map()
    118  if (ops->map_alloc_check) {  in find_and_alloc_map()
    119  err = ops->map_alloc_check(attr);  in find_and_alloc_map()
    124  ops = &bpf_map_offload_ops;  in find_and_alloc_map()
    125  map = ops->map_alloc(attr);  in find_and_alloc_map()
    128  map->ops = ops;  in find_and_alloc_map()
    182  return map->ops->map_update_elem(map, key, value, flags);  in bpf_map_update_value()
    216  err = map->ops->map_push_elem(map, value, flags);  in bpf_map_update_value()
    [all …]
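find_and_alloc_map() ties the whole ops pattern together: the table is looked up from an array indexed by map type, an optional pre-check is run, the constructor builds the object, and the chosen ops pointer is stored in the object so every later call (update, push, and the helpers.c wrappers below) dispatches through map->ops. The sketch below keeps only that shape; the attribute struct, the table size, and the NULL-based error handling are simplifications (the kernel returns ERR_PTR-encoded errors).

#include <stddef.h>

/* Illustrative stand-ins; the real types live in the BPF headers. */
struct map_attr {
	unsigned int map_type;
	unsigned int max_entries;
};

struct bpf_map;

struct bpf_map_ops {
	int             (*map_alloc_check)(const struct map_attr *attr);	/* optional */
	struct bpf_map *(*map_alloc)(const struct map_attr *attr);		/* required */
};

struct bpf_map {
	const struct bpf_map_ops *ops;	/* remembered so later calls can dispatch */
};

#define MAP_TYPE_MAX 8
static const struct bpf_map_ops *bpf_map_types[MAP_TYPE_MAX];

/*
 * Shape of find_and_alloc_map(): look the ops table up by map type, run its
 * optional sanity check, construct the map through map_alloc(), and stash
 * the ops pointer in the new object.
 */
struct bpf_map *find_and_alloc_map(const struct map_attr *attr)
{
	const struct bpf_map_ops *ops;
	struct bpf_map *map;
	int err;

	if (attr->map_type >= MAP_TYPE_MAX)
		return NULL;

	ops = bpf_map_types[attr->map_type];
	if (!ops)
		return NULL;

	if (ops->map_alloc_check) {
		err = ops->map_alloc_check(attr);
		if (err)
			return NULL;
	}

	map = ops->map_alloc(attr);
	if (!map)
		return NULL;

	map->ops = ops;
	return map;
}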
  helpers.c
    33  return (unsigned long) map->ops->map_lookup_elem(map, key);  in BPF_CALL_2()
    49  return map->ops->map_update_elem(map, key, value, flags);  in BPF_CALL_4()
    66  return map->ops->map_delete_elem(map, key);  in BPF_CALL_2()
    80  return map->ops->map_push_elem(map, value, flags);  in BPF_CALL_3()
    95  return map->ops->map_pop_elem(map, value);  in BPF_CALL_2()
    108  return map->ops->map_peek_elem(map, value);  in BPF_CALL_2()
  bpf_local_storage.c
    28  if (!map->ops->map_local_storage_charge)  in mem_charge()
    31  return map->ops->map_local_storage_charge(smap, owner, size);  in mem_charge()
    39  if (map->ops->map_local_storage_uncharge)  in mem_uncharge()
    40  map->ops->map_local_storage_uncharge(smap, owner, size);  in mem_uncharge()
    48  return map->ops->map_owner_storage_ptr(owner);  in owner_storage()
/kernel/power/
  hibernate.c
    91  void hibernation_set_ops(const struct platform_hibernation_ops *ops)  in hibernation_set_ops() (argument)
    93  if (ops && !(ops->begin && ops->end && ops->pre_snapshot  in hibernation_set_ops()
    94  && ops->prepare && ops->finish && ops->enter && ops->pre_restore  in hibernation_set_ops()
    95  && ops->restore_cleanup && ops->leave)) {  in hibernation_set_ops()
    100  hibernation_ops = ops;  in hibernation_set_ops()
    101  if (ops)  in hibernation_set_ops()
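hibernation_set_ops() takes the all-or-nothing approach: NULL clears the platform ops, but a non-NULL table is only accepted when every callback is present, so the suspend/resume paths never need per-hook NULL checks. The short sketch below reproduces that check with a three-hook table standing in for the kernel's nine callbacks; the error message is illustrative.

#include <stdio.h>

/* Reduced platform_hibernation_ops: three of the real nine hooks. */
struct platform_hibernation_ops {
	int  (*begin)(void);
	void (*end)(void);
	int  (*enter)(void);
};

static const struct platform_hibernation_ops *hibernation_ops;

/*
 * All-or-nothing validation in the style of hibernation_set_ops(): NULL
 * clears the ops, anything else must be a fully populated table.
 */
void hibernation_set_ops(const struct platform_hibernation_ops *ops)
{
	if (ops && !(ops->begin && ops->end && ops->enter)) {
		fprintf(stderr, "hibernate: incomplete ops table rejected\n");
		return;
	}
	hibernation_ops = ops;
}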
/kernel/
  params.c
    137  !(params[i].ops->flags & KERNEL_PARAM_OPS_FL_NOARG))  in parse_one()
    140  params[i].ops->set);  in parse_one()
    143  err = params[i].ops->set(val, &params[i]);  in parse_one()
    450  arr->elemsize, arr->ops->set, kp->level,  in param_array_set()
    466  ret = arr->ops->get(buffer + off, &p);  in param_array_get()
    480  if (arr->ops->free)  in param_array_free()
    482  arr->ops->free(arr->elem + arr->elemsize * i);  in param_array_free()
    545  if (!attribute->param->ops->get)  in param_attr_show()
    549  count = attribute->param->ops->get(buf, attribute->param);  in param_attr_show()
    562  if (!attribute->param->ops->set)  in param_attr_store()
    [all …]
/kernel/locking/
  lockdep_internals.h
    247  unsigned long ops = 0;  in debug_class_ops_read() (local)
    251  ops += per_cpu(lockdep_stats.lock_class_ops[idx], cpu);  in debug_class_ops_read()
    252  return ops;  in debug_class_ops_read()