
Searched refs:data (Results 1 – 25 of 94) sorted by relevance


/kernel/power/
user.c
48 struct snapshot_data *data; in snapshot_open() local
69 data = &snapshot_state; in snapshot_open()
70 filp->private_data = data; in snapshot_open()
71 memset(&data->handle, 0, sizeof(struct snapshot_handle)); in snapshot_open()
74 data->swap = swsusp_resume_device ? in snapshot_open()
76 data->mode = O_RDONLY; in snapshot_open()
87 data->swap = -1; in snapshot_open()
88 data->mode = O_WRONLY; in snapshot_open()
97 data->frozen = 0; in snapshot_open()
98 data->ready = 0; in snapshot_open()
[all …]
swap.c
499 static int crc32_threadfn(void *data) in crc32_threadfn() argument
501 struct crc_data *d = data; in crc32_threadfn()
543 static int lzo_compress_threadfn(void *data) in lzo_compress_threadfn() argument
545 struct cmp_data *d = data; in lzo_compress_threadfn()
588 struct cmp_data *data = NULL; in save_image_lzo() local
605 data = vmalloc(sizeof(*data) * nr_threads); in save_image_lzo()
606 if (!data) { in save_image_lzo()
612 memset(&data[thr], 0, offsetof(struct cmp_data, go)); in save_image_lzo()
626 init_waitqueue_head(&data[thr].go); in save_image_lzo()
627 init_waitqueue_head(&data[thr].done); in save_image_lzo()
[all …]
/kernel/
sysctl.c
280 .data = &sysctl_sched_child_runs_first,
288 .data = &sysctl_sched_min_granularity,
297 .data = &sysctl_sched_latency,
306 .data = &sysctl_sched_wakeup_granularity,
316 .data = &sysctl_sched_tunable_scaling,
325 .data = &sysctl_sched_migration_cost,
332 .data = &sysctl_sched_nr_migrate,
339 .data = &sysctl_sched_time_avg,
346 .data = &sysctl_sched_shares_window,
353 .data = &sysctl_timer_migration,
[all …]
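
The sysctl.c rows above all point a ctl_table entry's .data field at the kernel variable that backs a file under /proc/sys; proc_dointvec then reads and writes that variable through the table entry. A minimal sketch of the same pattern, assuming a standalone module; demo_tunable and the "kernel" path are invented for illustration, not taken from the results:

#include <linux/module.h>
#include <linux/sysctl.h>

static int demo_tunable = 42;

static struct ctl_table demo_table[] = {
	{
		.procname	= "demo_tunable",
		.data		= &demo_tunable,	/* backing variable */
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }	/* empty sentinel entry terminates the table */
};

static struct ctl_table_header *demo_header;

static int __init demo_sysctl_init(void)
{
	/* creates /proc/sys/kernel/demo_tunable backed by demo_tunable */
	demo_header = register_sysctl("kernel", demo_table);
	return demo_header ? 0 : -ENOMEM;
}

static void __exit demo_sysctl_exit(void)
{
	unregister_sysctl_table(demo_header);
}

module_init(demo_sysctl_init);
module_exit(demo_sysctl_exit);
MODULE_LICENSE("GPL");
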
ptrace.c
462 static int ptrace_detach(struct task_struct *child, unsigned int data) in ptrace_detach() argument
466 if (!valid_signal(data)) in ptrace_detach()
479 child->exit_code = data; in ptrace_detach()
575 static int ptrace_setoptions(struct task_struct *child, unsigned long data) in ptrace_setoptions() argument
579 if (data & ~(unsigned long)PTRACE_O_MASK) in ptrace_setoptions()
585 flags |= (data << PT_OPT_FLAG_SHIFT); in ptrace_setoptions()
625 unsigned long data) in ptrace_peek_siginfo() argument
666 compat_siginfo_t __user *uinfo = compat_ptr(data); in ptrace_peek_siginfo()
677 siginfo_t __user *uinfo = (siginfo_t __user *) data; in ptrace_peek_siginfo()
686 data += sizeof(siginfo_t); in ptrace_peek_siginfo()
[all …]
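
In ptrace.c the same "data" argument is reused per request: ptrace_setoptions() treats it as PTRACE_O_* option bits checked against PTRACE_O_MASK, ptrace_detach() treats it as a signal number passed through valid_signal(), and ptrace_peek_siginfo() treats it as a user-space buffer pointer. A hedged userspace sketch of the first two uses; the child pid, demo_trace name, and error handling are illustrative only:

#include <signal.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

/* Attach to a (hypothetical) child, set an option, then detach with a signal. */
int demo_trace(pid_t pid)
{
	if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) == -1)
		return -1;
	waitpid(pid, NULL, 0);			/* wait for the attach stop */

	/* data = option bits, validated by ptrace_setoptions() in the kernel */
	ptrace(PTRACE_SETOPTIONS, pid, NULL, (void *)(long)PTRACE_O_TRACESYSGOOD);

	/* data = signal to deliver on detach, checked by valid_signal() */
	return ptrace(PTRACE_DETACH, pid, NULL, (void *)(long)SIGCONT);
}
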
kthread.c
30 int (*threadfn)(void *data);
31 void *data; member
44 void *data; member
136 return to_kthread(task)->data; in kthread_data()
151 void *data = NULL; in probe_kthread_data() local
153 probe_kernel_read(&data, &kthread->data, sizeof(data)); in probe_kthread_data()
154 return data; in probe_kthread_data()
179 int (*threadfn)(void *data) = create->threadfn; in kthread()
180 void *data = create->data; in kthread() local
185 self.data = data; in kthread()
[all …]
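
kthread.c stores the caller's threadfn and data pair in struct kthread_create_info, hands data to the new thread in kthread(), and gives it back later via kthread_data(). A minimal sketch of that contract, assuming a standalone module; demo_ctx, demo_thread and the 1000 ms interval are invented:

#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/err.h>

struct demo_ctx {
	int interval_ms;
};

static struct demo_ctx demo_cfg = { .interval_ms = 1000 };
static struct task_struct *demo_task;

static int demo_thread(void *data)	/* gets the pointer passed to kthread_run() */
{
	struct demo_ctx *c = data;

	while (!kthread_should_stop()) {
		pr_info("demo: tick\n");
		msleep_interruptible(c->interval_ms);
	}
	return 0;
}

static int __init demo_init(void)
{
	demo_task = kthread_run(demo_thread, &demo_cfg, "demo_thread");
	return IS_ERR(demo_task) ? PTR_ERR(demo_task) : 0;
}

static void __exit demo_exit(void)
{
	kthread_stop(demo_task);	/* makes kthread_should_stop() return true */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
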
tracepoint.c
110 void *probe, void *data) in tracepoint_entry_add_probe() argument
124 old[nr_probes].data == data) in tracepoint_entry_add_probe()
134 new[nr_probes].data = data; in tracepoint_entry_add_probe()
144 void *probe, void *data) in tracepoint_entry_remove_probe() argument
159 old[nr_probes].data == data) in tracepoint_entry_remove_probe()
182 if (old[i].func != probe || old[i].data != data) in tracepoint_entry_remove_probe()
355 tracepoint_add_probe(const char *name, void *probe, void *data) in tracepoint_add_probe() argument
366 old = tracepoint_entry_add_probe(entry, probe, data); in tracepoint_add_probe()
380 int tracepoint_probe_register(const char *name, void *probe, void *data) in tracepoint_probe_register() argument
385 old = tracepoint_add_probe(name, probe, data); in tracepoint_probe_register()
[all …]
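
tracepoint.c keeps one (probe, data) pair per registered probe, and the tracepoint call site later passes that data pointer back as the probe's first argument. A hedged sketch using the by-name API shown above, tracepoint_probe_register(name, probe, data); it assumes the sched_switch tracepoint of this kernel generation, whose probe prototype is (void *data, struct task_struct *prev, struct task_struct *next), and the demo_* names are invented:

#include <linux/module.h>
#include <linux/tracepoint.h>
#include <linux/sched.h>

static unsigned long switch_count;	/* private state registered as "data" */

static void demo_probe(void *data, struct task_struct *prev,
		       struct task_struct *next)
{
	unsigned long *count = data;	/* same pointer given at registration */

	(*count)++;
}

static int __init demo_tp_init(void)
{
	return tracepoint_probe_register("sched_switch",
					 (void *)demo_probe, &switch_count);
}

static void __exit demo_tp_exit(void)
{
	tracepoint_probe_unregister("sched_switch",
				    (void *)demo_probe, &switch_count);
	tracepoint_synchronize_unregister();	/* wait for in-flight probe calls */
	pr_info("saw %lu context switches\n", switch_count);
}

module_init(demo_tp_init);
module_exit(demo_tp_exit);
MODULE_LICENSE("GPL");
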
auditfilter.c
393 static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data, in audit_data_to_entry() argument
403 entry = audit_to_entry_common((struct audit_rule *)data); in audit_data_to_entry()
407 bufp = data->buf; in audit_data_to_entry()
409 for (i = 0; i < data->field_count; i++) { in audit_data_to_entry()
414 f->op = audit_to_op(data->fieldflags[i]); in audit_data_to_entry()
418 f->type = data->fields[i]; in audit_data_to_entry()
419 f->val = data->values[i]; in audit_data_to_entry()
558 struct audit_rule_data *data; in audit_krule_to_data() local
562 data = kmalloc(sizeof(*data) + krule->buflen, GFP_KERNEL); in audit_krule_to_data()
563 if (unlikely(!data)) in audit_krule_to_data()
[all …]
utsname_sysctl.c
22 char *which = table->data; in get_uts()
53 uts_table.data = get_uts(table, write); in proc_do_uts_string()
55 put_uts(table, write, uts_table.data); in proc_do_uts_string()
72 .data = init_uts_ns.name.sysname,
79 .data = init_uts_ns.name.release,
86 .data = init_uts_ns.name.version,
93 .data = init_uts_ns.name.nodename,
101 .data = init_uts_ns.name.domainname,
kmod.c
199 static int ____call_usermodehelper(void *data) in ____call_usermodehelper() argument
201 struct subprocess_info *sub_info = data; in ____call_usermodehelper()
251 static int call_helper(void *data) in call_helper() argument
255 return ____call_usermodehelper(data); in call_helper()
279 static int wait_for_helper(void *data) in wait_for_helper() argument
281 struct subprocess_info *sub_info = data; in wait_for_helper()
534 void *data) in call_usermodehelper_setup() argument
548 sub_info->data = data; in call_usermodehelper_setup()
669 if (table->data == CAP_BSET) in proc_cap_handler()
671 else if (table->data == CAP_PI) in proc_cap_handler()
[all …]
async.c
77 void *data; member
123 entry->func(entry->data, entry->cookie); in async_run_entry_fn()
148 static async_cookie_t __async_schedule(async_func_t func, void *data, struct async_domain *domain) in __async_schedule() argument
168 func(data, newcookie); in __async_schedule()
175 entry->data = data; in __async_schedule()
207 async_cookie_t async_schedule(async_func_t func, void *data) in async_schedule() argument
209 return __async_schedule(func, data, &async_dfl_domain); in async_schedule()
225 async_cookie_t async_schedule_domain(async_func_t func, void *data, in async_schedule_domain() argument
228 return __async_schedule(func, data, domain); in async_schedule_domain()
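
async.c stores each caller's func and data in an async entry and later calls entry->func(entry->data, entry->cookie). A minimal sketch of scheduling such a callback, assuming a standalone module; demo_probe_hw and struct demo_dev are invented:

#include <linux/module.h>
#include <linux/async.h>

struct demo_dev {
	int id;
};

static struct demo_dev demo_dev0 = { .id = 0 };

static void demo_probe_hw(void *data, async_cookie_t cookie)
{
	struct demo_dev *d = data;	/* the pointer handed to async_schedule() */

	pr_info("async probe of device %d, cookie %llu\n",
		d->id, (unsigned long long)cookie);
}

static int __init demo_async_init(void)
{
	async_schedule(demo_probe_hw, &demo_dev0);	/* may run inline or in a worker */
	async_synchronize_full();			/* wait for all outstanding entries */
	return 0;
}

module_init(demo_async_init);
MODULE_LICENSE("GPL");
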
stop_machine.c
378 void *data; member
404 static int stop_machine_cpu_stop(void *data) in stop_machine_cpu_stop() argument
406 struct stop_machine_data *smdata = data; in stop_machine_cpu_stop()
436 err = smdata->fn(smdata->data); in stop_machine_cpu_stop()
449 int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus) in __stop_machine() argument
451 struct stop_machine_data smdata = { .fn = fn, .data = data, in __stop_machine()
468 ret = (*fn)(data); in __stop_machine()
479 int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus) in stop_machine() argument
485 ret = __stop_machine(fn, data, cpus); in stop_machine()
513 int stop_machine_from_inactive_cpu(int (*fn)(void *), void *data, in stop_machine_from_inactive_cpu() argument
[all …]
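
stop_machine.c wraps the caller's fn and data in struct stop_machine_data and calls smdata->fn(smdata->data) from stop_machine_cpu_stop() once every online CPU has reached the stopper. A minimal sketch of a caller; demo_patch and the flag variable are illustrative:

#include <linux/module.h>
#include <linux/stop_machine.h>

static int demo_patch(void *data)
{
	/* Runs while every other online CPU is parked in stop_machine_cpu_stop(),
	 * so no other kernel code executes concurrently with this store. */
	*(int *)data = 1;
	return 0;
}

static int __init demo_stop_init(void)
{
	int applied = 0;
	int ret;

	/* NULL cpumask: fn runs on one CPU, the rest just spin until it returns */
	ret = stop_machine(demo_patch, &applied, NULL);
	pr_info("stop_machine returned %d, applied=%d\n", ret, applied);
	return ret;
}

module_init(demo_stop_init);
MODULE_LICENSE("GPL");
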
lockdep_proc.c
426 static void seq_stats(struct seq_file *m, struct lock_stat_data *data) in seq_stats() argument
433 class = data->class; in seq_stats()
434 stats = &data->stats; in seq_stats()
553 struct lock_stat_seq *data = m->private; in ls_start() local
559 iter = data->stats + (*pos - 1); in ls_start()
560 if (iter >= data->iter_end) in ls_start()
597 struct lock_stat_seq *data = vmalloc(sizeof(struct lock_stat_seq)); in lock_stat_open() local
599 if (!data) in lock_stat_open()
604 struct lock_stat_data *iter = data->stats; in lock_stat_open()
612 data->iter_end = iter; in lock_stat_open()
[all …]
/kernel/trace/
trace_irqsoff.c
104 struct trace_array_cpu **data, in func_prolog_dec() argument
125 *data = per_cpu_ptr(tr->trace_buffer.data, cpu); in func_prolog_dec()
126 disabled = atomic_inc_return(&(*data)->disabled); in func_prolog_dec()
131 atomic_dec(&(*data)->disabled); in func_prolog_dec()
144 struct trace_array_cpu *data; in irqsoff_tracer_call() local
147 if (!func_prolog_dec(tr, &data, &flags)) in irqsoff_tracer_call()
152 atomic_dec(&data->disabled); in irqsoff_tracer_call()
187 struct trace_array_cpu *data; in irqsoff_graph_entry() local
192 if (!func_prolog_dec(tr, &data, &flags)) in irqsoff_graph_entry()
197 atomic_dec(&data->disabled); in irqsoff_graph_entry()
[all …]
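
func_prolog_dec() above grabs the per-CPU trace_array_cpu and uses its atomic "disabled" counter as a re-entrancy guard: only the first increment on a CPU proceeds, nested calls bail out. A generic sketch of that guard with invented names (demo_cpu_state, demo_callback), not the tracer's real structures; it assumes the caller already has preemption disabled, as the ftrace callbacks do:

#include <linux/percpu.h>
#include <linux/atomic.h>
#include <linux/kernel.h>

struct demo_cpu_state {
	atomic_t disabled;	/* > 0 while a callback is already running here */
};

static DEFINE_PER_CPU(struct demo_cpu_state, demo_state);

static void demo_callback(void)	/* assumes preemption is already disabled */
{
	struct demo_cpu_state *s = this_cpu_ptr(&demo_state);

	/* First entry on this CPU sees 1; nested or recursive entries bail out */
	if (atomic_inc_return(&s->disabled) != 1)
		goto out;

	/* ... per-CPU work that must not recurse goes here ... */

out:
	atomic_dec(&s->disabled);
}
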
trace_functions.c
62 struct trace_array_cpu *data; in function_trace_call() local
79 data = per_cpu_ptr(tr->trace_buffer.data, cpu); in function_trace_call()
80 if (!atomic_read(&data->disabled)) { in function_trace_call()
95 struct trace_array_cpu *data; in function_stack_trace_call() local
110 data = per_cpu_ptr(tr->trace_buffer.data, cpu); in function_stack_trace_call()
111 disabled = atomic_inc_return(&data->disabled); in function_stack_trace_call()
127 atomic_dec(&data->disabled); in function_stack_trace_call()
217 static int update_count(void **data) in update_count() argument
219 unsigned long *count = (long *)data; in update_count()
231 ftrace_traceon_count(unsigned long ip, unsigned long parent_ip, void **data) in ftrace_traceon_count() argument
[all …]
trace_probe.c
46 void *data, void *ent)\
48 return trace_seq_printf(s, " %s=" fmt, name, (cast)*(type *)data);\
79 void *data, void *ent) in PRINT_TYPE_FUNC_NAME()
81 int len = *(u32 *)data >> 16; in PRINT_TYPE_FUNC_NAME()
87 (const char *)get_loc_data(data, ent)); in PRINT_TYPE_FUNC_NAME()
272 void *data, void *dest) \
274 struct symbol_cache *sc = data; \
292 void *data, void *dest) \
294 struct deref_fetch_param *dprm = data; \
307 static __kprobes void update_deref_fetch_param(struct deref_fetch_param *data) in DEFINE_FETCH_deref()
[all …]
trace_functions_graph.c
254 struct trace_array_cpu *data; in trace_graph_entry() local
272 data = per_cpu_ptr(tr->trace_buffer.data, cpu); in trace_graph_entry()
273 disabled = atomic_inc_return(&data->disabled); in trace_graph_entry()
281 atomic_dec(&data->disabled); in trace_graph_entry()
349 struct trace_array_cpu *data; in trace_graph_return() local
357 data = per_cpu_ptr(tr->trace_buffer.data, cpu); in trace_graph_return()
358 disabled = atomic_inc_return(&data->disabled); in trace_graph_return()
363 atomic_dec(&data->disabled); in trace_graph_return()
483 verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data) in verif_pid() argument
489 if (!data) in verif_pid()
[all …]
trace_sched_wakeup.c
77 struct trace_array_cpu **data, in func_prolog_preempt_disable() argument
93 *data = per_cpu_ptr(tr->trace_buffer.data, cpu); in func_prolog_preempt_disable()
94 disabled = atomic_inc_return(&(*data)->disabled); in func_prolog_preempt_disable()
101 atomic_dec(&(*data)->disabled); in func_prolog_preempt_disable()
116 struct trace_array_cpu *data; in wakeup_tracer_call() local
120 if (!func_prolog_preempt_disable(tr, &data, &pc)) in wakeup_tracer_call()
127 atomic_dec(&data->disabled); in wakeup_tracer_call()
229 struct trace_array_cpu *data; in wakeup_graph_entry() local
233 if (!func_prolog_preempt_disable(tr, &data, &pc)) in wakeup_graph_entry()
238 atomic_dec(&data->disabled); in wakeup_graph_entry()
[all …]
trace_syscalls.c
17 enum trace_reg type, void *data);
19 enum trace_reg type, void *data);
24 struct syscall_metadata *entry = call->data; in syscall_get_enter_fields()
235 struct syscall_metadata *entry = call->data; in set_syscall_print_fmt()
258 struct syscall_metadata *entry = call->data; in free_syscall_print_fmt()
267 struct syscall_metadata *meta = call->data; in syscall_enter_define_fields()
302 static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id) in ftrace_syscall_enter() argument
304 struct trace_array *tr = data; in ftrace_syscall_enter()
339 static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret) in ftrace_syscall_exit() argument
341 struct trace_array *tr = data; in ftrace_syscall_exit()
[all …]
trace_sched_switch.c
55 struct trace_array_cpu *data; in probe_sched_switch() local
72 data = per_cpu_ptr(ctx_trace->trace_buffer.data, cpu); in probe_sched_switch()
74 if (likely(!atomic_read(&data->disabled))) in probe_sched_switch()
111 struct trace_array_cpu *data; in probe_sched_wakeup() local
126 data = per_cpu_ptr(ctx_trace->trace_buffer.data, cpu); in probe_sched_wakeup()
128 if (likely(!atomic_read(&data->disabled))) in probe_sched_wakeup()
trace_events.c
186 enum trace_reg type, void *data) in ftrace_event_reg() argument
188 struct ftrace_event_file *file = data; in ftrace_event_reg()
1761 unsigned long val, void *data) in trace_module_notify() argument
1763 struct module *mod = data; in trace_module_notify()
1796 unsigned long val, void *data) in trace_module_notify() argument
1886 struct event_probe_data *data = *pdata; in event_enable_probe() local
1888 if (!data) in event_enable_probe()
1891 if (data->enable) in event_enable_probe()
1892 clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &data->file->flags); in event_enable_probe()
1894 set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &data->file->flags); in event_enable_probe()
[all …]
ring_buffer.c
166 #define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
342 unsigned char data[] RB_ALIGN_DATA; /* data of buffer page */ member
443 (unsigned int)offsetof(typeof(field), data), in ring_buffer_print_page_header()
1824 return bpage->data + index; in __rb_data_page_index()
1829 return bpage->page->data + index; in __rb_page_index()
2938 void *data) in ring_buffer_write() argument
2973 memcpy(body, data, length); in ring_buffer_write()
4385 void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data) in ring_buffer_free_read_page() argument
4387 free_page((unsigned long)data); in ring_buffer_free_read_page()
4509 memcpy(bpage->data + pos, rpage->data + rpos, size); in ring_buffer_read_page()
[all …]
trace_events_filter.c
400 int *err, void *data);
404 filter_pred_walkcb_t cb, void *data) in walk_pred_tree() argument
416 ret = cb(move, pred, &err, data); in walk_pred_tree()
493 int *err, void *data) in filter_match_preds_cb() argument
495 struct filter_match_preds_data *d = data; in filter_match_preds_cb()
537 struct filter_match_preds_data data = { in filter_match_preds() local
559 data.preds = preds = rcu_dereference_sched(filter->preds); in filter_match_preds()
560 ret = walk_pred_tree(preds, root, filter_match_preds_cb, &data); in filter_match_preds()
562 return data.match; in filter_match_preds()
1373 int *err, void *data) in check_pred_tree_cb() argument
[all …]
trace_mmiotrace.c
308 struct trace_array_cpu *data, in __trace_mmiotrace_rw() argument
333 struct trace_array_cpu *data = per_cpu_ptr(tr->trace_buffer.data, smp_processor_id()); in mmio_trace_rw() local
334 __trace_mmiotrace_rw(tr, data, rw); in mmio_trace_rw()
338 struct trace_array_cpu *data, in __trace_mmiotrace_map() argument
363 struct trace_array_cpu *data; in mmio_trace_mapping() local
366 data = per_cpu_ptr(tr->trace_buffer.data, smp_processor_id()); in mmio_trace_mapping()
367 __trace_mmiotrace_map(tr, data, map); in mmio_trace_mapping()
/kernel/irq/
dummychip.c
17 static void ack_bad(struct irq_data *data) in ack_bad() argument
19 struct irq_desc *desc = irq_data_to_desc(data); in ack_bad()
21 print_irq_desc(data->irq, desc); in ack_bad()
22 ack_bad_irq(data->irq); in ack_bad()
28 static void noop(struct irq_data *data) { } in noop() argument
30 static unsigned int noop_ret(struct irq_data *data) in noop_ret() argument
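
dummychip.c builds irq_chip instances whose handlers all take the per-interrupt struct irq_data and do nothing (or complain, as ack_bad() does). A hedged sketch of such a chip definition, modelled on the dummy chip; demo_chip and the handler names are invented:

#include <linux/irq.h>

static void demo_noop(struct irq_data *data)
{
	/* nothing to do for this line */
}

static unsigned int demo_startup(struct irq_data *data)
{
	return 0;	/* started, no interrupt pending */
}

static struct irq_chip demo_chip = {
	.name		= "demo",
	.irq_startup	= demo_startup,
	.irq_ack	= demo_noop,
	.irq_mask	= demo_noop,
	.irq_unmask	= demo_noop,
};
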
/kernel/events/
core.c
54 static void remote_function(void *data) in remote_function() argument
56 struct remote_function_call *tfc = data; in remote_function()
84 struct remote_function_call data = { in task_function_call() local
92 smp_call_function_single(task_cpu(p), remote_function, &data, 1); in task_function_call()
94 return data.ret; in task_function_call()
108 struct remote_function_call data = { in cpu_function_call() local
115 smp_call_function_single(cpu, remote_function, &data, 1); in cpu_function_call()
117 return data.ret; in cpu_function_call()
1057 struct perf_sample_data *data; in perf_event__header_size() local
1064 size += sizeof(data->ip); in perf_event__header_size()
[all …]
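
core.c's task_function_call() and cpu_function_call() pack their arguments and a return slot into struct remote_function_call, hand its address to smp_call_function_single() as the data pointer, and read the result back once the IPI handler has run. A minimal sketch of the same round trip; struct demo_call and demo_remote_fn are invented:

#include <linux/smp.h>
#include <linux/kernel.h>

struct demo_call {
	int arg;
	int ret;	/* filled in on the remote CPU */
};

static void demo_remote_fn(void *data)	/* runs in IPI context on the target CPU */
{
	struct demo_call *c = data;

	c->ret = c->arg + smp_processor_id();
}

static int demo_call_on_cpu(int cpu, int arg)
{
	struct demo_call c = { .arg = arg, .ret = -1 };

	/* wait = 1: return only after demo_remote_fn() has finished on 'cpu' */
	smp_call_function_single(cpu, demo_remote_fn, &c, 1);
	return c.ret;
}
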
