/kernel/power/

user.c
    49  struct snapshot_data *data;  in snapshot_open() (local)
    69  data = &snapshot_state;  in snapshot_open()
    70  filp->private_data = data;  in snapshot_open()
    71  memset(&data->handle, 0, sizeof(struct snapshot_handle));  in snapshot_open()
    74  data->swap = swap_type_of(swsusp_resume_device, 0);  in snapshot_open()
    75  data->mode = O_RDONLY;  in snapshot_open()
    76  data->free_bitmaps = false;  in snapshot_open()
    85  data->swap = -1;  in snapshot_open()
    86  data->mode = O_WRONLY;  in snapshot_open()
    90  data->free_bitmaps = !error;  in snapshot_open()
    [all …]

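The snapshot_open() hits above illustrate the usual character-device idiom of pointing filp->private_data at the per-open state so later file operations can find it again. Below is a minimal, hedged sketch of that idiom only; the demo_* struct, field, and function names are invented for illustration and are not the real kernel/power/user.c code.

#include <linux/fcntl.h>
#include <linux/fs.h>

struct demo_state {
        int swap;               /* swap device, -1 = none selected yet */
        int mode;               /* O_RDONLY or O_WRONLY                */
        bool free_bitmaps;
};

static struct demo_state demo_state;

static int demo_open(struct inode *inode, struct file *filp)
{
        struct demo_state *data = &demo_state;

        filp->private_data = data;      /* read/write/ioctl recover it here */
        data->swap = -1;
        data->mode = O_RDONLY;
        data->free_bitmaps = false;
        return 0;
}

static int demo_release(struct inode *inode, struct file *filp)
{
        struct demo_state *data = filp->private_data;

        data->mode = 0;                 /* mark the single slot free again */
        return 0;
}

static const struct file_operations demo_fops = {
        .open    = demo_open,
        .release = demo_release,
};
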
swap.c
    610  static int crc32_threadfn(void *data)  in crc32_threadfn() (argument)
    612  struct crc_data *d = data;  in crc32_threadfn()
    654  static int lzo_compress_threadfn(void *data)  in lzo_compress_threadfn() (argument)
    656  struct cmp_data *d = data;  in lzo_compress_threadfn()
    699  struct cmp_data *data = NULL;  in save_image_lzo() (local)
    718  data = vzalloc(array_size(nr_threads, sizeof(*data)));  in save_image_lzo()
    719  if (!data) {  in save_image_lzo()
    736  init_waitqueue_head(&data[thr].go);  in save_image_lzo()
    737  init_waitqueue_head(&data[thr].done);  in save_image_lzo()
    739  data[thr].thr = kthread_run(lzo_compress_threadfn,  in save_image_lzo()
    [all …]

/kernel/trace/

trace_events_trigger.c
    20  void trigger_data_free(struct event_trigger_data *data)  in trigger_data_free() (argument)
    22  if (data->cmd_ops->set_filter)  in trigger_data_free()
    23  data->cmd_ops->set_filter(NULL, data, NULL);  in trigger_data_free()
    28  kfree(data);  in trigger_data_free()
    60  struct event_trigger_data *data;  in event_triggers_call() (local)
    67  list_for_each_entry_rcu(data, &file->triggers, list) {  in event_triggers_call()
    68  if (data->paused)  in event_triggers_call()
    71  data->ops->trigger(data, buffer, rec, event);  in event_triggers_call()
    74  filter = rcu_dereference_sched(data->filter);  in event_triggers_call()
    77  if (event_command_post_trigger(data->cmd_ops)) {  in event_triggers_call()
    [all …]

trace_functions.c
    176  struct trace_array_cpu *data;  in function_trace_call() (local)
    191  data = per_cpu_ptr(tr->array_buffer.data, cpu);  in function_trace_call()
    192  if (!atomic_read(&data->disabled))  in function_trace_call()
    221  struct trace_array_cpu *data;  in function_stack_trace_call() (local)
    236  data = per_cpu_ptr(tr->array_buffer.data, cpu);  in function_stack_trace_call()
    237  disabled = atomic_inc_return(&data->disabled);  in function_stack_trace_call()
    245  atomic_dec(&data->disabled);  in function_stack_trace_call()
    286  struct trace_array_cpu *data;  in function_no_repeats_trace_call() (local)
    300  data = per_cpu_ptr(tr->array_buffer.data, cpu);  in function_no_repeats_trace_call()
    301  if (atomic_read(&data->disabled))  in function_no_repeats_trace_call()
    [all …]

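Several of the tracer callbacks above (and in the irqsoff, wakeup, graph, and branch tracers below) share one pattern: bump a per-CPU atomic "disabled" counter, record an event only when we are the sole entrant, then drop the counter, so a callback that recurses into itself quietly backs off. A compressed, hedged sketch of that pattern follows; the demo_* names are invented and this is not the real ftrace code.

#include <linux/atomic.h>
#include <linux/percpu.h>

struct demo_cpu_buf {
        atomic_t disabled;      /* > 0 while this CPU's buffer must not be written */
};

static DEFINE_PER_CPU(struct demo_cpu_buf, demo_bufs);

static void demo_trace_call(unsigned long ip)
{
        struct demo_cpu_buf *data = this_cpu_ptr(&demo_bufs);
        int disabled = atomic_inc_return(&data->disabled);

        if (disabled == 1) {
                /* first (and only) entrant on this CPU: safe to log 'ip' */
        }

        atomic_dec(&data->disabled);    /* always undo the increment */
}
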
trace_irqsoff.c
    100  struct trace_array_cpu **data,  in func_prolog_dec() (argument)
    125  *data = per_cpu_ptr(tr->array_buffer.data, cpu);  in func_prolog_dec()
    126  disabled = atomic_inc_return(&(*data)->disabled);  in func_prolog_dec()
    131  atomic_dec(&(*data)->disabled);  in func_prolog_dec()
    144  struct trace_array_cpu *data;  in irqsoff_tracer_call() (local)
    148  if (!func_prolog_dec(tr, &data, &flags))  in irqsoff_tracer_call()
    155  atomic_dec(&data->disabled);  in irqsoff_tracer_call()
    181  struct trace_array_cpu *data;  in irqsoff_graph_entry() (local)
    198  if (!func_prolog_dec(tr, &data, &flags))  in irqsoff_graph_entry()
    203  atomic_dec(&data->disabled);  in irqsoff_graph_entry()
    [all …]

trace_functions_graph.c
    129  struct trace_array_cpu *data;  in trace_graph_entry() (local)
    173  data = per_cpu_ptr(tr->array_buffer.data, cpu);  in trace_graph_entry()
    174  disabled = atomic_inc_return(&data->disabled);  in trace_graph_entry()
    182  atomic_dec(&data->disabled);  in trace_graph_entry()
    238  struct trace_array_cpu *data;  in trace_graph_return() (local)
    253  data = per_cpu_ptr(tr->array_buffer.data, cpu);  in trace_graph_return()
    254  disabled = atomic_inc_return(&data->disabled);  in trace_graph_return()
    259  atomic_dec(&data->disabled);  in trace_graph_return()
    383  verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)  in verif_pid() (argument)
    388  if (!data)  in verif_pid()
    [all …]

trace_events_hist.c
    584  struct action_data *data, u64 *var_ref_vals);
    724  struct track_data *data = kzalloc(sizeof(*data), GFP_KERNEL);  in track_data_alloc() (local)
    727  if (!data)  in track_data_alloc()
    730  data->key = kzalloc(key_len, GFP_KERNEL);  in track_data_alloc()
    731  if (!data->key) {  in track_data_alloc()
    732  track_data_free(data);  in track_data_alloc()
    736  data->key_len = key_len;  in track_data_alloc()
    737  data->action_data = action_data;  in track_data_alloc()
    738  data->hist_data = hist_data;  in track_data_alloc()
    742  track_data_free(data);  in track_data_alloc()
    [all …]

trace_sched_wakeup.c
    69  struct trace_array_cpu **data,  in func_prolog_preempt_disable() (argument)
    85  *data = per_cpu_ptr(tr->array_buffer.data, cpu);  in func_prolog_preempt_disable()
    86  disabled = atomic_inc_return(&(*data)->disabled);  in func_prolog_preempt_disable()
    93  atomic_dec(&(*data)->disabled);  in func_prolog_preempt_disable()
    118  struct trace_array_cpu *data;  in wakeup_graph_entry() (local)
    134  if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))  in wakeup_graph_entry()
    138  atomic_dec(&data->disabled);  in wakeup_graph_entry()
    147  struct trace_array_cpu *data;  in wakeup_graph_return() (local)
    152  if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))  in wakeup_graph_return()
    156  atomic_dec(&data->disabled);  in wakeup_graph_return()
    [all …]

trace_branch.c
    36  struct trace_array_cpu *data;  in probe_likely_condition() (local)
    58  data = this_cpu_ptr(tr->array_buffer.data);  in probe_likely_condition()
    59  if (atomic_read(&data->disabled))  in probe_likely_condition()
    72  p = f->data.file + strlen(f->data.file);  in probe_likely_condition()
    73  while (p >= f->data.file && *p != '/')  in probe_likely_condition()
    77  strncpy(entry->func, f->data.func, TRACE_FUNC_SIZE);  in probe_likely_condition()
    82  entry->line = f->data.line;  in probe_likely_condition()
    225  f->data.correct++;  in ftrace_likely_update()
    227  f->data.incorrect++;  in ftrace_likely_update()
    303  f = branch_stat_process_file(&p->data);  in annotate_branch_stat_show()
    [all …]

/kernel/irq/

ipi.c
    27  struct irq_data *data;  in irq_reserve_ipi() (local)
    93  data = irq_get_irq_data(virq + i);  in irq_reserve_ipi()
    94  cpumask_copy(data->common->affinity, dest);  in irq_reserve_ipi()
    95  data->common->ipi_offset = offset;  in irq_reserve_ipi()
    117  struct irq_data *data = irq_get_irq_data(irq);  in irq_destroy_ipi() (local)
    122  if (!irq || !data)  in irq_destroy_ipi()
    125  domain = data->domain;  in irq_destroy_ipi()
    134  ipimask = irq_data_get_affinity_mask(data);  in irq_destroy_ipi()
    143  irq = irq + cpumask_first(dest) - data->common->ipi_offset;  in irq_destroy_ipi()
    165  struct irq_data *data = irq_get_irq_data(irq);  in ipi_get_hwirq() (local)
    [all …]

chip.c
    88  int irq_set_handler_data(unsigned int irq, void *data)  in irq_set_handler_data() (argument)
    95  desc->irq_common_data.handler_data = data;  in irq_set_handler_data()
    143  int irq_set_chip_data(unsigned int irq, void *data)  in irq_set_chip_data() (argument)
    150  desc->irq_data.chip_data = data;  in irq_set_chip_data()
    1077  void *data)  in irq_set_chained_handler_and_data() (argument)
    1085  desc->irq_common_data.handler_data = data;  in irq_set_chained_handler_and_data()
    1316  int irq_chip_set_parent_state(struct irq_data *data,  in irq_chip_set_parent_state() (argument)
    1320  data = data->parent_data;  in irq_chip_set_parent_state()
    1322  if (!data || !data->chip->irq_set_irqchip_state)  in irq_chip_set_parent_state()
    1325  return data->chip->irq_set_irqchip_state(data, which, val);  in irq_chip_set_parent_state()
    [all …]

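The irq_chip_set_parent_state() lines show a hierarchy-delegation pattern: step to the parent level and forward the call only if that level actually implements the hook. The generic shape of it is sketched below with invented demo_* types; this illustrates the pattern, not the irq core's API.

#include <stdbool.h>

struct demo_level;

struct demo_level_ops {
        int (*set_state)(struct demo_level *lvl, int which, bool val);
};

struct demo_level {
        struct demo_level *parent;              /* next level up, or NULL */
        const struct demo_level_ops *ops;
};

static int demo_set_parent_state(struct demo_level *lvl, int which, bool val)
{
        lvl = lvl->parent;                      /* skip our own level */

        if (!lvl || !lvl->ops || !lvl->ops->set_state)
                return 0;                       /* parent does not support it */

        return lvl->ops->set_state(lvl, which, val);
}
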
debugfs.c
    32  struct irq_data *data = irq_desc_get_irq_data(desc);  in irq_debug_show_masks() (local)
    35  msk = irq_data_get_affinity_mask(data);  in irq_debug_show_masks()
    38  msk = irq_data_get_effective_affinity_mask(data);  in irq_debug_show_masks()
    65  irq_debug_show_chip(struct seq_file *m, struct irq_data *data, int ind)  in irq_debug_show_chip() (argument)
    67  struct irq_chip *chip = data->chip;  in irq_debug_show_chip()
    75  chip->irq_print_chip(data, m);  in irq_debug_show_chip()
    84  irq_debug_show_data(struct seq_file *m, struct irq_data *data, int ind)  in irq_debug_show_data() (argument)
    87  data->domain ? data->domain->name : "");  in irq_debug_show_data()
    88  seq_printf(m, "%*shwirq: 0x%lx\n", ind + 1, "", data->hwirq);  in irq_debug_show_data()
    89  irq_debug_show_chip(m, data, ind + 1);  in irq_debug_show_data()
    [all …]

migration.c
    20  struct irq_data *data = irq_desc_get_irq_data(desc);  in irq_fixup_move_pending() (local)
    22  if (!irqd_is_setaffinity_pending(data))  in irq_fixup_move_pending()
    30  irqd_clr_move_pending(data);  in irq_fixup_move_pending()
    34  irqd_clr_move_pending(data);  in irq_fixup_move_pending()
    41  struct irq_data *data = &desc->irq_data;  in irq_move_masked_irq() (local)
    42  struct irq_chip *chip = data->chip;  in irq_move_masked_irq()
    44  if (likely(!irqd_is_setaffinity_pending(data)))  in irq_move_masked_irq()
    47  irqd_clr_move_pending(data);  in irq_move_masked_irq()
    52  if (irqd_is_per_cpu(data)) {  in irq_move_masked_irq()
    80  ret = irq_do_set_affinity(data, desc->pending_mask, false);  in irq_move_masked_irq()
    [all …]

irq_sim.c
    27  static void irq_sim_irqmask(struct irq_data *data)  in irq_sim_irqmask() (argument)
    29  struct irq_sim_irq_ctx *irq_ctx = irq_data_get_irq_chip_data(data);  in irq_sim_irqmask()
    34  static void irq_sim_irqunmask(struct irq_data *data)  in irq_sim_irqunmask() (argument)
    36  struct irq_sim_irq_ctx *irq_ctx = irq_data_get_irq_chip_data(data);  in irq_sim_irqunmask()
    41  static int irq_sim_set_type(struct irq_data *data, unsigned int type)  in irq_sim_set_type() (argument)
    47  irqd_set_trigger_type(data, type);  in irq_sim_set_type()
    52  static int irq_sim_get_irqchip_state(struct irq_data *data,  in irq_sim_get_irqchip_state() (argument)
    55  struct irq_sim_irq_ctx *irq_ctx = irq_data_get_irq_chip_data(data);  in irq_sim_get_irqchip_state()
    56  irq_hw_number_t hwirq = irqd_to_hwirq(data);  in irq_sim_get_irqchip_state()
    70  static int irq_sim_set_irqchip_state(struct irq_data *data,  in irq_sim_set_irqchip_state() (argument)
    [all …]

manage.c
    198  static void irq_validate_effective_affinity(struct irq_data *data)  in irq_validate_effective_affinity() (argument)
    200  const struct cpumask *m = irq_data_get_effective_affinity_mask(data);  in irq_validate_effective_affinity()
    201  struct irq_chip *chip = irq_data_get_irq_chip(data);  in irq_validate_effective_affinity()
    206  chip->name, data->irq);  in irq_validate_effective_affinity()
    209  static inline void irq_validate_effective_affinity(struct irq_data *data) { }  in irq_validate_effective_affinity() (argument)
    212  int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,  in irq_do_set_affinity() (argument)
    215  struct irq_desc *desc = irq_data_to_desc(data);  in irq_do_set_affinity()
    216  struct irq_chip *chip = irq_data_get_irq_chip(data);  in irq_do_set_affinity()
    246  if (irqd_affinity_is_managed(data) &&  in irq_do_set_affinity()
    268  ret = chip->irq_set_affinity(data, &tmp_mask, force);  in irq_do_set_affinity()
    [all …]

cpuhotplug.c
    175  static bool hk_should_isolate(struct irq_data *data, unsigned int cpu)  in hk_should_isolate() (argument)
    183  if (cpumask_subset(irq_data_get_effective_affinity_mask(data), hk_mask))  in hk_should_isolate()
    191  struct irq_data *data = irq_desc_get_irq_data(desc);  in irq_restore_affinity_of_irq() (local)
    192  const struct cpumask *affinity = irq_data_get_affinity_mask(data);  in irq_restore_affinity_of_irq()
    194  if (!irqd_affinity_is_managed(data) || !desc->action ||  in irq_restore_affinity_of_irq()
    195  !irq_data_get_irq_chip(data) || !cpumask_test_cpu(cpu, affinity))  in irq_restore_affinity_of_irq()
    198  if (irqd_is_managed_and_shutdown(data)) {  in irq_restore_affinity_of_irq()
    210  if (!irqd_is_single_target(data) || hk_should_isolate(data, cpu))  in irq_restore_affinity_of_irq()
    211  irq_set_affinity_locked(data, affinity, false);  in irq_restore_affinity_of_irq()

/kernel/

sysctl.c
    147  static int _proc_do_string(char *data, int maxlen, int write,  in _proc_do_string() (argument)
    153  if (!data || !maxlen || !*lenp) {  in _proc_do_string()
    161  len = strlen(data);  in _proc_do_string()
    179  data[len++] = c;  in _proc_do_string()
    181  data[len] = 0;  in _proc_do_string()
    183  len = strlen(data);  in _proc_do_string()
    192  data += *ppos;  in _proc_do_string()
    198  memcpy(buffer, data, len);  in _proc_do_string()
    266  return _proc_do_string(table->data, table->maxlen, write, buffer, lenp,  in proc_dostring()
    430  int write, void *data)  in do_proc_dobool_conv()
    [all …]

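_proc_do_string() is the backend of proc_dostring(): on a read it copies out at most *lenp bytes of the backing string starting at *ppos, then advances both counters. A simplified, hedged sketch of just that read path (no write handling, no trailing newline), with invented demo_* names:

#include <stddef.h>
#include <string.h>

static int demo_do_string_read(const char *data, char *buffer,
                               size_t *lenp, size_t *ppos)
{
        size_t len = strlen(data);

        if (*ppos >= len) {             /* past the end: report EOF */
                *lenp = 0;
                return 0;
        }

        data += *ppos;                  /* resume where the last read stopped */
        len -= *ppos;
        if (len > *lenp)
                len = *lenp;            /* clamp to the caller's buffer */

        memcpy(buffer, data, len);
        *lenp = len;
        *ppos += len;
        return 0;
}
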
sysctl-test.c
    25  .data = NULL,  in sysctl_test_api_dointvec_null_tbl_data()
    68  int data = 0;  in sysctl_test_api_dointvec_table_maxlen_unset() (local)
    71  .data = &data,  in sysctl_test_api_dointvec_table_maxlen_unset()
    114  int data = 0;  in sysctl_test_api_dointvec_table_len_is_zero() (local)
    118  .data = &data,  in sysctl_test_api_dointvec_table_len_is_zero()
    148  int data = 0;  in sysctl_test_api_dointvec_table_read_but_position_set() (local)
    152  .data = &data,  in sysctl_test_api_dointvec_table_read_but_position_set()
    183  int data = 0;  in sysctl_test_dointvec_read_happy_single_positive() (local)
    187  .data = &data,  in sysctl_test_dointvec_read_happy_single_positive()
    199  *((int *)table.data) = 13;  in sysctl_test_dointvec_read_happy_single_positive()
    [all …]

tracepoint.c
    199  old[iter_probes].data == tp_func->data)  in func_add()
    248  old[nr_probes].data == tp_func->data) ||  in func_remove()
    271  old[i].data != tp_func->data) &&  in func_remove()
    284  old[i].data == tp_func->data)  in func_remove()
    378  if (tp_funcs[0].data != old[0].data)  in tracepoint_add_func()
    437  if (tp_funcs[0].data != old[0].data)  in tracepoint_remove_func()
    451  if (tp_funcs[0].data != old[0].data)  in tracepoint_remove_func()
    473  void *data, int prio)  in tracepoint_probe_register_prio_may_exist() (argument)
    480  tp_func.data = data;  in tracepoint_probe_register_prio_may_exist()
    502  void *data, int prio)  in tracepoint_probe_register_prio() (argument)
    [all …]

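Every comparison above checks both .func and .data: a tracepoint probe is identified by the pair, so the same callback registered with two different data cookies counts as two separate registrations. A hedged, allocation-only sketch of the removal step (no RCU, no priority handling); the demo_* names are illustrative, not the real tracepoint code.

#include <stdlib.h>

struct demo_probe {
        void (*func)(void *data);
        void *data;
};

/* Build a new NULL-terminated array without the matching (func, data) entry. */
static struct demo_probe *demo_remove(const struct demo_probe *old,
                                      size_t nr_probes,
                                      const struct demo_probe *victim)
{
        struct demo_probe *new = calloc(nr_probes + 1, sizeof(*new));
        size_t i, j = 0;

        if (!new)
                return NULL;

        for (i = 0; i < nr_probes; i++)
                if (old[i].func != victim->func || old[i].data != victim->data)
                        new[j++] = old[i];

        /* calloc already zeroed the array, so new[j].func == NULL terminates it */
        return new;
}
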
regset.c
    9  void **data)  in __regset_get() (argument)
    11  void *p = *data, *to_free = NULL;  in __regset_get()
    29  *data = p;  in __regset_get()
    36  void *data)  in regset_get() (argument)
    38  return __regset_get(target, regset, size, &data);  in regset_get()
    45  void **data)  in regset_get_alloc() (argument)
    47  *data = NULL;  in regset_get_alloc()
    48  return __regset_get(target, regset, size, data);  in regset_get_alloc()
    65  void __user *data)  in copy_regset_to_user() (argument)
    73  ret = copy_to_user(data, buf, ret) ? -EFAULT : 0;  in copy_regset_to_user()

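regset_get() and regset_get_alloc() above funnel into one helper through a void ** argument: if *data already points at a caller-owned buffer it is filled in place, and if it is NULL the helper allocates one and hands it back through the same pointer. A hedged userspace sketch of that convention, with invented demo_* names and a memset standing in for the real copy-out:

#include <stdlib.h>
#include <string.h>

static int demo_get(size_t size, void **data)
{
        void *p = *data;

        if (!p) {                       /* _alloc variant: we own the buffer */
                p = malloc(size);
                if (!p)
                        return -1;
                *data = p;              /* hand ownership back to the caller */
        }

        memset(p, 0, size);             /* stand-in for filling in the registers */
        return (int)size;
}

/* Caller-supplied buffer: */
static int demo_get_into(void *buf, size_t size)
{
        return demo_get(size, &buf);
}

/* Helper-allocated buffer: */
static int demo_get_alloc(size_t size, void **data)
{
        *data = NULL;
        return demo_get(size, data);
}
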
ptrace.c
    368  static int check_ptrace_options(unsigned long data)  in check_ptrace_options() (argument)
    370  if (data & ~(unsigned long)PTRACE_O_MASK)  in check_ptrace_options()
    373  if (unlikely(data & PTRACE_O_SUSPEND_SECCOMP)) {  in check_ptrace_options()
    582  static int ptrace_detach(struct task_struct *child, unsigned int data)  in ptrace_detach() (argument)
    584  if (!valid_signal(data))  in ptrace_detach()
    600  child->exit_code = data;  in ptrace_detach()
    678  static int ptrace_setoptions(struct task_struct *child, unsigned long data)  in ptrace_setoptions() (argument)
    683  ret = check_ptrace_options(data);  in ptrace_setoptions()
    690  flags |= (data << PT_OPT_FLAG_SHIFT);  in ptrace_setoptions()
    730  unsigned long data)  in ptrace_peek_siginfo()  (argument)
    [all …]

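check_ptrace_options() and ptrace_setoptions() show the usual option-word handling: reject any bit outside the allowed mask, then shift the accepted bits into the task's flags word. A hedged sketch with made-up constants (not the real PTRACE_O_* mask or PT_OPT_FLAG_SHIFT values):

#define DEMO_OPT_MASK           0x00ffUL        /* bits user space may set     */
#define DEMO_OPT_FLAG_SHIFT     8               /* where they live in ->flags  */

static int demo_setoptions(unsigned long *flags, unsigned long data)
{
        if (data & ~DEMO_OPT_MASK)              /* unknown option bit requested */
                return -1;

        *flags &= ~(DEMO_OPT_MASK << DEMO_OPT_FLAG_SHIFT);
        *flags |= data << DEMO_OPT_FLAG_SHIFT;  /* fold accepted bits in */
        return 0;
}
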
auditfilter.c
    449  static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,  in audit_data_to_entry() (argument)
    460  entry = audit_to_entry_common(data);  in audit_data_to_entry()
    464  bufp = data->buf;  in audit_data_to_entry()
    465  for (i = 0; i < data->field_count; i++) {  in audit_data_to_entry()
    471  f->op = audit_to_op(data->fieldflags[i]);  in audit_data_to_entry()
    475  f->type = data->fields[i];  in audit_data_to_entry()
    476  f_val = data->values[i];  in audit_data_to_entry()
    636  struct audit_rule_data *data;  in audit_krule_to_data() (local)
    640  data = kmalloc(struct_size(data, buf, krule->buflen), GFP_KERNEL);  in audit_krule_to_data()
    641  if (unlikely(!data))  in audit_krule_to_data()
    [all …]

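audit_krule_to_data() allocates the rule header and its variable-length string area in one allocation because buf is a flexible array member; the kernel uses the overflow-checked struct_size() helper for the size, which the sketch below spells out by hand (demo_* names invented):

#include <stdint.h>
#include <stdlib.h>

struct demo_rule_data {
        uint32_t field_count;
        uint32_t buflen;
        char buf[];             /* flexible array member: strings packed here */
};

static struct demo_rule_data *demo_rule_alloc(uint32_t buflen)
{
        struct demo_rule_data *data;

        data = malloc(sizeof(*data) + buflen);  /* header + payload in one go */
        if (!data)
                return NULL;

        data->field_count = 0;
        data->buflen = buflen;
        return data;
}
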
/kernel/gcov/

base.c
    65  u32 *data;  in store_gcov_u32() (local)
    68  data = buffer + off;  in store_gcov_u32()
    69  *data = v;  in store_gcov_u32()
    72  return sizeof(*data);  in store_gcov_u32()
    89  u32 *data;  in store_gcov_u64() (local)
    92  data = buffer + off;  in store_gcov_u64()
    94  data[0] = (v & 0xffffffffUL);  in store_gcov_u64()
    95  data[1] = (v >> 32);  in store_gcov_u64()
    98  return sizeof(*data) * 2;  in store_gcov_u64()
    104  void *data)  in gcov_module_notifier() (argument)
    [all …]

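store_gcov_u64() above serializes a 64-bit counter as two consecutive 32-bit words, low half first, at a byte offset into the output buffer, and returns how many bytes that position consumes. A hedged standalone version; treating a NULL buffer as a size-only query is an assumption added here, and the offset is assumed 32-bit aligned as in the gcov record format:

#include <stddef.h>
#include <stdint.h>

static size_t demo_store_u64(void *buffer, size_t off, uint64_t v)
{
        uint32_t *data;

        if (buffer) {                   /* NULL buffer: only report the size */
                data = (uint32_t *)((char *)buffer + off);
                data[0] = (uint32_t)(v & 0xffffffffUL);         /* low word  */
                data[1] = (uint32_t)(v >> 32);                  /* high word */
        }

        return 2 * sizeof(uint32_t);    /* bytes used at this offset */
}
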
/kernel/bpf/

lpm_trie.c
    29  u8 data[];  (member)
    152  static inline int extract_bit(const u8 *data, size_t index)  in extract_bit() (argument)
    154  return !!(data[index / 8] & (1 << (7 - (index % 8))));  in extract_bit()
    172  BUILD_BUG_ON(offsetof(struct lpm_trie_node, data) % sizeof(u32));  in longest_prefix_match()
    173  BUILD_BUG_ON(offsetof(struct bpf_lpm_trie_key, data) % sizeof(u32));  in longest_prefix_match()
    181  u64 diff = be64_to_cpu(*(__be64 *)node->data ^  in longest_prefix_match()
    182  *(__be64 *)key->data);  in longest_prefix_match()
    194  u32 diff = be32_to_cpu(*(__be32 *)&node->data[i] ^  in longest_prefix_match()
    195  *(__be32 *)&key->data[i]);  in longest_prefix_match()
    206  u16 diff = be16_to_cpu(*(__be16 *)&node->data[i] ^  in longest_prefix_match()
    [all …]

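extract_bit() treats the LPM key as a big-endian bit string: bit 0 is the most significant bit of byte 0, which is what lets longest_prefix_match() compare whole 64/32/16-bit big-endian chunks first and only fall back to single bits at the end. Below is a standalone copy of the helper with a small self-check; the demo_ prefix is added for illustration, otherwise the expression matches line 154 above.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

static int demo_extract_bit(const uint8_t *data, size_t index)
{
        return !!(data[index / 8] & (1 << (7 - (index % 8))));
}

int main(void)
{
        const uint8_t key[] = { 0x80, 0x01 };   /* bits: 10000000 00000001 */

        assert(demo_extract_bit(key, 0) == 1);  /* MSB of first byte  */
        assert(demo_extract_bit(key, 7) == 0);  /* LSB of first byte  */
        assert(demo_extract_bit(key, 15) == 1); /* LSB of second byte */
        return 0;
}
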
/kernel/printk/

sysctl.c
    26  .data = &console_loglevel,
    33  .data = &printk_ratelimit_state.interval,
    40  .data = &printk_ratelimit_state.burst,
    47  .data = &printk_delay_msec,
    56  .data = devkmsg_log_str,
    63  .data = &dmesg_restrict,
    72  .data = &kptr_restrict,