/kernel/dma/
debug.c
  in dump_entry_trace():
    167  static inline void dump_entry_trace(struct dma_debug_entry *entry)
    170  if (entry) {
    172  stack_trace_print(entry->stack_entries, entry->stack_len, 0);
  in err_printk():
    218  #define err_printk(dev, entry, format, arg...) do { \
    225  dump_entry_trace(entry); \
  in hash_fn():
    237  static int hash_fn(struct dma_debug_entry *entry)
    243  return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
  in get_hash_bucket():
    249  static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
    253  int idx = hash_fn(entry);
  in __hash_bucket_find():
    297  struct dma_debug_entry *entry, *ret = NULL;
  [all …]
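The debug.c hits show dma-debug's lookup path: hash_fn() folds the DMA address into a bucket index, get_hash_bucket() selects that bucket, and __hash_bucket_find() scans it for a matching entry. Below is a minimal user-space sketch of the same bucket-hash pattern; the table size, shift, and singly linked chaining are illustrative choices, not dma-debug's real layout.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define HASH_SIZE     16    /* illustrative; dma-debug uses its own constants */
#define HASH_FN_SHIFT 13
#define HASH_FN_MASK  (HASH_SIZE - 1)

struct debug_entry {
    uint64_t            dev_addr;
    struct debug_entry *next;   /* simple bucket chain for the sketch */
};

static struct debug_entry *buckets[HASH_SIZE];

/* Same idea as hash_fn(): fold the address into a bucket index. */
static int hash_fn(uint64_t dev_addr)
{
    return (dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
}

/* Same idea as get_hash_bucket() + __hash_bucket_find(): pick the bucket,
 * then walk it looking for an entry with a matching address. */
static struct debug_entry *bucket_find(uint64_t dev_addr)
{
    struct debug_entry *entry;

    for (entry = buckets[hash_fn(dev_addr)]; entry; entry = entry->next)
        if (entry->dev_addr == dev_addr)
            return entry;
    return NULL;
}

int main(void)
{
    struct debug_entry e = { .dev_addr = 0x12340000 };
    int idx = hash_fn(e.dev_addr);

    e.next = buckets[idx];      /* register the entry in its bucket */
    buckets[idx] = &e;
    printf("found: %p\n", (void *)bucket_find(0x12340000));
    return 0;
}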
/kernel/
auditfilter.c
  in audit_init_entry():
    108  struct audit_entry *entry;
    111  entry = kzalloc(sizeof(*entry), GFP_KERNEL);
    112  if (unlikely(!entry))
    117  kfree(entry);
    120  entry->rule.fields = fields;
    122  return entry;
  in audit_match_signal():
    211  static int audit_match_signal(struct audit_entry *entry)
    213  struct audit_field *arch = entry->rule.arch_f;
    219  entry->rule.mask) &&
    221  entry->rule.mask));
  [all …]
async.c
  in async_run_entry_fn():
    117  struct async_entry *entry =
    123  pr_debug("calling %lli_%pS @ %i\n", (long long)entry->cookie,
    124  entry->func, task_pid_nr(current));
    127  entry->func(entry->data, entry->cookie);
    130  (long long)entry->cookie, entry->func,
    135  list_del_init(&entry->domain_list);
    136  list_del_init(&entry->global_list);
    139  kfree(entry);
  in __async_schedule_node_domain():
    151  struct async_entry *entry)
    156  INIT_LIST_HEAD(&entry->domain_list);
  [all …]
jump_label.c
  in addr_conflict():
    350  static int addr_conflict(struct jump_entry *entry, void *start, void *end)
    352  if (jump_entry_code(entry) <= (unsigned long)end &&
    353  jump_entry_code(entry) + jump_entry_size(entry) > (unsigned long)start)
  in arch_jump_label_transform_static():
    377  static void arch_jump_label_transform_static(struct jump_entry *entry,
  in jump_label_type():
    430  static enum jump_label_type jump_label_type(struct jump_entry *entry)
    432  struct static_key *key = jump_entry_key(entry);
    434  bool branch = jump_entry_is_branch(entry);
  in jump_label_can_update():
    440  static bool jump_label_can_update(struct jump_entry *entry, bool init)
    445  if (!init && jump_entry_is_init(entry))
    448  if (!kernel_text_address(jump_entry_code(entry))) {
  [all …]
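addr_conflict() above is a plain range-overlap test: a jump entry intersects the region being patched or unloaded when its code starts at or before `end` and ends after `start`. A compact sketch of the same predicate, using an invented stand-in type for the jump entry:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for a jump entry: a patch site with a start and size. */
struct range {
    uintptr_t code;
    uintptr_t size;
};

/* Same shape as addr_conflict(): the entry overlaps [start, end] when it
 * begins at or before `end` and ends strictly after `start`. */
static bool addr_conflict(const struct range *e, uintptr_t start, uintptr_t end)
{
    return e->code <= end && e->code + e->size > start;
}

int main(void)
{
    struct range e = { .code = 0x1000, .size = 4 };

    printf("%d %d\n",
           addr_conflict(&e, 0x0ff0, 0x1000),    /* touches the first byte -> 1 */
           addr_conflict(&e, 0x1004, 0x2000));   /* starts after the entry -> 0 */
    return 0;
}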
kexec_core.c
  in do_kimage_alloc_init():
    263  image->entry = &image->head;
  in kimage_add_entry():
    539  static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
    541  if (*image->entry != 0)
    542  image->entry++;
    544  if (image->entry == image->last_entry) {
    553  *image->entry = virt_to_boot_phys(ind_page) | IND_INDIRECTION;
    554  image->entry = ind_page;
    558  *image->entry = entry;
    559  image->entry++;
    560  *image->entry = 0;
  [all …]
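kimage_add_entry() appends destination entries into a page-sized array and, when that array fills up, turns the last slot into an IND_INDIRECTION pointer to a freshly allocated page so writing can continue there. A user-space sketch of that chained-array idea follows; the chunk size and tag bit are invented for the example, and real kexec stores boot-physical addresses rather than malloc pointers.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define CHUNK_WORDS 8                 /* tiny "page" for the example */
#define IND_NEXT    ((uintptr_t)1)    /* low bit tags "pointer to next chunk" */

struct image {
    uintptr_t *slot;    /* next slot to write */
    uintptr_t *last;    /* last usable slot in the current chunk */
};

static uintptr_t *new_chunk(void)
{
    uintptr_t *c = calloc(CHUNK_WORDS, sizeof(*c));

    if (!c)
        exit(1);
    return c;
}

/* Same idea as kimage_add_entry(): when the current chunk is full, store a
 * tagged pointer to a fresh chunk in the final slot and continue writing
 * there; the list stays zero-terminated after every append. */
static void image_add(struct image *img, uintptr_t value)
{
    if (img->slot == img->last) {
        uintptr_t *next = new_chunk();

        *img->slot = (uintptr_t)next | IND_NEXT;
        img->slot = next;
        img->last = next + CHUNK_WORDS - 1;
    }
    *img->slot++ = value;
    *img->slot = 0;
}

int main(void)
{
    uintptr_t *first = new_chunk();
    struct image img = { .slot = first, .last = first + CHUNK_WORDS - 1 };

    for (uintptr_t i = 1; i <= 20; i++)
        image_add(&img, i << 12);
    printf("20 entries written across chained chunks\n");
    return 0;
}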
kexec.c
  in kimage_alloc_init():
    22   static int kimage_alloc_init(struct kimage **rimage, unsigned long entry,
    33   if ((entry < phys_to_boot_phys(crashk_res.start)) ||
    34   (entry > phys_to_boot_phys(crashk_res.end)))
    43   image->start = entry;
  in do_kexec_load():
    87   static int do_kexec_load(unsigned long entry, unsigned long nr_segments,
    125  ret = kimage_alloc_init(&image, entry, nr_segments, segments, flags);
  in SYSCALL_DEFINE4():
    235  SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
    254  result = do_kexec_load(entry, nr_segments, ksegments, flags);
  in COMPAT_SYSCALL_DEFINE4():
    261  COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
    296  result = do_kexec_load(entry, nr_segments, ksegments, flags);
/kernel/bpf/
mprog.c
  in bpf_mprog_replace():
    108  static int bpf_mprog_replace(struct bpf_mprog_entry *entry,
    116  bpf_mprog_read(entry, idx, &fp, &cp);
    123  *entry_new = entry;
  in bpf_mprog_insert():
    127  static int bpf_mprog_insert(struct bpf_mprog_entry *entry,
    131  int total = bpf_mprog_total(entry);
    136  peer = bpf_mprog_peer(entry);
    137  bpf_mprog_entry_copy(peer, entry);
  in bpf_mprog_delete():
    151  static int bpf_mprog_delete(struct bpf_mprog_entry *entry,
    155  int total = bpf_mprog_total(entry);
    158  peer = bpf_mprog_peer(entry);
  [all …]
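The mprog matches suggest the update discipline: bpf_mprog_insert() copies the live entry into its peer (bpf_mprog_peer()/bpf_mprog_entry_copy()), modifies the copy, and returns it for the caller to publish, so readers never see a half-edited array. A rough user-space sketch of that double-buffer update, with invented types and a plain pointer swap standing in for the commit step:

#include <stdio.h>
#include <string.h>

#define MPROG_MAX 4

/* Invented stand-ins: two entry buffers that take turns being "live". */
struct mprog_entry {
    int items[MPROG_MAX];
    int total;
    struct mprog_entry *peer;   /* the other buffer */
};

struct mprog_bundle {
    struct mprog_entry a, b;
    struct mprog_entry *active;
};

static void bundle_init(struct mprog_bundle *bd)
{
    memset(bd, 0, sizeof(*bd));
    bd->a.peer = &bd->b;
    bd->b.peer = &bd->a;
    bd->active = &bd->a;
}

/* Copy the live entry into its peer, modify the copy, return the copy.
 * The caller publishes it by swapping the active pointer ("commit"). */
static struct mprog_entry *mprog_insert(struct mprog_entry *entry, int item)
{
    struct mprog_entry *peer = entry->peer;

    if (entry->total >= MPROG_MAX)
        return NULL;
    memcpy(peer->items, entry->items, sizeof(entry->items));
    peer->total = entry->total;
    peer->items[peer->total++] = item;
    return peer;
}

int main(void)
{
    struct mprog_bundle bd;
    struct mprog_entry *new_entry;

    bundle_init(&bd);
    new_entry = mprog_insert(bd.active, 42);
    if (new_entry)
        bd.active = new_entry;  /* commit: readers now see the new copy */
    printf("total=%d first=%d\n", bd.active->total, bd.active->items[0]);
    return 0;
}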
tcx.c
  in tcx_prog_attach():
    14   struct bpf_mprog_entry *entry, *entry_new;
    34   entry = tcx_entry_fetch_or_create(dev, ingress, &created);
    35   if (!entry) {
    39   ret = bpf_mprog_attach(entry, &entry_new, prog, NULL, replace_prog,
    43   if (entry != entry_new) {
    48   bpf_mprog_commit(entry);
    50   tcx_entry_free(entry);
  in tcx_prog_detach():
    63   struct bpf_mprog_entry *entry, *entry_new;
    73   entry = tcx_entry_fetch(dev, ingress);
    74   if (!entry) {
  [all …]
dispatcher.c
  in bpf_dispatcher_add_prog():
    46   struct bpf_dispatcher_prog *entry;
    51   entry = bpf_dispatcher_find_prog(d, prog);
    52   if (entry) {
    53   refcount_inc(&entry->users);
    57   entry = bpf_dispatcher_find_free(d);
    58   if (!entry)
    62   entry->prog = prog;
    63   refcount_set(&entry->users, 1);
  in bpf_dispatcher_remove_prog():
    71   struct bpf_dispatcher_prog *entry;
    76   entry = bpf_dispatcher_find_prog(d, prog);
  [all …]
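bpf_dispatcher_add_prog() is a find-or-add over a small fixed table: if the program is already present only its refcount is bumped, otherwise a free slot is claimed and the count starts at one. A small sketch of the same bookkeeping; the table size is arbitrary and a plain int stands in for refcount_t:

#include <stdio.h>

#define DISPATCH_MAX 4

struct dispatcher_prog {
    void *prog;     /* NULL means the slot is free */
    int   users;
};

static struct dispatcher_prog table[DISPATCH_MAX];

static struct dispatcher_prog *find_prog(void *prog)
{
    for (int i = 0; i < DISPATCH_MAX; i++)
        if (table[i].prog == prog)
            return &table[i];
    return NULL;
}

/* Same shape as bpf_dispatcher_add_prog(): reuse an existing entry if the
 * program is already attached, otherwise claim a free slot. */
static int add_prog(void *prog)
{
    struct dispatcher_prog *entry = find_prog(prog);

    if (entry) {
        entry->users++;
        return 0;
    }
    entry = find_prog(NULL);    /* a free slot has prog == NULL */
    if (!entry)
        return -1;              /* table full */
    entry->prog = prog;
    entry->users = 1;
    return 0;
}

int main(void)
{
    int dummy;

    add_prog(&dummy);
    add_prog(&dummy);           /* second attach just bumps the count */
    printf("users=%d\n", find_prog(&dummy)->users);
    return 0;
}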
/kernel/trace/
trace_events_inject.c
  in trace_inject_entry():
    21   void *entry;
    24   entry = trace_event_buffer_reserve(&fbuffer, file, len);
    25   if (entry) {
    26   memcpy(entry, rec, len);
  in trace_alloc_entry():
    158  void *entry = NULL;
    161  entry = kzalloc(entry_size + 1, GFP_KERNEL);
    162  if (!entry)
    179  str_item = (u32 *)(entry + field->offset);
    184  paddr = (char **)(entry + field->offset);
    190  return entry;
  [all …]
trace_syscalls.c
  in syscall_get_enter_fields():
    26   struct syscall_metadata *entry = call->data;
    28   return &entry->enter_fields;
  in get_syscall_name():
    117  struct syscall_metadata *entry;
    119  entry = syscall_nr_to_meta(syscall);
    120  if (!entry)
    123  return entry->name;
  in print_syscall_enter():
    134  struct syscall_metadata *entry;
    139  entry = syscall_nr_to_meta(syscall);
    141  if (!entry)
    144  if (entry->enter_event->event.type != ent->type) {
  [all …]
trace_fprobe.c
  in NOKPROBE_SYMBOL():
    176  struct fentry_trace_entry_head *entry;
    189  entry = trace_event_buffer_reserve(&fbuffer, trace_file,
    190  sizeof(*entry) + tf->tp.size + dsize);
    191  if (!entry)
    195  entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);
    196  entry->ip = entry_ip;
    197  store_trace_args(&entry[1], &tf->tp, regs, sizeof(*entry), dsize);
  in __fexit_trace_func():
    219  struct fexit_trace_entry_head *entry;
    232  entry = trace_event_buffer_reserve(&fbuffer, trace_file,
    233  sizeof(*entry) + tf->tp.size + dsize);
  [all …]
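Both fprobe handlers reserve sizeof(*entry) + tf->tp.size + dsize in one shot and then write the probe arguments immediately after the fixed header (&entry[1]). A user-space sketch of that header-plus-trailing-payload layout, with made-up struct and field names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Fixed header; the variable-size argument area follows it in the same buffer. */
struct entry_head {
    unsigned long ip;
    unsigned int  data_len;
};

/* Same idea as reserving sizeof(*entry) + extra and filling &entry[1]. */
static struct entry_head *record_event(unsigned long ip,
                                       const void *data, unsigned int len)
{
    struct entry_head *entry = malloc(sizeof(*entry) + len);

    if (!entry)
        return NULL;
    entry->ip = ip;
    entry->data_len = len;
    memcpy(entry + 1, data, len);   /* payload starts right after the header */
    return entry;
}

int main(void)
{
    long args[2] = { 1, 2 };
    struct entry_head *e = record_event(0xc0ffeeUL, args, sizeof(args));

    if (e) {
        const long *payload = (const long *)(e + 1);

        printf("ip=%#lx first_arg=%ld\n", e->ip, payload[0]);
        free(e);
    }
    return 0;
}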
trace_mmiotrace.c
  in mmio_print_rw():
    167  struct trace_entry *entry = iter->ent;
    175  trace_assign_type(field, entry);
  in mmio_print_map():
    212  struct trace_entry *entry = iter->ent;
    220  trace_assign_type(field, entry);
  in mmio_print_mark():
    246  struct trace_entry *entry = iter->ent;
    247  struct print_entry *print = (struct print_entry *)entry;
  in __trace_mmiotrace_rw():
    300  struct trace_mmiotrace_rw *entry;
    305  sizeof(*entry), trace_ctx);
    310  entry = ring_buffer_event_data(event);
    311  entry->rw = *rw;
  [all …]
trace_sched_wakeup.c
  in tracing_sched_switch_trace():
    382  struct ctx_switch_entry *entry;
    385  sizeof(*entry), trace_ctx);
    388  entry = ring_buffer_event_data(event);
    389  entry->prev_pid = prev->pid;
    390  entry->prev_prio = prev->prio;
    391  entry->prev_state = task_state_index(prev);
    392  entry->next_pid = next->pid;
    393  entry->next_prio = next->prio;
    394  entry->next_state = task_state_index(next);
    395  entry->next_cpu = task_cpu(next);
  [all …]
trace_output.c
  in trace_print_bputs_msg_only():
    28   struct trace_entry *entry = iter->ent;
    31   trace_assign_type(field, entry);
  in trace_print_bprintk_msg_only():
    41   struct trace_entry *entry = iter->ent;
    44   trace_assign_type(field, entry);
  in trace_print_printk_msg_only():
    54   struct trace_entry *entry = iter->ent;
    57   trace_assign_type(field, entry);
  in trace_raw_output_prep():
    301  struct trace_entry *entry;
    304  entry = iter->ent;
    306  if (entry->type != event->event.type) {
  in trace_print_lat_fmt():
    444  int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
  [all …]
trace_events_synth.c
  in print_synth_event():
    351  struct synth_trace_event *entry;
    357  entry = (struct synth_trace_event *)iter->ent;
    377  union trace_synth_field *data = &entry->fields[n_u64];
    381  (char *)entry + data->as_dynamic.offset,
    387  (char *)&entry->fields[n_u64].as_u64,
    392  union trace_synth_field *data = &entry->fields[n_u64];
    393  unsigned long *p = (void *)entry + data->as_dynamic.offset;
    407  &entry->fields[n_u64],
    413  entry->fields[n_u64].as_u64,
  in trace_string():
    430  static unsigned int trace_string(struct synth_trace_event *entry,
  [all …]
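print_synth_event() resolves a dynamic field by adding data->as_dynamic.offset to the start of the record, i.e. the fixed part of the event holds a small (offset, len) descriptor and the bytes live further down in the same buffer. A sketch of that layout with hypothetical structures:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Descriptor for a dynamic field: where its bytes start (relative to the
 * beginning of the record) and how long they are. */
struct dyn_field {
    unsigned short offset;
    unsigned short len;
};

struct synth_record {
    unsigned int     id;
    struct dyn_field name;      /* dynamic string descriptor */
    /* dynamic data follows the fixed part */
};

static struct synth_record *make_record(unsigned int id, const char *name)
{
    size_t nlen = strlen(name) + 1;
    struct synth_record *rec = malloc(sizeof(*rec) + nlen);

    if (!rec)
        return NULL;
    rec->id = id;
    rec->name.offset = sizeof(*rec);    /* data starts right after the header */
    rec->name.len = (unsigned short)nlen;
    memcpy((char *)rec + rec->name.offset, name, nlen);
    return rec;
}

int main(void)
{
    struct synth_record *rec = make_record(7, "wakeup_latency");

    if (rec) {
        /* Reader side: resolve the descriptor back into a pointer. */
        const char *name = (const char *)rec + rec->name.offset;

        printf("id=%u name=%s len=%u\n", rec->id, name, rec->name.len);
        free(rec);
    }
    return 0;
}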
ftrace.c
  in __ftrace_lookup_ip():
    1142  struct ftrace_func_entry *entry;
    1148  hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
    1149  if (entry->ip == ip)
    1150  return entry;
  in __add_hash_entry():
    1175  struct ftrace_func_entry *entry)
    1180  key = ftrace_hash_key(hash, entry->ip);
    1182  hlist_add_head(&entry->hlist, hhd);
  in add_hash_entry():
    1189  struct ftrace_func_entry *entry;
    1191  entry = kmalloc(sizeof(*entry), GFP_KERNEL);
    1192  if (!entry)
  [all …]
ring_buffer_benchmark.c
  in read_event():
    88   int *entry;
    95   entry = ring_buffer_event_data(event);
    96   if (*entry != cpu) {
  in read_page():
    111  int *entry;
    145  entry = ring_buffer_event_data(event);
    146  if (*entry != cpu) {
    158  entry = ring_buffer_event_data(event);
    159  if (*entry != cpu) {
  in ring_buffer_producer():
    250  int *entry;
    259  entry = ring_buffer_event_data(event);
  [all …]
trace_branch.c
  in probe_likely_condition():
    38   struct trace_branch *entry;
    65   sizeof(*entry), trace_ctx);
    69   entry = ring_buffer_event_data(event);
    77   strncpy(entry->func, f->data.func, TRACE_FUNC_SIZE);
    78   strncpy(entry->file, p, TRACE_FILE_SIZE);
    79   entry->func[TRACE_FUNC_SIZE] = 0;
    80   entry->file[TRACE_FILE_SIZE] = 0;
    81   entry->constant = f->constant;
    82   entry->line = f->data.line;
    83   entry->correct = val == expect;
  [all …]
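probe_likely_condition() copies the function and file names with strncpy() and then forces a NUL one byte past the copy limit, because strncpy() alone does not terminate a source that fills the buffer. A tiny demonstration of that bounded-copy pattern; the buffer size is invented:

#include <stdio.h>
#include <string.h>

#define FUNC_SIZE 15    /* invented; the kernel uses TRACE_FUNC_SIZE */

struct branch_entry {
    char func[FUNC_SIZE + 1];   /* +1 leaves room for the forced terminator */
};

static void fill_entry(struct branch_entry *entry, const char *func)
{
    /* strncpy() pads short names with zeros but will not terminate a name
     * that is FUNC_SIZE bytes or longer, hence the explicit NUL. */
    strncpy(entry->func, func, FUNC_SIZE);
    entry->func[FUNC_SIZE] = 0;
}

int main(void)
{
    struct branch_entry e;

    fill_entry(&e, "a_function_name_much_longer_than_the_buffer");
    printf("stored: \"%s\" (truncated to %zu chars)\n", e.func, strlen(e.func));
    return 0;
}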
/kernel/printk/
index.c
  in pi_next():
    46   struct pi_entry *entry = pi_get_entry(mod, *pos);
    50   return entry;
  in pi_show():
    75   const struct pi_entry *entry = v;
    85   if (!entry->fmt)
    88   if (entry->level)
    89   printk_parse_prefix(entry->level, &level, &flags);
    91   prefix_len = printk_parse_prefix(entry->fmt, &level, &flags);
    107  seq_printf(s, " %s:%d %s \"", entry->file, entry->line, entry->func);
    108  if (entry->subsys_fmt_prefix)
    109  seq_escape_printf_format(s, entry->subsys_fmt_prefix);
  [all …]
/kernel/power/
console.c
  in pm_vt_switch_required():
    48   struct pm_vt_switch *entry, *tmp;
    59   entry = kmalloc(sizeof(*entry), GFP_KERNEL);
    60   if (!entry)
    63   entry->required = required;
    64   entry->dev = dev;
    66   list_add(&entry->head, &pm_vt_switch_list);
  in pm_vt_switch():
    109  struct pm_vt_switch *entry;
    119  list_for_each_entry(entry, &pm_vt_switch_list, head) {
    120  if (entry->required)
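pm_vt_switch_required() is the usual registration idiom: allocate a small entry, record the device and its flag, and link it onto a global list that pm_vt_switch() later walks. A user-space sketch of the same register-then-scan pattern; it uses a hand-rolled singly linked list where the kernel uses list_head, and a string stands in for struct device:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct vt_switch {
    const char       *dev;      /* stand-in for struct device * */
    bool              required;
    struct vt_switch *next;
};

static struct vt_switch *vt_switch_list;

/* Register: allocate an entry, fill it in, link it onto the list head. */
static int vt_switch_required(const char *dev, bool required)
{
    struct vt_switch *entry = malloc(sizeof(*entry));

    if (!entry)
        return -1;
    entry->dev = dev;
    entry->required = required;
    entry->next = vt_switch_list;
    vt_switch_list = entry;
    return 0;
}

/* Query: scan the list; one entry that requires the switch is enough. */
static bool vt_switch_needed(void)
{
    for (struct vt_switch *entry = vt_switch_list; entry; entry = entry->next)
        if (entry->required)
            return true;
    return false;
}

int main(void)
{
    vt_switch_required("fb0", false);
    vt_switch_required("gpu0", true);
    printf("switch needed: %d\n", vt_switch_needed());
    return 0;
}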
/kernel/sched/
wait.c
  in __wake_up_common():
    91   curr = list_next_entry(bookmark, entry);
    93   list_del(&bookmark->entry);
    96   curr = list_first_entry(&wq_head->head, wait_queue_entry_t, entry);
    98   if (&curr->entry == &wq_head->head)
    101  list_for_each_entry_safe_from(curr, next, &wq_head->head, entry) {
    115  (&next->entry != &wq_head->head)) {
    117  list_add_tail(&bookmark->entry, &next->entry);
  in __wake_up_common_lock():
    135  INIT_LIST_HEAD(&bookmark.entry);
  in prepare_to_wait():
    279  if (list_empty(&wq_entry->entry))
  in prepare_to_wait_exclusive():
    295  if (list_empty(&wq_entry->entry)) {
  [all …]
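The wait.c matches are the bookmark trick in __wake_up_common(): after waking a batch of waiters it parks a bookmark entry in the queue, so a later pass can resume from the entry after the bookmark instead of rescanning from the head. A simplified, single-threaded sketch of that resumable iteration (no wait-queue flags or locking, invented types):

#include <stdbool.h>
#include <stdio.h>

struct node {
    int          id;
    struct node *prev, *next;
};

static void list_init(struct node *n)
{
    n->prev = n->next = n;
}

/* Insert n immediately before pos. */
static void list_add_before(struct node *n, struct node *pos)
{
    n->prev = pos->prev;
    n->next = pos;
    pos->prev->next = n;
    pos->prev = n;
}

static void list_del(struct node *n)
{
    n->prev->next = n->next;
    n->next->prev = n->prev;
    n->prev = n->next = n;      /* leave it self-linked, i.e. "not queued" */
}

/* Wake at most `batch` waiters; if more remain, park the bookmark right
 * before the first unhandled one so the next call resumes there. */
static bool wake_batch(struct node *head, struct node *bm, int batch)
{
    struct node *curr;

    if (bm->next == bm) {           /* bookmark not queued: start at the top */
        curr = head->next;
    } else {                        /* resume just after the bookmark */
        curr = bm->next;
        list_del(bm);
    }
    while (curr != head) {
        struct node *next = curr->next;

        printf("waking %d\n", curr->id);
        if (--batch == 0 && next != head) {
            list_add_before(bm, next);
            return false;           /* more work left */
        }
        curr = next;
    }
    return true;
}

int main(void)
{
    struct node head, bm, waiters[5];

    list_init(&head);
    list_init(&bm);
    for (int i = 0; i < 5; i++) {
        waiters[i].id = i;
        list_add_before(&waiters[i], &head);   /* append at the tail */
    }
    while (!wake_batch(&head, &bm, 2))
        printf("-- released the lock, will resume --\n");
    return 0;
}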
/kernel/futex/
core.c
  in fetch_robust_entry():
    766  static inline int fetch_robust_entry(struct robust_list __user **entry,
    775  *entry = (void __user *)(uentry & ~1UL);
  in exit_robust_list():
    790  struct robust_list __user *entry, *next_entry, *pending;
    800  if (fetch_robust_entry(&entry, &head->list.next, &pi))
    815  while (entry != &head->list) {
    820  rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
    825  if (entry != pending) {
    826  if (handle_futex_death((void __user *)entry + futex_offset,
    832  entry = next_entry;
  in futex_uaddr():
    850  static void __user *futex_uaddr(struct robust_list __user *entry,
  [all …]
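fetch_robust_entry() decodes one user-space word into two pieces of information: the low bit says whether the lock is PI, and masking it off yields the actual list pointer. A user-space sketch of that tagged-pointer decode; the get_user() copy from user memory is omitted and only the bit manipulation is shown:

#include <stdint.h>
#include <stdio.h>

struct robust_list {
    struct robust_list *next;
};

/* Same idea as fetch_robust_entry(): the stored word is a pointer whose
 * lowest bit doubles as the "this is a PI lock" flag. */
static struct robust_list *decode_entry(uintptr_t word, int *pi)
{
    *pi = word & 1;
    return (struct robust_list *)(word & ~(uintptr_t)1);
}

int main(void)
{
    struct robust_list node;
    uintptr_t word = (uintptr_t)&node | 1;      /* pretend it was stored tagged */
    int pi;
    struct robust_list *entry = decode_entry(word, &pi);

    printf("entry=%p pi=%d\n", (void *)entry, pi);
    return 0;
}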
/kernel/events/
callchain.c
  in perf_callchain_kernel():
    38   __weak void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
  in perf_callchain_user():
    43   __weak void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
  in get_perf_callchain():
    183  struct perf_callchain_entry *entry;
    187  entry = get_callchain_entry(&rctx);
    188  if (!entry)
    191  ctx.entry = entry;
    193  ctx.nr = entry->nr = init_nr;
    225  return entry;
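The two __weak definitions above are the generic fallbacks: an architecture that provides perf_callchain_kernel()/perf_callchain_user() overrides them at link time, and everything else gets an empty callchain. A minimal demonstration of the weak-symbol mechanism itself (gcc/clang attribute, hypothetical function name):

#include <stdio.h>

/* Default, weak definition: used only when no strong definition exists. */
__attribute__((weak)) void fill_callchain(void)
{
    printf("generic fallback: no unwinder on this build\n");
}

/* Uncommenting this strong definition would override the weak one at link
 * time without touching any caller:
 *
 * void fill_callchain(void)
 * {
 *     printf("arch-specific unwinder\n");
 * }
 */

int main(void)
{
    fill_callchain();
    return 0;
}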
/kernel/locking/
lockdep.c
  in class_lock_list_valid():
    1020  list_for_each_entry(e, h, entry) {
  in __check_data_structures():
    1107  hlist_for_each_entry_rcu(chain, head, entry) {
    1119  if (!in_any_class_list(&e->entry)) {
    1134  if (in_any_class_list(&e->entry)) {
  in add_lock_to_list():
    1413  struct lock_list *entry;
    1418  entry = alloc_list_entry();
    1419  if (!entry)
    1422  entry->class = this;
    1423  entry->links_to = links_to;
    1424  entry->dep = dep;
  [all …]