Cross-reference matches for the identifier "entry" (one block per file; each
match line is "line: code", annotated with the enclosing function and, where
the tool reported it, whether "entry" is an argument or a local there;
"[all …]" marks result lists the tool truncated).

/kernel/dma/
debug.c
  166: static inline void dump_entry_trace(struct dma_debug_entry *entry)   [in dump_entry_trace(), argument]
  169: if (entry) {   [in dump_entry_trace()]
  171: stack_trace_print(entry->stack_entries, entry->stack_len, 0);   [in dump_entry_trace()]
  217: #define err_printk(dev, entry, format, arg...) do { \   [argument]
  224: dump_entry_trace(entry); \
  236: static int hash_fn(struct dma_debug_entry *entry)   [in hash_fn(), argument]
  242: return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;   [in hash_fn()]
  248: static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,   [in get_hash_bucket(), argument]
  252: int idx = hash_fn(entry);   [in get_hash_bucket()]
  296: struct dma_debug_entry *entry, *ret = NULL;   [in __hash_bucket_find(), local]
  [all …]

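The hash_fn()/get_hash_bucket() fragments above show the usual pattern of
hashing a key (here a DMA address) into a fixed array of buckets. Below is a
minimal userspace sketch of that pattern; the shift/mask constants and the
struct layout are illustrative, not the kernel's.

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative constants; the kernel's HASH_FN_SHIFT/HASH_FN_MASK differ. */
#define HASH_SHIFT 13
#define HASH_SIZE  1024
#define HASH_MASK  (HASH_SIZE - 1)

struct entry {
    uint64_t dev_addr;
    struct entry *next;          /* bucket chain */
};

static struct entry *buckets[HASH_SIZE];

/* Drop the low (page-offset) bits, then mask into the bucket array. */
static unsigned int hash_fn(const struct entry *e)
{
    return (unsigned int)(e->dev_addr >> HASH_SHIFT) & HASH_MASK;
}

static struct entry **get_hash_bucket(const struct entry *e)
{
    return &buckets[hash_fn(e)];
}

int main(void)
{
    struct entry e = { .dev_addr = 0xdeadb000, .next = NULL };
    printf("bucket index: %u, bucket head: %p\n",
           hash_fn(&e), (void *)*get_hash_bucket(&e));
    return 0;
}
```
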
/kernel/
auditfilter.c
  108: struct audit_entry *entry;   [in audit_init_entry(), local]
  111: entry = kzalloc(sizeof(*entry), GFP_KERNEL);   [in audit_init_entry()]
  112: if (unlikely(!entry))   [in audit_init_entry()]
  117: kfree(entry);   [in audit_init_entry()]
  120: entry->rule.fields = fields;   [in audit_init_entry()]
  122: return entry;   [in audit_init_entry()]
  211: static int audit_match_signal(struct audit_entry *entry)   [in audit_match_signal(), argument]
  213: struct audit_field *arch = entry->rule.arch_f;   [in audit_match_signal()]
  219: entry->rule.mask) &&   [in audit_match_signal()]
  221: entry->rule.mask));   [in audit_match_signal()]
  [all …]

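The audit_init_entry() fragments show the zero-allocate, fill, free-on-partial-
failure idiom: the entry is released if a later allocation fails. A minimal
userspace sketch of the same shape; the struct names and the fields array are
illustrative, not the kernel's.

```c
#include <stdlib.h>

struct field { int type; };
struct rule  { struct field *fields; };
struct audit_entry_sketch { struct rule rule; };

/* Allocate an entry plus its field array; on any failure, unwind and
 * return NULL so the caller never sees a half-built object. */
static struct audit_entry_sketch *init_entry(size_t field_count)
{
    struct audit_entry_sketch *entry = calloc(1, sizeof(*entry));
    if (!entry)
        return NULL;

    struct field *fields = calloc(field_count, sizeof(*fields));
    if (!fields) {
        free(entry);             /* partial failure: release what we own */
        return NULL;
    }

    entry->rule.fields = fields;
    return entry;
}

int main(void)
{
    struct audit_entry_sketch *e = init_entry(4);
    if (!e)
        return 1;
    e->rule.fields[0].type = 1;
    free(e->rule.fields);
    free(e);
    return 0;
}
```
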
async.c
  117: struct async_entry *entry =   [in async_run_entry_fn(), local]
  123: pr_debug("calling %lli_%pS @ %i\n", (long long)entry->cookie,   [in async_run_entry_fn()]
  124: entry->func, task_pid_nr(current));   [in async_run_entry_fn()]
  127: entry->func(entry->data, entry->cookie);   [in async_run_entry_fn()]
  130: (long long)entry->cookie, entry->func,   [in async_run_entry_fn()]
  135: list_del_init(&entry->domain_list);   [in async_run_entry_fn()]
  136: list_del_init(&entry->global_list);   [in async_run_entry_fn()]
  139: kfree(entry);   [in async_run_entry_fn()]
  151: struct async_entry *entry)   [in __async_schedule_node_domain(), argument]
  156: INIT_LIST_HEAD(&entry->domain_list);   [in __async_schedule_node_domain()]
  [all …]

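async_run_entry_fn() above invokes the queued callback, unlinks the entry from
its lists, and frees it: after the call, nobody may touch the entry again. A
minimal sketch of that fire-and-release shape (the list handling is elided and
the names are illustrative, not the kernel's):

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef uint64_t cookie_t;
typedef void (*async_func_t)(void *data, cookie_t cookie);

/* One queued unit of work; the kernel version also sits on two lists. */
struct async_entry_sketch {
    async_func_t func;
    void        *data;
    cookie_t     cookie;
};

/* Invoke the callback, then release the entry: the caller must not
 * touch it afterwards (the unlink-from-lists step is omitted here). */
static void run_entry(struct async_entry_sketch *entry)
{
    printf("calling %llu\n", (unsigned long long)entry->cookie);
    entry->func(entry->data, entry->cookie);
    free(entry);
}

static void hello(void *data, cookie_t cookie)
{
    printf("work %llu: %s\n", (unsigned long long)cookie, (char *)data);
}

int main(void)
{
    static char payload[] = "payload";
    struct async_entry_sketch *e = malloc(sizeof(*e));
    if (!e)
        return 1;
    *e = (struct async_entry_sketch){ .func = hello, .data = payload, .cookie = 1 };
    run_entry(e);
    return 0;
}
```
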
jump_label.c
  309: static int addr_conflict(struct jump_entry *entry, void *start, void *end)   [in addr_conflict(), argument]
  311: if (jump_entry_code(entry) <= (unsigned long)end &&   [in addr_conflict()]
  312: jump_entry_code(entry) + jump_entry_size(entry) > (unsigned long)start)   [in addr_conflict()]
  336: static void arch_jump_label_transform_static(struct jump_entry *entry,   [in arch_jump_label_transform_static(), argument]
  389: static enum jump_label_type jump_label_type(struct jump_entry *entry)   [in jump_label_type(), argument]
  391: struct static_key *key = jump_entry_key(entry);   [in jump_label_type()]
  393: bool branch = jump_entry_is_branch(entry);   [in jump_label_type()]
  399: static bool jump_label_can_update(struct jump_entry *entry, bool init)   [in jump_label_can_update(), argument]
  404: if (!init && jump_entry_is_init(entry))   [in jump_label_can_update()]
  407: if (!kernel_text_address(jump_entry_code(entry))) {   [in jump_label_can_update()]
  [all …]

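addr_conflict() above is an interval-overlap test: the entry's code range
[code, code + size) is compared against [start, end]. The same comparison as
a standalone sketch, with the jump_entry accessors simplified to plain
address/size parameters:

```c
#include <assert.h>
#include <stdbool.h>

/* True if [code, code + size) overlaps [start, end]: the entry begins at
 * or before `end` and extends past `start`, mirroring lines 311-312 above. */
static bool addr_conflict(unsigned long code, unsigned long size,
                          unsigned long start, unsigned long end)
{
    return code <= end && code + size > start;
}

int main(void)
{
    assert(addr_conflict(0x1000, 0x10, 0x1008, 0x2000));   /* overlaps */
    assert(!addr_conflict(0x1000, 0x10, 0x1010, 0x2000));  /* ends at start */
    return 0;
}
```
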
kexec_core.c
  265: image->entry = &image->head;   [in do_kimage_alloc_init()]
  535: static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)   [in kimage_add_entry(), argument]
  537: if (*image->entry != 0)   [in kimage_add_entry()]
  538: image->entry++;   [in kimage_add_entry()]
  540: if (image->entry == image->last_entry) {   [in kimage_add_entry()]
  549: *image->entry = virt_to_boot_phys(ind_page) | IND_INDIRECTION;   [in kimage_add_entry()]
  550: image->entry = ind_page;   [in kimage_add_entry()]
  554: *image->entry = entry;   [in kimage_add_entry()]
  555: image->entry++;   [in kimage_add_entry()]
  556: *image->entry = 0;   [in kimage_add_entry()]
  [all …]

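kimage_add_entry() above appends to a flat list of tagged words: when the
current page fills up, it writes a link word (tagged with IND_INDIRECTION)
pointing at a fresh page and continues there, keeping the list
zero-terminated. A userspace sketch of that chained-block list; the tag bit
and block size here are illustrative, not the kernel's:

```c
#include <stdint.h>
#include <stdlib.h>

/* Entries are tagged words; bit 0 marks a link to the next block,
 * loosely mirroring IND_INDIRECTION above. Sizes are illustrative. */
#define SKETCH_LINK      0x1UL
#define ENTRIES_PER_PAGE 512

struct image_sketch {
    uintptr_t *entry;        /* next free slot */
    uintptr_t *last_entry;   /* last usable slot in the current block */
};

static int image_init(struct image_sketch *img)
{
    uintptr_t *page = calloc(ENTRIES_PER_PAGE, sizeof(*page));
    if (!page)
        return -1;
    img->entry = page;
    img->last_entry = page + ENTRIES_PER_PAGE - 1;
    return 0;
}

static int add_entry(struct image_sketch *img, uintptr_t value)
{
    if (img->entry == img->last_entry) {
        /* Current block is full: allocate a new one and chain to it. */
        uintptr_t *ind = calloc(ENTRIES_PER_PAGE, sizeof(*ind));
        if (!ind)
            return -1;
        *img->entry = (uintptr_t)ind | SKETCH_LINK;
        img->entry = ind;
        img->last_entry = ind + ENTRIES_PER_PAGE - 1;
    }
    *img->entry++ = value;
    *img->entry = 0;             /* keep the list zero-terminated */
    return 0;
}

int main(void)
{
    struct image_sketch img;
    if (image_init(&img))
        return 1;
    /* Even values keep bit 0 clear, so they never look like link words.
     * Blocks are intentionally leaked in this short demo. */
    for (uintptr_t v = 2; v < 4096; v += 2)
        if (add_entry(&img, v))
            return 1;
    return 0;
}
```
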
kexec.c
  22: static int kimage_alloc_init(struct kimage **rimage, unsigned long entry,   [in kimage_alloc_init(), argument]
  33: if ((entry < phys_to_boot_phys(crashk_res.start)) ||   [in kimage_alloc_init()]
  34: (entry > phys_to_boot_phys(crashk_res.end)))   [in kimage_alloc_init()]
  43: image->start = entry;   [in kimage_alloc_init()]
  87: static int do_kexec_load(unsigned long entry, unsigned long nr_segments,   [in do_kexec_load(), argument]
  125: ret = kimage_alloc_init(&image, entry, nr_segments, segments, flags);   [in do_kexec_load()]
  228: SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,   [in SYSCALL_DEFINE4(), argument]
  247: result = do_kexec_load(entry, nr_segments, ksegments, flags);   [in SYSCALL_DEFINE4()]
  254: COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,   [in COMPAT_SYSCALL_DEFINE4(), argument]
  289: result = do_kexec_load(entry, nr_segments, ksegments, flags);   [in COMPAT_SYSCALL_DEFINE4()]

/kernel/trace/
trace_events_inject.c
  21: void *entry;   [in trace_inject_entry(), local]
  24: entry = trace_event_buffer_reserve(&fbuffer, file, len);   [in trace_inject_entry()]
  25: if (entry) {   [in trace_inject_entry()]
  26: memcpy(entry, rec, len);   [in trace_inject_entry()]
  158: void *entry = NULL;   [in trace_alloc_entry(), local]
  161: entry = kzalloc(entry_size + 1, GFP_KERNEL);   [in trace_alloc_entry()]
  162: if (!entry)   [in trace_alloc_entry()]
  179: str_item = (u32 *)(entry + field->offset);   [in trace_alloc_entry()]
  184: paddr = (char **)(entry + field->offset);   [in trace_alloc_entry()]
  190: return entry;   [in trace_alloc_entry()]
  [all …]

trace_syscalls.c
  26: struct syscall_metadata *entry = call->data;   [in syscall_get_enter_fields(), local]
  28: return &entry->enter_fields;   [in syscall_get_enter_fields()]
  117: struct syscall_metadata *entry;   [in get_syscall_name(), local]
  119: entry = syscall_nr_to_meta(syscall);   [in get_syscall_name()]
  120: if (!entry)   [in get_syscall_name()]
  123: return entry->name;   [in get_syscall_name()]
  134: struct syscall_metadata *entry;   [in print_syscall_enter(), local]
  139: entry = syscall_nr_to_meta(syscall);   [in print_syscall_enter()]
  141: if (!entry)   [in print_syscall_enter()]
  144: if (entry->enter_event->event.type != ent->type) {   [in print_syscall_enter()]
  [all …]

trace_output.c
  29: struct trace_entry *entry = iter->ent;   [in trace_print_bputs_msg_only(), local]
  32: trace_assign_type(field, entry);   [in trace_print_bputs_msg_only()]
  42: struct trace_entry *entry = iter->ent;   [in trace_print_bprintk_msg_only(), local]
  45: trace_assign_type(field, entry);   [in trace_print_bprintk_msg_only()]
  55: struct trace_entry *entry = iter->ent;   [in trace_print_printk_msg_only(), local]
  58: trace_assign_type(field, entry);   [in trace_print_printk_msg_only()]
  299: struct trace_entry *entry;   [in trace_raw_output_prep(), local]
  302: entry = iter->ent;   [in trace_raw_output_prep()]
  304: if (entry->type != event->event.type) {   [in trace_raw_output_prep()]
  441: int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)   [in trace_print_lat_fmt(), argument]
  [all …]

trace_mmiotrace.c
  167: struct trace_entry *entry = iter->ent;   [in mmio_print_rw(), local]
  175: trace_assign_type(field, entry);   [in mmio_print_rw()]
  212: struct trace_entry *entry = iter->ent;   [in mmio_print_map(), local]
  220: trace_assign_type(field, entry);   [in mmio_print_map()]
  246: struct trace_entry *entry = iter->ent;   [in mmio_print_mark(), local]
  247: struct print_entry *print = (struct print_entry *)entry;   [in mmio_print_mark()]
  300: struct trace_mmiotrace_rw *entry;   [in __trace_mmiotrace_rw(), local]
  305: sizeof(*entry), trace_ctx);   [in __trace_mmiotrace_rw()]
  310: entry = ring_buffer_event_data(event);   [in __trace_mmiotrace_rw()]
  311: entry->rw = *rw;   [in __trace_mmiotrace_rw()]
  [all …]

trace_sched_wakeup.c
  382: struct ctx_switch_entry *entry;   [in tracing_sched_switch_trace(), local]
  385: sizeof(*entry), trace_ctx);   [in tracing_sched_switch_trace()]
  388: entry = ring_buffer_event_data(event);   [in tracing_sched_switch_trace()]
  389: entry->prev_pid = prev->pid;   [in tracing_sched_switch_trace()]
  390: entry->prev_prio = prev->prio;   [in tracing_sched_switch_trace()]
  391: entry->prev_state = task_state_index(prev);   [in tracing_sched_switch_trace()]
  392: entry->next_pid = next->pid;   [in tracing_sched_switch_trace()]
  393: entry->next_prio = next->prio;   [in tracing_sched_switch_trace()]
  394: entry->next_state = task_state_index(next);   [in tracing_sched_switch_trace()]
  395: entry->next_cpu = task_cpu(next);   [in tracing_sched_switch_trace()]
  [all …]

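tracing_sched_switch_trace() above follows the trace buffer's reserve, fill in
place, commit sequence: the caller gets a pointer into the ring, writes the
record fields directly, then commits. A toy single-producer sketch of that
shape; the slot count and record layout are illustrative, not the kernel's:

```c
#include <stddef.h>
#include <stdio.h>

/* Illustrative record; the kernel's ctx_switch_entry carries similar fields. */
struct switch_record {
    int prev_pid, prev_prio;
    int next_pid, next_prio, next_cpu;
};

#define RING_SLOTS 64

static struct switch_record ring[RING_SLOTS];
static size_t ring_head;     /* single-producer sketch: no locking */

/* Reserve a slot and hand the caller a pointer to fill in place. */
static struct switch_record *ring_reserve(void)
{
    return &ring[ring_head % RING_SLOTS];
}

/* Commit makes the filled record visible by advancing the head. */
static void ring_commit(void)
{
    ring_head++;
}

int main(void)
{
    struct switch_record *entry = ring_reserve();
    entry->prev_pid = 100; entry->prev_prio = 120;
    entry->next_pid = 200; entry->next_prio = 110;
    entry->next_cpu = 0;
    ring_commit();
    printf("records committed: %zu\n", ring_head);
    return 0;
}
```
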
trace_events_synth.c
  351: struct synth_trace_event *entry;   [in print_synth_event(), local]
  357: entry = (struct synth_trace_event *)iter->ent;   [in print_synth_event()]
  380: offset = (u32)entry->fields[n_u64];   [in print_synth_event()]
  383: str_field = (char *)entry + data_offset;   [in print_synth_event()]
  393: (char *)&entry->fields[n_u64],   [in print_synth_event()]
  401: offset = (u32)entry->fields[n_u64];   [in print_synth_event()]
  405: p = (void *)entry + data_offset;   [in print_synth_event()]
  422: entry->fields[n_u64],   [in print_synth_event()]
  428: entry->fields[n_u64],   [in print_synth_event()]
  445: static unsigned int trace_string(struct synth_trace_event *entry,   [in trace_string(), argument]
  [all …]

ftrace.c
  1111: struct ftrace_func_entry *entry;   [in __ftrace_lookup_ip(), local]
  1117: hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {   [in __ftrace_lookup_ip()]
  1118: if (entry->ip == ip)   [in __ftrace_lookup_ip()]
  1119: return entry;   [in __ftrace_lookup_ip()]
  1144: struct ftrace_func_entry *entry)   [in __add_hash_entry(), argument]
  1149: key = ftrace_hash_key(hash, entry->ip);   [in __add_hash_entry()]
  1151: hlist_add_head(&entry->hlist, hhd);   [in __add_hash_entry()]
  1157: struct ftrace_func_entry *entry;   [in add_hash_entry(), local]
  1159: entry = kmalloc(sizeof(*entry), GFP_KERNEL);   [in add_hash_entry()]
  1160: if (!entry)   [in add_hash_entry()]
  [all …]

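__ftrace_lookup_ip()/__add_hash_entry() above split a chained hash table into
a lookup walk and a head insertion. A minimal userspace sketch of the same
split, keyed by instruction address; the bucket count and hash function are
illustrative, not the kernel's:

```c
#include <stdint.h>
#include <stdlib.h>

/* A tiny chained hash keyed by instruction address. */
#define NBUCKETS 256

struct func_entry {
    uintptr_t ip;
    struct func_entry *next;
};

static struct func_entry *table[NBUCKETS];

static unsigned int hash_ip(uintptr_t ip)
{
    return (unsigned int)(ip >> 4) % NBUCKETS;   /* skip low alignment bits */
}

/* Walk the bucket chain looking for an exact address match. */
static struct func_entry *lookup_ip(uintptr_t ip)
{
    for (struct func_entry *e = table[hash_ip(ip)]; e; e = e->next)
        if (e->ip == ip)
            return e;
    return NULL;
}

/* Allocate an entry and push it onto the bucket's head,
 * like kmalloc() + hlist_add_head() above. */
static int add_ip(uintptr_t ip)
{
    struct func_entry *e = malloc(sizeof(*e));
    if (!e)
        return -1;
    e->ip = ip;
    e->next = table[hash_ip(ip)];
    table[hash_ip(ip)] = e;
    return 0;
}

int main(void)
{
    if (add_ip(0x401000) || add_ip(0x401040))
        return 1;
    return lookup_ip(0x401040) ? 0 : 1;
}
```
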
ring_buffer_benchmark.c
  88: int *entry;   [in read_event(), local]
  95: entry = ring_buffer_event_data(event);   [in read_event()]
  96: if (*entry != cpu) {   [in read_event()]
  111: int *entry;   [in read_page(), local]
  145: entry = ring_buffer_event_data(event);   [in read_page()]
  146: if (*entry != cpu) {   [in read_page()]
  158: entry = ring_buffer_event_data(event);   [in read_page()]
  159: if (*entry != cpu) {   [in read_page()]
  250: int *entry;   [in ring_buffer_producer(), local]
  259: entry = ring_buffer_event_data(event);   [in ring_buffer_producer()]
  [all …]

trace_branch.c
  38: struct trace_branch *entry;   [in probe_likely_condition(), local]
  65: sizeof(*entry), trace_ctx);   [in probe_likely_condition()]
  69: entry = ring_buffer_event_data(event);   [in probe_likely_condition()]
  77: strncpy(entry->func, f->data.func, TRACE_FUNC_SIZE);   [in probe_likely_condition()]
  78: strncpy(entry->file, p, TRACE_FILE_SIZE);   [in probe_likely_condition()]
  79: entry->func[TRACE_FUNC_SIZE] = 0;   [in probe_likely_condition()]
  80: entry->file[TRACE_FILE_SIZE] = 0;   [in probe_likely_condition()]
  81: entry->constant = f->constant;   [in probe_likely_condition()]
  82: entry->line = f->data.line;   [in probe_likely_condition()]
  83: entry->correct = val == expect;   [in probe_likely_condition()]
  [all …]

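Lines 77-80 above use the strncpy()-then-force-NUL idiom: strncpy() does not
terminate the destination when the source is too long, so the destination
arrays are one byte larger than the copy limit and the final byte is set
explicitly. The same idiom standalone (the size constant is illustrative, not
the kernel's TRACE_FUNC_SIZE):

```c
#include <stdio.h>
#include <string.h>

#define FUNC_SIZE 30   /* illustrative copy limit */

struct branch_record {
    char func[FUNC_SIZE + 1];   /* +1 leaves room for the forced terminator */
};

int main(void)
{
    struct branch_record rec;
    const char *name = "a_rather_long_function_name_that_might_not_fit";

    /* strncpy() fills up to FUNC_SIZE bytes and leaves the string
     * unterminated on truncation; the extra byte fixes that. */
    strncpy(rec.func, name, FUNC_SIZE);
    rec.func[FUNC_SIZE] = '\0';

    printf("stored: %s\n", rec.func);
    return 0;
}
```
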
trace_kprobe.c
  1390: struct kprobe_trace_entry_head *entry;   [in NOKPROBE_SYMBOL(), local]
  1402: entry = trace_event_buffer_reserve(&fbuffer, trace_file,   [in NOKPROBE_SYMBOL()]
  1403: sizeof(*entry) + tk->tp.size + dsize);   [in NOKPROBE_SYMBOL()]
  1404: if (!entry)   [in NOKPROBE_SYMBOL()]
  1408: entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);   [in NOKPROBE_SYMBOL()]
  1409: entry->ip = (unsigned long)tk->rp.kp.addr;   [in NOKPROBE_SYMBOL()]
  1410: store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);   [in NOKPROBE_SYMBOL()]
  1431: struct kretprobe_trace_entry_head *entry;   [in __kretprobe_trace_func(), local]
  1443: entry = trace_event_buffer_reserve(&fbuffer, trace_file,   [in __kretprobe_trace_func()]
  1444: sizeof(*entry) + tk->tp.size + dsize);   [in __kretprobe_trace_func()]
  [all …]

tracing_map.c
  520: struct tracing_map_entry *entry;   [in __tracing_map_insert(), local]
  530: entry = TRACING_MAP_ENTRY(map->map, idx);   [in __tracing_map_insert()]
  531: test_key = entry->key;   [in __tracing_map_insert()]
  534: val = READ_ONCE(entry->val);   [in __tracing_map_insert()]
  566: if (!cmpxchg(&entry->key, 0, key_hash)) {   [in __tracing_map_insert()]
  572: entry->key = 0;   [in __tracing_map_insert()]
  582: WRITE_ONCE(entry->val, elt);   [in __tracing_map_insert()]
  585: return entry->val;   [in __tracing_map_insert()]
  922: static void destroy_sort_entry(struct tracing_map_sort_entry *entry)   [in destroy_sort_entry(), argument]
  924: if (!entry)   [in destroy_sort_entry()]
  [all …]

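__tracing_map_insert() above claims an empty slot with cmpxchg() so that
racing inserters resolve safely without a lock. A C11-atomics sketch of the
same claim-by-CAS idea; the probing scheme and sizes are illustrative, and a
reader of a freshly claimed slot may still observe a NULL val, which the
kernel handles with a retry loop this sketch omits:

```c
#include <stdatomic.h>
#include <stddef.h>
#include <stdint.h>

/* Open-addressed map slot: a CAS on `key` claims an empty slot, mirroring
 * cmpxchg(&entry->key, 0, key_hash) above. key 0 is reserved as "empty". */
struct map_entry {
    _Atomic uint32_t key;
    _Atomic(void *)  val;
};

#define MAP_SLOTS 1024
static struct map_entry map[MAP_SLOTS];

static void *map_insert(uint32_t key_hash, void *elt)
{
    size_t idx = key_hash % MAP_SLOTS;

    for (size_t probes = 0; probes < MAP_SLOTS; probes++) {
        struct map_entry *entry = &map[idx];
        uint32_t expected = 0;

        if (atomic_load(&entry->key) == key_hash)
            return atomic_load(&entry->val);     /* already present */

        /* Try to claim the empty slot; exactly one racing inserter wins. */
        if (atomic_compare_exchange_strong(&entry->key, &expected, key_hash)) {
            atomic_store(&entry->val, elt);
            return elt;
        }
        if (expected == key_hash)                /* lost the race to our key */
            return atomic_load(&entry->val);

        idx = (idx + 1) % MAP_SLOTS;             /* other key: linear probe */
    }
    return NULL;                                 /* map full */
}

int main(void)
{
    static int payload = 42;
    return map_insert(7, &payload) == &payload ? 0 : 1;
}
```
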
trace_osnoise.c
  474: struct osnoise_entry *entry;   [in __trace_osnoise_sample(), local]
  476: event = trace_buffer_lock_reserve(buffer, TRACE_OSNOISE, sizeof(*entry),   [in __trace_osnoise_sample()]
  480: entry = ring_buffer_event_data(event);   [in __trace_osnoise_sample()]
  481: entry->runtime = sample->runtime;   [in __trace_osnoise_sample()]
  482: entry->noise = sample->noise;   [in __trace_osnoise_sample()]
  483: entry->max_sample = sample->max_sample;   [in __trace_osnoise_sample()]
  484: entry->hw_count = sample->hw_count;   [in __trace_osnoise_sample()]
  485: entry->nmi_count = sample->nmi_count;   [in __trace_osnoise_sample()]
  486: entry->irq_count = sample->irq_count;   [in __trace_osnoise_sample()]
  487: entry->softirq_count = sample->softirq_count;   [in __trace_osnoise_sample()]
  [all …]

/kernel/printk/
index.c
  46: struct pi_entry *entry = pi_get_entry(mod, *pos);   [in pi_next(), local]
  50: return entry;   [in pi_next()]
  75: const struct pi_entry *entry = v;   [in pi_show(), local]
  85: if (!entry->fmt)   [in pi_show()]
  88: if (entry->level)   [in pi_show()]
  89: printk_parse_prefix(entry->level, &level, &flags);   [in pi_show()]
  91: prefix_len = printk_parse_prefix(entry->fmt, &level, &flags);   [in pi_show()]
  107: seq_printf(s, " %s:%d %s \"", entry->file, entry->line, entry->func);   [in pi_show()]
  108: if (entry->subsys_fmt_prefix)   [in pi_show()]
  109: seq_escape_printf_format(s, entry->subsys_fmt_prefix);   [in pi_show()]
  [all …]

/kernel/bpf/
dispatcher.c
  46: struct bpf_dispatcher_prog *entry;   [in bpf_dispatcher_add_prog(), local]
  51: entry = bpf_dispatcher_find_prog(d, prog);   [in bpf_dispatcher_add_prog()]
  52: if (entry) {   [in bpf_dispatcher_add_prog()]
  53: refcount_inc(&entry->users);   [in bpf_dispatcher_add_prog()]
  57: entry = bpf_dispatcher_find_free(d);   [in bpf_dispatcher_add_prog()]
  58: if (!entry)   [in bpf_dispatcher_add_prog()]
  62: entry->prog = prog;   [in bpf_dispatcher_add_prog()]
  63: refcount_set(&entry->users, 1);   [in bpf_dispatcher_add_prog()]
  71: struct bpf_dispatcher_prog *entry;   [in bpf_dispatcher_remove_prog(), local]
  76: entry = bpf_dispatcher_find_prog(d, prog);   [in bpf_dispatcher_remove_prog()]
  [all …]

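bpf_dispatcher_add_prog() above implements a refcounted fixed-slot registry:
if the program is already registered its refcount is bumped, otherwise a free
slot is claimed with refcount 1. A single-threaded sketch of that pattern (the
kernel version runs under a mutex and uses refcount_t; the slot count and
names are illustrative):

```c
#include <stddef.h>

#define MAX_PROGS 48   /* illustrative slot count */

struct prog { int id; };

struct prog_slot {
    struct prog *prog;   /* NULL means the slot is free */
    unsigned int users;  /* reference count */
};

static struct prog_slot slots[MAX_PROGS];

/* Register a program: bump the refcount if already present, else claim
 * the first free slot. Returns 0 on success, -1 if the table is full. */
static int registry_add(struct prog *prog)
{
    struct prog_slot *free_slot = NULL;

    for (size_t i = 0; i < MAX_PROGS; i++) {
        if (slots[i].prog == prog) {
            slots[i].users++;
            return 0;
        }
        if (!slots[i].prog && !free_slot)
            free_slot = &slots[i];
    }
    if (!free_slot)
        return -1;
    free_slot->prog = prog;
    free_slot->users = 1;
    return 0;
}

int main(void)
{
    static struct prog demo = { .id = 1 };
    /* Second add must find the existing slot and only bump the count. */
    return (registry_add(&demo) == 0 && registry_add(&demo) == 0) ? 0 : 1;
}
```
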
/kernel/power/
console.c
  48: struct pm_vt_switch *entry, *tmp;   [in pm_vt_switch_required(), local]
  59: entry = kmalloc(sizeof(*entry), GFP_KERNEL);   [in pm_vt_switch_required()]
  60: if (!entry)   [in pm_vt_switch_required()]
  63: entry->required = required;   [in pm_vt_switch_required()]
  64: entry->dev = dev;   [in pm_vt_switch_required()]
  66: list_add(&entry->head, &pm_vt_switch_list);   [in pm_vt_switch_required()]
  109: struct pm_vt_switch *entry;   [in pm_vt_switch(), local]
  119: list_for_each_entry(entry, &pm_vt_switch_list, head) {   [in pm_vt_switch()]
  120: if (entry->required)   [in pm_vt_switch()]

/kernel/sched/
wait.c
  91: curr = list_next_entry(bookmark, entry);   [in __wake_up_common()]
  93: list_del(&bookmark->entry);   [in __wake_up_common()]
  96: curr = list_first_entry(&wq_head->head, wait_queue_entry_t, entry);   [in __wake_up_common()]
  98: if (&curr->entry == &wq_head->head)   [in __wake_up_common()]
  101: list_for_each_entry_safe_from(curr, next, &wq_head->head, entry) {   [in __wake_up_common()]
  115: (&next->entry != &wq_head->head)) {   [in __wake_up_common()]
  117: list_add_tail(&bookmark->entry, &next->entry);   [in __wake_up_common()]
  135: INIT_LIST_HEAD(&bookmark.entry);   [in __wake_up_common_lock()]
  274: if (list_empty(&wq_entry->entry))   [in prepare_to_wait()]
  290: if (list_empty(&wq_entry->entry)) {   [in prepare_to_wait_exclusive()]
  [all …]

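__wake_up_common() above walks the wait queue in bounded batches, parking a
bookmark entry in the list where it stopped so the caller can drop the lock
and resume from that exact point on the next pass. A self-contained sketch of
that bookmark technique over a minimal kernel-style doubly linked list; the
batch size and node layout are illustrative:

```c
#include <stdbool.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_add_before(struct list_head *n, struct list_head *pos)
{
    n->prev = pos->prev;
    n->next = pos;
    pos->prev->next = n;
    pos->prev = n;
}

static void list_del(struct list_head *n)
{
    n->prev->next = n->next;
    n->next->prev = n->prev;
    n->next = n->prev = n;       /* self-linked == "not on a list" */
}

struct waiter {
    struct list_head entry;      /* must stay first for the cast below */
    bool is_bookmark;
    int id;
};

#define BATCH 2

static bool wake_batch(struct list_head *head, struct waiter *bookmark)
{
    struct list_head *pos;
    int budget = BATCH;

    if (bookmark->entry.next != &bookmark->entry) {
        pos = bookmark->entry.next;   /* resume after the parked bookmark */
        list_del(&bookmark->entry);
    } else {
        pos = head->next;             /* first pass: start at the head */
    }

    while (pos != head) {
        struct waiter *w = (struct waiter *)pos;
        struct list_head *next = pos->next;

        if (!w->is_bookmark) {        /* defensive: skip foreign bookmarks */
            printf("waking %d\n", w->id);
            list_del(pos);            /* autoremove the woken waiter */
            if (--budget == 0 && next != head) {
                /* Out of budget: park the bookmark so the caller can
                 * drop the lock and resume exactly here next pass. */
                list_add_before(&bookmark->entry, next);
                return true;
            }
        }
        pos = next;
    }
    return false;
}

int main(void)
{
    struct list_head head;
    struct waiter w[5], bm = { .is_bookmark = true };

    list_init(&head);
    list_init(&bm.entry);
    for (int i = 0; i < 5; i++) {
        w[i].id = i;
        w[i].is_bookmark = false;
        list_add_before(&w[i].entry, &head);   /* append at the tail */
    }
    while (wake_batch(&head, &bm))
        puts("-- lock dropped, resuming --");
    return 0;
}
```
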
/kernel/futex/
core.c
  766: static inline int fetch_robust_entry(struct robust_list __user **entry,   [in fetch_robust_entry(), argument]
  775: *entry = (void __user *)(uentry & ~1UL);   [in fetch_robust_entry()]
  790: struct robust_list __user *entry, *next_entry, *pending;   [in exit_robust_list(), local]
  800: if (fetch_robust_entry(&entry, &head->list.next, &pi))   [in exit_robust_list()]
  815: while (entry != &head->list) {   [in exit_robust_list()]
  820: rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);   [in exit_robust_list()]
  825: if (entry != pending) {   [in exit_robust_list()]
  826: if (handle_futex_death((void __user *)entry + futex_offset,   [in exit_robust_list()]
  832: entry = next_entry;   [in exit_robust_list()]
  850: static void __user *futex_uaddr(struct robust_list __user *entry,   [in futex_uaddr(), argument]
  [all …]

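The robust-list code stores a flag in bit 0 of each list pointer, a bit that
is always clear for aligned addresses; fetch_robust_entry() above splits the
word back into pointer plus flag with `uentry & ~1UL`. The same tagged-pointer
trick standalone:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct robust_node { struct robust_node *next; };

/* Fold a boolean flag into bit 0 of an aligned pointer. */
static uintptr_t pack(struct robust_node *p, bool pi)
{
    return (uintptr_t)p | (pi ? 1UL : 0UL);
}

/* Split the word back apart: mask the tag off to recover the pointer. */
static struct robust_node *unpack(uintptr_t word, bool *pi)
{
    *pi = word & 1UL;
    return (struct robust_node *)(word & ~1UL);
}

int main(void)
{
    struct robust_node n;
    bool pi;
    struct robust_node *p = unpack(pack(&n, true), &pi);
    printf("pointer ok: %d, pi flag: %d\n", p == &n, pi);
    return 0;
}
```
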
/kernel/events/
callchain.c
  38: __weak void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,   [in perf_callchain_kernel(), argument]
  43: __weak void perf_callchain_user(struct perf_callchain_entry_ctx *entry,   [in perf_callchain_user(), argument]
  183: struct perf_callchain_entry *entry;   [in get_perf_callchain(), local]
  187: entry = get_callchain_entry(&rctx);   [in get_perf_callchain()]
  188: if (!entry)   [in get_perf_callchain()]
  191: ctx.entry = entry;   [in get_perf_callchain()]
  193: ctx.nr = entry->nr = init_nr;   [in get_perf_callchain()]
  225: return entry;   [in get_perf_callchain()]

/kernel/locking/
lockdep.c
  1018: list_for_each_entry(e, h, entry) {   [in class_lock_list_valid()]
  1105: hlist_for_each_entry_rcu(chain, head, entry) {   [in __check_data_structures()]
  1117: if (!in_any_class_list(&e->entry)) {   [in __check_data_structures()]
  1132: if (in_any_class_list(&e->entry)) {   [in __check_data_structures()]
  1411: struct lock_list *entry;   [in add_lock_to_list(), local]
  1416: entry = alloc_list_entry();   [in add_lock_to_list()]
  1417: if (!entry)   [in add_lock_to_list()]
  1420: entry->class = this;   [in add_lock_to_list()]
  1421: entry->links_to = links_to;   [in add_lock_to_list()]
  1422: entry->dep = dep;   [in add_lock_to_list()]
  [all …]
