
Searched refs:entry (Results 1 – 25 of 39) sorted by relevance


/kernel/
marker.c
205 struct marker_entry *entry = container_of(head, in free_old_closure() local
207 kfree(entry->oldptr); in free_old_closure()
210 entry->rcu_pending = 0; in free_old_closure()
213 static void debug_print_probes(struct marker_entry *entry) in debug_print_probes() argument
220 if (!entry->ptype) { in debug_print_probes()
222 entry->single.func, in debug_print_probes()
223 entry->single.probe_private); in debug_print_probes()
225 for (i = 0; entry->multi[i].func; i++) in debug_print_probes()
227 entry->multi[i].func, in debug_print_probes()
228 entry->multi[i].probe_private); in debug_print_probes()
[all …]
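The marker.c hits show the container_of() idiom: the RCU callback receives only the embedded head, and the enclosing struct marker_entry is recovered by subtracting the member's offset. A minimal userspace sketch of the same arithmetic (struct and field names here are illustrative stand-ins, not the ones in marker.c):

```c
#include <stddef.h>
#include <stdio.h>

/* Simplified version of the kernel macro: subtract the member's
 * offset from the member's address to get the enclosing struct. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct callback_head { void *next; };	/* stand-in for struct rcu_head */

struct demo_entry {
	int payload;
	struct callback_head rcu;	/* embedded, like marker_entry's head */
};

/* The callback sees only the embedded head... */
static void free_callback(struct callback_head *head)
{
	/* ...and recovers the whole entry from it. */
	struct demo_entry *entry = container_of(head, struct demo_entry, rcu);
	printf("payload = %d\n", entry->payload);
}

int main(void)
{
	struct demo_entry e = { .payload = 42 };
	free_callback(&e.rcu);
	return 0;
}
```

The kernel's real container_of() adds a typeof-based type check on ptr; the pointer arithmetic is the same.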
tracepoint.c
90 static void debug_print_probes(struct tracepoint_entry *entry) in debug_print_probes() argument
94 if (!tracepoint_debug || !entry->funcs) in debug_print_probes()
97 for (i = 0; entry->funcs[i]; i++) in debug_print_probes()
98 printk(KERN_DEBUG "Probe %d : %p\n", i, entry->funcs[i]); in debug_print_probes()
102 tracepoint_entry_add_probe(struct tracepoint_entry *entry, void *probe) in tracepoint_entry_add_probe() argument
109 debug_print_probes(entry); in tracepoint_entry_add_probe()
110 old = entry->funcs; in tracepoint_entry_add_probe()
125 entry->refcount = nr_probes + 1; in tracepoint_entry_add_probe()
126 entry->funcs = new; in tracepoint_entry_add_probe()
127 debug_print_probes(entry); in tracepoint_entry_add_probe()
[all …]
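tracepoint_entry_add_probe() treats entry->funcs as a NULL-terminated array: it counts the live probes, allocates a copy one slot larger, appends, and swaps the array in (the old one is freed only after an RCU grace period, much as marker.c's free_old_closure() frees its oldptr). A hedged userspace sketch of the count-copy-append step, with kmalloc swapped for malloc:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef void (*probe_func_t)(void *data);

/* Build a new NULL-terminated array with `probe` appended. The caller
 * keeps the old array; the kernel frees it after an RCU grace period
 * so lockless readers never see it disappear underneath them. */
static probe_func_t *probe_array_add(probe_func_t *old, probe_func_t probe)
{
	size_t nr_probes = 0;
	probe_func_t *new;

	if (old)
		while (old[nr_probes])		/* count existing probes */
			nr_probes++;

	new = malloc((nr_probes + 2) * sizeof(*new));
	if (!new)
		return NULL;
	if (old)
		memcpy(new, old, nr_probes * sizeof(*new));
	new[nr_probes] = probe;			/* append the new probe */
	new[nr_probes + 1] = NULL;		/* keep the terminator */
	return new;
}

static void demo_probe(void *data) { (void)data; }

int main(void)
{
	probe_func_t *funcs = probe_array_add(NULL, demo_probe);
	size_t n = 0;

	while (funcs && funcs[n])
		n++;
	printf("%zu probe(s) registered\n", n);
	free(funcs);
	return 0;
}
```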
async.c
93 struct async_entry *entry; in __lowest_in_progress() local
95 entry = list_first_entry(running, in __lowest_in_progress()
97 return entry->cookie; in __lowest_in_progress()
99 entry = list_first_entry(&async_pending, in __lowest_in_progress()
101 return entry->cookie; in __lowest_in_progress()
125 struct async_entry *entry; in run_one_entry() local
133 entry = list_first_entry(&async_pending, struct async_entry, list); in run_one_entry()
136 list_move_tail(&entry->list, entry->running); in run_one_entry()
141 printk("calling %lli_%pF @ %i\n", (long long)entry->cookie, in run_one_entry()
142 entry->func, task_pid_nr(current)); in run_one_entry()
[all …]
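__lowest_in_progress() peeks at the head of the pending/running lists with list_first_entry(), which is nothing more than container_of() applied to head->next on the kernel's circular doubly linked list. A self-contained sketch (demo_async_entry is an illustrative stand-in):

```c
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct list_head { struct list_head *next, *prev; };

/* The first element on a circular list is head->next,
 * converted back into its enclosing struct. */
#define list_first_entry(head, type, member) \
	container_of((head)->next, type, member)

struct demo_async_entry {		/* illustrative stand-in */
	struct list_head list;
	unsigned long long cookie;
};

int main(void)
{
	struct demo_async_entry e = { .cookie = 7 };
	struct list_head running = { &e.list, &e.list };

	e.list.next = e.list.prev = &running;	/* close the ring */
	printf("lowest cookie = %llu\n",
	       list_first_entry(&running, struct demo_async_entry, list)->cookie);
	return 0;
}
```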
auditfilter.c
206 struct audit_entry *entry; in audit_init_entry() local
209 entry = kzalloc(sizeof(*entry), GFP_KERNEL); in audit_init_entry()
210 if (unlikely(!entry)) in audit_init_entry()
215 kfree(entry); in audit_init_entry()
218 entry->rule.fields = fields; in audit_init_entry()
220 return entry; in audit_init_entry()
333 static int audit_match_signal(struct audit_entry *entry) in audit_match_signal() argument
335 struct audit_field *arch = entry->rule.arch_f; in audit_match_signal()
341 entry->rule.mask) && in audit_match_signal()
343 entry->rule.mask)); in audit_match_signal()
[all …]
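audit_init_entry() shows the standard kernel allocation shape: a zeroed allocation via kzalloc(), an unlikely() branch hint on the failure check, and kfree() of the partially built object when a later step fails. A userspace approximation with calloc() (all names illustrative):

```c
#include <stdlib.h>

#define unlikely(x) __builtin_expect(!!(x), 0)	/* GCC/Clang branch hint */

struct demo_field { int type; };
struct demo_rule  { struct demo_field *fields; };
struct demo_audit_entry { struct demo_rule rule; };

static struct demo_audit_entry *demo_init_entry(size_t field_count)
{
	struct demo_audit_entry *entry;
	struct demo_field *fields;

	entry = calloc(1, sizeof(*entry));	/* kzalloc analogue: zeroed */
	if (unlikely(!entry))
		return NULL;

	fields = calloc(field_count, sizeof(*fields));
	if (unlikely(!fields)) {
		free(entry);			/* unwind the partial object */
		return NULL;
	}
	entry->rule.fields = fields;
	return entry;
}

int main(void)
{
	struct demo_audit_entry *e = demo_init_entry(4);

	if (e) {
		free(e->rule.fields);
		free(e);
	}
	return 0;
}
```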
kexec.c
117 static int do_kimage_alloc(struct kimage **rimage, unsigned long entry, in do_kimage_alloc() argument
133 image->entry = &image->head; in do_kimage_alloc()
136 image->start = entry; in do_kimage_alloc()
224 static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry, in kimage_normal_alloc() argument
233 result = do_kimage_alloc(&image, entry, nr_segments, segments); in kimage_normal_alloc()
268 static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry, in kimage_crash_alloc() argument
278 if ((entry < crashk_res.start) || (entry > crashk_res.end)) { in kimage_crash_alloc()
284 result = do_kimage_alloc(&image, entry, nr_segments, segments); in kimage_crash_alloc()
543 static int kimage_add_entry(struct kimage *image, kimage_entry_t entry) in kimage_add_entry() argument
545 if (*image->entry != 0) in kimage_add_entry()
[all …]
futex_compat.c
21 fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry, in fetch_robust_entry() argument
27 *entry = compat_ptr((*uentry) & ~1); in fetch_robust_entry()
33 static void __user *futex_uaddr(struct robust_list __user *entry, in futex_uaddr() argument
36 compat_uptr_t base = ptr_to_compat(entry); in futex_uaddr()
51 struct robust_list __user *entry, *next_entry, *pending; in compat_exit_robust_list() local
64 if (fetch_robust_entry(&uentry, &entry, &head->list.next, &pi)) in compat_exit_robust_list()
80 while (entry != (struct robust_list __user *) &head->list) { in compat_exit_robust_list()
86 (compat_uptr_t __user *)&entry->next, &next_pi); in compat_exit_robust_list()
91 if (entry != pending) { in compat_exit_robust_list()
92 void __user *uaddr = futex_uaddr(entry, futex_offset); in compat_exit_robust_list()
[all …]
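fetch_robust_entry() masks off bit 0 of the user pointer (compat_ptr((*uentry) & ~1)) because the robust-list ABI smuggles a PI flag in the pointer's low bit, which is free since the entries are word-aligned. A small sketch of that tagging trick:

```c
#include <stdint.h>
#include <stdio.h>

/* The robust-list ABI stores a flag in bit 0 of each pointer, which
 * is free because the entries are at least word-aligned. */
static void *untag_ptr(uintptr_t raw, int *pi)
{
	*pi = raw & 1;				/* extract the flag */
	return (void *)(raw & ~(uintptr_t)1);	/* strip it to get the address */
}

int main(void)
{
	int value = 5, pi;
	uintptr_t tagged = (uintptr_t)&value | 1;	/* tag: PI futex */
	int *p = untag_ptr(tagged, &pi);

	printf("pi=%d value=%d\n", pi, *p);
	return 0;
}
```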
configs.c
73 struct proc_dir_entry *entry; in ikconfig_init() local
76 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL, in ikconfig_init()
78 if (!entry) in ikconfig_init()
81 entry->size = kernel_config_data_size; in ikconfig_init()
lockdep.c
585 struct lock_list *entry; in print_lock_dependencies() local
595 list_for_each_entry(entry, &class->locks_after, entry) { in print_lock_dependencies()
596 if (DEBUG_LOCKS_WARN_ON(!entry->class)) in print_lock_dependencies()
599 print_lock_dependencies(entry->class, depth + 1); in print_lock_dependencies()
602 print_stack_trace(&entry->trace, 2); in print_lock_dependencies()
873 struct lock_list *entry; in add_lock_to_list() local
878 entry = alloc_list_entry(); in add_lock_to_list()
879 if (!entry) in add_lock_to_list()
882 if (!save_trace(&entry->trace)) in add_lock_to_list()
885 entry->class = this; in add_lock_to_list()
[all …]
posix-cpu-timers.c
397 INIT_LIST_HEAD(&new_timer->it.cpu.entry); in posix_cpu_timer_create()
448 BUG_ON(!list_empty(&timer->it.cpu.entry)); in posix_cpu_timer_del()
454 list_del(&timer->it.cpu.entry); in posix_cpu_timer_del()
479 list_for_each_entry_safe(timer, next, head, entry) { in cleanup_timers()
480 list_del_init(&timer->entry); in cleanup_timers()
490 list_for_each_entry_safe(timer, next, head, entry) { in cleanup_timers()
491 list_del_init(&timer->entry); in cleanup_timers()
501 list_for_each_entry_safe(timer, next, head, entry) { in cleanup_timers()
502 list_del_init(&timer->entry); in cleanup_timers()
566 list_for_each_entry(next, head, entry) { in arm_timer()
[all …]
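cleanup_timers() iterates with list_for_each_entry_safe(), which stashes the next element before the body runs so the current one can be unlinked with list_del_init() without breaking the walk. A self-contained sketch of why the _safe variant matters when deleting during iteration:

```c
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct list_head { struct list_head *next, *prev; };

static void list_del(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
}

struct demo_timer {
	struct list_head entry;
	int id;
};

int main(void)
{
	struct list_head head = { &head, &head };
	struct demo_timer t[3];
	struct list_head *pos, *next;
	int i;

	for (i = 0; i < 3; i++) {		/* list_add_tail by hand */
		t[i].id = i;
		t[i].entry.next = &head;
		t[i].entry.prev = head.prev;
		head.prev->next = &t[i].entry;
		head.prev = &t[i].entry;
	}

	/* The _safe pattern: save `next` before the body may unlink `pos`. */
	for (pos = head.next; pos != &head; pos = next) {
		struct demo_timer *timer;

		next = pos->next;
		timer = container_of(pos, struct demo_timer, entry);
		printf("deleting timer %d\n", timer->id);
		list_del(pos);			/* fine: next was saved */
	}
	return 0;
}
```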
profile.c
483 struct proc_dir_entry *entry; in create_prof_cpu_mask() local
486 entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir); in create_prof_cpu_mask()
487 if (!entry) in create_prof_cpu_mask()
489 entry->data = prof_cpu_mask; in create_prof_cpu_mask()
490 entry->read_proc = prof_cpu_mask_read_proc; in create_prof_cpu_mask()
491 entry->write_proc = prof_cpu_mask_write_proc; in create_prof_cpu_mask()
615 struct proc_dir_entry *entry; in create_proc_profile() local
621 entry = proc_create("profile", S_IWUSR | S_IRUGO, in create_proc_profile()
623 if (!entry) in create_proc_profile()
625 entry->size = (1+prof_len) * sizeof(atomic_t); in create_proc_profile()
timer.c
363 list_add_tail(&timer->entry, vec); in internal_add_timer()
431 if (timer->entry.next == NULL && in timer_fixup_activate()
432 timer->entry.prev == TIMER_ENTRY_STATIC) { in timer_fixup_activate()
517 timer->entry.next = NULL; in __init_timer()
550 struct list_head *entry = &timer->entry; in detach_timer() local
554 __list_del(entry->prev, entry->next); in detach_timer()
556 entry->next = NULL; in detach_timer()
557 entry->prev = LIST_POISON2; in detach_timer()
815 list_for_each_entry_safe(timer, tmp, &tv_list, entry) { in cascade()
856 timer = list_first_entry(head, struct timer_list,entry); in __run_timers()
[all …]
workqueue.c
137 list_add_tail(&work->entry, head); in insert_work()
189 BUG_ON(!list_empty(&work->entry)); in queue_work_on()
242 BUG_ON(!list_empty(&work->entry)); in queue_delayed_work_on()
274 struct work_struct, entry); in run_workqueue()
450 if (!list_empty(&work->entry)) { in flush_work()
458 prev = &work->entry; in flush_work()
497 if (!list_empty(&work->entry)) { in try_to_grab_pending()
505 list_del_init(&work->entry); in try_to_grab_pending()
/kernel/time/
timer_stats.c
53 struct entry { struct
57 struct entry *next; argument
117 static struct entry entries[MAX_ENTRIES];
128 #define __tstat_hashfn(entry) \ argument
129 (((unsigned long)(entry)->timer ^ \
130 (unsigned long)(entry)->start_func ^ \
131 (unsigned long)(entry)->expire_func ^ \
132 (unsigned long)(entry)->pid ) & TSTAT_HASH_MASK)
134 #define tstat_hashentry(entry) (tstat_hash_table + __tstat_hashfn(entry)) argument
136 static struct entry *tstat_hash_table[TSTAT_HASH_SIZE] __read_mostly;
[all …]
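__tstat_hashfn() XOR-folds the fields that identify a timer and masks the result down to a bucket index, which only works because the table size is a power of two. A userspace sketch (TSTAT_HASH_SIZE here is an illustrative value, not the one in timer_stats.c):

```c
#include <stdio.h>

#define TSTAT_HASH_SIZE 512			/* illustrative; must be 2^n */
#define TSTAT_HASH_MASK (TSTAT_HASH_SIZE - 1)

struct demo_entry {
	void *timer, *start_func, *expire_func;
	long pid;
};

/* XOR-fold the identifying fields, then mask down to a bucket.
 * The mask replaces a modulo only because the size is a power of two. */
static unsigned long tstat_hashfn(const struct demo_entry *e)
{
	return ((unsigned long)e->timer ^
		(unsigned long)e->start_func ^
		(unsigned long)e->expire_func ^
		(unsigned long)e->pid) & TSTAT_HASH_MASK;
}

int main(void)
{
	struct demo_entry e = { (void *)0x1000, (void *)0x2040,
				(void *)0x3300, 77 };

	printf("bucket = %lu\n", tstat_hashfn(&e));
	return 0;
}
```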
/kernel/power/
swsusp.c
293 struct nvs_page *entry, *next; in hibernate_nvs_register() local
298 entry = kzalloc(sizeof(struct nvs_page), GFP_KERNEL); in hibernate_nvs_register()
299 if (!entry) in hibernate_nvs_register()
302 list_add_tail(&entry->node, &nvs_list); in hibernate_nvs_register()
303 entry->phys_start = start; in hibernate_nvs_register()
305 entry->size = (size < nr_bytes) ? size : nr_bytes; in hibernate_nvs_register()
307 start += entry->size; in hibernate_nvs_register()
308 size -= entry->size; in hibernate_nvs_register()
313 list_for_each_entry_safe(entry, next, &nvs_list, node) { in hibernate_nvs_register()
314 list_del(&entry->node); in hibernate_nvs_register()
[all …]
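hibernate_nvs_register() walks the region one page-sized chunk at a time, clamping the final chunk with entry->size = (size < nr_bytes) ? size : nr_bytes. The clamping loop, reduced to its essentials:

```c
#include <stdio.h>

#define CHUNK 4096UL	/* stand-in for the per-entry nr_bytes limit */

int main(void)
{
	unsigned long start = 0x10000, size = 10000;

	/* Walk the region in at-most-CHUNK pieces, clamping the tail. */
	while (size > 0) {
		unsigned long n = size < CHUNK ? size : CHUNK;

		printf("chunk at %#lx, %lu bytes\n", start, n);
		start += n;
		size -= n;
	}
	return 0;
}
```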
/kernel/trace/
trace_boot.c
55 struct trace_entry *entry = iter->ent; in initcall_call_print_line() local
63 trace_assign_type(field, entry); in initcall_call_print_line()
80 struct trace_entry *entry = iter->ent; in initcall_ret_print_line() local
88 trace_assign_type(field, entry); in initcall_ret_print_line()
107 struct trace_entry *entry = iter->ent; in initcall_print_line() local
109 switch (entry->type) { in initcall_print_line()
130 struct trace_boot_call *entry; in trace_boot_call() local
143 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), in trace_boot_call()
147 entry = ring_buffer_event_data(event); in trace_boot_call()
148 tracing_generic_entry_update(&entry->ent, 0, 0); in trace_boot_call()
[all …]
trace.c
860 tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags, argument
865 entry->preempt_count = pc & 0xff;
866 entry->pid = (tsk) ? tsk->pid : 0;
867 entry->tgid = (tsk) ? tsk->tgid : 0;
868 entry->flags =
885 struct ftrace_entry *entry; local
892 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
896 entry = ring_buffer_event_data(event);
897 tracing_generic_entry_update(&entry->ent, flags, pc);
898 entry->ent.type = TRACE_FN;
[all …]
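Every record these tracers write starts with a common struct trace_entry header as its first member: tracing_generic_entry_update() fills the header, the tracer sets ent.type, and the print paths switch on entry->type and downcast (essentially what trace_assign_type() does). A userspace sketch of the header-first tagged-struct pattern (type values and field names illustrative):

```c
#include <stdio.h>

enum { TRACE_FN = 1, TRACE_POWER = 2 };	/* illustrative type tags */

struct trace_entry_hdr { int type; int pid; };	/* common header */

struct fn_entry {
	struct trace_entry_hdr ent;	/* first member: addresses coincide */
	unsigned long ip;
};

static void print_line(struct trace_entry_hdr *entry)
{
	switch (entry->type) {
	case TRACE_FN: {
		/* The downcast is safe because ent sits at offset 0;
		 * this is essentially what trace_assign_type() does. */
		struct fn_entry *field = (struct fn_entry *)entry;

		printf("fn @ %#lx (pid %d)\n", field->ip, entry->pid);
		break;
	}
	default:
		printf("unhandled type %d\n", entry->type);
	}
}

int main(void)
{
	struct fn_entry e = { { TRACE_FN, 123 }, 0xdeadbeefUL };

	print_line(&e.ent);
	return 0;
}
```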
trace_power.c
50 struct trace_entry *entry = iter->ent; in power_print_line() local
57 trace_assign_type(field, entry); in power_print_line()
62 if (entry->type == TRACE_POWER) { in power_print_line()
115 struct trace_power *entry; in trace_power_end() local
127 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), in trace_power_end()
131 entry = ring_buffer_event_data(event); in trace_power_end()
132 tracing_generic_entry_update(&entry->ent, 0, 0); in trace_power_end()
133 entry->ent.type = TRACE_POWER; in trace_power_end()
134 entry->state_data = *it; in trace_power_end()
148 struct trace_power *entry; in trace_power_mark() local
[all …]
trace_branch.c
30 struct trace_branch *entry; in probe_likely_condition() local
50 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), in probe_likely_condition()
56 entry = ring_buffer_event_data(event); in probe_likely_condition()
57 tracing_generic_entry_update(&entry->ent, flags, pc); in probe_likely_condition()
58 entry->ent.type = TRACE_BRANCH; in probe_likely_condition()
66 strncpy(entry->func, f->func, TRACE_FUNC_SIZE); in probe_likely_condition()
67 strncpy(entry->file, p, TRACE_FILE_SIZE); in probe_likely_condition()
68 entry->func[TRACE_FUNC_SIZE] = 0; in probe_likely_condition()
69 entry->file[TRACE_FILE_SIZE] = 0; in probe_likely_condition()
70 entry->line = f->line; in probe_likely_condition()
[all …]
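probe_likely_condition() pairs each strncpy() with an explicit entry->func[TRACE_FUNC_SIZE] = 0, because strncpy() leaves the buffer unterminated when the source fills it; the arrays are sized one byte larger than the copy limit to leave room for that NUL. The idiom in miniature:

```c
#include <stdio.h>
#include <string.h>

#define FUNC_SIZE 8	/* illustrative; stands in for TRACE_FUNC_SIZE */

int main(void)
{
	char func[FUNC_SIZE + 1];	/* +1 leaves room for the forced NUL */

	strncpy(func, "a_rather_long_function_name", FUNC_SIZE);
	func[FUNC_SIZE] = 0;	/* strncpy won't terminate on truncation */
	printf("%s\n", func);	/* prints "a_rather" */
	return 0;
}
```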
trace_mmiotrace.c
171 struct trace_entry *entry = iter->ent; in mmio_print_rw() local
180 trace_assign_type(field, entry); in mmio_print_rw()
217 struct trace_entry *entry = iter->ent; in mmio_print_map() local
226 trace_assign_type(field, entry); in mmio_print_map()
253 struct trace_entry *entry = iter->ent; in mmio_print_mark() local
254 struct print_entry *print = (struct print_entry *)entry; in mmio_print_mark()
267 if (entry->flags & TRACE_FLAG_CONT) in mmio_print_mark()
310 struct trace_mmiotrace_rw *entry; in __trace_mmiotrace_rw() local
313 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), in __trace_mmiotrace_rw()
319 entry = ring_buffer_event_data(event); in __trace_mmiotrace_rw()
[all …]
trace_hw_branches.c
87 struct trace_entry *entry = iter->ent; in bts_trace_print_line() local
91 trace_assign_type(it, entry); in bts_trace_print_line()
93 if (entry->type == TRACE_HW_BRANCHES) { in bts_trace_print_line()
94 if (trace_seq_printf(seq, "%4d ", entry->cpu) && in bts_trace_print_line()
109 struct hw_branch_entry *entry; in trace_hw_branch() local
112 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), &irq); in trace_hw_branch()
115 entry = ring_buffer_event_data(event); in trace_hw_branch()
116 tracing_generic_entry_update(&entry->ent, 0, from); in trace_hw_branch()
117 entry->ent.type = TRACE_HW_BRANCHES; in trace_hw_branch()
118 entry->ent.cpu = smp_processor_id(); in trace_hw_branch()
[all …]
trace_functions_graph.c
342 struct ftrace_graph_ent_entry *entry, struct trace_seq *s) in print_graph_entry_leaf() argument
355 call = &entry->graph_ent; in print_graph_entry_leaf()
389 print_graph_entry_nested(struct ftrace_graph_ent_entry *entry, in print_graph_entry_nested() argument
394 struct ftrace_graph_ent *call = &entry->graph_ent; in print_graph_entry_nested()
607 struct trace_entry *entry = iter->ent; in print_graph_function() local
609 switch (entry->type) { in print_graph_function()
612 trace_assign_type(field, entry); in print_graph_function()
618 trace_assign_type(field, entry); in print_graph_function()
619 return print_graph_return(&field->ret, s, entry, iter->cpu); in print_graph_function()
623 trace_assign_type(field, entry); in print_graph_function()
[all …]
trace_selftest.c
6 static inline int trace_valid_entry(struct trace_entry *entry) in trace_valid_entry() argument
8 switch (entry->type) { in trace_valid_entry()
25 struct trace_entry *entry; in trace_test_buffer_cpu() local
29 entry = ring_buffer_event_data(event); in trace_test_buffer_cpu()
40 if (!trace_valid_entry(entry)) { in trace_test_buffer_cpu()
42 entry->type); in trace_test_buffer_cpu()
trace_stack.c
340 struct dentry *entry; in stack_trace_init() local
344 entry = debugfs_create_file("stack_max_size", 0644, d_tracer, in stack_trace_init()
346 if (!entry) in stack_trace_init()
349 entry = debugfs_create_file("stack_trace", 0444, d_tracer, in stack_trace_init()
351 if (!entry) in stack_trace_init()
ftrace.c
1562 struct dentry *entry; in ftrace_init_dyn_debugfs() local
1564 entry = debugfs_create_file("available_filter_functions", 0444, in ftrace_init_dyn_debugfs()
1566 if (!entry) in ftrace_init_dyn_debugfs()
1570 entry = debugfs_create_file("failures", 0444, in ftrace_init_dyn_debugfs()
1572 if (!entry) in ftrace_init_dyn_debugfs()
1575 entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer, in ftrace_init_dyn_debugfs()
1577 if (!entry) in ftrace_init_dyn_debugfs()
1581 entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer, in ftrace_init_dyn_debugfs()
1583 if (!entry) in ftrace_init_dyn_debugfs()
1588 entry = debugfs_create_file("set_graph_function", 0444, d_tracer, in ftrace_init_dyn_debugfs()
[all …]
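ftrace_init_dyn_debugfs() registers each control file with debugfs_create_file() and checks the returned dentry, which in this kernel vintage is NULL on failure. A hedged sketch of the same registration pattern as a standalone module, using the debugfs_create_u32() convenience helper instead of hand-written fops (directory and file names illustrative):

```c
#include <linux/module.h>
#include <linux/debugfs.h>

static struct dentry *demo_dir;
static u32 demo_max_size;

static int __init demo_init(void)
{
	demo_dir = debugfs_create_dir("demo_tracer", NULL);
	if (!demo_dir)
		return -ENOMEM;
	/* Kernels of this vintage return NULL on failure, hence the
	 * repeated `if (!entry)` checks in ftrace_init_dyn_debugfs(). */
	if (!debugfs_create_u32("max_size", 0644, demo_dir, &demo_max_size))
		printk(KERN_WARNING "could not create 'max_size'\n");
	return 0;
}

static void __exit demo_exit(void)
{
	debugfs_remove_recursive(demo_dir);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
```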
/kernel/irq/
proc.c
207 struct proc_dir_entry *entry; in register_irq_proc() local
224 entry = create_proc_entry("spurious", 0444, desc->dir); in register_irq_proc()
225 if (entry) { in register_irq_proc()
226 entry->data = (void *)(long)irq; in register_irq_proc()
227 entry->read_proc = irq_spurious_read; in register_irq_proc()
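configs.c, profile.c and irq/proc.c all register procfs files. The older create_proc_entry() path fills in entry->data, ->size, ->read_proc and ->write_proc by hand, while proc_create() takes a file_operations up front. A hedged module sketch of the proc_create() variant backed by seq_file (file name and contents illustrative):

```c
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/fs.h>

static int demo_show(struct seq_file *m, void *v)
{
	seq_printf(m, "42\n");
	return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
	return single_open(file, demo_show, NULL);
}

static const struct file_operations demo_proc_fops = {
	.owner   = THIS_MODULE,
	.open    = demo_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

static int __init demo_init(void)
{
	/* proc_create() returns NULL on failure, like the checks above. */
	if (!proc_create("demo_value", S_IFREG | S_IRUGO, NULL,
			 &demo_proc_fops))
		return -ENOMEM;
	return 0;
}

static void __exit demo_exit(void)
{
	remove_proc_entry("demo_value", NULL);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
```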
