
Searched refs:head (Results 1 – 25 of 46) sorted by relevance


/kernel/rcu/
srcu.c 46 b->head = NULL; in rcu_batch_init()
47 b->tail = &b->head; in rcu_batch_init()
53 static inline void rcu_batch_queue(struct rcu_batch *b, struct rcu_head *head) in rcu_batch_queue() argument
55 *b->tail = head; in rcu_batch_queue()
56 b->tail = &head->next; in rcu_batch_queue()
64 return b->tail == &b->head; in rcu_batch_empty()
73 struct rcu_head *head; in rcu_batch_dequeue() local
78 head = b->head; in rcu_batch_dequeue()
79 b->head = head->next; in rcu_batch_dequeue()
80 if (b->tail == &head->next) in rcu_batch_dequeue()
[all …]
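
The srcu.c hits above outline the classic tail-pointer callback queue: an empty batch aims tail back at its own head field, enqueue stores through *tail and then advances it, and the batch is empty exactly when tail still points at head. A minimal user-space sketch of that idiom follows; the node/batch names are illustrative, not the kernel's types.

    #include <stdio.h>
    #include <stddef.h>

    struct node { struct node *next; int val; };

    struct batch {
        struct node *head;    /* first queued node, or NULL */
        struct node **tail;   /* points at the last node's next field */
    };

    static void batch_init(struct batch *b)
    {
        b->head = NULL;
        b->tail = &b->head;           /* empty: tail points back at head */
    }

    static void batch_queue(struct batch *b, struct node *n)
    {
        n->next = NULL;
        *b->tail = n;                 /* link after the current last element */
        b->tail = &n->next;           /* advance tail to the new last link */
    }

    static int batch_empty(struct batch *b)
    {
        return b->tail == &b->head;   /* tail never advanced => nothing queued */
    }

    static struct node *batch_dequeue(struct batch *b)
    {
        struct node *n;

        if (batch_empty(b))
            return NULL;
        n = b->head;
        b->head = n->next;
        if (b->tail == &n->next)      /* removed the last element: reset tail */
            b->tail = &b->head;
        return n;
    }

    int main(void)
    {
        struct batch b;
        struct node a = { .val = 1 }, c = { .val = 2 };
        struct node *n;

        batch_init(&b);
        batch_queue(&b, &a);
        batch_queue(&b, &c);
        while ((n = batch_dequeue(&b)) != NULL)
            printf("%d\n", n->val);
        return 0;
    }

The same head/curtail shape reappears in the tiny.c and tree.c __call_rcu() hits further down.
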
rcu.h 71 static inline int debug_rcu_head_queue(struct rcu_head *head) in debug_rcu_head_queue() argument
75 r1 = debug_object_activate(head, &rcuhead_debug_descr); in debug_rcu_head_queue()
76 debug_object_active_state(head, &rcuhead_debug_descr, in debug_rcu_head_queue()
82 static inline void debug_rcu_head_unqueue(struct rcu_head *head) in debug_rcu_head_unqueue() argument
84 debug_object_active_state(head, &rcuhead_debug_descr, in debug_rcu_head_unqueue()
87 debug_object_deactivate(head, &rcuhead_debug_descr); in debug_rcu_head_unqueue()
90 static inline int debug_rcu_head_queue(struct rcu_head *head) in debug_rcu_head_queue() argument
95 static inline void debug_rcu_head_unqueue(struct rcu_head *head) in debug_rcu_head_unqueue() argument
106 static inline bool __rcu_reclaim(const char *rn, struct rcu_head *head) in __rcu_reclaim() argument
108 unsigned long offset = (unsigned long)head->func; in __rcu_reclaim()
[all …]
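
The __rcu_reclaim() fragment above reads head->func as an unsigned long offset: a callback queued by kfree_rcu() stores the byte offset of the rcu_head inside its enclosing object instead of a function pointer, so reclaim either frees head minus that offset or invokes the callback. A hedged sketch of that dispatch, using a union instead of the kernel's pointer cast (reclaim_head, reclaim and the 4096 threshold are written from memory of the idiom, not copied from the file):

    #include <stdlib.h>
    #include <stddef.h>

    struct reclaim_head {
        struct reclaim_head *next;
        union {
            void (*func)(struct reclaim_head *);  /* ordinary callback */
            unsigned long offset;                 /* byte offset of the head in its object */
        };
    };

    /* Assumption: real callback addresses never fall inside the first page. */
    #define IS_OFFSET(v) ((v) < 4096UL)

    static void reclaim(struct reclaim_head *head)
    {
        unsigned long v = head->offset;

        if (IS_OFFSET(v))
            free((char *)head - v);   /* kfree_rcu()-style: free the enclosing object */
        else
            head->func(head);         /* call_rcu()-style: run the callback */
    }

    struct item {
        int payload;
        struct reclaim_head rh;       /* embedded head at a nonzero offset */
    };

    int main(void)
    {
        struct item *it = malloc(sizeof(*it));

        it->rh.offset = offsetof(struct item, rh);
        reclaim(&it->rh);             /* frees 'it' through the stored offset */
        return 0;
    }
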
update.c 314 void wakeme_after_rcu(struct rcu_head *head) in wakeme_after_rcu() argument
318 rcu = container_of(head, struct rcu_synchronize, head); in wakeme_after_rcu()
336 init_rcu_head_on_stack(&rs_array[i].head); in __wait_rcu_gp()
338 (crcu_array[i])(&rs_array[i].head, wakeme_after_rcu); in __wait_rcu_gp()
348 destroy_rcu_head_on_stack(&rs_array[i].head); in __wait_rcu_gp()
354 void init_rcu_head(struct rcu_head *head) in init_rcu_head() argument
356 debug_object_init(head, &rcuhead_debug_descr); in init_rcu_head()
359 void destroy_rcu_head(struct rcu_head *head) in destroy_rcu_head() argument
361 debug_object_free(head, &rcuhead_debug_descr); in destroy_rcu_head()
372 struct rcu_head *head = addr; in rcuhead_fixup_activate() local
[all …]
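
wakeme_after_rcu() above is handed only the embedded rcu_head and recovers the surrounding rcu_synchronize with container_of(), the standard pattern for callbacks on embedded heads. A stand-alone sketch of that recovery (cb_head and waiter are made-up names; the container_of macro here is the usual offsetof form, not the kernel header):

    #include <stdio.h>
    #include <stddef.h>

    /* Recover the enclosing structure from a pointer to one of its members. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct cb_head {
        struct cb_head *next;
        void (*func)(struct cb_head *);
    };

    struct waiter {
        int done;
        struct cb_head head;      /* embedded head handed to the callback machinery */
    };

    /* The callback only sees the embedded head; container_of gets the waiter back. */
    static void wake_waiter(struct cb_head *head)
    {
        struct waiter *w = container_of(head, struct waiter, head);

        w->done = 1;
    }

    int main(void)
    {
        struct waiter w = { .done = 0 };

        w.head.func = wake_waiter;
        w.head.func(&w.head);     /* stand-in for the callback being invoked later */
        printf("done = %d\n", w.done);
        return 0;
    }
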
tiny.c 46 static void __call_rcu(struct rcu_head *head,
205 static void __call_rcu(struct rcu_head *head, in __call_rcu() argument
211 debug_rcu_head_queue(head); in __call_rcu()
212 head->func = func; in __call_rcu()
213 head->next = NULL; in __call_rcu()
216 *rcp->curtail = head; in __call_rcu()
217 rcp->curtail = &head->next; in __call_rcu()
232 void call_rcu_sched(struct rcu_head *head, rcu_callback_t func) in call_rcu_sched() argument
234 __call_rcu(head, func, &rcu_sched_ctrlblk); in call_rcu_sched()
242 void call_rcu_bh(struct rcu_head *head, rcu_callback_t func) in call_rcu_bh() argument
[all …]
tree.c 2997 struct rcu_head *head, unsigned long flags) in __call_rcu_core() argument
3038 *rdp->nxttail[RCU_DONE_TAIL] != head) in __call_rcu_core()
3060 __call_rcu(struct rcu_head *head, rcu_callback_t func, in __call_rcu() argument
3066 WARN_ON_ONCE((unsigned long)head & 0x1); /* Misaligned rcu_head! */ in __call_rcu()
3067 if (debug_rcu_head_queue(head)) { in __call_rcu()
3069 WRITE_ONCE(head->func, rcu_leak_callback); in __call_rcu()
3073 head->func = func; in __call_rcu()
3074 head->next = NULL; in __call_rcu()
3093 offline = !__call_rcu_nocb(rdp, head, lazy, flags); in __call_rcu()
3114 *rdp->nxttail[RCU_NEXT_TAIL] = head; in __call_rcu()
[all …]
/kernel/
notifier.c 128 ret = notifier_chain_register(&nh->head, n); in atomic_notifier_chain_register()
150 ret = notifier_chain_unregister(&nh->head, n); in atomic_notifier_chain_unregister()
183 ret = notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls); in __atomic_notifier_call_chain()
224 return notifier_chain_register(&nh->head, n); in blocking_notifier_chain_register()
227 ret = notifier_chain_register(&nh->head, n); in blocking_notifier_chain_register()
250 ret = notifier_chain_cond_register(&nh->head, n); in blocking_notifier_chain_cond_register()
277 return notifier_chain_unregister(&nh->head, n); in blocking_notifier_chain_unregister()
280 ret = notifier_chain_unregister(&nh->head, n); in blocking_notifier_chain_unregister()
315 if (rcu_access_pointer(nh->head)) { in __blocking_notifier_call_chain()
317 ret = notifier_call_chain(&nh->head, val, v, nr_to_call, in __blocking_notifier_call_chain()
[all …]
task_work.c 29 struct callback_head *head; in task_work_add() local
32 head = ACCESS_ONCE(task->task_works); in task_work_add()
33 if (unlikely(head == &work_exited)) in task_work_add()
35 work->next = head; in task_work_add()
36 } while (cmpxchg(&task->task_works, head, work) != head); in task_work_add()
90 struct callback_head *work, *head, *next; in task_work_run() local
99 head = !work && (task->flags & PF_EXITING) ? in task_work_run()
101 } while (cmpxchg(&task->task_works, work, head) != work); in task_work_run()
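
task_work_add() above pushes onto a per-task singly linked list with a read/compare-exchange retry loop, refusing the add once the list has been sealed with the work_exited marker. A user-space C11 sketch of the same lockless push, with stdatomic standing in for the kernel's cmpxchg() and an illustrative sentinel:

    #include <stdatomic.h>
    #include <stdio.h>

    struct cb {
        struct cb *next;
        void (*func)(struct cb *);
    };

    static struct cb work_exited;                   /* sentinel: no more work accepted */
    static _Atomic(struct cb *) task_works;         /* head of the pending list */

    static int work_add(struct cb *work)
    {
        struct cb *head = atomic_load(&task_works);

        do {
            if (head == &work_exited)
                return -1;                          /* task already exiting */
            work->next = head;                      /* link in front of current head */
        } while (!atomic_compare_exchange_weak(&task_works, &head, work));
        return 0;
    }

    static void say_hi(struct cb *cb) { printf("hi from %p\n", (void *)cb); }

    int main(void)
    {
        struct cb a = { .func = say_hi };

        if (work_add(&a) == 0)
            a.func(&a);                             /* stand-in for task_work_run() */
        return 0;
    }

On failure, atomic_compare_exchange_weak reloads head with the observed value, so the sentinel check and relink are redone on every retry, matching the kernel loop.
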
softirq.c 442 struct tasklet_struct *head; member
479 t->next = __this_cpu_read(tasklet_hi_vec.head); in __tasklet_hi_schedule_first()
480 __this_cpu_write(tasklet_hi_vec.head, t); in __tasklet_hi_schedule_first()
490 list = __this_cpu_read(tasklet_vec.head); in tasklet_action()
491 __this_cpu_write(tasklet_vec.head, NULL); in tasklet_action()
492 __this_cpu_write(tasklet_vec.tail, this_cpu_ptr(&tasklet_vec.head)); in tasklet_action()
526 list = __this_cpu_read(tasklet_hi_vec.head); in tasklet_hi_action()
527 __this_cpu_write(tasklet_hi_vec.head, NULL); in tasklet_hi_action()
528 __this_cpu_write(tasklet_hi_vec.tail, this_cpu_ptr(&tasklet_hi_vec.head)); in tasklet_hi_action()
640 &per_cpu(tasklet_vec, cpu).head; in softirq_init()
[all …]
kprobes.c 306 struct hlist_head *head; in get_kprobe() local
309 head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)]; in get_kprobe()
310 hlist_for_each_entry_rcu(p, head, hlist) { in get_kprobe()
791 struct hlist_head *head; in optimize_all_kprobes() local
802 head = &kprobe_table[i]; in optimize_all_kprobes()
803 hlist_for_each_entry_rcu(p, head, hlist) in optimize_all_kprobes()
814 struct hlist_head *head; in unoptimize_all_kprobes() local
827 head = &kprobe_table[i]; in unoptimize_all_kprobes()
828 hlist_for_each_entry_rcu(p, head, hlist) { in unoptimize_all_kprobes()
1093 struct hlist_head *head) in recycle_rp_inst() argument
[all …]
futex.c 930 struct list_head *next, *head = &curr->pi_state_list; in exit_pi_state_list() local
943 while (!list_empty(head)) { in exit_pi_state_list()
945 next = head->next; in exit_pi_state_list()
958 if (head->next != next) { in exit_pi_state_list()
3373 SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head, in SYSCALL_DEFINE2() argument
3381 if (unlikely(len != sizeof(*head))) in SYSCALL_DEFINE2()
3384 current->robust_list = head; in SYSCALL_DEFINE2()
3399 struct robust_list_head __user *head; in SYSCALL_DEFINE3() local
3421 head = p->robust_list; in SYSCALL_DEFINE3()
3424 if (put_user(sizeof(*head), len_ptr)) in SYSCALL_DEFINE3()
[all …]
user-return-notifier.c 38 struct hlist_head *head; in fire_user_return_notifiers() local
40 head = &get_cpu_var(return_notifier_list); in fire_user_return_notifiers()
41 hlist_for_each_entry_safe(urn, tmp2, head, link) in fire_user_return_notifiers()
smp.c 214 struct llist_head *head; in flush_smp_call_function_queue() local
221 head = this_cpu_ptr(&call_single_queue); in flush_smp_call_function_queue()
222 entry = llist_del_all(head); in flush_smp_call_function_queue()
227 !warned && !llist_empty(head))) { in flush_smp_call_function_queue()
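
flush_smp_call_function_queue() above empties the per-CPU queue in one step with llist_del_all(), i.e. it atomically swaps the list head with NULL and then walks the detached chain privately. A hedged C11 sketch of that detach-then-process shape (lpush/ldel_all are illustrative names, not the kernel's llist API):

    #include <stdatomic.h>
    #include <stdio.h>

    struct lnode {
        struct lnode *next;
        int val;
    };

    static _Atomic(struct lnode *) queue_head;      /* lock-free push target */

    static void lpush(struct lnode *n)
    {
        struct lnode *old = atomic_load(&queue_head);

        do {
            n->next = old;
        } while (!atomic_compare_exchange_weak(&queue_head, &old, n));
    }

    /* Detach everything at once; producers keep pushing onto the fresh NULL head. */
    static struct lnode *ldel_all(void)
    {
        return atomic_exchange(&queue_head, NULL);
    }

    int main(void)
    {
        struct lnode a = { .val = 1 }, b = { .val = 2 };
        struct lnode *n;

        lpush(&a);
        lpush(&b);
        for (n = ldel_all(); n; n = n->next)        /* walk the private chain, no locks */
            printf("%d\n", n->val);
        return 0;
    }
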
/kernel/bpf/
hashtab.c 128 static struct htab_elem *lookup_elem_raw(struct hlist_head *head, u32 hash, in lookup_elem_raw() argument
133 hlist_for_each_entry_rcu(l, head, hash_node) in lookup_elem_raw()
144 struct hlist_head *head; in htab_map_lookup_elem() local
155 head = select_bucket(htab, hash); in htab_map_lookup_elem()
157 l = lookup_elem_raw(head, hash, key, key_size); in htab_map_lookup_elem()
169 struct hlist_head *head; in htab_map_get_next_key() local
183 head = select_bucket(htab, hash); in htab_map_get_next_key()
186 l = lookup_elem_raw(head, hash, key, key_size); in htab_map_get_next_key()
208 head = select_bucket(htab, i); in htab_map_get_next_key()
211 next_l = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)), in htab_map_get_next_key()
[all …]
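
The hashtab.c hits follow the usual bucketed lookup: hash the key, select a bucket head, then walk that bucket's chain comparing the stored hash before the full key. In the kernel the walk is RCU-protected via hlist_for_each_entry_rcu(); the sketch below keeps only the lookup shape, with a toy hash and no RCU, and its helper names merely echo the ones in the hits.

    #include <stdio.h>
    #include <string.h>

    #define NBUCKETS 8            /* power of two, masked below */

    struct elem {
        struct elem *next;
        unsigned int hash;
        char key[16];
        int value;
    };

    static struct elem *buckets[NBUCKETS];

    static unsigned int hash_key(const char *key)
    {
        unsigned int h = 2166136261u;          /* FNV-1a, just for the sketch */
        while (*key)
            h = (h ^ (unsigned char)*key++) * 16777619u;
        return h;
    }

    static struct elem **select_bucket(unsigned int hash)
    {
        return &buckets[hash & (NBUCKETS - 1)];
    }

    static struct elem *lookup_elem_raw(struct elem *head, unsigned int hash,
                                        const char *key)
    {
        struct elem *l;

        for (l = head; l; l = l->next)
            if (l->hash == hash && !strcmp(l->key, key))   /* cheap check first */
                return l;
        return NULL;
    }

    int main(void)
    {
        struct elem e = { .value = 42 };
        unsigned int h;

        strcpy(e.key, "head");
        e.hash = hash_key(e.key);
        *select_bucket(e.hash) = &e;           /* insert into its bucket */

        h = hash_key("head");
        struct elem *found = lookup_elem_raw(*select_bucket(h), h, "head");
        printf("value = %d\n", found ? found->value : -1);
        return 0;
    }
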
/kernel/power/
console.c 22 struct list_head head; member
50 list_for_each_entry(tmp, &pm_vt_switch_list, head) { in pm_vt_switch_required()
65 list_add(&entry->head, &pm_vt_switch_list); in pm_vt_switch_required()
82 list_for_each_entry(tmp, &pm_vt_switch_list, head) { in pm_vt_switch_unregister()
84 list_del(&tmp->head); in pm_vt_switch_unregister()
118 list_for_each_entry(entry, &pm_vt_switch_list, head) { in pm_vt_switch()
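
console.c keeps its entries on an intrusive list: each entry embeds a node (here literally named head), list_add() links it onto pm_vt_switch_list, and list_for_each_entry() walks the list by recovering each enclosing entry from its node. A compact sketch of the same embedding, assuming a hand-rolled circular doubly linked list rather than the kernel's <linux/list.h>:

    #include <stdio.h>
    #include <stddef.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct list_node {
        struct list_node *next, *prev;
    };

    static void list_init(struct list_node *l) { l->next = l->prev = l; }

    static void list_add(struct list_node *n, struct list_node *l)
    {
        n->next = l->next;
        n->prev = l;
        l->next->prev = n;
        l->next = n;
    }

    static void list_del(struct list_node *n)
    {
        n->prev->next = n->next;
        n->next->prev = n->prev;
    }

    struct vt_switch_entry {
        int required;
        struct list_node head;        /* member name matches the hits above */
    };

    int main(void)
    {
        struct list_node switch_list;
        struct vt_switch_entry a = { .required = 1 }, b = { .required = 0 };
        struct list_node *pos;

        list_init(&switch_list);
        list_add(&a.head, &switch_list);
        list_add(&b.head, &switch_list);

        for (pos = switch_list.next; pos != &switch_list; pos = pos->next) {
            struct vt_switch_entry *e = container_of(pos, struct vt_switch_entry, head);
            printf("required = %d\n", e->required);
        }
        list_del(&a.head);
        return 0;
    }
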
/kernel/events/
ring_buffer.c 49 unsigned long head; in perf_output_put_handle() local
61 head = local_read(&rb->head); in perf_output_put_handle()
104 rb->user_page->data_head = head; in perf_output_put_handle()
119 if (unlikely(head != local_read(&rb->head))) { in perf_output_put_handle()
135 unsigned long tail, offset, head; in perf_output_begin() local
171 offset = head = local_read(&rb->head); in perf_output_begin()
173 unlikely(CIRC_SPACE(head, tail, perf_data_size(rb)) < size)) in perf_output_begin()
188 head += size; in perf_output_begin()
189 } while (local_cmpxchg(&rb->head, offset, head) != offset); in perf_output_begin()
196 if (unlikely(head - local_read(&rb->wakeup) > rb->watermark)) in perf_output_begin()
[all …]
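
perf_output_begin() above reserves buffer space by snapshotting rb->head, checking room against the consumer's tail, bumping head by the record size, and retrying with local_cmpxchg() if a nesting context moved head first. A simplified sketch with C11 atomics and free-running counters (power-of-two capacity assumed; the space check is a simplification, not the kernel's CIRC_SPACE() macro):

    #include <stdatomic.h>
    #include <stdio.h>

    #define RB_SIZE 4096UL                         /* power-of-two data size */

    struct ring {
        _Atomic unsigned long head;                /* producer position, free-running */
        unsigned long tail;                        /* consumer position, free-running */
    };

    /* Returns the reserved offset within the buffer, or -1 if it would not fit. */
    static long rb_reserve(struct ring *rb, unsigned long size)
    {
        unsigned long offset, head;

        offset = atomic_load(&rb->head);
        do {
            head = offset;
            if (RB_SIZE - (head - rb->tail) < size)
                return -1;                         /* no space left before tail */
            head += size;
        } while (!atomic_compare_exchange_weak(&rb->head, &offset, head));

        return (long)(offset & (RB_SIZE - 1));     /* wrap only when indexing the data */
    }

    int main(void)
    {
        static struct ring rb;                     /* zero-initialized: head = tail = 0 */

        printf("first record at %ld\n", rb_reserve(&rb, 64));
        printf("second record at %ld\n", rb_reserve(&rb, 64));
        return 0;
    }
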
callchain.c 37 static void release_callchain_buffers_rcu(struct rcu_head *head) in release_callchain_buffers_rcu() argument
42 entries = container_of(head, struct callchain_cpus_entries, rcu_head); in release_callchain_buffers_rcu()
/kernel/trace/
ring_buffer.c 498 unsigned long head; member
863 struct buffer_page *head; in rb_head_page_activate() local
865 head = cpu_buffer->head_page; in rb_head_page_activate()
866 if (!head) in rb_head_page_activate()
872 rb_set_list_to_head(cpu_buffer, head->list.prev); in rb_head_page_activate()
898 struct buffer_page *head, in rb_head_page_set() argument
903 unsigned long val = (unsigned long)&head->list; in rb_head_page_set()
921 struct buffer_page *head, in rb_head_page_set_update() argument
925 return rb_head_page_set(cpu_buffer, head, prev, in rb_head_page_set_update()
930 struct buffer_page *head, in rb_head_page_set_head() argument
[all …]
trace_syscalls.c 553 struct hlist_head *head; in perf_syscall_enter() local
568 head = this_cpu_ptr(sys_data->enter_event->perf_events); in perf_syscall_enter()
569 if (hlist_empty(head)) in perf_syscall_enter()
585 perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL); in perf_syscall_enter()
627 struct hlist_head *head; in perf_syscall_exit() local
642 head = this_cpu_ptr(sys_data->exit_event->perf_events); in perf_syscall_exit()
643 if (hlist_empty(head)) in perf_syscall_exit()
657 perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL); in perf_syscall_exit()
trace_event_perf.c 306 struct hlist_head *head; in perf_ftrace_function_call() local
310 head = this_cpu_ptr(event_function.perf_events); in perf_ftrace_function_call()
311 if (hlist_empty(head)) in perf_ftrace_function_call()
328 1, &regs, head, NULL); in perf_ftrace_function_call()
trace_events.c 82 __find_event_field(struct list_head *head, char *name) in __find_event_field() argument
86 list_for_each_entry(field, head, link) { in __find_event_field()
98 struct list_head *head; in trace_find_event_field() local
100 head = trace_get_fields(call); in trace_find_event_field()
101 field = __find_event_field(head, name); in trace_find_event_field()
112 static int __trace_define_field(struct list_head *head, const char *type, in __trace_define_field() argument
134 list_add(&field->link, head); in __trace_define_field()
143 struct list_head *head; in trace_define_field() local
148 head = trace_get_fields(call); in trace_define_field()
149 return __trace_define_field(head, type, name, offset, size, in trace_define_field()
[all …]
/kernel/time/
timer_stats.c 169 struct entry **head, *curr, *prev; in tstat_lookup() local
171 head = tstat_hashentry(entry); in tstat_lookup()
172 curr = *head; in tstat_lookup()
189 curr = *head; in tstat_lookup()
215 *head = curr; in tstat_lookup()
posix-cpu-timers.c 423 static void cleanup_timers_list(struct list_head *head) in cleanup_timers_list() argument
427 list_for_each_entry_safe(timer, next, head, entry) in cleanup_timers_list()
437 static void cleanup_timers(struct list_head *head) in cleanup_timers() argument
439 cleanup_timers_list(head); in cleanup_timers()
440 cleanup_timers_list(++head); in cleanup_timers()
441 cleanup_timers_list(++head); in cleanup_timers()
473 struct list_head *head, *listpos; in arm_timer() local
479 head = p->cpu_timers; in arm_timer()
482 head = p->signal->cpu_timers; in arm_timer()
485 head += CPUCLOCK_WHICH(timer->it_clock); in arm_timer()
[all …]
posix-timers.c 155 static struct k_itimer *__posix_timers_find(struct hlist_head *head, in __posix_timers_find() argument
161 hlist_for_each_entry_rcu(timer, head, t_hash) { in __posix_timers_find()
171 struct hlist_head *head = &posix_timers_hashtable[hash(sig, id)]; in posix_timer_by_id() local
173 return __posix_timers_find(head, sig, id); in posix_timer_by_id()
180 struct hlist_head *head; in posix_timer_add() local
185 head = &posix_timers_hashtable[hash(sig, sig->posix_timer_id)]; in posix_timer_add()
186 if (!__posix_timers_find(head, sig, sig->posix_timer_id)) { in posix_timer_add()
187 hlist_add_head_rcu(&timer->t_hash, head); in posix_timer_add()
575 static void k_itimer_rcu_free(struct rcu_head *head) in k_itimer_rcu_free() argument
577 struct k_itimer *tmr = container_of(head, struct k_itimer, it.rcu); in k_itimer_rcu_free()
timer.c 1221 struct hlist_head *head = &work_list; in __run_timers() local
1240 hlist_move_list(base->tv1.vec + index, head); in __run_timers()
1241 while (!hlist_empty(head)) { in __run_timers()
1246 timer = hlist_entry(head->first, struct timer_list, entry); in __run_timers()
1578 static void migrate_timer_list(struct tvec_base *new_base, struct hlist_head *head) in migrate_timer_list() argument
1583 while (!hlist_empty(head)) { in migrate_timer_list()
1584 timer = hlist_entry(head->first, struct timer_list, entry); in migrate_timer_list()
/kernel/locking/
lockdep.c 868 struct list_head *head, unsigned long ip, in add_lock_to_list() argument
888 list_add_tail_rcu(&entry->entry, head); in add_lock_to_list()
1001 struct list_head *head; in __bfs() local
1012 head = &source_entry->class->locks_after; in __bfs()
1014 head = &source_entry->class->locks_before; in __bfs()
1016 if (list_empty(head)) in __bfs()
1033 head = &lock->class->locks_after; in __bfs()
1035 head = &lock->class->locks_before; in __bfs()
1039 list_for_each_entry_rcu(entry, head, entry) { in __bfs()
3934 struct list_head *head; in lockdep_free_key_range() local
[all …]
