
Searched refs:head (Results 1 – 25 of 49) sorted by relevance


/kernel/bpf/
percpu_freelist.c
18 struct pcpu_freelist_head *head = per_cpu_ptr(s->freelist, cpu); in pcpu_freelist_init() local
20 raw_spin_lock_init(&head->lock); in pcpu_freelist_init()
21 head->first = NULL; in pcpu_freelist_init()
31 static inline void __pcpu_freelist_push(struct pcpu_freelist_head *head, in __pcpu_freelist_push() argument
34 raw_spin_lock(&head->lock); in __pcpu_freelist_push()
35 node->next = head->first; in __pcpu_freelist_push()
36 head->first = node; in __pcpu_freelist_push()
37 raw_spin_unlock(&head->lock); in __pcpu_freelist_push()
43 struct pcpu_freelist_head *head = this_cpu_ptr(s->freelist); in pcpu_freelist_push() local
45 __pcpu_freelist_push(head, node); in pcpu_freelist_push()
[all …]
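
For orientation, a minimal userspace sketch of the pattern the percpu_freelist.c matches show: one freelist head per CPU slot, a spinlock guarding it, and nodes pushed LIFO onto a singly linked list. The pthread spinlock and the fixed-size slot array are illustrative stand-ins for the kernel's raw_spinlock_t and per-CPU allocation, not the actual bpf API.

#include <pthread.h>
#include <stddef.h>

struct freelist_node {
	struct freelist_node *next;
};

struct freelist_head {
	pthread_spinlock_t lock;      /* stands in for raw_spinlock_t */
	struct freelist_node *first;  /* singly linked LIFO list */
};

#define NR_SLOTS 4                    /* one head per "CPU" slot */
static struct freelist_head heads[NR_SLOTS];

static void freelist_init(void)
{
	for (int i = 0; i < NR_SLOTS; i++) {
		pthread_spin_init(&heads[i].lock, PTHREAD_PROCESS_PRIVATE);
		heads[i].first = NULL;
	}
}

/* Same shape as __pcpu_freelist_push(): lock the head, link the node in front. */
static void freelist_push(struct freelist_head *head, struct freelist_node *node)
{
	pthread_spin_lock(&head->lock);
	node->next = head->first;
	head->first = node;
	pthread_spin_unlock(&head->lock);
}
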
hashtab.c
21 struct hlist_head head; member
238 INIT_HLIST_HEAD(&htab->buckets[i].head); in htab_map_alloc()
277 return &__select_bucket(htab, hash)->head; in select_bucket()
280 static struct htab_elem *lookup_elem_raw(struct hlist_head *head, u32 hash, in lookup_elem_raw() argument
285 hlist_for_each_entry_rcu(l, head, hash_node) in lookup_elem_raw()
296 struct hlist_head *head; in __htab_map_lookup_elem() local
307 head = select_bucket(htab, hash); in __htab_map_lookup_elem()
309 l = lookup_elem_raw(head, hash, key, key_size); in __htab_map_lookup_elem()
328 struct hlist_head *head; in htab_map_get_next_key() local
339 head = select_bucket(htab, hash); in htab_map_get_next_key()
[all …]
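
The hashtab.c matches follow the usual bucket-array layout: hash the key, index an array of chain heads, then walk the chain comparing hash and key. A self-contained sketch of that lookup path, with plain singly linked chains and a toy FNV-1a hash standing in for hlist_head, hlist_for_each_entry_rcu() and the htab's real hash; all names here are illustrative.

#include <stdint.h>
#include <stddef.h>
#include <string.h>

#define N_BUCKETS 64

struct elem {
	struct elem *next;
	uint32_t hash;
	char key[16];
};

static struct elem *buckets[N_BUCKETS];   /* array of chain heads */

static uint32_t hash_key(const char *key)
{
	uint32_t h = 2166136261u;          /* FNV-1a, purely illustrative */
	while (*key)
		h = (h ^ (uint8_t)*key++) * 16777619u;
	return h;
}

/* Same role as select_bucket(): map a hash to its chain head. */
static struct elem **select_bucket(uint32_t hash)
{
	return &buckets[hash % N_BUCKETS];
}

/* Same role as lookup_elem_raw(): walk one chain, match hash then key. */
static struct elem *lookup_elem_raw(struct elem *head, uint32_t hash,
				    const char *key)
{
	for (struct elem *l = head; l; l = l->next)
		if (l->hash == hash && strcmp(l->key, key) == 0)
			return l;
	return NULL;
}

static struct elem *map_lookup(const char *key)
{
	uint32_t hash = hash_key(key);
	return lookup_elem_raw(*select_bucket(hash), hash, key);
}
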
/kernel/rcu/
srcu.c
46 b->head = NULL; in rcu_batch_init()
47 b->tail = &b->head; in rcu_batch_init()
53 static inline void rcu_batch_queue(struct rcu_batch *b, struct rcu_head *head) in rcu_batch_queue() argument
55 *b->tail = head; in rcu_batch_queue()
56 b->tail = &head->next; in rcu_batch_queue()
64 return b->tail == &b->head; in rcu_batch_empty()
73 struct rcu_head *head; in rcu_batch_dequeue() local
78 head = b->head; in rcu_batch_dequeue()
79 b->head = head->next; in rcu_batch_dequeue()
80 if (b->tail == &head->next) in rcu_batch_dequeue()
[all …]
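
The rcu_batch lines above are the classic tail-pointer FIFO: head points at the first callback and tail holds the address of the last next pointer (or of head itself when empty), so enqueue is O(1) and emptiness is tail == &head. A standalone sketch of that shape; struct and function names are illustrative, not the srcu.c internals.

#include <stdbool.h>
#include <stddef.h>

struct cb_head {
	struct cb_head *next;
};

struct cb_batch {
	struct cb_head *head;    /* first queued callback, or NULL */
	struct cb_head **tail;   /* address of the last ->next, or of head */
};

static void batch_init(struct cb_batch *b)
{
	b->head = NULL;
	b->tail = &b->head;
}

static bool batch_empty(struct cb_batch *b)
{
	return b->tail == &b->head;
}

/* Append at the tail without walking the list. */
static void batch_queue(struct cb_batch *b, struct cb_head *head)
{
	head->next = NULL;
	*b->tail = head;
	b->tail = &head->next;
}

/* Pop from the front, resetting the tail when the list empties. */
static struct cb_head *batch_dequeue(struct cb_batch *b)
{
	struct cb_head *head;

	if (batch_empty(b))
		return NULL;

	head = b->head;
	b->head = head->next;
	if (b->tail == &head->next)
		b->tail = &b->head;
	return head;
}
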
rcu.h
71 static inline int debug_rcu_head_queue(struct rcu_head *head) in debug_rcu_head_queue() argument
75 r1 = debug_object_activate(head, &rcuhead_debug_descr); in debug_rcu_head_queue()
76 debug_object_active_state(head, &rcuhead_debug_descr, in debug_rcu_head_queue()
82 static inline void debug_rcu_head_unqueue(struct rcu_head *head) in debug_rcu_head_unqueue() argument
84 debug_object_active_state(head, &rcuhead_debug_descr, in debug_rcu_head_unqueue()
87 debug_object_deactivate(head, &rcuhead_debug_descr); in debug_rcu_head_unqueue()
90 static inline int debug_rcu_head_queue(struct rcu_head *head) in debug_rcu_head_queue() argument
95 static inline void debug_rcu_head_unqueue(struct rcu_head *head) in debug_rcu_head_unqueue() argument
106 static inline bool __rcu_reclaim(const char *rn, struct rcu_head *head) in __rcu_reclaim() argument
108 unsigned long offset = (unsigned long)head->func; in __rcu_reclaim()
[all …]
update.c
336 void wakeme_after_rcu(struct rcu_head *head) in wakeme_after_rcu() argument
340 rcu = container_of(head, struct rcu_synchronize, head); in wakeme_after_rcu()
358 init_rcu_head_on_stack(&rs_array[i].head); in __wait_rcu_gp()
360 (crcu_array[i])(&rs_array[i].head, wakeme_after_rcu); in __wait_rcu_gp()
370 destroy_rcu_head_on_stack(&rs_array[i].head); in __wait_rcu_gp()
376 void init_rcu_head(struct rcu_head *head) in init_rcu_head() argument
378 debug_object_init(head, &rcuhead_debug_descr); in init_rcu_head()
381 void destroy_rcu_head(struct rcu_head *head) in destroy_rcu_head() argument
383 debug_object_free(head, &rcuhead_debug_descr); in destroy_rcu_head()
401 void init_rcu_head_on_stack(struct rcu_head *head) in init_rcu_head_on_stack() argument
[all …]
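
wakeme_after_rcu() above recovers the enclosing rcu_synchronize from the embedded rcu_head using container_of(). A runnable sketch of that embedded-head callback pattern, with container_of() written out via offsetof(); the struct names and the completed flag are illustrative stand-ins for the kernel's completion.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct cb_head {
	struct cb_head *next;
	void (*func)(struct cb_head *head);
};

struct synchronize_waiter {
	struct cb_head head;   /* embedded callback head */
	int completed;         /* stands in for struct completion */
};

/* The callback only sees the embedded head; container_of() recovers the parent. */
static void wake_after_cb(struct cb_head *head)
{
	struct synchronize_waiter *w =
		container_of(head, struct synchronize_waiter, head);
	w->completed = 1;
}

int main(void)
{
	struct synchronize_waiter w = { .head.func = wake_after_cb };

	w.head.func(&w.head);   /* simulate the callback firing after a grace period */
	printf("completed = %d\n", w.completed);
	return 0;
}
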
tiny.c
46 static void __call_rcu(struct rcu_head *head,
201 static void __call_rcu(struct rcu_head *head, in __call_rcu() argument
207 debug_rcu_head_queue(head); in __call_rcu()
208 head->func = func; in __call_rcu()
209 head->next = NULL; in __call_rcu()
212 *rcp->curtail = head; in __call_rcu()
213 rcp->curtail = &head->next; in __call_rcu()
228 void call_rcu_sched(struct rcu_head *head, rcu_callback_t func) in call_rcu_sched() argument
230 __call_rcu(head, func, &rcu_sched_ctrlblk); in call_rcu_sched()
238 void call_rcu_bh(struct rcu_head *head, rcu_callback_t func) in call_rcu_bh() argument
[all …]
tree.c
3069 struct rcu_head *head, unsigned long flags) in __call_rcu_core() argument
3109 *rdp->nxttail[RCU_DONE_TAIL] != head) in __call_rcu_core()
3131 __call_rcu(struct rcu_head *head, rcu_callback_t func, in __call_rcu() argument
3137 WARN_ON_ONCE((unsigned long)head & 0x1); /* Misaligned rcu_head! */ in __call_rcu()
3138 if (debug_rcu_head_queue(head)) { in __call_rcu()
3140 WRITE_ONCE(head->func, rcu_leak_callback); in __call_rcu()
3144 head->func = func; in __call_rcu()
3145 head->next = NULL; in __call_rcu()
3164 offline = !__call_rcu_nocb(rdp, head, lazy, flags); in __call_rcu()
3185 *rdp->nxttail[RCU_NEXT_TAIL] = head; in __call_rcu()
[all …]
/kernel/
futex_compat.c
24 compat_uptr_t __user *head, unsigned int *pi) in fetch_robust_entry() argument
26 if (get_user(*uentry, head)) in fetch_robust_entry()
52 struct compat_robust_list_head __user *head = curr->compat_robust_list; in compat_exit_robust_list() local
67 if (fetch_robust_entry(&uentry, &entry, &head->list.next, &pi)) in compat_exit_robust_list()
72 if (get_user(futex_offset, &head->futex_offset)) in compat_exit_robust_list()
79 &head->list_op_pending, &pip)) in compat_exit_robust_list()
83 while (entry != (struct robust_list __user *) &head->list) { in compat_exit_robust_list()
121 struct compat_robust_list_head __user *, head, in COMPAT_SYSCALL_DEFINE2() argument
127 if (unlikely(len != sizeof(*head))) in COMPAT_SYSCALL_DEFINE2()
130 current->compat_robust_list = head; in COMPAT_SYSCALL_DEFINE2()
[all …]
notifier.c
128 ret = notifier_chain_register(&nh->head, n); in atomic_notifier_chain_register()
150 ret = notifier_chain_unregister(&nh->head, n); in atomic_notifier_chain_unregister()
183 ret = notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls); in __atomic_notifier_call_chain()
224 return notifier_chain_register(&nh->head, n); in blocking_notifier_chain_register()
227 ret = notifier_chain_register(&nh->head, n); in blocking_notifier_chain_register()
250 ret = notifier_chain_cond_register(&nh->head, n); in blocking_notifier_chain_cond_register()
277 return notifier_chain_unregister(&nh->head, n); in blocking_notifier_chain_unregister()
280 ret = notifier_chain_unregister(&nh->head, n); in blocking_notifier_chain_unregister()
315 if (rcu_access_pointer(nh->head)) { in __blocking_notifier_call_chain()
317 ret = notifier_call_chain(&nh->head, val, v, nr_to_call, in __blocking_notifier_call_chain()
[all …]
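
The notifier.c matches all funnel into the chain hanging off nh->head: a singly linked list of callback blocks kept sorted by priority, walked until a callback asks to stop. A simplified sketch of that structure follows; the registration and call loop here are illustrative and omit the RCU, locking and duplicate-entry handling the kernel versions do.

#include <stddef.h>

#define NOTIFY_DONE      0x0000
#define NOTIFY_STOP_MASK 0x8000

struct notifier_block {
	int (*notifier_call)(struct notifier_block *nb, unsigned long val, void *v);
	struct notifier_block *next;
	int priority;
};

/* Insert keeping the chain sorted by descending priority. */
static int chain_register(struct notifier_block **head, struct notifier_block *n)
{
	while (*head && (*head)->priority >= n->priority)
		head = &(*head)->next;
	n->next = *head;
	*head = n;
	return 0;
}

/* Call each block in turn until one sets the stop bit in its return value. */
static int chain_call(struct notifier_block *head, unsigned long val, void *v)
{
	int ret = NOTIFY_DONE;

	for (struct notifier_block *nb = head; nb; nb = nb->next) {
		ret = nb->notifier_call(nb, val, v);
		if (ret & NOTIFY_STOP_MASK)
			break;
	}
	return ret;
}
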
task_work.c
29 struct callback_head *head; in task_work_add() local
32 head = READ_ONCE(task->task_works); in task_work_add()
33 if (unlikely(head == &work_exited)) in task_work_add()
35 work->next = head; in task_work_add()
36 } while (cmpxchg(&task->task_works, head, work) != head); in task_work_add()
92 struct callback_head *work, *head, *next; in task_work_run() local
101 head = !work && (task->flags & PF_EXITING) ? in task_work_run()
103 } while (cmpxchg(&task->task_works, work, head) != work); in task_work_run()
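
task_work_add() above pushes onto a lock-free singly linked stack: read the current head, point the new node at it, and retry the compare-and-swap until no other thread raced in; task_work_run() then detaches the whole list the same way. A small C11-atomics sketch of the add side (the work_exited sentinel and all names are illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct callback_node {
	struct callback_node *next;
	void (*func)(struct callback_node *);
};

/* Sentinel meaning "the task is exiting, no more work may be added". */
static struct callback_node work_exited;

static _Atomic(struct callback_node *) task_works;

static bool work_add(struct callback_node *work)
{
	struct callback_node *head = atomic_load(&task_works);

	do {
		if (head == &work_exited)
			return false;           /* refuse, like task_work_add() */
		work->next = head;              /* link in front of the current head */
	} while (!atomic_compare_exchange_weak(&task_works, &head, work));
	/* on CAS failure, head has been reloaded with the current value */

	return true;
}
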
softirq.c
456 struct tasklet_struct *head; member
493 t->next = __this_cpu_read(tasklet_hi_vec.head); in __tasklet_hi_schedule_first()
494 __this_cpu_write(tasklet_hi_vec.head, t); in __tasklet_hi_schedule_first()
504 list = __this_cpu_read(tasklet_vec.head); in tasklet_action()
505 __this_cpu_write(tasklet_vec.head, NULL); in tasklet_action()
506 __this_cpu_write(tasklet_vec.tail, this_cpu_ptr(&tasklet_vec.head)); in tasklet_action()
540 list = __this_cpu_read(tasklet_hi_vec.head); in tasklet_hi_action()
541 __this_cpu_write(tasklet_hi_vec.head, NULL); in tasklet_hi_action()
542 __this_cpu_write(tasklet_hi_vec.tail, this_cpu_ptr(&tasklet_hi_vec.head)); in tasklet_hi_action()
654 &per_cpu(tasklet_vec, cpu).head; in softirq_init()
[all …]
kprobes.c
306 struct hlist_head *head; in get_kprobe() local
309 head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)]; in get_kprobe()
310 hlist_for_each_entry_rcu(p, head, hlist) { in get_kprobe()
781 struct hlist_head *head; in optimize_all_kprobes() local
792 head = &kprobe_table[i]; in optimize_all_kprobes()
793 hlist_for_each_entry_rcu(p, head, hlist) in optimize_all_kprobes()
804 struct hlist_head *head; in unoptimize_all_kprobes() local
817 head = &kprobe_table[i]; in unoptimize_all_kprobes()
818 hlist_for_each_entry_rcu(p, head, hlist) { in unoptimize_all_kprobes()
1078 struct hlist_head *head) in recycle_rp_inst() argument
[all …]
user-return-notifier.c
38 struct hlist_head *head; in fire_user_return_notifiers() local
40 head = &get_cpu_var(return_notifier_list); in fire_user_return_notifiers()
41 hlist_for_each_entry_safe(urn, tmp2, head, link) in fire_user_return_notifiers()
futex.c
879 struct list_head *next, *head = &curr->pi_state_list; in exit_pi_state_list() local
892 while (!list_empty(head)) { in exit_pi_state_list()
894 next = head->next; in exit_pi_state_list()
907 if (head->next != next) { in exit_pi_state_list()
2994 SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head, in SYSCALL_DEFINE2() argument
3002 if (unlikely(len != sizeof(*head))) in SYSCALL_DEFINE2()
3005 current->robust_list = head; in SYSCALL_DEFINE2()
3020 struct robust_list_head __user *head; in SYSCALL_DEFINE3() local
3042 head = p->robust_list; in SYSCALL_DEFINE3()
3045 if (put_user(sizeof(*head), len_ptr)) in SYSCALL_DEFINE3()
[all …]
smp.c
199 struct llist_head *head; in flush_smp_call_function_queue() local
206 head = this_cpu_ptr(&call_single_queue); in flush_smp_call_function_queue()
207 entry = llist_del_all(head); in flush_smp_call_function_queue()
212 !warned && !llist_empty(head))) { in flush_smp_call_function_queue()
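
flush_smp_call_function_queue() above drains its per-CPU llist by detaching the entire pending list in one atomic operation and then walking the detached entries privately. A minimal sketch of that detach-and-walk step with C11 atomics; the queue is a single global here rather than per CPU, and the names are illustrative.

#include <stdatomic.h>
#include <stddef.h>

struct llnode {
	struct llnode *next;
};

static _Atomic(struct llnode *) call_queue;   /* pending requests, LIFO */

/* Same idea as llist_del_all(): atomically take the whole list, leaving it empty. */
static struct llnode *take_all(_Atomic(struct llnode *) *head)
{
	return atomic_exchange(head, (struct llnode *)NULL);
}

static void flush_queue(void)
{
	struct llnode *entry = take_all(&call_queue);

	while (entry) {
		struct llnode *next = entry->next;
		/* ... run the queued request here ... */
		entry = next;
	}
}
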
/kernel/events/
ring_buffer.c
48 unsigned long head; in perf_output_put_handle() local
51 head = local_read(&rb->head); in perf_output_put_handle()
87 rb->user_page->data_head = head; in perf_output_put_handle()
93 if (unlikely(head != local_read(&rb->head))) { in perf_output_put_handle()
106 ring_buffer_has_space(unsigned long head, unsigned long tail, in ring_buffer_has_space() argument
111 return CIRC_SPACE(head, tail, data_size) >= size; in ring_buffer_has_space()
113 return CIRC_SPACE(tail, head, data_size) >= size; in ring_buffer_has_space()
122 unsigned long tail, offset, head; in __perf_output_begin() local
161 offset = head = local_read(&rb->head); in __perf_output_begin()
163 if (unlikely(!ring_buffer_has_space(head, tail, in __perf_output_begin()
[all …]
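
ring_buffer_has_space() above is a circular-buffer free-space check over a power-of-two data area: with a forward-moving producer head and consumer tail, the writable space is the distance from head+1 up to tail, modulo the size. A sketch with the kernel's CIRC_SPACE() formula expanded inline; the write-backward case from the second branch is omitted.

#include <stdbool.h>

/* Free bytes between producer head and consumer tail in a power-of-two
 * circular buffer: ((tail - (head + 1)) & (size - 1)), the CIRC_SPACE() formula. */
static unsigned long circ_space(unsigned long head, unsigned long tail,
				unsigned long size)
{
	return (tail - (head + 1)) & (size - 1);
}

/* Forward-writing case of ring_buffer_has_space(). */
static bool has_space(unsigned long head, unsigned long tail,
		      unsigned long data_size, unsigned long size)
{
	return circ_space(head, tail, data_size) >= size;
}
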
internal.h
24 local_t head; /* write position */ member
90 void perf_event_aux_event(struct perf_event *event, unsigned long head,
/kernel/power/
console.c
22 struct list_head head; member
50 list_for_each_entry(tmp, &pm_vt_switch_list, head) { in pm_vt_switch_required()
65 list_add(&entry->head, &pm_vt_switch_list); in pm_vt_switch_required()
82 list_for_each_entry(tmp, &pm_vt_switch_list, head) { in pm_vt_switch_unregister()
84 list_del(&tmp->head); in pm_vt_switch_unregister()
118 list_for_each_entry(entry, &pm_vt_switch_list, head) { in pm_vt_switch()
/kernel/trace/
ring_buffer.c
496 unsigned long head; member
861 struct buffer_page *head; in rb_head_page_activate() local
863 head = cpu_buffer->head_page; in rb_head_page_activate()
864 if (!head) in rb_head_page_activate()
870 rb_set_list_to_head(cpu_buffer, head->list.prev); in rb_head_page_activate()
896 struct buffer_page *head, in rb_head_page_set() argument
901 unsigned long val = (unsigned long)&head->list; in rb_head_page_set()
919 struct buffer_page *head, in rb_head_page_set_update() argument
923 return rb_head_page_set(cpu_buffer, head, prev, in rb_head_page_set_update()
928 struct buffer_page *head, in rb_head_page_set_head() argument
[all …]
trace_syscalls.c
566 struct hlist_head *head; in perf_syscall_enter() local
581 head = this_cpu_ptr(sys_data->enter_event->perf_events); in perf_syscall_enter()
582 if (hlist_empty(head)) in perf_syscall_enter()
599 head, NULL); in perf_syscall_enter()
640 struct hlist_head *head; in perf_syscall_exit() local
655 head = this_cpu_ptr(sys_data->exit_event->perf_events); in perf_syscall_exit()
656 if (hlist_empty(head)) in perf_syscall_exit()
670 1, regs, head, NULL); in perf_syscall_exit()
trace_events.c
81 __find_event_field(struct list_head *head, char *name) in __find_event_field() argument
85 list_for_each_entry(field, head, link) { in __find_event_field()
97 struct list_head *head; in trace_find_event_field() local
99 head = trace_get_fields(call); in trace_find_event_field()
100 field = __find_event_field(head, name); in trace_find_event_field()
111 static int __trace_define_field(struct list_head *head, const char *type, in __trace_define_field() argument
133 list_add(&field->link, head); in __trace_define_field()
142 struct list_head *head; in trace_define_field() local
147 head = trace_get_fields(call); in trace_define_field()
148 return __trace_define_field(head, type, name, offset, size, in trace_define_field()
[all …]
trace_event_perf.c
310 struct hlist_head *head; in perf_ftrace_function_call() local
314 head = this_cpu_ptr(event_function.perf_events); in perf_ftrace_function_call()
315 if (hlist_empty(head)) in perf_ftrace_function_call()
333 1, &regs, head, NULL); in perf_ftrace_function_call()
/kernel/time/
timer_stats.c
169 struct entry **head, *curr, *prev; in tstat_lookup() local
171 head = tstat_hashentry(entry); in tstat_lookup()
172 curr = *head; in tstat_lookup()
189 curr = *head; in tstat_lookup()
215 *head = curr; in tstat_lookup()
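
tstat_lookup() above works through a pointer to the chain head (struct entry **head), which makes front insertion on a miss a single store through that pointer. A compact lookup-or-insert sketch of the same shape; the hash, key field and missing locking are illustrative simplifications (the kernel version re-checks under a lock before inserting).

#include <stdint.h>
#include <stdlib.h>
#include <stddef.h>

#define TSTAT_HASH_SIZE 512

struct entry {
	struct entry *next;
	void *timer;            /* illustrative key */
	unsigned long count;
};

static struct entry *tstat_hash[TSTAT_HASH_SIZE];

static struct entry **tstat_hashentry(void *timer)
{
	uintptr_t h = ((uintptr_t)timer >> 4) % TSTAT_HASH_SIZE;   /* toy hash */
	return &tstat_hash[h];
}

static struct entry *tstat_lookup(void *timer)
{
	struct entry **head = tstat_hashentry(timer);
	struct entry *curr;

	for (curr = *head; curr; curr = curr->next)
		if (curr->timer == timer)
			return curr;

	curr = calloc(1, sizeof(*curr));    /* miss: allocate a new entry */
	if (!curr)
		return NULL;
	curr->timer = timer;
	curr->next = *head;                 /* push at the chain head */
	*head = curr;
	return curr;
}
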
posix-cpu-timers.c
422 static void cleanup_timers_list(struct list_head *head) in cleanup_timers_list() argument
426 list_for_each_entry_safe(timer, next, head, entry) in cleanup_timers_list()
436 static void cleanup_timers(struct list_head *head) in cleanup_timers() argument
438 cleanup_timers_list(head); in cleanup_timers()
439 cleanup_timers_list(++head); in cleanup_timers()
440 cleanup_timers_list(++head); in cleanup_timers()
472 struct list_head *head, *listpos; in arm_timer() local
478 head = p->cpu_timers; in arm_timer()
481 head = p->signal->cpu_timers; in arm_timer()
484 head += CPUCLOCK_WHICH(timer->it_clock); in arm_timer()
[all …]
posix-timers.c
155 static struct k_itimer *__posix_timers_find(struct hlist_head *head, in __posix_timers_find() argument
161 hlist_for_each_entry_rcu(timer, head, t_hash) { in __posix_timers_find()
171 struct hlist_head *head = &posix_timers_hashtable[hash(sig, id)]; in posix_timer_by_id() local
173 return __posix_timers_find(head, sig, id); in posix_timer_by_id()
180 struct hlist_head *head; in posix_timer_add() local
185 head = &posix_timers_hashtable[hash(sig, sig->posix_timer_id)]; in posix_timer_add()
186 if (!__posix_timers_find(head, sig, sig->posix_timer_id)) { in posix_timer_add()
187 hlist_add_head_rcu(&timer->t_hash, head); in posix_timer_add()
566 static void k_itimer_rcu_free(struct rcu_head *head) in k_itimer_rcu_free() argument
568 struct k_itimer *tmr = container_of(head, struct k_itimer, it.rcu); in k_itimer_rcu_free()
