
Searched refs:head (Results 1 – 25 of 71) sorted by relevance


/kernel/bpf/
percpu_freelist.c
15 struct pcpu_freelist_head *head = per_cpu_ptr(s->freelist, cpu); in pcpu_freelist_init() local
17 raw_spin_lock_init(&head->lock); in pcpu_freelist_init()
18 head->first = NULL; in pcpu_freelist_init()
30 static inline void pcpu_freelist_push_node(struct pcpu_freelist_head *head, in pcpu_freelist_push_node() argument
33 node->next = head->first; in pcpu_freelist_push_node()
34 WRITE_ONCE(head->first, node); in pcpu_freelist_push_node()
37 static inline void ___pcpu_freelist_push(struct pcpu_freelist_head *head, in ___pcpu_freelist_push() argument
40 raw_spin_lock(&head->lock); in ___pcpu_freelist_push()
41 pcpu_freelist_push_node(head, node); in ___pcpu_freelist_push()
42 raw_spin_unlock(&head->lock); in ___pcpu_freelist_push()
[all …]
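
The excerpt above shows the core pattern: each per-CPU list is a lock-protected singly linked stack, with nodes pushed at the head. A minimal userspace sketch of that shape, using a pthread mutex in place of the kernel's raw spinlock (freelist_head and freelist_node are illustrative names, not kernel API; the mutex must be initialized, e.g. with PTHREAD_MUTEX_INITIALIZER):

#include <pthread.h>

struct freelist_node {
        struct freelist_node *next;
};

struct freelist_head {
        pthread_mutex_t lock;
        struct freelist_node *first;
};

static void freelist_push(struct freelist_head *head, struct freelist_node *node)
{
        pthread_mutex_lock(&head->lock);
        node->next = head->first;       /* link in front of current first */
        head->first = node;             /* publish the new list head */
        pthread_mutex_unlock(&head->lock);
}
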
hashtab.c
79 struct hlist_nulls_head head; member
141 INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i); in htab_init_buckets()
615 return &__select_bucket(htab, hash)->head; in select_bucket()
619 static struct htab_elem *lookup_elem_raw(struct hlist_nulls_head *head, u32 hash, in lookup_elem_raw() argument
625 hlist_nulls_for_each_entry_rcu(l, n, head, hash_node) in lookup_elem_raw()
636 static struct htab_elem *lookup_nulls_elem_raw(struct hlist_nulls_head *head, in lookup_nulls_elem_raw() argument
644 hlist_nulls_for_each_entry_rcu(l, n, head, hash_node) in lookup_nulls_elem_raw()
662 struct hlist_nulls_head *head; in __htab_map_lookup_elem() local
673 head = select_bucket(htab, hash); in __htab_map_lookup_elem()
675 l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets); in __htab_map_lookup_elem()
[all …]
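
The lookup helpers above select a bucket by hash, then walk the bucket's chain comparing the cached hash and the key bytes. A simplified userspace sketch of the same walk, without RCU or the hlist_nulls machinery (all names illustrative):

#include <stdint.h>
#include <string.h>

struct elem {
        struct elem *next;
        uint32_t hash;
        char key[];             /* key bytes follow the header */
};

static struct elem *lookup_elem(struct elem *head, uint32_t hash,
                                const void *key, size_t key_size)
{
        for (struct elem *l = head; l; l = l->next)
                if (l->hash == hash && !memcmp(l->key, key, key_size))
                        return l;
        return NULL;
}
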
queue_stack_maps.c
20 u32 head, tail; member
33 return qs->head == qs->tail; in queue_stack_map_is_empty()
38 u32 head = qs->head + 1; in queue_stack_map_is_full() local
40 if (unlikely(head >= qs->size)) in queue_stack_map_is_full()
41 head = 0; in queue_stack_map_is_full()
43 return head == qs->tail; in queue_stack_map_is_full()
153 index = qs->head - 1; in __stack_map_get()
161 qs->head = index; in __stack_map_get()
227 dst = &qs->elements[qs->head * qs->map.value_size]; in queue_stack_map_push_elem()
230 if (unlikely(++qs->head >= qs->size)) in queue_stack_map_push_elem()
[all …]
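
The full/empty tests above are classic one-slot-reserved ring arithmetic: the queue is empty when head equals tail, and full when advancing head (with wraparound) would land on tail. A self-contained sketch; one slot stays unused so "full" and "empty" stay distinguishable:

#include <stdbool.h>
#include <stdint.h>

struct ring {
        uint32_t head, tail, size;
};

static bool ring_is_empty(const struct ring *r)
{
        return r->head == r->tail;
}

static bool ring_is_full(const struct ring *r)
{
        uint32_t head = r->head + 1;

        if (head >= r->size)
                head = 0;               /* wrap around */
        return head == r->tail;
}
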
devmap.c
211 struct hlist_head *head; in dev_map_free() local
214 head = dev_map_index_hash(dtab, i); in dev_map_free()
216 hlist_for_each_entry_safe(dev, next, head, index_hlist) { in dev_map_free()
270 struct hlist_head *head = dev_map_index_hash(dtab, key); in __dev_map_hash_lookup_elem() local
273 hlist_for_each_entry_rcu(dev, head, index_hlist, in __dev_map_hash_lookup_elem()
287 struct hlist_head *head; in dev_map_hash_get_next_key() local
312 head = dev_map_index_hash(dtab, i); in dev_map_hash_get_next_key()
314 next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)), in dev_map_hash_get_next_key()
591 struct hlist_head *head; in dev_map_enqueue_multi() local
625 head = dev_map_index_hash(dtab, i); in dev_map_enqueue_multi()
[all …]
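
dev_map_free() iterates with hlist_for_each_entry_safe() because it frees entries while walking. The underlying idea, in a plain userspace list (names hypothetical): save the next pointer before the current node may be freed.

#include <stdlib.h>

struct dev_entry {
        struct dev_entry *next;
};

static void free_all(struct dev_entry **head)
{
        struct dev_entry *d = *head, *next;

        while (d) {
                next = d->next;         /* read before d is freed */
                free(d);
                d = next;
        }
        *head = NULL;
}
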
/kernel/rcu/
tiny.c
85 static inline bool rcu_reclaim_tiny(struct rcu_head *head) in rcu_reclaim_tiny() argument
88 unsigned long offset = (unsigned long)head->func; in rcu_reclaim_tiny()
92 trace_rcu_invoke_kvfree_callback("", head, offset); in rcu_reclaim_tiny()
93 kvfree((void *)head - offset); in rcu_reclaim_tiny()
98 trace_rcu_invoke_callback("", head); in rcu_reclaim_tiny()
99 f = head->func; in rcu_reclaim_tiny()
100 WRITE_ONCE(head->func, (rcu_callback_t)0L); in rcu_reclaim_tiny()
101 f(head); in rcu_reclaim_tiny()
170 void call_rcu(struct rcu_head *head, rcu_callback_t func) in call_rcu() argument
175 if (debug_rcu_head_queue(head)) { in call_rcu()
[all …]
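
rcu_reclaim_tiny() relies on an encoding trick: for kvfree_rcu(), the rcu_head's func field stores not a function pointer but the small byte offset of the rcu_head inside the enclosing object, so reclaim can free the whole allocation. A userspace sketch of the idea, mirroring the kernel's "offset < 4096" test (names and threshold are illustrative here):

#include <stdbool.h>
#include <stdlib.h>

struct cb_head {
        struct cb_head *next;
        void (*func)(struct cb_head *);
};

static bool reclaim_tiny(struct cb_head *head)
{
        unsigned long offset = (unsigned long)head->func;

        if (offset < 4096) {                    /* encoded offset, not a pointer */
                free((char *)head - offset);    /* free the enclosing object */
                return true;
        }
        head->func(head);                       /* ordinary callback */
        return false;
}
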
rcu_segcblist.c
20 rclp->head = NULL; in rcu_cblist_init()
21 rclp->tail = &rclp->head; in rcu_cblist_init()
47 drclp->head = srclp->head; in rcu_cblist_flush_enqueue()
48 if (drclp->head) in rcu_cblist_flush_enqueue()
51 drclp->tail = &drclp->head; in rcu_cblist_flush_enqueue()
57 srclp->head = rhp; in rcu_cblist_flush_enqueue()
71 rhp = rclp->head; in rcu_cblist_dequeue()
75 rclp->head = rhp->next; in rcu_cblist_dequeue()
76 if (!rclp->head) in rcu_cblist_dequeue()
77 rclp->tail = &rclp->head; in rcu_cblist_dequeue()
[all …]
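
The rcu_cblist functions above implement a singly linked queue whose tail is a pointer to the last ->next pointer: tail points at &head when the list is empty, which makes enqueue O(1) with no empty-list special case. A standalone sketch of that structure:

#include <stddef.h>

struct cb {
        struct cb *next;
};

struct cblist {
        struct cb *head;
        struct cb **tail;
};

static void cblist_init(struct cblist *l)
{
        l->head = NULL;
        l->tail = &l->head;
}

static void cblist_enqueue(struct cblist *l, struct cb *c)
{
        c->next = NULL;
        *l->tail = c;                   /* link after the current last */
        l->tail = &c->next;             /* tail now holds the new last ->next */
}

static struct cb *cblist_dequeue(struct cblist *l)
{
        struct cb *c = l->head;

        if (!c)
                return NULL;
        l->head = c->next;
        if (!l->head)
                l->tail = &l->head;     /* list drained: reset tail */
        return c;
}
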
update.c
471 void wakeme_after_rcu(struct rcu_head *head) in wakeme_after_rcu() argument
475 rcu = container_of(head, struct rcu_synchronize, head); in wakeme_after_rcu()
497 init_rcu_head_on_stack(&rs_array[i].head); in __wait_rcu_gp()
499 (crcu_array[i])(&rs_array[i].head, wakeme_after_rcu); in __wait_rcu_gp()
513 destroy_rcu_head_on_stack(&rs_array[i].head); in __wait_rcu_gp()
527 void init_rcu_head(struct rcu_head *head) in init_rcu_head() argument
529 debug_object_init(head, &rcuhead_debug_descr); in init_rcu_head()
533 void destroy_rcu_head(struct rcu_head *head) in destroy_rcu_head() argument
535 debug_object_free(head, &rcuhead_debug_descr); in destroy_rcu_head()
554 void init_rcu_head_on_stack(struct rcu_head *head) in init_rcu_head_on_stack() argument
[all …]
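
wakeme_after_rcu() recovers the enclosing struct rcu_synchronize from the embedded rcu_head via container_of(). A userspace sketch of that recovery, with the macro spelled out rather than taken from a kernel header and the completion replaced by a plain flag for brevity:

#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct cb_head {
        struct cb_head *next;
        void (*func)(struct cb_head *);
};

struct synchronize {
        struct cb_head head;            /* embedded callback head */
        int completed;
};

static void wakeme(struct cb_head *head)
{
        struct synchronize *rs = container_of(head, struct synchronize, head);

        rs->completed = 1;              /* stand-in for complete() */
}
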
rcu.h
185 static inline int debug_rcu_head_queue(struct rcu_head *head) in debug_rcu_head_queue() argument
189 r1 = debug_object_activate(head, &rcuhead_debug_descr); in debug_rcu_head_queue()
190 debug_object_active_state(head, &rcuhead_debug_descr, in debug_rcu_head_queue()
196 static inline void debug_rcu_head_unqueue(struct rcu_head *head) in debug_rcu_head_unqueue() argument
198 debug_object_active_state(head, &rcuhead_debug_descr, in debug_rcu_head_unqueue()
201 debug_object_deactivate(head, &rcuhead_debug_descr); in debug_rcu_head_unqueue()
204 static inline int debug_rcu_head_queue(struct rcu_head *head) in debug_rcu_head_queue() argument
209 static inline void debug_rcu_head_unqueue(struct rcu_head *head) in debug_rcu_head_unqueue() argument
tree.c
2328 trace_rcu_batch_end(rcu_state.name, count, !!rcl.head, need_resched(), in rcu_do_batch()
2688 static void __call_rcu_core(struct rcu_data *rdp, struct rcu_head *head, in __call_rcu_core() argument
2722 rcu_segcblist_first_pend_cb(&rdp->cblist) != head) in __call_rcu_core()
2780 __call_rcu_common(struct rcu_head *head, rcu_callback_t func, bool lazy_in) in __call_rcu_common() argument
2789 WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1)); in __call_rcu_common()
2791 if (debug_rcu_head_queue(head)) { in __call_rcu_common()
2798 pr_err("%s(): Double-freed CB %p->%pS()!!! ", __func__, head, head->func); in __call_rcu_common()
2799 mem_dump_obj(head); in __call_rcu_common()
2801 WRITE_ONCE(head->func, rcu_leak_callback); in __call_rcu_common()
2804 head->func = func; in __call_rcu_common()
[all …]
/kernel/gcov/
clang.c
58 struct list_head head; member
68 struct list_head head; member
89 INIT_LIST_HEAD(&info->head); in llvm_gcov_init()
94 list_add_tail(&info->head, &clang_gcov_list); in llvm_gcov_init()
120 INIT_LIST_HEAD(&info->head); in llvm_gcda_emit_function()
124 list_add_tail(&info->head, &current_info->functions); in llvm_gcda_emit_function()
131 struct gcov_fn_info, head); in llvm_gcda_emit_arcs()
177 struct gcov_info, head); in gcov_info_next()
178 if (list_is_last(&info->head, &clang_gcov_list)) in gcov_info_next()
180 return list_next_entry(info, head); in gcov_info_next()
[all …]
/kernel/
notifier.c
150 ret = notifier_chain_register(&nh->head, n, false); in atomic_notifier_chain_register()
173 ret = notifier_chain_register(&nh->head, n, true); in atomic_notifier_chain_register_unique_prio()
195 ret = notifier_chain_unregister(&nh->head, n); in atomic_notifier_chain_unregister()
225 ret = notifier_call_chain(&nh->head, val, v, -1, NULL); in atomic_notifier_call_chain()
243 return !rcu_access_pointer(nh->head); in atomic_notifier_call_chain_is_empty()
263 return notifier_chain_register(&nh->head, n, unique_priority); in __blocking_notifier_chain_register()
266 ret = notifier_chain_register(&nh->head, n, unique_priority); in __blocking_notifier_chain_register()
326 return notifier_chain_unregister(&nh->head, n); in blocking_notifier_chain_unregister()
329 ret = notifier_chain_unregister(&nh->head, n); in blocking_notifier_chain_unregister()
345 if (rcu_access_pointer(nh->head)) { in blocking_notifier_call_chain_robust()
[all …]
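
All of these calls funnel into a chain rooted at nh->head: callbacks are kept sorted by priority and invoked in order until one asks to stop. A simplified sketch of the invocation half (the NOTIFY_* values and names here are illustrative, not the kernel's exact constants):

#define NOTIFY_OK       0
#define NOTIFY_STOP     1

struct notifier {
        int (*call)(struct notifier *, unsigned long, void *);
        struct notifier *next;          /* kept sorted by priority */
};

static int call_chain(struct notifier *head, unsigned long val, void *v)
{
        int ret = NOTIFY_OK;

        for (struct notifier *n = head; n; n = n->next) {
                ret = n->call(n, val, v);
                if (ret == NOTIFY_STOP)
                        break;          /* a callback vetoed the rest */
        }
        return ret;
}
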
task_work.c
45 struct callback_head *head; in task_work_add() local
50 head = READ_ONCE(task->task_works); in task_work_add()
52 if (unlikely(head == &work_exited)) in task_work_add()
54 work->next = head; in task_work_add()
55 } while (!try_cmpxchg(&task->task_works, &head, work)); in task_work_add()
149 struct callback_head *work, *head, *next; in task_work_run() local
158 head = NULL; in task_work_run()
161 head = &work_exited; in task_work_run()
165 } while (!try_cmpxchg(&task->task_works, &work, head)); in task_work_run()
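
task_work_add() pushes onto a lock-free stack: read the current head, link the new node in front of it, and retry until the compare-exchange installs the node. The same loop in portable C11 atomics; like try_cmpxchg(), a failed atomic_compare_exchange_weak() reloads head, so the retry re-links against the fresh value:

#include <stdatomic.h>

struct callback {
        struct callback *next;
};

static _Atomic(struct callback *) works;

static void work_add(struct callback *work)
{
        struct callback *head = atomic_load(&works);

        do {
                work->next = head;      /* head refreshed on CAS failure */
        } while (!atomic_compare_exchange_weak(&works, &head, work));
}
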
softirq.c
776 struct tasklet_struct *head; member
787 struct tasklet_head *head; in __tasklet_schedule_common() local
791 head = this_cpu_ptr(headp); in __tasklet_schedule_common()
793 *head->tail = t; in __tasklet_schedule_common()
794 head->tail = &(t->next); in __tasklet_schedule_common()
834 list = tl_head->head; in tasklet_action_common()
835 tl_head->head = NULL; in tasklet_action_common()
836 tl_head->tail = &tl_head->head; in tasklet_action_common()
968 &per_cpu(tasklet_vec, cpu).head; in softirq_init()
970 &per_cpu(tasklet_hi_vec, cpu).head; in softirq_init()
[all …]
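
tasklet_action_common() detaches the whole pending list in one step, setting head to NULL and resetting tail to &head, so new tasklets queue onto a fresh list while the old one is processed. A sketch of the detach step (names illustrative):

#include <stddef.h>

struct task_node {
        struct task_node *next;
};

struct task_list {
        struct task_node *head;
        struct task_node **tail;
};

static struct task_node *detach_all(struct task_list *l)
{
        struct task_node *list = l->head;

        l->head = NULL;
        l->tail = &l->head;
        return list;                    /* caller processes this list */
}
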
user-return-notifier.c
39 struct hlist_head *head; in fire_user_return_notifiers() local
41 head = &get_cpu_var(return_notifier_list); in fire_user_return_notifiers()
42 hlist_for_each_entry_safe(urn, tmp2, head, link) in fire_user_return_notifiers()
kprobes.c
378 struct hlist_head *head; in get_kprobe() local
381 head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)]; in get_kprobe()
382 hlist_for_each_entry_rcu(p, head, hlist, in get_kprobe()
886 struct hlist_head *head; in optimize_all_kprobes() local
898 head = &kprobe_table[i]; in optimize_all_kprobes()
899 hlist_for_each_entry(p, head, hlist) in optimize_all_kprobes()
912 struct hlist_head *head; in unoptimize_all_kprobes() local
926 head = &kprobe_table[i]; in unoptimize_all_kprobes()
927 hlist_for_each_entry(p, head, hlist) { in unoptimize_all_kprobes()
1880 static void free_rp_inst_rcu(struct rcu_head *head) in free_rp_inst_rcu() argument
[all …]
/kernel/futex/
syscalls.c
30 SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head, in SYSCALL_DEFINE2() argument
36 if (unlikely(len != sizeof(*head))) in SYSCALL_DEFINE2()
39 current->robust_list = head; in SYSCALL_DEFINE2()
54 struct robust_list_head __user *head; in SYSCALL_DEFINE3() local
73 head = p->robust_list; in SYSCALL_DEFINE3()
76 if (put_user(sizeof(*head), len_ptr)) in SYSCALL_DEFINE3()
78 return put_user(head, head_ptr); in SYSCALL_DEFINE3()
312 struct compat_robust_list_head __user *, head, in COMPAT_SYSCALL_DEFINE2() argument
315 if (unlikely(len != sizeof(*head))) in COMPAT_SYSCALL_DEFINE2()
318 current->compat_robust_list = head; in COMPAT_SYSCALL_DEFINE2()
[all …]
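
set_robust_list simply records a userspace pointer; the kernel only walks the list when the task exits. An illustrative Linux-only sketch of registering an (empty) robust list head; glibc normally does this per thread, so a program rarely calls it directly:

#include <linux/futex.h>        /* struct robust_list_head */
#include <sys/syscall.h>
#include <unistd.h>

static struct robust_list_head rlh = {
        .list = { &rlh.list },  /* empty circular list points at itself */
};

static long register_robust_list(void)
{
        return syscall(SYS_set_robust_list, &rlh, sizeof(rlh));
}
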
core.c
767 struct robust_list __user * __user *head, in fetch_robust_entry() argument
772 if (get_user(uentry, (unsigned long __user *)head)) in fetch_robust_entry()
789 struct robust_list_head __user *head = curr->robust_list; in exit_robust_list() local
800 if (fetch_robust_entry(&entry, &head->list.next, &pi)) in exit_robust_list()
805 if (get_user(futex_offset, &head->futex_offset)) in exit_robust_list()
811 if (fetch_robust_entry(&pending, &head->list_op_pending, &pip)) in exit_robust_list()
815 while (entry != &head->list) { in exit_robust_list()
864 compat_uptr_t __user *head, unsigned int *pi) in compat_fetch_robust_entry() argument
866 if (get_user(*uentry, head)) in compat_fetch_robust_entry()
883 struct compat_robust_list_head __user *head = curr->compat_robust_list; in compat_exit_robust_list() local
[all …]
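
exit_robust_list() walks a list whose nodes live in (possibly hostile) user memory, so every step is fetched with get_user() and the walk is capped to survive cycles. A trusted-memory sketch of the bounded walk; the list head itself is the terminator, and LIST_LIMIT is illustrative (the kernel caps at ROBUST_LIST_LIMIT):

#define LIST_LIMIT 2048

struct rlist {
        struct rlist *next;
};

static int walk_bounded(struct rlist *head)
{
        struct rlist *entry = head->next;
        int limit = LIST_LIMIT;

        while (entry != head) {
                /* ... handle one held lock ... */
                entry = entry->next;
                if (!--limit)
                        return -1;      /* cycle or oversized list */
        }
        return 0;
}
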
/kernel/events/
ring_buffer.c
53 unsigned long head; in perf_output_put_handle() local
76 head = local_read(&rb->head); in perf_output_put_handle()
110 WRITE_ONCE(rb->user_page->data_head, head); in perf_output_put_handle()
125 if (unlikely(head != local_read(&rb->head))) { in perf_output_put_handle()
138 ring_buffer_has_space(unsigned long head, unsigned long tail, in ring_buffer_has_space() argument
143 return CIRC_SPACE(head, tail, data_size) >= size; in ring_buffer_has_space()
145 return CIRC_SPACE(tail, head, data_size) >= size; in ring_buffer_has_space()
155 unsigned long tail, offset, head; in __perf_output_begin() local
196 offset = head = local_read(&rb->head); in __perf_output_begin()
198 if (unlikely(!ring_buffer_has_space(head, tail, in __perf_output_begin()
[all …]
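
ring_buffer_has_space() uses the circ_buf convention: with a power-of-two buffer, the writable space is a masked subtraction of head and tail. The macro below is a sketch intended to be equivalent to the kernel's CIRC_SPACE() from linux/circ_buf.h; data_size must be a power of two for the mask to be correct:

#define CIRC_SPACE(head, tail, size) \
        (((tail) - ((head) + 1)) & ((size) - 1))

static int has_space(unsigned long head, unsigned long tail,
                     unsigned long data_size, unsigned int size)
{
        return CIRC_SPACE(head, tail, data_size) >= size;
}
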
/kernel/power/
console.c
23 struct list_head head; member
51 list_for_each_entry(tmp, &pm_vt_switch_list, head) { in pm_vt_switch_required()
66 list_add(&entry->head, &pm_vt_switch_list); in pm_vt_switch_required()
83 list_for_each_entry(tmp, &pm_vt_switch_list, head) { in pm_vt_switch_unregister()
85 list_del(&tmp->head); in pm_vt_switch_unregister()
119 list_for_each_entry(entry, &pm_vt_switch_list, head) { in pm_vt_switch()
wakeup_reason.c
98 static void delete_list(struct list_head *head) in delete_list() argument
102 while (!list_empty(head)) { in delete_list()
103 n = list_first_entry(head, struct wakeup_irq_node, siblings); in delete_list()
109 static bool add_sibling_node_sorted(struct list_head *head, int irq) in add_sibling_node_sorted() argument
112 struct list_head *predecessor = head; in add_sibling_node_sorted()
114 if (unlikely(WARN_ON(!head))) in add_sibling_node_sorted()
117 if (!list_empty(head)) in add_sibling_node_sorted()
118 list_for_each_entry(n, head, siblings) { in add_sibling_node_sorted()
136 static struct wakeup_irq_node *find_node_in_list(struct list_head *head, in find_node_in_list() argument
141 if (unlikely(WARN_ON(!head))) in find_node_in_list()
[all …]
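
add_sibling_node_sorted() keeps the IRQ list ordered by walking to the first larger entry and linking before it. A userspace sketch of a sorted insert with the same shape, walking with a pointer-to-pointer (names hypothetical):

#include <stdlib.h>

struct irq_node {
        int irq;
        struct irq_node *next;
};

static int insert_sorted(struct irq_node **head, int irq)
{
        struct irq_node **pred = head, *n;

        while (*pred && (*pred)->irq < irq)
                pred = &(*pred)->next;  /* advance past smaller entries */
        if (*pred && (*pred)->irq == irq)
                return 0;               /* already present */

        n = malloc(sizeof(*n));
        if (!n)
                return -1;
        n->irq = irq;
        n->next = *pred;                /* splice in before first larger */
        *pred = n;
        return 1;
}
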
/kernel/trace/
rethook.c
36 static void rethook_free_rcu(struct rcu_head *head) in rethook_free_rcu() argument
38 struct rethook *rh = container_of(head, struct rethook, rcu); in rethook_free_rcu()
43 node = rh->pool.head; in rethook_free_rcu()
112 rh->pool.head = NULL; in rethook_alloc()
133 static void free_rethook_node_rcu(struct rcu_head *head) in free_rethook_node_rcu() argument
135 struct rethook_node *node = container_of(head, struct rethook_node, rcu); in free_rethook_node_rcu()
trace_events_inject.c
141 struct list_head *head; in trace_get_entry_size() local
144 head = trace_get_fields(call); in trace_get_entry_size()
145 list_for_each_entry(field, head, link) { in trace_get_entry_size()
157 struct list_head *head; in trace_alloc_entry() local
165 head = trace_get_fields(call); in trace_alloc_entry()
166 list_for_each_entry(field, head, link) { in trace_alloc_entry()
ring_buffer.c
528 unsigned long head; member
1304 struct buffer_page *head; in rb_head_page_activate() local
1306 head = cpu_buffer->head_page; in rb_head_page_activate()
1307 if (!head) in rb_head_page_activate()
1313 rb_set_list_to_head(head->list.prev); in rb_head_page_activate()
1339 struct buffer_page *head, in rb_head_page_set() argument
1344 unsigned long val = (unsigned long)&head->list; in rb_head_page_set()
1362 struct buffer_page *head, in rb_head_page_set_update() argument
1366 return rb_head_page_set(cpu_buffer, head, prev, in rb_head_page_set_update()
1371 struct buffer_page *head, in rb_head_page_set_head() argument
[all …]
trace_syscalls.c
575 struct hlist_head *head; in perf_syscall_enter() local
592 head = this_cpu_ptr(sys_data->enter_event->perf_events); in perf_syscall_enter()
594 if (!valid_prog_array && hlist_empty(head)) in perf_syscall_enter()
612 hlist_empty(head)) { in perf_syscall_enter()
619 head, NULL); in perf_syscall_enter()
675 struct hlist_head *head; in perf_syscall_exit() local
691 head = this_cpu_ptr(sys_data->exit_event->perf_events); in perf_syscall_exit()
693 if (!valid_prog_array && hlist_empty(head)) in perf_syscall_exit()
709 hlist_empty(head)) { in perf_syscall_exit()
715 1, regs, head, NULL); in perf_syscall_exit()
/kernel/sched/
wait.c
13 INIT_LIST_HEAD(&wq_head->head); in __init_waitqueue_head()
96 curr = list_first_entry(&wq_head->head, wait_queue_entry_t, entry); in __wake_up_common()
98 if (&curr->entry == &wq_head->head) in __wake_up_common()
101 list_for_each_entry_safe_from(curr, next, &wq_head->head, entry) { in __wake_up_common()
115 (&next->entry != &wq_head->head)) { in __wake_up_common()
291 was_empty = list_empty(&wq_head->head); in prepare_to_wait_exclusive()
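
__wake_up_common() walks the queue from the first entry, waking each waiter; exclusive waiters decrement nr_exclusive and the scan stops when it reaches zero. A stripped-down sketch (the WQ_FLAG_EXCLUSIVE value here is illustrative):

#define WQ_FLAG_EXCLUSIVE 0x01

struct waiter {
        unsigned int flags;
        struct waiter *next;
};

static void wake_up_common(struct waiter *head, int nr_exclusive)
{
        for (struct waiter *w = head; w; w = w->next) {
                /* ... wake the task behind w ... */
                if ((w->flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
                        break;
        }
}
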
