/kernel/bpf/ |
D | percpu_freelist.c |
    15   struct pcpu_freelist_head *head = per_cpu_ptr(s->freelist, cpu);  in pcpu_freelist_init()  local
    17   raw_spin_lock_init(&head->lock);  in pcpu_freelist_init()
    18   head->first = NULL;  in pcpu_freelist_init()
    28   static inline void ___pcpu_freelist_push(struct pcpu_freelist_head *head,  in ___pcpu_freelist_push()  argument
    31   raw_spin_lock(&head->lock);  in ___pcpu_freelist_push()
    32   node->next = head->first;  in ___pcpu_freelist_push()
    33   head->first = node;  in ___pcpu_freelist_push()
    34   raw_spin_unlock(&head->lock);  in ___pcpu_freelist_push()
    40   struct pcpu_freelist_head *head = this_cpu_ptr(s->freelist);  in __pcpu_freelist_push()  local
    42   ___pcpu_freelist_push(head, node);  in __pcpu_freelist_push()
    [all …]
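The percpu_freelist.c hits show the push side of the BPF per-CPU freelist: each CPU owns a singly linked LIFO list whose head is protected by a spinlock, and a push simply links the new node in front of the current first element. A minimal userspace C sketch of that pattern, with a pthread spinlock standing in for raw_spin_lock and illustrative type names (not the kernel code itself):

    /* Userspace analogue of the ___pcpu_freelist_push() pattern above. */
    #include <pthread.h>
    #include <stddef.h>

    struct freelist_node {
        struct freelist_node *next;
    };

    struct freelist_head {
        struct freelist_node *first;   /* top of the LIFO list */
        pthread_spinlock_t lock;       /* protects 'first' */
    };

    static void freelist_init(struct freelist_head *head)
    {
        head->first = NULL;
        pthread_spin_init(&head->lock, PTHREAD_PROCESS_PRIVATE);
    }

    static void freelist_push(struct freelist_head *head,
                              struct freelist_node *node)
    {
        pthread_spin_lock(&head->lock);
        node->next = head->first;      /* link in front of the old first element */
        head->first = node;
        pthread_spin_unlock(&head->lock);
    }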
|
D | hashtab.c |
    21    struct hlist_nulls_head head;  member
    373   INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);  in htab_map_alloc()
    417   return &__select_bucket(htab, hash)->head;  in select_bucket()
    421   static struct htab_elem *lookup_elem_raw(struct hlist_nulls_head *head, u32 hash,  in lookup_elem_raw()  argument
    427   hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)  in lookup_elem_raw()
    438   static struct htab_elem *lookup_nulls_elem_raw(struct hlist_nulls_head *head,  in lookup_nulls_elem_raw()  argument
    446   hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)  in lookup_nulls_elem_raw()
    464   struct hlist_nulls_head *head;  in __htab_map_lookup_elem()  local
    475   head = select_bucket(htab, hash);  in __htab_map_lookup_elem()
    477   l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);  in __htab_map_lookup_elem()
    [all …]
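The lookup_elem_raw()/lookup_nulls_elem_raw() hits follow the usual hash-table bucket walk: pick a bucket from the hash, then compare the stored hash and the key bytes for each chained element. A simplified sketch with a plain singly linked list; the real code walks RCU-protected hlist_nulls chains, and struct elem here is purely illustrative:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    struct elem {
        struct elem *next;     /* next element chained in the same bucket */
        uint32_t hash;         /* full hash of the key */
        char key[];            /* key bytes follow the header */
    };

    /* Walk one bucket and return the element whose hash and key match. */
    static struct elem *lookup_elem(struct elem *bucket_head, uint32_t hash,
                                    const void *key, uint32_t key_size)
    {
        struct elem *l;

        for (l = bucket_head; l; l = l->next)
            if (l->hash == hash && !memcmp(l->key, key, key_size))
                return l;
        return NULL;
    }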
|
D | queue_stack_maps.c |
    19    u32 head, tail;  member
    32    return qs->head == qs->tail;  in queue_stack_map_is_empty()
    37    u32 head = qs->head + 1;  in queue_stack_map_is_full()  local
    39    if (unlikely(head >= qs->size))  in queue_stack_map_is_full()
    40    head = 0;  in queue_stack_map_is_full()
    42    return head == qs->tail;  in queue_stack_map_is_full()
    159   index = qs->head - 1;  in __stack_map_get()
    167   qs->head = index;  in __stack_map_get()
    228   dst = &qs->elements[qs->head * qs->map.value_size];  in queue_stack_map_push_elem()
    231   if (unlikely(++qs->head >= qs->size))  in queue_stack_map_push_elem()
    [all …]
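The queue_stack_maps.c hits show how the map tracks a circular region with head and tail indices: the ring is empty when they are equal, and full when advancing head (with wrap-around at size) would land on tail. A small sketch of that arithmetic, using a hypothetical struct ring rather than the kernel's bpf_queue_stack:

    #include <stdbool.h>
    #include <stdint.h>

    struct ring {
        uint32_t head;   /* next slot to be written */
        uint32_t tail;   /* next slot to be consumed */
        uint32_t size;   /* number of slots */
    };

    static bool ring_is_empty(const struct ring *r)
    {
        return r->head == r->tail;
    }

    static bool ring_is_full(const struct ring *r)
    {
        uint32_t head = r->head + 1;

        if (head >= r->size)
            head = 0;            /* wrap around, as in queue_stack_map_is_full() */
        return head == r->tail;  /* advancing would collide with the consumer */
    }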
|
D | devmap.c |
    239   struct hlist_head *head;  in dev_map_free()  local
    242   head = dev_map_index_hash(dtab, i);  in dev_map_free()
    244   hlist_for_each_entry_safe(dev, next, head, index_hlist) {  in dev_map_free()
    293   struct hlist_head *head = dev_map_index_hash(dtab, key);  in __dev_map_hash_lookup_elem()  local
    296   hlist_for_each_entry_rcu(dev, head, index_hlist)  in __dev_map_hash_lookup_elem()
    309   struct hlist_head *head;  in dev_map_hash_get_next_key()  local
    334   head = dev_map_index_hash(dtab, i);  in dev_map_hash_get_next_key()
    336   next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),  in dev_map_hash_get_next_key()
    749   struct hlist_head *head;  in dev_map_hash_remove_netdev()  local
    752   head = dev_map_index_hash(dtab, i);  in dev_map_hash_remove_netdev()
    [all …]
|
/kernel/rcu/ |
D | rcu_segcblist.c |
    20   rclp->head = NULL;  in rcu_cblist_init()
    21   rclp->tail = &rclp->head;  in rcu_cblist_init()
    51   drclp->head = srclp->head;  in rcu_cblist_flush_enqueue()
    52   if (drclp->head)  in rcu_cblist_flush_enqueue()
    55   drclp->tail = &drclp->head;  in rcu_cblist_flush_enqueue()
    62   srclp->head = rhp;  in rcu_cblist_flush_enqueue()
    80   rhp = rclp->head;  in rcu_cblist_dequeue()
    84   rclp->head = rhp->next;  in rcu_cblist_dequeue()
    85   if (!rclp->head)  in rcu_cblist_dequeue()
    86   rclp->tail = &rclp->head;  in rcu_cblist_dequeue()
    [all …]
|
D | rcu.h |
    172   static inline int debug_rcu_head_queue(struct rcu_head *head)  in debug_rcu_head_queue()  argument
    176   r1 = debug_object_activate(head, &rcuhead_debug_descr);  in debug_rcu_head_queue()
    177   debug_object_active_state(head, &rcuhead_debug_descr,  in debug_rcu_head_queue()
    183   static inline void debug_rcu_head_unqueue(struct rcu_head *head)  in debug_rcu_head_unqueue()  argument
    185   debug_object_active_state(head, &rcuhead_debug_descr,  in debug_rcu_head_unqueue()
    188   debug_object_deactivate(head, &rcuhead_debug_descr);  in debug_rcu_head_unqueue()
    191   static inline int debug_rcu_head_queue(struct rcu_head *head)  in debug_rcu_head_queue()  argument
    196   static inline void debug_rcu_head_unqueue(struct rcu_head *head)  in debug_rcu_head_unqueue()  argument
    207   static inline bool __rcu_reclaim(const char *rn, struct rcu_head *head)  in __rcu_reclaim()  argument
    210   unsigned long offset = (unsigned long)head->func;  in __rcu_reclaim()
    [all …]
|
D | update.c |
    334   void wakeme_after_rcu(struct rcu_head *head)  in wakeme_after_rcu()  argument
    338   rcu = container_of(head, struct rcu_synchronize, head);  in wakeme_after_rcu()
    356   init_rcu_head_on_stack(&rs_array[i].head);  in __wait_rcu_gp()
    362   (crcu_array[i])(&rs_array[i].head, wakeme_after_rcu);  in __wait_rcu_gp()
    375   destroy_rcu_head_on_stack(&rs_array[i].head);  in __wait_rcu_gp()
    381   void init_rcu_head(struct rcu_head *head)  in init_rcu_head()  argument
    383   debug_object_init(head, &rcuhead_debug_descr);  in init_rcu_head()
    387   void destroy_rcu_head(struct rcu_head *head)  in destroy_rcu_head()  argument
    389   debug_object_free(head, &rcuhead_debug_descr);  in destroy_rcu_head()
    408   void init_rcu_head_on_stack(struct rcu_head *head)  in init_rcu_head_on_stack()  argument
    [all …]
|
D | tiny.c |
    133   void call_rcu(struct rcu_head *head, rcu_callback_t func)  in call_rcu()  argument
    137   debug_rcu_head_queue(head);  in call_rcu()
    138   head->func = func;  in call_rcu()
    139   head->next = NULL;  in call_rcu()
    142   *rcu_ctrlblk.curtail = head;  in call_rcu()
    143   rcu_ctrlblk.curtail = &head->next;  in call_rcu()
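The tiny.c hits show Tiny RCU's constant-time callback enqueue: curtail always points at the next field of the last queued callback, so call_rcu() appends with two stores and never walks the list. A sketch of the same indirect-tail-pointer technique with illustrative names (struct cb, cb_list), not the kernel implementation:

    #include <stddef.h>

    struct cb {
        struct cb *next;
        void (*func)(struct cb *cb);
    };

    struct cb_list {
        struct cb *head;
        struct cb **curtail;   /* points at the 'next' field of the last callback */
    };

    static void cb_list_init(struct cb_list *l)
    {
        l->head = NULL;
        l->curtail = &l->head; /* empty list: the tail pointer targets 'head' itself */
    }

    static void cb_enqueue(struct cb_list *l, struct cb *cb,
                           void (*func)(struct cb *))
    {
        cb->func = func;
        cb->next = NULL;
        *l->curtail = cb;        /* mirrors '*rcu_ctrlblk.curtail = head' */
        l->curtail = &cb->next;  /* mirrors 'rcu_ctrlblk.curtail = &head->next' */
    }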
|
D | srcutiny.c |
    185   init_rcu_head_on_stack(&rs.head);  in synchronize_srcu()
    187   call_srcu(ssp, &rs.head, wakeme_after_rcu);  in synchronize_srcu()
    189   destroy_rcu_head_on_stack(&rs.head);  in synchronize_srcu()
|
D | tree.c |
    2187   trace_rcu_batch_end(rcu_state.name, count, !!rcl.head, need_resched(),  in rcu_do_batch()
    2499   static void __call_rcu_core(struct rcu_data *rdp, struct rcu_head *head,  in __call_rcu_core()  argument
    2533   rcu_segcblist_first_pend_cb(&rdp->cblist) != head)  in __call_rcu_core()
    2555   __call_rcu(struct rcu_head *head, rcu_callback_t func, bool lazy)  in __call_rcu()  argument
    2562   WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1));  in __call_rcu()
    2564   if (debug_rcu_head_queue(head)) {  in __call_rcu()
    2571   head, head->func);  in __call_rcu()
    2572   WRITE_ONCE(head->func, rcu_leak_callback);  in __call_rcu()
    2575   head->func = func;  in __call_rcu()
    2576   head->next = NULL;  in __call_rcu()
    [all …]
|
/kernel/gcov/ |
D | clang.c |
    59    struct list_head head;  member
    69    struct list_head head;  member
    92    INIT_LIST_HEAD(&info->head);  in llvm_gcov_init()
    97    list_add_tail(&info->head, &clang_gcov_list);  in llvm_gcov_init()
    125   INIT_LIST_HEAD(&info->head);  in llvm_gcda_emit_function()
    133   list_add_tail(&info->head, &current_info->functions);  in llvm_gcda_emit_function()
    140   struct gcov_fn_info, head);  in llvm_gcda_emit_arcs()
    186   struct gcov_info, head);  in gcov_info_next()
    187   if (list_is_last(&info->head, &clang_gcov_list))  in gcov_info_next()
    189   return list_next_entry(info, head);  in gcov_info_next()
    [all …]
|
/kernel/ |
D | notifier.c |
    130   ret = notifier_chain_register(&nh->head, n);  in atomic_notifier_chain_register()
    152   ret = notifier_chain_unregister(&nh->head, n);  in atomic_notifier_chain_unregister()
    185   ret = notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls);  in __atomic_notifier_call_chain()
    226   return notifier_chain_register(&nh->head, n);  in blocking_notifier_chain_register()
    229   ret = notifier_chain_register(&nh->head, n);  in blocking_notifier_chain_register()
    252   ret = notifier_chain_cond_register(&nh->head, n);  in blocking_notifier_chain_cond_register()
    279   return notifier_chain_unregister(&nh->head, n);  in blocking_notifier_chain_unregister()
    282   ret = notifier_chain_unregister(&nh->head, n);  in blocking_notifier_chain_unregister()
    317   if (rcu_access_pointer(nh->head)) {  in __blocking_notifier_call_chain()
    319   ret = notifier_call_chain(&nh->head, val, v, nr_to_call,  in __blocking_notifier_call_chain()
    [all …]
|
D | task_work.c |
    30    struct callback_head *head;  in task_work_add()  local
    33    head = READ_ONCE(task->task_works);  in task_work_add()
    34    if (unlikely(head == &work_exited))  in task_work_add()
    36    work->next = head;  in task_work_add()
    37    } while (cmpxchg(&task->task_works, head, work) != head);  in task_work_add()
    93    struct callback_head *work, *head, *next;  in task_work_run()  local
    103   head = !work && (task->flags & PF_EXITING) ?  in task_work_run()
    105   } while (cmpxchg(&task->task_works, work, head) != work);  in task_work_run()
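task_work_add() above queues work with a lock-free push: read the current list head, link the new entry in front of it, and retry a compare-and-swap until no concurrent update has raced in. A hedged userspace sketch of that loop using C11 atomics in place of the kernel's READ_ONCE()/cmpxchg(), and omitting the work_exited check:

    #include <stdatomic.h>

    struct callback_head {
        struct callback_head *next;
        void (*func)(struct callback_head *);
    };

    /* Push 'work' onto the singly linked list anchored at '*list'.
     * On CAS failure, 'head' is reloaded and the link is retried. */
    static void work_add(_Atomic(struct callback_head *) *list,
                         struct callback_head *work)
    {
        struct callback_head *head = atomic_load(list);

        do {
            work->next = head;   /* link on top of whatever is currently queued */
        } while (!atomic_compare_exchange_weak(list, &head, work));
    }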
|
D | softirq.c |
    464   struct tasklet_struct *head;  member
    475   struct tasklet_head *head;  in __tasklet_schedule_common()  local
    479   head = this_cpu_ptr(headp);  in __tasklet_schedule_common()
    481   *head->tail = t;  in __tasklet_schedule_common()
    482   head->tail = &(t->next);  in __tasklet_schedule_common()
    508   list = tl_head->head;  in tasklet_action_common()
    509   tl_head->head = NULL;  in tasklet_action_common()
    510   tl_head->tail = &tl_head->head;  in tasklet_action_common()
    581   &per_cpu(tasklet_vec, cpu).head;  in softirq_init()
    583   &per_cpu(tasklet_hi_vec, cpu).head;  in softirq_init()
    [all …]
|
D | kprobes.c |
    325    struct hlist_head *head;  in get_kprobe()  local
    328    head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];  in get_kprobe()
    329    hlist_for_each_entry_rcu(p, head, hlist) {  in get_kprobe()
    818    struct hlist_head *head;  in optimize_all_kprobes()  local
    830    head = &kprobe_table[i];  in optimize_all_kprobes()
    831    hlist_for_each_entry_rcu(p, head, hlist)  in optimize_all_kprobes()
    843    struct hlist_head *head;  in unoptimize_all_kprobes()  local
    857    head = &kprobe_table[i];  in unoptimize_all_kprobes()
    858    hlist_for_each_entry_rcu(p, head, hlist) {  in unoptimize_all_kprobes()
    1158   struct hlist_head *head)  in recycle_rp_inst()  argument
    [all …]
|
D | user-return-notifier.c |
    39   struct hlist_head *head;  in fire_user_return_notifiers()  local
    41   head = &get_cpu_var(return_notifier_list);  in fire_user_return_notifiers()
    42   hlist_for_each_entry_safe(urn, tmp2, head, link)  in fire_user_return_notifiers()
|
D | futex.c |
    901    struct list_head *next, *head = &curr->pi_state_list;  in exit_pi_state_list()  local
    914    while (!list_empty(head)) {  in exit_pi_state_list()
    915    next = head->next;  in exit_pi_state_list()
    945    if (head->next != next) {  in exit_pi_state_list()
    3480   SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,  in SYSCALL_DEFINE2()  argument
    3488   if (unlikely(len != sizeof(*head)))  in SYSCALL_DEFINE2()
    3491   current->robust_list = head;  in SYSCALL_DEFINE2()
    3506   struct robust_list_head __user *head;  in SYSCALL_DEFINE3()  local
    3528   head = p->robust_list;  in SYSCALL_DEFINE3()
    3531   if (put_user(sizeof(*head), len_ptr))  in SYSCALL_DEFINE3()
    [all …]
|
D | tracepoint.c |
    63   static void srcu_free_old_probes(struct rcu_head *head)  in srcu_free_old_probes()  argument
    65   kfree(container_of(head, struct tp_probes, rcu));  in srcu_free_old_probes()
    68   static void rcu_free_old_probes(struct rcu_head *head)  in rcu_free_old_probes()  argument
    70   call_srcu(&tracepoint_srcu, head, srcu_free_old_probes);  in rcu_free_old_probes()
|
/kernel/events/ |
D | ring_buffer.c |
    53    unsigned long head;  in perf_output_put_handle()  local
    76    head = local_read(&rb->head);  in perf_output_put_handle()
    110   WRITE_ONCE(rb->user_page->data_head, head);  in perf_output_put_handle()
    125   if (unlikely(head != local_read(&rb->head))) {  in perf_output_put_handle()
    138   ring_buffer_has_space(unsigned long head, unsigned long tail,  in ring_buffer_has_space()  argument
    143   return CIRC_SPACE(head, tail, data_size) >= size;  in ring_buffer_has_space()
    145   return CIRC_SPACE(tail, head, data_size) >= size;  in ring_buffer_has_space()
    154   unsigned long tail, offset, head;  in __perf_output_begin()  local
    193   offset = head = local_read(&rb->head);  in __perf_output_begin()
    195   if (unlikely(!ring_buffer_has_space(head, tail,  in __perf_output_begin()
    [all …]
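ring_buffer_has_space() above uses CIRC_SPACE() from include/linux/circ_buf.h to check whether a record of 'size' bytes fits between the producer and consumer positions, swapping head and tail for a backward-writing buffer. A sketch that open-codes the same modular arithmetic, assuming a power-of-two buffer size as the perf ring buffer guarantees:

    /* Free bytes between producer 'head' and consumer 'tail' in a buffer of
     * 'bufsz' bytes; one byte stays unused so head == tail means "empty". */
    static unsigned long circ_space(unsigned long head, unsigned long tail,
                                    unsigned long bufsz)
    {
        return (tail - (head + 1)) & (bufsz - 1);
    }

    static int has_space(unsigned long head, unsigned long tail,
                         unsigned long data_size, unsigned long size,
                         int backward)
    {
        if (!backward)
            return circ_space(head, tail, data_size) >= size;
        /* A backward-writing buffer moves head downward, so the roles of
         * head and tail swap, as in the perf code above. */
        return circ_space(tail, head, data_size) >= size;
    }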
|
/kernel/power/ |
D | console.c |
    23    struct list_head head;  member
    51    list_for_each_entry(tmp, &pm_vt_switch_list, head) {  in pm_vt_switch_required()
    66    list_add(&entry->head, &pm_vt_switch_list);  in pm_vt_switch_required()
    83    list_for_each_entry(tmp, &pm_vt_switch_list, head) {  in pm_vt_switch_unregister()
    85    list_del(&tmp->head);  in pm_vt_switch_unregister()
    119   list_for_each_entry(entry, &pm_vt_switch_list, head) {  in pm_vt_switch()
|
/kernel/trace/ |
D | ring_buffer.c |
    507   unsigned long head;  member
    914   struct buffer_page *head;  in rb_head_page_activate()  local
    916   head = cpu_buffer->head_page;  in rb_head_page_activate()
    917   if (!head)  in rb_head_page_activate()
    923   rb_set_list_to_head(cpu_buffer, head->list.prev);  in rb_head_page_activate()
    949   struct buffer_page *head,  in rb_head_page_set()  argument
    954   unsigned long val = (unsigned long)&head->list;  in rb_head_page_set()
    972   struct buffer_page *head,  in rb_head_page_set_update()  argument
    976   return rb_head_page_set(cpu_buffer, head, prev,  in rb_head_page_set_update()
    981   struct buffer_page *head,  in rb_head_page_set_head()  argument
    [all …]
|
D | trace_syscalls.c |
    587   struct hlist_head *head;  in perf_syscall_enter()  local
    604   head = this_cpu_ptr(sys_data->enter_event->perf_events);  in perf_syscall_enter()
    606   if (!valid_prog_array && hlist_empty(head))  in perf_syscall_enter()
    624   hlist_empty(head)) {  in perf_syscall_enter()
    631   head, NULL);  in perf_syscall_enter()
    687   struct hlist_head *head;  in perf_syscall_exit()  local
    703   head = this_cpu_ptr(sys_data->exit_event->perf_events);  in perf_syscall_exit()
    705   if (!valid_prog_array && hlist_empty(head))  in perf_syscall_exit()
    721   hlist_empty(head)) {  in perf_syscall_exit()
    727   1, regs, head, NULL);  in perf_syscall_exit()
|
D | trace_events.c |
    75    __find_event_field(struct list_head *head, char *name)  in __find_event_field()  argument
    79    list_for_each_entry(field, head, link) {  in __find_event_field()
    91    struct list_head *head;  in trace_find_event_field()  local
    93    head = trace_get_fields(call);  in trace_find_event_field()
    94    field = __find_event_field(head, name);  in trace_find_event_field()
    105   static int __trace_define_field(struct list_head *head, const char *type,  in __trace_define_field()  argument
    127   list_add(&field->link, head);  in __trace_define_field()
    136   struct list_head *head;  in trace_define_field()  local
    141   head = trace_get_fields(call);  in trace_define_field()
    142   return __trace_define_field(head, type, name, offset, size,  in trace_define_field()
    [all …]
|
/kernel/sched/ |
D | wait.c |
    13    INIT_LIST_HEAD(&wq_head->head);  in __init_waitqueue_head()
    81    curr = list_first_entry(&wq_head->head, wait_queue_entry_t, entry);  in __wake_up_common()
    83    if (&curr->entry == &wq_head->head)  in __wake_up_common()
    86    list_for_each_entry_safe_from(curr, next, &wq_head->head, entry) {  in __wake_up_common()
    100   (&next->entry != &wq_head->head)) {  in __wake_up_common()
|
/kernel/time/ |
D | posix-timers.c |
    117   static struct k_itimer *__posix_timers_find(struct hlist_head *head,  in __posix_timers_find()  argument
    123   hlist_for_each_entry_rcu(timer, head, t_hash) {  in __posix_timers_find()
    133   struct hlist_head *head = &posix_timers_hashtable[hash(sig, id)];  in posix_timer_by_id()  local
    135   return __posix_timers_find(head, sig, id);  in posix_timer_by_id()
    142   struct hlist_head *head;  in posix_timer_add()  local
    147   head = &posix_timers_hashtable[hash(sig, sig->posix_timer_id)];  in posix_timer_add()
    148   if (!__posix_timers_find(head, sig, sig->posix_timer_id)) {  in posix_timer_add()
    149   hlist_add_head_rcu(&timer->t_hash, head);  in posix_timer_add()
    443   static void k_itimer_rcu_free(struct rcu_head *head)  in k_itimer_rcu_free()  argument
    445   struct k_itimer *tmr = container_of(head, struct k_itimer, rcu);  in k_itimer_rcu_free()
|