Searched refs:next (Results 1 – 25 of 134) sorted by relevance

/kernel/locking/
osq_lock.c
46 struct optimistic_spin_node *next = NULL; in osq_wait_next() local
78 if (node->next) { in osq_wait_next()
79 next = xchg(&node->next, NULL); in osq_wait_next()
80 if (next) in osq_wait_next()
87 return next; in osq_wait_next()
93 struct optimistic_spin_node *prev, *next; in osq_lock() local
98 node->next = NULL; in osq_lock()
126 WRITE_ONCE(prev->next, node); in osq_lock()
161 if (data_race(prev->next) == node && in osq_lock()
162 cmpxchg(&prev->next, node, NULL) == node) in osq_lock()
[all …]
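
The osq_lock.c hits center on one idiom: a waiter claims its successor exactly once by atomically exchanging node->next with NULL. A minimal userspace sketch of that step, assuming C11 atomics in place of the kernel's xchg() and a hypothetical struct name:

#include <stdatomic.h>
#include <stddef.h>

struct spin_node {
    _Atomic(struct spin_node *) next;
};

/* Claim the successor of @node exactly once; returns NULL if none. */
static struct spin_node *claim_next(struct spin_node *node)
{
    struct spin_node *next = NULL;

    if (atomic_load(&node->next)) {
        /* next = xchg(&node->next, NULL): only one caller can win */
        next = atomic_exchange(&node->next, NULL);
    }
    return next;
}
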
mcs_spinlock.h
19 struct mcs_spinlock *next; member
71 node->next = NULL; in mcs_spin_lock()
91 WRITE_ONCE(prev->next, node); in mcs_spin_lock()
104 struct mcs_spinlock *next = READ_ONCE(node->next); in mcs_spin_unlock() local
106 if (likely(!next)) { in mcs_spin_unlock()
113 while (!(next = READ_ONCE(node->next))) in mcs_spin_unlock()
118 arch_mcs_spin_unlock_contended(&next->locked); in mcs_spin_unlock()
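
Taken together, the mcs_spinlock.h hits are the complete MCS protocol: enqueue by swapping the tail, publish yourself via prev->next, spin on your own ->locked flag, and hand off by setting next->locked. A self-contained userspace sketch, assuming C11 atomics and plain busy-wait loops where the kernel uses smp_cond_load/arch_mcs primitives (the qspinlock.c slowpath below builds on the same queue):

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct mcs_node {
    _Atomic(struct mcs_node *) next;
    atomic_bool locked;                /* set true to release this waiter */
};

static void mcs_lock(_Atomic(struct mcs_node *) *tail, struct mcs_node *node)
{
    struct mcs_node *prev;

    atomic_store(&node->next, NULL);   /* node->next = NULL */
    atomic_store(&node->locked, false);

    prev = atomic_exchange(tail, node);        /* join the queue */
    if (!prev)
        return;                                /* queue was empty: lock taken */

    atomic_store(&prev->next, node);           /* WRITE_ONCE(prev->next, node) */
    while (!atomic_load(&node->locked))        /* spin on our own flag */
        ;
}

static void mcs_unlock(_Atomic(struct mcs_node *) *tail, struct mcs_node *node)
{
    struct mcs_node *next = atomic_load(&node->next);  /* READ_ONCE(node->next) */

    if (!next) {
        struct mcs_node *expected = node;

        /* no visible successor: try to swing the tail back to empty */
        if (atomic_compare_exchange_strong(tail, &expected, NULL))
            return;
        /* a successor is mid-enqueue; wait for prev->next to appear */
        while (!(next = atomic_load(&node->next)))
            ;
    }
    atomic_store(&next->locked, true); /* arch_mcs_spin_unlock_contended() */
}
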
qspinlock.c
318 struct mcs_spinlock *prev, *next, *node; in queued_spin_lock_slowpath() local
438 node->next = NULL; in queued_spin_lock_slowpath()
464 next = NULL; in queued_spin_lock_slowpath()
474 WRITE_ONCE(prev->next, node); in queued_spin_lock_slowpath()
485 next = READ_ONCE(node->next); in queued_spin_lock_slowpath()
486 if (next) in queued_spin_lock_slowpath()
487 prefetchw(next); in queued_spin_lock_slowpath()
553 if (!next) in queued_spin_lock_slowpath()
554 next = smp_cond_load_relaxed(&node->next, (VAL)); in queued_spin_lock_slowpath()
556 arch_mcs_spin_unlock_contended(&next->locked); in queued_spin_lock_slowpath()
[all …]
lockdep.c
1613 __calc_dep_bit(struct held_lock *prev, struct held_lock *next) in __calc_dep_bit() argument
1615 return (prev->read == 0) + ((next->read != 2) << 1); in __calc_dep_bit()
1618 static inline u8 calc_dep(struct held_lock *prev, struct held_lock *next) in calc_dep() argument
1620 return 1U << __calc_dep_bit(prev, next); in calc_dep()
1628 __calc_dep_bitb(struct held_lock *prev, struct held_lock *next) in __calc_dep_bitb() argument
1630 return (next->read != 2) + ((prev->read == 0) << 1); in __calc_dep_bitb()
1633 static inline u8 calc_depb(struct held_lock *prev, struct held_lock *next) in calc_depb() argument
1635 return 1U << __calc_dep_bitb(prev, next); in calc_depb()
2543 struct held_lock *next, in print_bad_irq_dependency() argument
2563 print_lock(next); in print_bad_irq_dependency()
[all …]
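
The lockdep hits pack each prev -> next dependency into one of four bits, keyed on whether prev is held for write (read == 0) and whether next is a non-recursive read (read != 2). A runnable sketch of that bit arithmetic, with a hypothetical stand-in for struct held_lock:

#include <stdio.h>

struct held { int read; };     /* hypothetical stand-in for struct held_lock */

/* 1U << ((prev is a writer) + ((next is not a recursive read) << 1)) */
static unsigned int calc_dep(const struct held *prev, const struct held *next)
{
    return 1U << ((prev->read == 0) + ((next->read != 2) << 1));
}

int main(void)
{
    struct held writer = { .read = 0 }, rec_read = { .read = 2 };

    printf("W->W bit: 0x%x, RR->RR bit: 0x%x\n",
           calc_dep(&writer, &writer), calc_dep(&rec_read, &rec_read));
    return 0;   /* prints 0x8 and 0x1 */
}
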
/kernel/printk/
printk_ringbuffer.c
326 LPOS_DATALESS((blk)->next))
620 lpos_begin = blk_lpos->next; in data_make_reusable()
812 if (!data_push_tail(rb, desc.text_blk_lpos.next)) in desc_push_tail()
1035 blk_lpos->next = NO_LPOS; in data_alloc()
1049 blk_lpos->next = FAILED_LPOS; in data_alloc()
1088 blk_lpos->next = next_lpos; in data_alloc()
1116 if (head_lpos != blk_lpos->next) in data_realloc()
1120 wrapped = (DATA_WRAPS(data_ring, blk_lpos->begin) != DATA_WRAPS(data_ring, blk_lpos->next)); in data_realloc()
1165 (blk_lpos->next - blk_lpos->begin) - sizeof(blk->id)); in data_realloc()
1169 blk_lpos->next = next_lpos; in data_realloc()
[all …]
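
In these printk_ringbuffer.c hits, a data block is bracketed by two logical positions, begin and next, and line 1165 recovers the payload size as the distance between them minus the leading block id. A small sketch of that bookkeeping, with illustrative types and values:

#include <stdint.h>
#include <stdio.h>

/* illustrative stand-in for the begin/next lpos pair in the hits above */
struct blk_lpos { uint64_t begin, next; };

/* payload size = distance between the two lpos values, minus the id word */
static uint64_t data_size(const struct blk_lpos *lp)
{
    return (lp->next - lp->begin) - sizeof(uint64_t);
}

int main(void)
{
    struct blk_lpos lp = { .begin = 128, .next = 192 };

    printf("payload bytes: %llu\n", (unsigned long long)data_size(&lp));
    return 0;   /* 64 bytes reserved, 8 of them the block id */
}
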
/kernel/trace/
pid_list.c
22 pid_list->lower_list = chunk->next; in get_lower_chunk()
25 chunk->next = NULL; in get_lower_chunk()
46 pid_list->upper_list = chunk->next; in get_upper_chunk()
49 chunk->next = NULL; in get_upper_chunk()
65 chunk->next = pid_list->lower_list; in put_lower_chunk()
75 chunk->next = pid_list->upper_list; in put_upper_chunk()
274 unsigned int *next) in trace_pid_list_next() argument
313 *next = pid_join(upper1, upper2, lower); in trace_pid_list_next()
361 upper_next = &chunk->next; in pid_list_refill_irq()
372 lower_next = &chunk->next; in pid_list_refill_irq()
[all …]
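
The pid_list.c hits are a plain singly linked freelist: get_*_chunk() pops the head and clears ->next, put_*_chunk() pushes it back. A sketch of the pattern, assuming a hypothetical chunk type and ignoring the pid_list->lock the real code holds:

#include <stddef.h>

struct chunk { struct chunk *next; };   /* hypothetical chunk type */

static struct chunk *get_chunk(struct chunk **list)
{
    struct chunk *c = *list;

    if (c) {
        *list = c->next;   /* pid_list->lower_list = chunk->next */
        c->next = NULL;    /* chunk->next = NULL */
    }
    return c;
}

static void put_chunk(struct chunk **list, struct chunk *c)
{
    c->next = *list;       /* chunk->next = pid_list->lower_list */
    *list = c;
}
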
rethook.c
29 node = node->next; in rethook_flush_task()
46 node = node->next; in rethook_free_rcu()
224 node = node->next; in __rethook_find_ret_addr()
232 node = node->next; in __rethook_find_ret_addr()
326 first = first->next; in rethook_trampoline_handler()
334 current->rethooks.first = node->next; in rethook_trampoline_handler()
335 node->next = NULL; in rethook_trampoline_handler()
339 first = first->next; in rethook_trampoline_handler()
ftrace.c
197 } else if (rcu_dereference_protected(ftrace_ops_list->next, in update_ftrace_function()
258 rcu_assign_pointer(ops->next, *list); in add_ftrace_ops()
280 rcu_dereference_protected(ops->next, in remove_ftrace_ops()
286 for (p = list; *p != &ftrace_list_end; p = &(*p)->next) in remove_ftrace_ops()
293 *p = (*p)->next; in remove_ftrace_ops()
393 struct ftrace_profile_page *next; member
435 pg = pg->next; in function_stat_next()
574 pg = pg->next; in ftrace_profile_reset()
614 pg->next = (void *)get_zeroed_page(GFP_KERNEL); in ftrace_profile_pages_init()
615 if (!pg->next) in ftrace_profile_pages_init()
[all …]
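
The remove_ftrace_ops() hits (lines 286 and 293) show the classic pointer-to-pointer deletion walk: advance p until it addresses the victim's link, then splice with *p = (*p)->next. A simplified sketch, NULL-terminated instead of ending at the kernel's ftrace_list_end sentinel, and with no RCU:

struct ops { struct ops *next; };

static int list_remove(struct ops **list, struct ops *victim)
{
    struct ops **p;

    /* for (p = list; *p != &ftrace_list_end; p = &(*p)->next) */
    for (p = list; *p; p = &(*p)->next)
        if (*p == victim)
            break;
    if (!*p)
        return -1;             /* not on the list */
    *p = (*p)->next;           /* unlink in place, no prev pointer needed */
    return 0;
}
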
trace_sched_wakeup.c
376 struct task_struct *next, in tracing_sched_switch_trace() argument
392 entry->next_pid = next->pid; in tracing_sched_switch_trace()
393 entry->next_prio = next->prio; in tracing_sched_switch_trace()
394 entry->next_state = task_state_index(next); in tracing_sched_switch_trace()
395 entry->next_cpu = task_cpu(next); in tracing_sched_switch_trace()
431 struct task_struct *prev, struct task_struct *next, in probe_wakeup_sched_switch() argument
455 if (next != wakeup_task) in probe_wakeup_sched_switch()
470 if (unlikely(!tracer_enabled || next != wakeup_task)) in probe_wakeup_sched_switch()
477 tracing_sched_switch_trace(wakeup_trace, prev, next, trace_ctx); in probe_wakeup_sched_switch()
/kernel/
task_work.c
54 work->next = head; in task_work_add()
106 pprev = &work->next; in task_work_cancel_match()
108 } else if (try_cmpxchg(pprev, &work, work->next)) in task_work_cancel_match()
149 struct callback_head *work, *head, *next; in task_work_run() local
178 next = work->next; in task_work_run()
180 work = next; in task_work_run()
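
task_work.c pushes callbacks onto a lock-free LIFO: line 54 links the new work at the head, and the cancel path at line 108 unlinks with try_cmpxchg. A sketch of the push side, assuming C11 atomics in place of the kernel's cmpxchg loop:

#include <stdatomic.h>
#include <stddef.h>

struct callback_head {
    struct callback_head *next;
    void (*func)(struct callback_head *);
};

static void work_add(_Atomic(struct callback_head *) *head,
                     struct callback_head *work)
{
    struct callback_head *old = atomic_load(head);

    do {
        work->next = old;      /* work->next = head */
        /* on failure, 'old' is refreshed and we re-link and retry */
    } while (!atomic_compare_exchange_weak(head, &old, work));
}
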
jump_label.c
518 struct static_key_mod *next; member
542 key->next = mod; in static_key_set_mod()
574 for (mod = static_key_mod(key); mod; mod = mod->next) { in __jump_label_mod_update()
639 jlm2->next = NULL; in jump_label_add_module()
645 jlm->next = static_key_mod(key); in jump_label_add_module()
678 prev = &key->next; in jump_label_del_module()
682 prev = &jlm->next; in jump_label_del_module()
683 jlm = jlm->next; in jump_label_del_module()
690 if (prev == &key->next) in jump_label_del_module()
691 static_key_set_mod(key, jlm->next); in jump_label_del_module()
[all …]
resource.c
137 .next = r_next,
603 goto next; in __find_resource()
625 next: if (!this || this->end == root->end) in __find_resource()
770 struct resource *first, *next; in __insert_resource() local
788 for (next = first; ; next = next->sibling) { in __insert_resource()
790 if (next->start < new->start || next->end > new->end) in __insert_resource()
791 return next; in __insert_resource()
792 if (!next->sibling) in __insert_resource()
794 if (next->sibling->start > new->end) in __insert_resource()
799 new->sibling = next->sibling; in __insert_resource()
[all …]
static_call_inline.c
146 .next = static_call_key_next(key), in __static_call_update()
151 for (site_mod = &first; site_mod; site_mod = site_mod->next) { in __static_call_update()
258 site_mod->next = NULL; in __static_call_init()
270 site_mod->next = static_call_key_next(key); in __static_call_init()
410 prev = &site_mod->next, site_mod = site_mod->next) in static_call_del_module()
416 *prev = site_mod->next; in static_call_del_module()
audit_tree.c
544 struct audit_krule *rule, *next; in kill_rules() local
547 list_for_each_entry_safe(rule, next, &tree->rules, rlist) { in kill_rules()
618 for (p = tree->chunks.next; p != &tree->chunks; p = q) { in trim_marked()
620 q = p->next; in trim_marked()
683 while (cursor.next != &tree_list) { in audit_trim_trees()
690 tree = container_of(cursor.next, struct audit_tree, list); in audit_trim_trees()
768 victim = list_entry(prune_list.next, in prune_tree_thread()
896 while (cursor.next != &tree_list) { in audit_tag_tree()
900 tree = container_of(cursor.next, struct audit_tree, list); in audit_tag_tree()
982 victim = list_entry(list->next, struct audit_tree, list); in audit_kill_trees()
[all …]
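
The audit_tree.c hits iterate circular list_head rings by hand: trim_marked() (line 618) saves q = p->next before touching p, so the loop survives unlinking the current entry. A sketch of that safe walk, with a minimal list_head stand-in:

struct list_head { struct list_head *next, *prev; };

/* for (p = head->next; p != head; p = q) { q = p->next; ... } */
static void walk_safely(struct list_head *head, void (*fn)(struct list_head *))
{
    struct list_head *p, *q;

    for (p = head->next; p != head; p = q) {
        q = p->next;           /* save the successor before fn() may unlink p */
        fn(p);
    }
}
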
notifier.c
36 nl = &((*nl)->next); in notifier_chain_register()
38 n->next = *nl; in notifier_chain_register()
48 rcu_assign_pointer(*nl, n->next); in notifier_chain_unregister()
51 nl = &((*nl)->next); in notifier_chain_unregister()
78 next_nb = rcu_dereference_raw(nb->next); in notifier_call_chain()
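
notifier.c keeps each chain sorted by priority: registration walks with a pointer-to-pointer (line 36) and splices the new block in (line 38). A sketch without the kernel's RCU publication step, using a hypothetical block type:

struct nblock { struct nblock *next; int priority; };

static void chain_register(struct nblock **nl, struct nblock *n)
{
    while (*nl && (*nl)->priority >= n->priority)
        nl = &((*nl)->next);   /* nl = &((*nl)->next): advance the link slot */
    n->next = *nl;             /* n->next = *nl */
    *nl = n;                   /* rcu_assign_pointer(*nl, n) in the kernel */
}
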
/kernel/rcu/
tiny.c
109 struct rcu_head *next, *list; in rcu_process_callbacks() local
129 next = list->next; in rcu_process_callbacks()
130 prefetch(next); in rcu_process_callbacks()
135 list = next; in rcu_process_callbacks()
187 head->next = NULL; in call_rcu()
191 rcu_ctrlblk.curtail = &head->next; in call_rcu()
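
Tiny RCU's callback queue is a tail-pointer list: call_rcu() appends at curtail (lines 187-191), and rcu_process_callbacks() detaches the list and walks it, grabbing ->next before each callback can free its node (lines 129-135). A sketch of both halves, assuming curtail starts at &head (the rcu_segcblist.c hits below rest on the same tail-pointer trick):

#include <stddef.h>

struct rcu_head {
    struct rcu_head *next;
    void (*func)(struct rcu_head *);
};

struct cblist {
    struct rcu_head *head;
    struct rcu_head **curtail;   /* always points at the last ->next slot */
};

static void cb_enqueue(struct cblist *cl, struct rcu_head *h)
{
    h->next = NULL;              /* head->next = NULL */
    *cl->curtail = h;
    cl->curtail = &h->next;      /* rcu_ctrlblk.curtail = &head->next */
}

static void cb_run_all(struct cblist *cl)
{
    struct rcu_head *list = cl->head, *next;

    cl->head = NULL;             /* detach the whole list */
    cl->curtail = &cl->head;
    while (list) {
        next = list->next;       /* grab ->next before func() can free it */
        list->func(list);
        list = next;
    }
}
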
rcu_segcblist.c
31 rclp->tail = &rhp->next; in rcu_cblist_enqueue()
56 rhp->next = NULL; in rcu_cblist_flush_enqueue()
58 srclp->tail = &rhp->next; in rcu_cblist_flush_enqueue()
75 rclp->head = rhp->next; in rcu_cblist_dequeue()
345 rhp->next = NULL; in rcu_segcblist_enqueue()
347 WRITE_ONCE(rsclp->tails[RCU_NEXT_TAIL], &rhp->next); in rcu_segcblist_enqueue()
369 rhp->next = NULL; in rcu_segcblist_entrain()
376 WRITE_ONCE(rsclp->tails[i], &rhp->next); in rcu_segcblist_entrain()
/kernel/time/
tick-common.c
112 ktime_t next = dev->next_event; in tick_handle_periodic() local
133 next = ktime_add_ns(next, TICK_NSEC); in tick_handle_periodic()
135 if (!clockevents_program_event(dev, next, false)) in tick_handle_periodic()
167 ktime_t next; in tick_setup_periodic() local
171 next = tick_next_period; in tick_setup_periodic()
177 if (!clockevents_program_event(dev, next, false)) in tick_setup_periodic()
179 next = ktime_add_ns(next, TICK_NSEC); in tick_setup_periodic()
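
tick_handle_periodic() reprograms the next tick by stepping the expiry forward one TICK_NSEC at a time until the event device accepts it, so a late interrupt never programs an expiry in the past. A toy sketch of that retry loop, with a fake clock standing in for clockevents_program_event():

#include <stdint.h>

typedef int64_t ktime_t;
#define TICK_NSEC 1000000        /* illustrative 1 ms tick period */

static ktime_t now_ns;           /* fake clock for the sketch */

/* stand-in for clockevents_program_event(): fail on expiries in the past */
static int program_event(ktime_t expires)
{
    return expires <= now_ns;
}

static ktime_t program_next_tick(ktime_t next)
{
    for (;;) {
        next += TICK_NSEC;       /* next = ktime_add_ns(next, TICK_NSEC) */
        if (!program_event(next))
            return next;         /* device accepted the expiry */
    }
}
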
/kernel/sched/
stats.h
111 void psi_task_switch(struct task_struct *prev, struct task_struct *next,
193 struct task_struct *next, in psi_sched_switch() argument
199 psi_task_switch(prev, next, sleep); in psi_sched_switch()
207 struct task_struct *next, in psi_sched_switch() argument
290 sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next) in sched_info_switch() argument
300 if (next != rq->idle) in sched_info_switch()
301 sched_info_arrive(rq, next); in sched_info_switch()
307 # define sched_info_switch(rq, t, next) do { } while (0) argument
/kernel/gcov/
gcc_4_7.c
96 struct gcov_info *next; member
138 return info->next; in gcov_info_next()
147 info->next = gcov_info_head; in gcov_info_link()
159 prev->next = info->next; in gcov_info_unlink()
161 gcov_info_head = info->next; in gcov_info_unlink()
293 dup->next = NULL; in gcov_info_dup()
/kernel/events/
uprobes.c
759 uc->next = uprobe->consumers; in consumer_add()
775 for (con = &uprobe->consumers; *con; con = &(*con)->next) { in consumer_del()
777 *con = uc->next; in consumer_del()
882 for (uc = uprobe->consumers; uc; uc = uc->next) { in filter_chain()
949 struct map_info *next; member
956 struct map_info *next = info->next; in free_map_info() local
958 return next; in free_map_info()
985 prev->next = NULL; in build_map_info()
996 prev = prev->next; in build_map_info()
997 info->next = curr; in build_map_info()
[all …]
/kernel/irq/
ipi.c
56 unsigned int next; in irq_reserve_ipi() local
69 next = cpumask_next_zero(offset, dest); in irq_reserve_ipi()
70 if (next < nr_cpu_ids) in irq_reserve_ipi()
71 next = cpumask_next(next, dest); in irq_reserve_ipi()
72 if (next < nr_cpu_ids) { in irq_reserve_ipi()
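
irq_reserve_ipi() checks that the destination mask is one consecutive CPU range: find the first clear bit after the offset, then verify no set bit follows it. A sketch of that check over a plain 64-bit mask instead of a real cpumask, with hypothetical helpers mirroring cpumask_next_zero()/cpumask_next():

#include <stdbool.h>
#include <stdint.h>

#define NR_CPUS 64

static int next_set_bit(uint64_t mask, int from)     /* ~cpumask_next() */
{
    for (int i = from + 1; i < NR_CPUS; i++)
        if (mask & (1ULL << i))
            return i;
    return NR_CPUS;              /* "nr_cpu_ids": nothing found */
}

static int next_zero_bit(uint64_t mask, int from)    /* ~cpumask_next_zero() */
{
    for (int i = from + 1; i < NR_CPUS; i++)
        if (!(mask & (1ULL << i)))
            return i;
    return NR_CPUS;
}

static bool is_consecutive(uint64_t dest, int offset)
{
    int next = next_zero_bit(dest, offset);

    if (next < NR_CPUS)
        next = next_set_bit(dest, next);
    return next >= NR_CPUS;      /* a set bit past the hole means a gap */
}
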
/kernel/bpf/
percpu_freelist.c
33 node->next = head->first; in pcpu_freelist_push_node()
134 WRITE_ONCE(head->first, node->next); in ___pcpu_freelist_pop()
147 WRITE_ONCE(s->extralist.first, node->next); in ___pcpu_freelist_pop()
166 WRITE_ONCE(head->first, node->next); in ___pcpu_freelist_pop_nmi()
179 WRITE_ONCE(s->extralist.first, node->next); in ___pcpu_freelist_pop_nmi()
devmap.c
212 struct hlist_node *next; in dev_map_free() local
216 hlist_for_each_entry_safe(dev, next, head, index_hlist) { in dev_map_free()
250 u32 *next = next_key; in dev_map_get_next_key() local
253 *next = 0; in dev_map_get_next_key()
259 *next = index + 1; in dev_map_get_next_key()
285 u32 idx, *next = next_key; in dev_map_hash_get_next_key() local
303 *next = next_dev->idx; in dev_map_hash_get_next_key()
318 *next = next_dev->idx; in dev_map_hash_get_next_key()
709 struct hlist_node *next; in dev_map_redirect_multi() local
745 hlist_for_each_entry_safe(dst, next, head, index_hlist) { in dev_map_redirect_multi()
[all …]
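
dev_map_get_next_key() (lines 250-259) implements the bpf map iteration contract for arrays: a missing or out-of-range key restarts at slot 0, the last slot ends the walk, and otherwise the next key is simply index + 1. A sketch of that contract, with hypothetical names:

#include <errno.h>
#include <stdint.h>

static int array_get_next_key(uint32_t max_entries, const uint32_t *key,
                              uint32_t *next)
{
    if (!key || *key >= max_entries) {
        *next = 0;                /* invalid key: restart iteration at 0 */
        return 0;
    }
    if (*key == max_entries - 1)
        return -ENOENT;           /* iteration finished */
    *next = *key + 1;             /* *next = index + 1 */
    return 0;
}
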
/kernel/futex/
core.c
800 if (fetch_robust_entry(&entry, &head->list.next, &pi)) in exit_robust_list()
820 rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi); in exit_robust_list()
895 if (compat_fetch_robust_entry(&uentry, &entry, &head->list.next, &pi)) in compat_exit_robust_list()
917 (compat_uptr_t __user *)&entry->next, &next_pi); in compat_exit_robust_list()
959 struct list_head *next, *head = &curr->pi_state_list; in exit_pi_state_list() local
971 next = head->next; in exit_pi_state_list()
972 pi_state = list_entry(next, struct futex_pi_state, list); in exit_pi_state_list()
1001 if (head->next != next) { in exit_pi_state_list()
