Searched refs:next (Results 1 – 25 of 123) sorted by relevance


/kernel/locking/
osq_lock.c
46 struct optimistic_spin_node *next = NULL; in osq_wait_next() local
78 if (node->next) { in osq_wait_next()
79 next = xchg(&node->next, NULL); in osq_wait_next()
80 if (next) in osq_wait_next()
87 return next; in osq_wait_next()
93 struct optimistic_spin_node *prev, *next; in osq_lock() local
98 node->next = NULL; in osq_lock()
126 WRITE_ONCE(prev->next, node); in osq_lock()
161 if (data_race(prev->next) == node && in osq_lock()
162 cmpxchg(&prev->next, node, NULL) == node) in osq_lock()
[all …]
mcs_spinlock.h
19 struct mcs_spinlock *next; member
71 node->next = NULL; in mcs_spin_lock()
91 WRITE_ONCE(prev->next, node); in mcs_spin_lock()
104 struct mcs_spinlock *next = READ_ONCE(node->next); in mcs_spin_unlock() local
106 if (likely(!next)) { in mcs_spin_unlock()
113 while (!(next = READ_ONCE(node->next))) in mcs_spin_unlock()
118 arch_mcs_spin_unlock_contended(&next->locked); in mcs_spin_unlock()
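
The osq_lock.c and mcs_spinlock.h hits above are two flavors of the same MCS queue lock: each waiter spins on a flag inside its own node, and the unlocker hands the lock over through node->next (qspinlock.c below builds its waiter queue from the same mcs_spinlock nodes). A minimal userspace sketch with C11 atomics; the names and busy-wait loops are mine, where the kernel uses xchg/cmpxchg, WRITE_ONCE and cpu_relax():

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct mcs_node {
    struct mcs_node *_Atomic next;
    atomic_bool locked;            /* waiter spins until this becomes true */
};

void mcs_lock(struct mcs_node *_Atomic *tail, struct mcs_node *node)
{
    atomic_store_explicit(&node->next, NULL, memory_order_relaxed);
    atomic_store_explicit(&node->locked, false, memory_order_relaxed);

    /* swap ourselves in as the new tail; the old tail is our predecessor */
    struct mcs_node *prev = atomic_exchange(tail, node);
    if (!prev)
        return;                    /* queue was empty: lock acquired */

    /* publish ourselves as prev's successor, then spin on our own flag */
    atomic_store(&prev->next, node);
    while (!atomic_load(&node->locked))
        ;
}

void mcs_unlock(struct mcs_node *_Atomic *tail, struct mcs_node *node)
{
    struct mcs_node *next = atomic_load(&node->next);
    if (!next) {
        /* no visible successor: try to swing the tail back to empty */
        struct mcs_node *expected = node;
        if (atomic_compare_exchange_strong(tail, &expected, NULL))
            return;
        /* a new waiter won the race; wait for it to link itself in */
        while (!(next = atomic_load(&node->next)))
            ;
    }
    atomic_store(&next->locked, true);     /* hand the lock over */
}

The subtle case is unlocking with no visible successor: a new waiter may already have swapped the tail but not yet stored prev->next, which is exactly why mcs_spin_unlock() above spins at line 113 before handing off.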
qspinlock.c
317 struct mcs_spinlock *prev, *next, *node; in queued_spin_lock_slowpath() local
435 node->next = NULL; in queued_spin_lock_slowpath()
461 next = NULL; in queued_spin_lock_slowpath()
471 WRITE_ONCE(prev->next, node); in queued_spin_lock_slowpath()
482 next = READ_ONCE(node->next); in queued_spin_lock_slowpath()
483 if (next) in queued_spin_lock_slowpath()
484 prefetchw(next); in queued_spin_lock_slowpath()
550 if (!next) in queued_spin_lock_slowpath()
551 next = smp_cond_load_relaxed(&node->next, (VAL)); in queued_spin_lock_slowpath()
553 arch_mcs_spin_unlock_contended(&next->locked); in queued_spin_lock_slowpath()
[all …]
lockdep.c
1576 __calc_dep_bit(struct held_lock *prev, struct held_lock *next) in __calc_dep_bit() argument
1578 return (prev->read == 0) + ((next->read != 2) << 1); in __calc_dep_bit()
1581 static inline u8 calc_dep(struct held_lock *prev, struct held_lock *next) in calc_dep() argument
1583 return 1U << __calc_dep_bit(prev, next); in calc_dep()
1591 __calc_dep_bitb(struct held_lock *prev, struct held_lock *next) in __calc_dep_bitb() argument
1593 return (next->read != 2) + ((prev->read == 0) << 1); in __calc_dep_bitb()
1596 static inline u8 calc_depb(struct held_lock *prev, struct held_lock *next) in calc_depb() argument
1598 return 1U << __calc_dep_bitb(prev, next); in calc_depb()
2506 struct held_lock *next, in print_bad_irq_dependency() argument
2526 print_lock(next); in print_bad_irq_dependency()
[all …]
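
The __calc_dep_bit() hits pack a dependency's read/write flavor into a 2-bit index: bit 0 says prev was held for write, bit 1 says next is not a recursive read, and calc_dep() turns that into a one-hot mask (calc_depb() is the same with prev and next swapped, for the backward direction). A standalone illustration, with held_lock reduced to just its read field and the main() added as a demo:

#include <stdio.h>

struct held_lock { int read; };  /* 0 = write, 1 = read, 2 = recursive read */

static unsigned char calc_dep(const struct held_lock *prev,
                              const struct held_lock *next)
{
    /* bit 0: prev held for write; bit 1: next is not a recursive read */
    return 1U << ((prev->read == 0) + ((next->read != 2) << 1));
}

int main(void)
{
    struct held_lock w = { .read = 0 }, r = { .read = 1 }, rr = { .read = 2 };
    printf("W->W %#x  W->RR %#x  R->W %#x  RR->RR %#x\n",
           calc_dep(&w, &w), calc_dep(&w, &rr),
           calc_dep(&r, &w), calc_dep(&rr, &rr));
    return 0;
}

Running this prints 0x8, 0x2, 0x4 and 0x1: one distinct mask per combination, so a single u8 can record every dependency flavor ever seen between two lock classes.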
/kernel/
cfi.c
112 struct cfi_shadow *next) in prepare_next_shadow() argument
117 memset(next->shadow, 0xFF, SHADOW_ARR_SIZE); in prepare_next_shadow()
123 if (prev->base == next->base) { in prepare_next_shadow()
124 memcpy(next->shadow, prev->shadow, SHADOW_ARR_SIZE); in prepare_next_shadow()
133 index = ptr_to_shadow(next, shadow_to_ptr(prev, i)); in prepare_next_shadow()
137 check = ptr_to_shadow(next, in prepare_next_shadow()
142 next->shadow[index] = (shadow_t)check; in prepare_next_shadow()
201 struct cfi_shadow *next; in update_shadow() local
204 next = vmalloc(SHADOW_SIZE); in update_shadow()
210 if (next) { in update_shadow()
[all …]
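
In the cfi.c hits, both a slot's index and the value it stores are offsets relative to the shadow's base address, so when update_shadow() allocates a replacement at a different base, prepare_next_shadow() must rebase both. A reduced sketch of that idea; SHADOW_ENTRIES, the 4 KiB granule, and the helper names are assumed stand-ins, not the kernel's:

#include <string.h>

#define SHADOW_ENTRIES 4096
#define INVALID 0xFFFF

struct shadow {
    unsigned long base;                  /* start of the covered region */
    unsigned short entry[SHADOW_ENTRIES];
};

/* simplified stand-ins for the kernel's ptr_to_shadow()/shadow_to_ptr() */
static long ptr_to_index(const struct shadow *s, unsigned long ptr)
{
    long i = (long)((ptr - s->base) >> 12);
    return (i < 0 || i >= SHADOW_ENTRIES) ? -1 : i;
}

static unsigned long index_to_ptr(const struct shadow *s, long i)
{
    return s->base + ((unsigned long)i << 12);
}

static void prepare_next_shadow(const struct shadow *prev, struct shadow *next)
{
    memset(next->entry, 0xFF, sizeof(next->entry));   /* all invalid */

    if (prev->base == next->base) {                   /* same base: copy */
        memcpy(next->entry, prev->entry, sizeof(next->entry));
        return;
    }

    /* base moved: rebase every valid slot index and its stored value */
    for (long i = 0; i < SHADOW_ENTRIES; i++) {
        if (prev->entry[i] == INVALID)
            continue;
        long idx = ptr_to_index(next, index_to_ptr(prev, i));
        long val = ptr_to_index(next, index_to_ptr(prev, prev->entry[i]));
        if (idx >= 0 && val >= 0)
            next->entry[idx] = (unsigned short)val;
    }
}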
task_work.c
44 work->next = head; in task_work_add()
92 pprev = &work->next; in task_work_cancel_match()
93 else if (cmpxchg(pprev, work, work->next) == work) in task_work_cancel_match()
134 struct callback_head *work, *head, *next; in task_work_run() local
163 next = work->next; in task_work_run()
165 work = next; in task_work_run()
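
task_work.c treats the per-task callback list as a lock-free LIFO: task_work_add() pushes with a cmpxchg retry loop, and task_work_run() detaches the whole chain with one atomic swap before walking it. A userspace sketch with C11 atomics standing in for the kernel's cmpxchg/xchg:

#include <stdatomic.h>
#include <stddef.h>

struct callback_head {
    struct callback_head *next;
    void (*func)(struct callback_head *);
};

/* push, as in task_work_add(): retry until the head we read is still current */
static void work_push(struct callback_head *_Atomic *head,
                      struct callback_head *work)
{
    struct callback_head *old = atomic_load(head);
    do {
        work->next = old;               /* cf. work->next = head above */
    } while (!atomic_compare_exchange_weak(head, &old, work));
}

/* run, as in task_work_run(): detach everything at once, then walk */
static void work_run(struct callback_head *_Atomic *head)
{
    struct callback_head *work = atomic_exchange(head, NULL);
    while (work) {
        struct callback_head *next = work->next;  /* func() may free work */
        work->func(work);
        work = next;
    }
}

task_work_cancel_match() above needs the extra pprev indirection because it can unlink an entry from the middle of the chain, not just the head.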
resource.c
125 .next = r_next,
590 goto next; in __find_resource()
612 next: if (!this || this->end == root->end) in __find_resource()
757 struct resource *first, *next; in __insert_resource() local
775 for (next = first; ; next = next->sibling) { in __insert_resource()
777 if (next->start < new->start || next->end > new->end) in __insert_resource()
778 return next; in __insert_resource()
779 if (!next->sibling) in __insert_resource()
781 if (next->sibling->start > new->end) in __insert_resource()
786 new->sibling = next->sibling; in __insert_resource()
[all …]
jump_label.c
522 struct static_key_mod *next; member
546 key->next = mod; in static_key_set_mod()
578 for (mod = static_key_mod(key); mod; mod = mod->next) { in __jump_label_mod_update()
668 jlm2->next = NULL; in jump_label_add_module()
674 jlm->next = static_key_mod(key); in jump_label_add_module()
707 prev = &key->next; in jump_label_del_module()
711 prev = &jlm->next; in jump_label_del_module()
712 jlm = jlm->next; in jump_label_del_module()
719 if (prev == &key->next) in jump_label_del_module()
720 static_key_set_mod(key, jlm->next); in jump_label_del_module()
[all …]
static_call_inline.c
146 .next = static_call_key_next(key), in __static_call_update()
151 for (site_mod = &first; site_mod; site_mod = site_mod->next) { in __static_call_update()
258 site_mod->next = NULL; in __static_call_init()
270 site_mod->next = static_call_key_next(key); in __static_call_init()
410 prev = &site_mod->next, site_mod = site_mod->next) in static_call_del_module()
416 *prev = site_mod->next; in static_call_del_module()
notifier.c
32 nl = &((*nl)->next); in notifier_chain_register()
34 n->next = *nl; in notifier_chain_register()
44 rcu_assign_pointer(*nl, n->next); in notifier_chain_unregister()
47 nl = &((*nl)->next); in notifier_chain_unregister()
74 next_nb = rcu_dereference_raw(nb->next); in notifier_call_chain()
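
The notifier.c hits use the pointer-to-pointer idiom: nl always names the slot under examination (either the chain head or some node's next field), so insertion and removal need no special head case and no prev pointer. A sketch of a priority-ordered chain; RCU publication and the kernel's locking are elided, and the plain *nl store stands where rcu_assign_pointer() appears above:

#include <stddef.h>

struct notifier_block {
    int (*call)(struct notifier_block *, unsigned long, void *);
    struct notifier_block *next;
    int priority;                        /* higher runs earlier */
};

/* insert in priority order; nl always points at the slot to rewrite */
static void chain_register(struct notifier_block **nl, struct notifier_block *n)
{
    while (*nl && (*nl)->priority >= n->priority)
        nl = &(*nl)->next;
    n->next = *nl;
    *nl = n;              /* the kernel publishes with rcu_assign_pointer() */
}

/* unlink without a prev pointer: rewrite the slot that points at n */
static void chain_unregister(struct notifier_block **nl, struct notifier_block *n)
{
    for (; *nl; nl = &(*nl)->next) {
        if (*nl == n) {
            *nl = n->next;
            return;
        }
    }
}

Walking slots rather than nodes is also what lets notifier_chain_unregister() above splice an entry out with a single store.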
audit_tree.c
544 struct audit_krule *rule, *next; in kill_rules() local
547 list_for_each_entry_safe(rule, next, &tree->rules, rlist) { in kill_rules()
618 for (p = tree->chunks.next; p != &tree->chunks; p = q) { in trim_marked()
620 q = p->next; in trim_marked()
683 while (cursor.next != &tree_list) { in audit_trim_trees()
690 tree = container_of(cursor.next, struct audit_tree, list); in audit_trim_trees()
767 victim = list_entry(prune_list.next, in prune_tree_thread()
895 while (cursor.next != &tree_list) { in audit_tag_tree()
899 tree = container_of(cursor.next, struct audit_tree, list); in audit_tag_tree()
981 victim = list_entry(list->next, struct audit_tree, list); in audit_kill_trees()
[all …]
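
audit_tree.c iterates bare struct list_head pointers and recovers the containing object with container_of()/list_entry(), the kernel's intrusive-list idiom. A self-contained miniature; struct audit_tree_like and the ids are invented for the demo, while list_add_tail matches the kernel's semantics:

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))
#define list_entry(ptr, type, member) container_of(ptr, type, member)

struct audit_tree_like {
    int id;
    struct list_head list;               /* linked into a global tree_list */
};

static void list_add_tail(struct list_head *new, struct list_head *head)
{
    new->prev = head->prev;
    new->next = head;
    head->prev->next = new;
    head->prev = new;
}

int main(void)
{
    struct list_head tree_list = { &tree_list, &tree_list };
    struct audit_tree_like a = { .id = 1 }, b = { .id = 2 };
    list_add_tail(&a.list, &tree_list);
    list_add_tail(&b.list, &tree_list);

    /* same shape as audit_trim_trees(): follow .next until back at the head */
    for (struct list_head *p = tree_list.next; p != &tree_list; p = p->next)
        printf("tree %d\n", list_entry(p, struct audit_tree_like, list)->id);
    return 0;
}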
/kernel/printk/
printk_ringbuffer.c
326 LPOS_DATALESS((blk)->next))
620 lpos_begin = blk_lpos->next; in data_make_reusable()
812 if (!data_push_tail(rb, desc.text_blk_lpos.next)) in desc_push_tail()
1035 blk_lpos->next = NO_LPOS; in data_alloc()
1049 blk_lpos->next = FAILED_LPOS; in data_alloc()
1088 blk_lpos->next = next_lpos; in data_alloc()
1116 if (head_lpos != blk_lpos->next) in data_realloc()
1120 wrapped = (DATA_WRAPS(data_ring, blk_lpos->begin) != DATA_WRAPS(data_ring, blk_lpos->next)); in data_realloc()
1165 (blk_lpos->next - blk_lpos->begin) - sizeof(blk->id)); in data_realloc()
1169 blk_lpos->next = next_lpos; in data_realloc()
[all …]
/kernel/rcu/
tiny.c
109 struct rcu_head *next, *list; in rcu_process_callbacks() local
129 next = list->next; in rcu_process_callbacks()
130 prefetch(next); in rcu_process_callbacks()
135 list = next; in rcu_process_callbacks()
169 head->next = NULL; in call_rcu()
173 rcu_ctrlblk.curtail = &head->next; in call_rcu()
rcu_segcblist.c
31 rclp->tail = &rhp->next; in rcu_cblist_enqueue()
56 rhp->next = NULL; in rcu_cblist_flush_enqueue()
58 srclp->tail = &rhp->next; in rcu_cblist_flush_enqueue()
75 rclp->head = rhp->next; in rcu_cblist_dequeue()
347 rhp->next = NULL; in rcu_segcblist_enqueue()
349 WRITE_ONCE(rsclp->tails[RCU_NEXT_TAIL], &rhp->next); in rcu_segcblist_enqueue()
371 rhp->next = NULL; in rcu_segcblist_entrain()
378 WRITE_ONCE(rsclp->tails[i], &rhp->next); in rcu_segcblist_entrain()
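
Both the rcu/tiny.c and rcu_segcblist.c hits keep a head pointer plus a tail that points at the last element's next field (or at the head slot when empty), which makes enqueue branch-free: *tail = rhp; tail = &rhp->next. A sketch of that tail-of-tails queue; the real rcu_segcblist generalizes it to one tail per grace-period segment:

#include <stddef.h>

struct rcu_head_like {
    struct rcu_head_like *next;
    void (*func)(struct rcu_head_like *);
};

struct cblist {
    struct rcu_head_like *head;
    struct rcu_head_like **tail;   /* points at head, or at last node's next */
};

static void cblist_init(struct cblist *l)
{
    l->head = NULL;
    l->tail = &l->head;
}

static void cblist_enqueue(struct cblist *l, struct rcu_head_like *rhp)
{
    rhp->next = NULL;
    *l->tail = rhp;          /* works for empty and non-empty lists alike */
    l->tail = &rhp->next;    /* mirrors rclp->tail = &rhp->next above */
}

static struct rcu_head_like *cblist_dequeue(struct cblist *l)
{
    struct rcu_head_like *rhp = l->head;
    if (!rhp)
        return NULL;
    l->head = rhp->next;
    if (!l->head)            /* emptied: retarget the tail at the head slot */
        l->tail = &l->head;
    return rhp;
}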
/kernel/time/
tick-common.c
112 ktime_t next = dev->next_event; in tick_handle_periodic() local
133 next = ktime_add_ns(next, TICK_NSEC); in tick_handle_periodic()
135 if (!clockevents_program_event(dev, next, false)) in tick_handle_periodic()
167 ktime_t next; in tick_setup_periodic() local
171 next = tick_next_period; in tick_setup_periodic()
177 if (!clockevents_program_event(dev, next, false)) in tick_setup_periodic()
179 next = ktime_add_ns(next, TICK_NSEC); in tick_setup_periodic()
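
tick_handle_periodic() reprograms the clock event device by repeatedly adding one tick period until the expiry lands in the future, so missed periods are skipped rather than replayed. A toy model; the 1 ms TICK_NSEC value and the program_event() failure rule are assumptions standing in for clockevents_program_event():

#include <stdint.h>
#include <stdio.h>

#define TICK_NSEC 1000000LL   /* 1 ms period, an arbitrary stand-in */

/* stand-in for clockevents_program_event(): programming an expiry in the
 * past fails, much as real hardware would miss it */
static int program_event(int64_t now, int64_t expires)
{
    return expires > now ? 0 : -1;
}

/* same shape as tick_handle_periodic(): keep adding one period until the
 * programmed expiry lands in the future */
static int64_t reprogram(int64_t now, int64_t next)
{
    for (;;) {
        next += TICK_NSEC;
        if (!program_event(now, next))
            return next;
    }
}

int main(void)
{
    /* we woke up ~3.5 periods late; the loop skips the missed ticks */
    printf("next expiry: %lld\n", (long long)reprogram(3500000, 0));
    return 0;
}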
/kernel/sched/
stats.h
156 struct task_struct *next, in psi_sched_switch() argument
162 psi_task_switch(prev, next, sleep); in psi_sched_switch()
170 struct task_struct *next, in psi_sched_switch() argument
252 sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next) in sched_info_switch() argument
262 if (next != rq->idle) in sched_info_switch()
263 sched_info_arrive(rq, next); in sched_info_switch()
269 # define sched_info_switch(rq, t, next) do { } while (0) argument
core.c
903 if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL))) in __wake_q_add()
910 head->lastp = &node->next; in __wake_q_add()
965 node = node->next; in wake_up_q()
966 task->wake_q.next = NULL; in wake_up_q()
4728 struct task_struct *next) in __fire_sched_out_preempt_notifiers() argument
4733 notifier->ops->sched_out(notifier, next); in __fire_sched_out_preempt_notifiers()
4738 struct task_struct *next) in fire_sched_out_preempt_notifiers() argument
4741 __fire_sched_out_preempt_notifiers(curr, next); in fire_sched_out_preempt_notifiers()
4752 struct task_struct *next) in fire_sched_out_preempt_notifiers() argument
4758 static inline void prepare_task(struct task_struct *next) in prepare_task() argument
[all …]
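
In the core.c hits, __wake_q_add() claims a task's wake_q node by flipping its next pointer from NULL to a sentinel, guaranteeing each task sits on at most one wake queue, and head->lastp is again a tail pointer for O(1) append. A userspace rendering with C11 atomics; wake_q_init(), the wake callback, and the atomics spelling are mine:

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct wake_q_node { struct wake_q_node *_Atomic next; };

#define WAKE_Q_TAIL ((struct wake_q_node *)0x01)   /* end-of-list sentinel */

struct wake_q_head {
    struct wake_q_node *_Atomic first;
    struct wake_q_node *_Atomic *lastp;  /* slot where the next node links in */
};

static void wake_q_init(struct wake_q_head *head)
{
    atomic_store(&head->first, WAKE_Q_TAIL);
    head->lastp = &head->first;
}

static bool wake_q_add(struct wake_q_head *head, struct wake_q_node *node)
{
    /* cf. line 903: only the caller that flips next from NULL wins */
    struct wake_q_node *expected = NULL;
    if (!atomic_compare_exchange_strong(&node->next, &expected, WAKE_Q_TAIL))
        return false;                    /* already queued somewhere */
    *head->lastp = node;                 /* append through the tail slot */
    head->lastp = &node->next;
    return true;
}

static void wake_q_run(struct wake_q_head *head,
                       void (*wake)(struct wake_q_node *))
{
    struct wake_q_node *node = atomic_load(&head->first);
    while (node != WAKE_Q_TAIL) {
        struct wake_q_node *next = atomic_load(&node->next);
        atomic_store(&node->next, NULL); /* re-arm for a future wake_q_add() */
        wake(node);
        node = next;
    }
}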
topology.c
118 group = group->next; in sched_domain_debug_one()
182 (sd->groups != sd->groups->next)) in sd_degenerate()
204 if (parent->groups == parent->groups->next) in sd_parent_degenerate()
253 tmp = pd->next; in free_pd()
264 pd = pd->next; in find_pd()
302 pd = pd->next; in perf_domain_debug()
403 tmp->next = pd; in build_perf_domains()
586 tmp = sg->next; in free_sched_groups()
1054 last->next = sg; in build_overlap_sched_groups()
1056 last->next = first; in build_overlap_sched_groups()
[all …]
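
The topology.c hits show that sched groups form a circular singly linked list: build_overlap_sched_groups() closes the ring with last->next = first, and sd_degenerate() detects a single-group domain by sd->groups == sd->groups->next. A miniature of both; struct sched_group_like is a stand-in:

#include <stdbool.h>
#include <stdio.h>

struct sched_group_like {
    int id;
    struct sched_group_like *next;   /* circular: last->next == first */
};

/* link an array of groups into a ring, as build_overlap_sched_groups()
 * does with last->next = sg and finally last->next = first */
static struct sched_group_like *link_ring(struct sched_group_like *g, int n)
{
    for (int i = 0; i < n - 1; i++)
        g[i].next = &g[i + 1];
    g[n - 1].next = &g[0];
    return &g[0];
}

/* one group only means nothing to balance between, cf. sd_degenerate() */
static bool single_group(const struct sched_group_like *first)
{
    return first->next == first;
}

int main(void)
{
    struct sched_group_like gs[3] = { { .id = 0 }, { .id = 1 }, { .id = 2 } };
    struct sched_group_like *first = link_ring(gs, 3);

    /* walk the ring exactly once */
    const struct sched_group_like *g = first;
    do {
        printf("group %d\n", g->id);
        g = g->next;
    } while (g != first);
    printf("degenerate: %d\n", single_group(first));
    return 0;
}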
/kernel/gcov/
gcc_4_7.c
96 struct gcov_info *next; member
138 return info->next; in gcov_info_next()
147 info->next = gcov_info_head; in gcov_info_link()
159 prev->next = info->next; in gcov_info_unlink()
161 gcov_info_head = info->next; in gcov_info_unlink()
293 dup->next = NULL; in gcov_info_dup()
/kernel/events/
uprobes.c
762 uc->next = uprobe->consumers; in consumer_add()
778 for (con = &uprobe->consumers; *con; con = &(*con)->next) { in consumer_del()
780 *con = uc->next; in consumer_del()
885 for (uc = uprobe->consumers; uc; uc = uc->next) { in filter_chain()
952 struct map_info *next; member
959 struct map_info *next = info->next; in free_map_info() local
961 return next; in free_map_info()
988 prev->next = NULL; in build_map_info()
999 prev = prev->next; in build_map_info()
1000 info->next = curr; in build_map_info()
[all …]
/kernel/irq/
ipi.c
56 unsigned int next; in irq_reserve_ipi() local
69 next = cpumask_next_zero(offset, dest); in irq_reserve_ipi()
70 if (next < nr_cpu_ids) in irq_reserve_ipi()
71 next = cpumask_next(next, dest); in irq_reserve_ipi()
72 if (next < nr_cpu_ids) { in irq_reserve_ipi()
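
irq_reserve_ipi() checks that the destination CPUs are consecutive: find the first hole after the first CPU with cpumask_next_zero(), then fail if cpumask_next() finds any set bit beyond it. The same logic over a plain 64-bit mask; next_zero()/next_set() are simplified stand-ins for the cpumask API:

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 64

/* first clear bit strictly after n, or NR_CPUS if none */
static unsigned next_zero(unsigned long mask, unsigned n)
{
    for (unsigned i = n + 1; i < NR_CPUS; i++)
        if (!(mask & (1UL << i)))
            return i;
    return NR_CPUS;
}

/* first set bit strictly after n, or NR_CPUS if none */
static unsigned next_set(unsigned long mask, unsigned n)
{
    for (unsigned i = n + 1; i < NR_CPUS; i++)
        if (mask & (1UL << i))
            return i;
    return NR_CPUS;
}

/* consecutive iff no set bit appears after the first hole,
 * mirroring the irq_reserve_ipi() check above */
static bool is_consecutive(unsigned long mask, unsigned first)
{
    unsigned hole = next_zero(mask, first);
    return next_set(mask, hole) >= NR_CPUS;
}

int main(void)
{
    printf("%d %d\n", is_consecutive(0x0EUL, 1),   /* CPUs 1-3: yes */
                      is_consecutive(0x2EUL, 1));  /* CPUs 1-3,5: no */
    return 0;
}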
/kernel/trace/
ftrace.c
200 } else if (rcu_dereference_protected(ftrace_ops_list->next, in update_ftrace_function()
261 rcu_assign_pointer(ops->next, *list); in add_ftrace_ops()
283 rcu_dereference_protected(ops->next, in remove_ftrace_ops()
289 for (p = list; *p != &ftrace_list_end; p = &(*p)->next) in remove_ftrace_ops()
296 *p = (*p)->next; in remove_ftrace_ops()
396 struct ftrace_profile_page *next; member
438 pg = pg->next; in function_stat_next()
577 pg = pg->next; in ftrace_profile_reset()
617 pg->next = (void *)get_zeroed_page(GFP_KERNEL); in ftrace_profile_pages_init()
618 if (!pg->next) in ftrace_profile_pages_init()
[all …]
trace_sched_wakeup.c
376 struct task_struct *next, in tracing_sched_switch_trace() argument
392 entry->next_pid = next->pid; in tracing_sched_switch_trace()
393 entry->next_prio = next->prio; in tracing_sched_switch_trace()
394 entry->next_state = task_state_index(next); in tracing_sched_switch_trace()
395 entry->next_cpu = task_cpu(next); in tracing_sched_switch_trace()
431 struct task_struct *prev, struct task_struct *next) in probe_wakeup_sched_switch() argument
454 if (next != wakeup_task) in probe_wakeup_sched_switch()
469 if (unlikely(!tracer_enabled || next != wakeup_task)) in probe_wakeup_sched_switch()
476 tracing_sched_switch_trace(wakeup_trace, prev, next, trace_ctx); in probe_wakeup_sched_switch()
/kernel/bpf/
percpu_freelist.c
33 node->next = head->first; in pcpu_freelist_push_node()
135 head->first = node->next; in ___pcpu_freelist_pop()
151 s->extralist.first = node->next; in ___pcpu_freelist_pop()
169 head->first = node->next; in ___pcpu_freelist_pop_nmi()
187 s->extralist.first = node->next; in ___pcpu_freelist_pop_nmi()
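
The percpu_freelist.c hits are a sharded free list: every CPU pushes and pops nodes on its own head, and a pop that finds the local list empty steals from the others. A sketch of the sharding only; the kernel adds a raw spinlock per head plus the extralist fallback seen above for NMI context:

#include <stddef.h>

#define NR_CPUS_SIM 4

struct fl_node { struct fl_node *next; };

struct freelist {
    /* one head per CPU so pushes and pops usually touch only local state */
    struct fl_node *head[NR_CPUS_SIM];
};

static void fl_push(struct freelist *s, int cpu, struct fl_node *node)
{
    node->next = s->head[cpu];          /* cf. pcpu_freelist_push_node() */
    s->head[cpu] = node;
}

static struct fl_node *fl_pop(struct freelist *s, int cpu)
{
    /* start at the local CPU, then steal from the others round-robin */
    for (int i = 0; i < NR_CPUS_SIM; i++) {
        int c = (cpu + i) % NR_CPUS_SIM;
        struct fl_node *node = s->head[c];
        if (node) {
            s->head[c] = node->next;    /* cf. ___pcpu_freelist_pop() */
            return node;
        }
    }
    return NULL;
}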
devmap.c
211 struct hlist_node *next; in dev_map_free() local
215 hlist_for_each_entry_safe(dev, next, head, index_hlist) { in dev_map_free()
249 u32 *next = next_key; in dev_map_get_next_key() local
252 *next = 0; in dev_map_get_next_key()
258 *next = index + 1; in dev_map_get_next_key()
284 u32 idx, *next = next_key; in dev_map_hash_get_next_key() local
302 *next = next_dev->idx; in dev_map_hash_get_next_key()
317 *next = next_dev->idx; in dev_map_hash_get_next_key()
718 struct hlist_node *next; in dev_map_redirect_multi() local
754 hlist_for_each_entry_safe(dst, next, head, index_hlist) { in dev_map_redirect_multi()
[all …]
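
dev_map_get_next_key() implements the bpf map iteration contract for an array-style map: an out-of-range (or absent) key restarts at 0, the last key returns -ENOENT, and anything else yields key + 1. A userspace sketch of that contract; get_next_key() here is a stand-in, not the kernel function:

#include <errno.h>
#include <stdio.h>

/* iteration contract of map_get_next_key for an array-style map of
 * max_entries slots, cf. dev_map_get_next_key() above */
static int get_next_key(unsigned max_entries, const unsigned *key,
                        unsigned *next)
{
    if (!key || *key >= max_entries) {
        *next = 0;                   /* restart from the beginning */
        return 0;
    }
    if (*key == max_entries - 1)
        return -ENOENT;              /* iteration complete */
    *next = *key + 1;
    return 0;
}

int main(void)
{
    unsigned key = 0, next;
    for (int err = get_next_key(4, NULL, &next); !err;
         key = next, err = get_next_key(4, &key, &next))
        printf("key %u\n", next);    /* prints keys 0..3, then stops */
    return 0;
}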
