
Searched refs:next (Results 1 – 25 of 117) sorted by relevance


/kernel/locking/
osq_lock.c 46 struct optimistic_spin_node *next = NULL; in osq_wait_next() local
78 if (node->next) { in osq_wait_next()
79 next = xchg(&node->next, NULL); in osq_wait_next()
80 if (next) in osq_wait_next()
87 return next; in osq_wait_next()
93 struct optimistic_spin_node *prev, *next; in osq_lock() local
98 node->next = NULL; in osq_lock()
126 WRITE_ONCE(prev->next, node); in osq_lock()
160 if (prev->next == node && in osq_lock()
161 cmpxchg(&prev->next, node, NULL) == node) in osq_lock()
[all …]
mcs_spinlock.h 19 struct mcs_spinlock *next; member
71 node->next = NULL; in mcs_spin_lock()
91 WRITE_ONCE(prev->next, node); in mcs_spin_lock()
104 struct mcs_spinlock *next = READ_ONCE(node->next); in mcs_spin_unlock() local
106 if (likely(!next)) { in mcs_spin_unlock()
113 while (!(next = READ_ONCE(node->next))) in mcs_spin_unlock()
118 arch_mcs_spin_unlock_contended(&next->locked); in mcs_spin_unlock()
qspinlock.c 316 struct mcs_spinlock *prev, *next, *node; in queued_spin_lock_slowpath() local
434 node->next = NULL; in queued_spin_lock_slowpath()
460 next = NULL; in queued_spin_lock_slowpath()
470 WRITE_ONCE(prev->next, node); in queued_spin_lock_slowpath()
481 next = READ_ONCE(node->next); in queued_spin_lock_slowpath()
482 if (next) in queued_spin_lock_slowpath()
483 prefetchw(next); in queued_spin_lock_slowpath()
549 if (!next) in queued_spin_lock_slowpath()
550 next = smp_cond_load_relaxed(&node->next, (VAL)); in queued_spin_lock_slowpath()
552 arch_mcs_spin_unlock_contended(&next->locked); in queued_spin_lock_slowpath()
[all …]
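
The locking results above all implement the same MCS-style queue: each waiter enqueues a per-CPU node, publishes itself through prev->next, and spins locally until its predecessor hands the lock over via that next pointer. Below is a minimal userspace sketch of the pattern (not the kernel code itself), using C11 atomics and illustrative names (mcs_node, mcs_lock, mcs_unlock) in place of the kernel's primitives:

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct mcs_node {
	struct mcs_node *_Atomic next;	/* successor in the wait queue */
	atomic_bool locked;		/* set by the predecessor at hand-off */
};

void mcs_lock(struct mcs_node *_Atomic *tail, struct mcs_node *node)
{
	struct mcs_node *prev;

	atomic_store_explicit(&node->next, NULL, memory_order_relaxed);
	atomic_store_explicit(&node->locked, false, memory_order_relaxed);

	/* Atomically append ourselves at the tail of the waiter queue. */
	prev = atomic_exchange_explicit(tail, node, memory_order_acq_rel);
	if (!prev)
		return;		/* queue was empty: lock acquired immediately */

	/* Publish ourselves to the predecessor, then spin on our own flag. */
	atomic_store_explicit(&prev->next, node, memory_order_release);
	while (!atomic_load_explicit(&node->locked, memory_order_acquire))
		;
}

void mcs_unlock(struct mcs_node *_Atomic *tail, struct mcs_node *node)
{
	struct mcs_node *next = atomic_load_explicit(&node->next, memory_order_acquire);

	if (!next) {
		struct mcs_node *expected = node;

		/* No visible successor: try to mark the queue empty. */
		if (atomic_compare_exchange_strong(tail, &expected, NULL))
			return;
		/* A new waiter raced with us; wait until it links node->next. */
		while (!(next = atomic_load_explicit(&node->next, memory_order_acquire)))
			;
	}
	/* Hand the lock over to the successor. */
	atomic_store_explicit(&next->locked, true, memory_order_release);
}

The unlock path mirrors the osq_wait_next()/mcs_spin_unlock() hits above: an apparently missing successor may simply not have linked itself yet, so the owner either resets the tail or waits for next to become non-NULL.
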
/kernel/
cfi.c 99 struct cfi_shadow *next) in prepare_next_shadow() argument
104 memset(next->shadow, 0xFF, sizeof(next->shadow)); in prepare_next_shadow()
110 if (prev->r.min_page == next->r.min_page) { in prepare_next_shadow()
111 memcpy(next->shadow, prev->shadow, sizeof(next->shadow)); in prepare_next_shadow()
120 index = ptr_to_shadow(next, shadow_to_page(prev, i)); in prepare_next_shadow()
124 check = ptr_to_shadow(next, in prepare_next_shadow()
129 next->shadow[index] = (u16)check; in prepare_next_shadow()
189 struct cfi_shadow *next = (struct cfi_shadow *) in update_shadow() local
192 next->r.mod_min_addr = min_addr; in update_shadow()
193 next->r.mod_max_addr = max_addr; in update_shadow()
[all …]
task_work.c 36 work->next = head; in task_work_add()
73 pprev = &work->next; in task_work_cancel()
74 else if (cmpxchg(pprev, work, work->next) == work) in task_work_cancel()
93 struct callback_head *work, *head, *next; in task_work_run() local
112 next = work->next; in task_work_run()
114 work = next; in task_work_run()
resource.c 134 .next = r_next,
616 goto next; in __find_resource()
638 next: if (!this || this->end == root->end) in __find_resource()
783 struct resource *first, *next; in __insert_resource() local
801 for (next = first; ; next = next->sibling) { in __insert_resource()
803 if (next->start < new->start || next->end > new->end) in __insert_resource()
804 return next; in __insert_resource()
805 if (!next->sibling) in __insert_resource()
807 if (next->sibling->start > new->end) in __insert_resource()
812 new->sibling = next->sibling; in __insert_resource()
[all …]
jump_label.c 511 struct static_key_mod *next; member
535 key->next = mod; in static_key_set_mod()
561 for (mod = static_key_mod(key); mod; mod = mod->next) { in __jump_label_mod_update()
650 jlm2->next = NULL; in jump_label_add_module()
656 jlm->next = static_key_mod(key); in jump_label_add_module()
689 prev = &key->next; in jump_label_del_module()
693 prev = &jlm->next; in jump_label_del_module()
694 jlm = jlm->next; in jump_label_del_module()
701 if (prev == &key->next) in jump_label_del_module()
702 static_key_set_mod(key, jlm->next); in jump_label_del_module()
[all …]
notifier.c 29 nl = &((*nl)->next); in notifier_chain_register()
31 n->next = *nl; in notifier_chain_register()
44 nl = &((*nl)->next); in notifier_chain_cond_register()
46 n->next = *nl; in notifier_chain_cond_register()
56 rcu_assign_pointer(*nl, n->next); in notifier_chain_unregister()
59 nl = &((*nl)->next); in notifier_chain_unregister()
86 next_nb = rcu_dereference_raw(nb->next); in notifier_call_chain()
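
notifier.c walks the chain with a pointer to the next field itself (nl = &((*nl)->next)), so insertion and removal need no special case for the list head. A simplified sketch of that pointer-to-pointer idiom, with illustrative names and plain pointers in place of the kernel's RCU accessors:

struct chain_node {
	struct chain_node *next;
	int priority;
};

static void chain_register(struct chain_node **head, struct chain_node *n)
{
	struct chain_node **link = head;

	/* Walk to the first entry with a lower priority. */
	while (*link && (*link)->priority >= n->priority)
		link = &(*link)->next;

	n->next = *link;
	*link = n;
}

static void chain_unregister(struct chain_node **head, struct chain_node *n)
{
	struct chain_node **link = head;

	while (*link) {
		if (*link == n) {
			*link = n->next;	/* unlink in place */
			return;
		}
		link = &(*link)->next;
	}
}
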
softirq.c 480 t->next = NULL; in __tasklet_schedule_common()
482 head->tail = &(t->next); in __tasklet_schedule_common()
516 list = list->next; in tasklet_action_common()
531 t->next = NULL; in tasklet_action_common()
533 tl_head->tail = &t->next; in tasklet_action_common()
552 t->next = NULL; in tasklet_init()
632 for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) { in tasklet_kill_immediate()
634 *i = t->next; in tasklet_kill_immediate()
/kernel/debug/kdb/
kdb_support.c 693 u32 next; /* offset of next header from start of pool */ member
768 if (!h->next) in debug_kmalloc()
771 h = (struct debug_alloc_header *)(debug_alloc_pool + h->next); in debug_kmalloc()
777 if (best->next == 0 && bestprev == NULL && rem < dah_overhead) in debug_kmalloc()
785 h->next = best->next; in debug_kmalloc()
787 h_offset = best->next; in debug_kmalloc()
792 bestprev->next = h_offset; in debug_kmalloc()
825 h->next = dah_first; in debug_kfree()
833 if (!prev->next || prev->next > h_offset) in debug_kfree()
836 (debug_alloc_pool + prev->next); in debug_kfree()
[all …]
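
The kdb debug allocator links its headers by pool offset rather than by pointer: next holds the offset of the following header from the start of a static pool, with 0 marking the end of the free list. A rough sketch of that layout, with hypothetical names and sizes:

#include <stdint.h>
#include <stddef.h>

#define POOL_SIZE 4096u

struct pool_hdr {
	uint32_t next;	/* offset of the next free header from pool[0]; 0 = end */
	uint32_t size;	/* usable bytes that follow this header */
};

static unsigned char pool[POOL_SIZE];
static uint32_t first_free;	/* offset of the first free header */

static inline struct pool_hdr *hdr_at(uint32_t offset)
{
	return (struct pool_hdr *)(pool + offset);
}

static void pool_init(void)
{
	struct pool_hdr *h = hdr_at(0);

	h->next = 0;				/* single free block, end of list */
	h->size = POOL_SIZE - sizeof(*h);
	first_free = 0;
}

/* First-fit walk over the offset-linked free list. */
static struct pool_hdr *pool_find(uint32_t want)
{
	struct pool_hdr *h = hdr_at(first_free);

	for (;;) {
		if (h->size >= want)
			return h;
		if (!h->next)
			return NULL;		/* ran off the end of the free list */
		h = hdr_at(h->next);
	}
}
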
/kernel/rcu/
tiny.c 79 struct rcu_head *next, *list; in rcu_process_callbacks() local
99 next = list->next; in rcu_process_callbacks()
100 prefetch(next); in rcu_process_callbacks()
105 list = next; in rcu_process_callbacks()
139 head->next = NULL; in call_rcu()
143 rcu_ctrlblk.curtail = &head->next; in call_rcu()
rcu_segcblist.c 35 rclp->tail = &rhp->next; in rcu_cblist_enqueue()
61 rhp->next = NULL; in rcu_cblist_flush_enqueue()
63 srclp->tail = &rhp->next; in rcu_cblist_flush_enqueue()
84 rclp->head = rhp->next; in rcu_cblist_dequeue()
262 rhp->next = NULL; in rcu_segcblist_enqueue()
264 WRITE_ONCE(rsclp->tails[RCU_NEXT_TAIL], &rhp->next); in rcu_segcblist_enqueue()
288 rhp->next = NULL; in rcu_segcblist_entrain()
294 WRITE_ONCE(rsclp->tails[i], &rhp->next); in rcu_segcblist_entrain()
update.c 527 rhp->next = NULL; in call_rcu_tasks()
532 rcu_tasks_cbs_tail = &rhp->next; in call_rcu_tasks()
636 struct rcu_head *next; in rcu_tasks_kthread() local
777 next = list->next; in rcu_tasks_kthread()
781 list = next; in rcu_tasks_kthread()
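
The RCU results share one structure: a singly linked callback list whose tail field points at the last element's next field, so enqueue is O(1) and the processing loop saves list->next before invoking each callback (which may free the element). A self-contained sketch with illustrative names:

#include <stddef.h>

struct cb_head {
	struct cb_head *next;
	void (*func)(struct cb_head *head);
};

struct cb_list {
	struct cb_head *head;
	struct cb_head **tail;	/* points at the last ->next field (or at head) */
};

static void cb_list_init(struct cb_list *l)
{
	l->head = NULL;
	l->tail = &l->head;
}

static void cb_enqueue(struct cb_list *l, struct cb_head *cb,
		       void (*func)(struct cb_head *))
{
	cb->func = func;
	cb->next = NULL;
	*l->tail = cb;		/* link after the current last element */
	l->tail = &cb->next;	/* the new element now holds the tail */
}

static void cb_invoke_all(struct cb_list *l)
{
	struct cb_head *cb = l->head;

	cb_list_init(l);	/* detach the whole list before walking it */
	while (cb) {
		struct cb_head *next = cb->next;	/* cb may free itself */

		cb->func(cb);
		cb = next;
	}
}
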
/kernel/time/
tick-common.c 106 ktime_t next = dev->next_event; in tick_handle_periodic() local
127 next = ktime_add(next, tick_period); in tick_handle_periodic()
129 if (!clockevents_program_event(dev, next, false)) in tick_handle_periodic()
161 ktime_t next; in tick_setup_periodic() local
165 next = tick_next_period; in tick_setup_periodic()
171 if (!clockevents_program_event(dev, next, false)) in tick_setup_periodic()
173 next = ktime_add(next, tick_period); in tick_setup_periodic()
ntp.c 504 struct timespec64 next; in sched_sync_hw_clock() local
506 ktime_get_real_ts64(&next); in sched_sync_hw_clock()
508 next.tv_sec = 659; in sched_sync_hw_clock()
516 next.tv_sec = 0; in sched_sync_hw_clock()
520 next.tv_nsec = target_nsec - next.tv_nsec; in sched_sync_hw_clock()
521 if (next.tv_nsec <= 0) in sched_sync_hw_clock()
522 next.tv_nsec += NSEC_PER_SEC; in sched_sync_hw_clock()
523 if (next.tv_nsec >= NSEC_PER_SEC) { in sched_sync_hw_clock()
524 next.tv_sec++; in sched_sync_hw_clock()
525 next.tv_nsec -= NSEC_PER_SEC; in sched_sync_hw_clock()
[all …]
/kernel/sched/
stats.h 226 __sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next) in __sched_info_switch() argument
236 if (next != rq->idle) in __sched_info_switch()
237 sched_info_arrive(rq, next); in __sched_info_switch()
241 sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next) in sched_info_switch() argument
244 __sched_info_switch(rq, prev, next); in sched_info_switch()
252 # define sched_info_arrive(rq, next) do { } while (0) argument
253 # define sched_info_switch(rq, t, next) do { } while (0) argument
core.c 427 if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL))) in __wake_q_add()
434 head->lastp = &node->next; in __wake_q_add()
489 node = node->next; in wake_up_q()
490 task->wake_q.next = NULL; in wake_up_q()
3041 struct task_struct *next) in __fire_sched_out_preempt_notifiers() argument
3046 notifier->ops->sched_out(notifier, next); in __fire_sched_out_preempt_notifiers()
3051 struct task_struct *next) in fire_sched_out_preempt_notifiers() argument
3054 __fire_sched_out_preempt_notifiers(curr, next); in fire_sched_out_preempt_notifiers()
3065 struct task_struct *next) in fire_sched_out_preempt_notifiers() argument
3071 static inline void prepare_task(struct task_struct *next) in prepare_task() argument
[all …]
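
The core.c hits around lines 427-490 show the wake_q pattern: cmpxchg()ing node->next from NULL to a sentinel both claims the node (so a task can be queued at most once) and terminates the list, while lastp remembers where the next node should be linked. A rough C11 sketch with illustrative names; the kernel version adds the task refcounting and locking omitted here:

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

/* Sentinel that terminates the queue; never dereferenced. */
#define QUEUE_TAIL ((struct wake_node *)0x01)

struct wake_node {
	struct wake_node *_Atomic next;
};

struct wake_queue {
	struct wake_node *_Atomic first;
	struct wake_node *_Atomic *lastp;	/* where the next node gets linked */
};

static void wake_queue_init(struct wake_queue *q)
{
	q->first = QUEUE_TAIL;
	q->lastp = &q->first;
}

static bool wake_queue_add(struct wake_queue *q, struct wake_node *node)
{
	struct wake_node *expected = NULL;

	/* cmpxchg of ->next: fails if the node is already on some queue. */
	if (!atomic_compare_exchange_strong(&node->next, &expected, QUEUE_TAIL))
		return false;

	*q->lastp = node;		/* append behind the current last node */
	q->lastp = &node->next;
	return true;
}

static void wake_queue_run(struct wake_queue *q, void (*wake_fn)(struct wake_node *))
{
	struct wake_node *node = q->first;

	wake_queue_init(q);		/* detach everything before walking it */
	while (node != QUEUE_TAIL) {
		struct wake_node *next = node->next;

		node->next = NULL;	/* node may be requeued once this is done */
		wake_fn(node);
		node = next;
	}
}
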
/kernel/events/
uprobes.c 769 uc->next = uprobe->consumers; in consumer_add()
785 for (con = &uprobe->consumers; *con; con = &(*con)->next) { in consumer_del()
787 *con = uc->next; in consumer_del()
896 for (uc = uprobe->consumers; uc; uc = uc->next) { in filter_chain()
963 struct map_info *next; member
970 struct map_info *next = info->next; in free_map_info() local
972 return next; in free_map_info()
999 prev->next = NULL; in build_map_info()
1010 prev = prev->next; in build_map_info()
1011 info->next = curr; in build_map_info()
[all …]
/kernel/irq/
ipi.c 56 unsigned int next; in irq_reserve_ipi() local
69 next = cpumask_next_zero(offset, dest); in irq_reserve_ipi()
70 if (next < nr_cpu_ids) in irq_reserve_ipi()
71 next = cpumask_next(next, dest); in irq_reserve_ipi()
72 if (next < nr_cpu_ids) { in irq_reserve_ipi()
/kernel/trace/
ftrace.c 206 } else if (rcu_dereference_protected(ftrace_ops_list->next, in update_ftrace_function()
267 rcu_assign_pointer(ops->next, *list); in add_ftrace_ops()
289 rcu_dereference_protected(ops->next, in remove_ftrace_ops()
295 for (p = list; *p != &ftrace_list_end; p = &(*p)->next) in remove_ftrace_ops()
302 *p = (*p)->next; in remove_ftrace_ops()
400 struct ftrace_profile_page *next; member
442 pg = pg->next; in function_stat_next()
581 pg = pg->next; in ftrace_profile_reset()
621 pg->next = (void *)get_zeroed_page(GFP_KERNEL); in ftrace_profile_pages_init()
622 if (!pg->next) in ftrace_profile_pages_init()
[all …]
trace_sched_wakeup.c 377 struct task_struct *next, in tracing_sched_switch_trace() argument
393 entry->next_pid = next->pid; in tracing_sched_switch_trace()
394 entry->next_prio = next->prio; in tracing_sched_switch_trace()
395 entry->next_state = task_state_index(next); in tracing_sched_switch_trace()
396 entry->next_cpu = task_cpu(next); in tracing_sched_switch_trace()
432 struct task_struct *prev, struct task_struct *next) in probe_wakeup_sched_switch() argument
455 if (next != wakeup_task) in probe_wakeup_sched_switch()
470 if (unlikely(!tracer_enabled || next != wakeup_task)) in probe_wakeup_sched_switch()
477 tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc); in probe_wakeup_sched_switch()
/kernel/gcov/
gcc_4_7.c 89 struct gcov_info *next; member
127 return info->next; in gcov_info_next()
136 info->next = gcov_info_head; in gcov_info_link()
148 prev->next = info->next; in gcov_info_unlink()
150 gcov_info_head = info->next; in gcov_info_unlink()
282 dup->next = NULL; in gcov_info_dup()
gcc_3_4.c 75 struct gcov_info *next; member
114 return info->next; in gcov_info_next()
123 info->next = gcov_info_head; in gcov_info_link()
135 prev->next = info->next; in gcov_info_unlink()
137 gcov_info_head = info->next; in gcov_info_unlink()
/kernel/bpf/
devmap.c 240 struct hlist_node *next; in dev_map_free() local
244 hlist_for_each_entry_safe(dev, next, head, index_hlist) { in dev_map_free()
277 u32 *next = next_key; in dev_map_get_next_key() local
280 *next = 0; in dev_map_get_next_key()
286 *next = index + 1; in dev_map_get_next_key()
307 u32 idx, *next = next_key; in dev_map_hash_get_next_key() local
325 *next = next_dev->idx; in dev_map_hash_get_next_key()
340 *next = next_dev->idx; in dev_map_hash_get_next_key()
750 struct hlist_node *next; in dev_map_hash_remove_netdev() local
754 hlist_for_each_entry_safe(dev, next, head, index_hlist) { in dev_map_hash_remove_netdev()
/kernel/power/
snapshot.c 122 struct linked_page *next; member
180 safe_pages_list = safe_pages_list->next; in __get_safe_page()
208 lp->next = safe_pages_list; in recycle_safe_page()
239 struct linked_page *lp = list->next; in free_list_of_pages()
287 lp->next = ca->chain; in chain_alloc()
550 bm->cur.zone = list_entry(bm->zones.next, struct mem_zone_bm_rtree, in memory_bm_position_reset()
552 bm->cur.node = list_entry(bm->cur.zone->leaves.next, in memory_bm_position_reset()
848 bm->cur.node = list_entry(bm->cur.node->list.next, in rtree_next_node()
858 bm->cur.zone = list_entry(bm->cur.zone->list.next, in rtree_next_node()
860 bm->cur.node = list_entry(bm->cur.zone->leaves.next, in rtree_next_node()
[all …]
