
Lines Matching refs:next
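
(The matches below are from the scheduler core, kernel/sched/core.c; the trace_android_rvh_schedule hook at line 4769 marks this as an Android common kernel, likely the 5.10 branch. After each cluster of matches, a short sketch of the surrounding mechanism follows; the sketches paraphrase the source, elide secondary checks, and keep the matched line numbers as comments for orientation, so treat them as a reading aid rather than the exact code.)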

543 if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL))) in __wake_q_add()
550 head->lastp = &node->next; in __wake_q_add()
606 node = node->next; in wake_up_q()
607 task->wake_q.next = NULL; in wake_up_q()
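
Lines 543-607 belong to the lock-free wake queue. __wake_q_add() claims a task's embedded wake_q node by cmpxchg'ing its ->next from NULL to the WAKE_Q_TAIL sentinel (a non-NULL ->next means the task is already queued), then splices it at the tail; wake_up_q() walks the chain, resetting each ->next to NULL before the wakeup so the node can be reused. A sketch of both sides, paraphrased from core.c:

static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
{
	struct wake_q_node *node = &task->wake_q;

	/*
	 * Atomically claim the node: if ->next is already non-NULL the
	 * task is queued (by us or someone else) and will get the wakeup.
	 */
	smp_mb__before_atomic();
	if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))	/* 543 */
		return false;

	/* Append at the tail; WAKE_Q_TAIL terminates the list. */
	*head->lastp = node;
	head->lastp = &node->next;					/* 550 */
	return true;
}

void wake_up_q(struct wake_q_head *head)
{
	struct wake_q_node *node = head->first;

	while (node != WAKE_Q_TAIL) {
		struct task_struct *task;

		task = container_of(node, struct task_struct, wake_q);
		/* The task can safely be re-queued from here on: */
		node = node->next;					/* 606 */
		task->wake_q.next = NULL;				/* 607 */

		wake_up_process(task);	/* full barrier, pairs with the add */
		put_task_struct(task);
	}
}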
3690 struct task_struct *next) in __fire_sched_out_preempt_notifiers() argument
3695 notifier->ops->sched_out(notifier, next); in __fire_sched_out_preempt_notifiers()
3700 struct task_struct *next) in fire_sched_out_preempt_notifiers() argument
3703 __fire_sched_out_preempt_notifiers(curr, next); in fire_sched_out_preempt_notifiers()
3714 struct task_struct *next) in fire_sched_out_preempt_notifiers() argument
3720 static inline void prepare_task(struct task_struct *next) in prepare_task() argument
3729 WRITE_ONCE(next->on_cpu, 1); in prepare_task()
3752 prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf) in prepare_lock_switch() argument
3764 rq->lock.owner = next; in prepare_lock_switch()
3784 # define prepare_arch_switch(next) do { } while (0) argument
3806 struct task_struct *next) in prepare_task_switch() argument
3809 sched_info_switch(rq, prev, next); in prepare_task_switch()
3810 perf_event_task_sched_out(prev, next); in prepare_task_switch()
3812 fire_sched_out_preempt_notifiers(prev, next); in prepare_task_switch()
3813 prepare_task(next); in prepare_task_switch()
3814 prepare_arch_switch(next); in prepare_task_switch()
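
Lines 3690-3814 are the switch-out preparation path. prepare_task_switch() runs under the runqueue lock with interrupts disabled and fans out to scheduler statistics, perf, the sched_out preempt notifiers, and prepare_task(), which publishes next->on_cpu = 1 via WRITE_ONCE() before the stack switch so concurrent wakeups see the task as running; prepare_lock_switch() then hands lockdep ownership of rq->lock to next, because next, not prev, will release the lock after the switch. A condensed sketch (kcov and rseq hooks elided):

static void __fire_sched_out_preempt_notifiers(struct task_struct *curr,
					       struct task_struct *next)
{
	struct preempt_notifier *notifier;

	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
		notifier->ops->sched_out(notifier, next);	/* 3695 */
}

static inline void
prepare_task_switch(struct rq *rq, struct task_struct *prev,
		    struct task_struct *next)
{
	sched_info_switch(rq, prev, next);		/* 3809 */
	perf_event_task_sched_out(prev, next);		/* 3810 */
	fire_sched_out_preempt_notifiers(prev, next);	/* 3812 */
	prepare_task(next);	/* WRITE_ONCE(next->on_cpu, 1) on SMP */
	prepare_arch_switch(next);	/* no-op unless the arch defines it */
}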
3923 struct callback_head *head, *next; in __balance_callback() local
3932 next = head->next; in __balance_callback()
3933 head->next = NULL; in __balance_callback()
3934 head = next; in __balance_callback()
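
Lines 3923-3934 drain the runqueue's balance-callback list: __balance_callback() detaches the whole rq->balance_callback chain under rq->lock, then unlinks each callback_head (clearing ->next marks it as no longer queued) before invoking it. A sketch along the same lines:

static void __balance_callback(struct rq *rq)
{
	struct callback_head *head, *next;
	void (*func)(struct rq *rq);
	unsigned long flags;

	raw_spin_lock_irqsave(&rq->lock, flags);
	head = rq->balance_callback;
	rq->balance_callback = NULL;
	while (head) {
		func = (void (*)(struct rq *))head->func;
		next = head->next;		/* 3932 */
		head->next = NULL;		/* 3933: mark as unqueued */
		head = next;			/* 3934 */

		func(rq);
	}
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}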
3988 struct task_struct *next, struct rq_flags *rf) in context_switch() argument
3990 prepare_task_switch(rq, prev, next); in context_switch()
4006 if (!next->mm) { // to kernel in context_switch()
4007 enter_lazy_tlb(prev->active_mm, next); in context_switch()
4009 next->active_mm = prev->active_mm; in context_switch()
4015 membarrier_switch_mm(rq, prev->active_mm, next->mm); in context_switch()
4024 switch_mm_irqs_off(prev->active_mm, next->mm, next); in context_switch()
4035 prepare_lock_switch(rq, next, rf); in context_switch()
4038 switch_to(prev, next, prev); in context_switch()
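
Lines 3988-4038 are context_switch() itself. The mm handling distinguishes the four kernel/user transitions: a switch to a kernel thread (!next->mm) stays lazy and borrows prev->active_mm, grabbing a reference if prev was a user task; a switch to a user task goes through membarrier_switch_mm() and switch_mm_irqs_off() to install next->mm. prepare_lock_switch() and switch_to() then perform the actual register and stack switch. A condensed sketch (paravirt hooks and clock-flag bookkeeping elided):

static __always_inline struct rq *
context_switch(struct rq *rq, struct task_struct *prev,
	       struct task_struct *next, struct rq_flags *rf)
{
	prepare_task_switch(rq, prev, next);		/* 3990 */

	if (!next->mm) {				/* 4006: to kernel */
		enter_lazy_tlb(prev->active_mm, next);	/* 4007 */
		next->active_mm = prev->active_mm;	/* 4009: borrow mm */
		if (prev->mm)				/* from user */
			mmgrab(prev->active_mm);
		else
			prev->active_mm = NULL;
	} else {					/* to user */
		membarrier_switch_mm(rq, prev->active_mm, next->mm); /* 4015 */
		switch_mm_irqs_off(prev->active_mm, next->mm, next); /* 4024 */
		if (!prev->mm) {			/* from kernel */
			rq->prev_mm = prev->active_mm;	/* mmdrop() later */
			prev->active_mm = NULL;
		}
	}

	prepare_lock_switch(rq, next, rf);		/* 4035 */

	/* Switch the register state and the stack. */
	switch_to(prev, next, prev);			/* 4038 */
	barrier();

	return finish_task_switch(prev);
}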
4681 struct task_struct *prev, *next; in __schedule() local
4765 next = pick_next_task(rq, prev, &rf); in __schedule()
4769 trace_android_rvh_schedule(prev, next, rq); in __schedule()
4770 if (likely(prev != next)) { in __schedule()
4776 RCU_INIT_POINTER(rq->curr, next); in __schedule()
4793 psi_sched_switch(prev, next, !task_on_rq_queued(prev)); in __schedule()
4795 trace_sched_switch(preempt, prev, next); in __schedule()
4798 rq = context_switch(rq, prev, next, &rf); in __schedule()
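
Lines 4681-4798 are the core of __schedule(): pick the next task, fire the vendor hook (trace_android_rvh_schedule is an Android common kernel addition, not upstream), and only when prev != next publish rq->curr with RCU semantics, emit the PSI and tracepoint bookkeeping, and switch. A condensed sketch of the decision:

	next = pick_next_task(rq, prev, &rf);		/* 4765 */
	clear_tsk_need_resched(prev);
	clear_preempt_need_resched();

	trace_android_rvh_schedule(prev, next, rq);	/* 4769: ACK hook */
	if (likely(prev != next)) {			/* 4770 */
		rq->nr_switches++;
		/*
		 * RCU users of rq->curr may read it without rq->lock;
		 * publish the new task before the switch.
		 */
		RCU_INIT_POINTER(rq->curr, next);	/* 4776 */
		/* membarrier and switch_count accounting elided */

		psi_sched_switch(prev, next, !task_on_rq_queued(prev)); /* 4793 */
		trace_sched_switch(preempt, prev, next);	/* 4795 */
		rq = context_switch(rq, prev, next, &rf);	/* 4798 */
	} else {
		rq_unlock_irq(rq, &rf);
	}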
6986 struct task_struct *next; in __pick_migrate_task() local
6989 next = class->pick_next_task(rq); in __pick_migrate_task()
6990 if (next) { in __pick_migrate_task()
6991 next->sched_class->put_prev_task(rq, next); in __pick_migrate_task()
6992 return next; in __pick_migrate_task()
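
Lines 6986-6992: during CPU hot-unplug, __pick_migrate_task() asks each scheduling class in priority order for a runnable task and immediately put_prev_task()s it so it is in a consistent state for migration. The function is short enough to reproduce almost verbatim:

static struct task_struct *__pick_migrate_task(struct rq *rq)
{
	const struct sched_class *class;
	struct task_struct *next;

	for_each_class(class) {
		next = class->pick_next_task(rq);	/* 6989 */
		if (next) {
			next->sched_class->put_prev_task(rq, next);
			return next;			/* 6992 */
		}
	}

	/* The idle class should always have a runnable task. */
	BUG();
}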
7013 struct task_struct *next, *tmp, *stop = rq->stop; in migrate_tasks() local
7049 next = __pick_migrate_task(rq); in migrate_tasks()
7055 if (!force && is_per_cpu_kthread(next)) { in migrate_tasks()
7056 INIT_LIST_HEAD(&next->percpu_kthread_node); in migrate_tasks()
7057 list_add(&next->percpu_kthread_node, &percpu_kthreads); in migrate_tasks()
7060 deactivate_task(rq, next, in migrate_tasks()
7075 raw_spin_lock(&next->pi_lock); in migrate_tasks()
7083 if (task_rq(next) != rq || !task_on_rq_queued(next)) { in migrate_tasks()
7091 raw_spin_unlock(&next->pi_lock); in migrate_tasks()
7096 dest_cpu = select_fallback_rq(dead_rq->cpu, next); in migrate_tasks()
7097 rq = __migrate_task(rq, rf, next, dest_cpu); in migrate_tasks()
7104 raw_spin_unlock(&next->pi_lock); in migrate_tasks()
7107 list_for_each_entry_safe(next, tmp, &percpu_kthreads, in migrate_tasks()
7111 activate_task(rq, next, ENQUEUE_NOCLOCK); in migrate_tasks()
7112 list_del(&next->percpu_kthread_node); in migrate_tasks()
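
Lines 7013-7112 are migrate_tasks(), which empties a dying runqueue. The percpu_kthreads parking (lines 7055-7057 and 7107-7112) is again an Android common-kernel addition: unbound tasks get migrated to a fallback CPU, while per-CPU kthreads, which cannot run anywhere else, are deactivated onto a local list and re-activated at the end so they can park themselves. A sketch of the loop with the locking churn compressed; the names mirror the matches, but the dequeue/enqueue flags and surrounding details are assumptions that vary between kernel versions:

	for (;;) {
		/* Only the migration thread (rq->stop) may remain. */
		if (rq->nr_running == 1)
			break;

		next = __pick_migrate_task(rq);		/* 7049 */

		if (!force && is_per_cpu_kthread(next)) {	/* 7055 */
			INIT_LIST_HEAD(&next->percpu_kthread_node); /* 7056 */
			list_add(&next->percpu_kthread_node, &percpu_kthreads);
			deactivate_task(rq, next, DEQUEUE_NOCLOCK); /* 7060 */
			continue;
		}

		/* Drop rq->lock, take pi_lock, re-take rq->lock in order. */
		rq_unlock(rq, rf);
		raw_spin_lock(&next->pi_lock);		/* 7075 */
		rq_relock(rq, rf);

		/* Revalidate: the task may have moved while unlocked. */
		if (task_rq(next) != rq || !task_on_rq_queued(next)) { /* 7083 */
			raw_spin_unlock(&next->pi_lock);	/* 7091 */
			continue;
		}

		dest_cpu = select_fallback_rq(dead_rq->cpu, next); /* 7096 */
		rq = __migrate_task(rq, rf, next, dest_cpu);	/* 7097 */
		raw_spin_unlock(&next->pi_lock);		/* 7104 */
	}

	/* Put the parked per-CPU kthreads back on the dying rq. */
	list_for_each_entry_safe(next, tmp, &percpu_kthreads,	/* 7107 */
				 percpu_kthread_node) {
		activate_task(rq, next, ENQUEUE_NOCLOCK);	/* 7111 */
		list_del(&next->percpu_kthread_node);		/* 7112 */
	}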