
Lines Matching refs:next

Cross-reference hits for the identifier "next" in the Linux scheduler core (the function names match kernel/sched/core.c). Each entry gives the source line number, the matching source line, and the enclosing function; a trailing "argument" or "local" tag marks lines where next is declared rather than merely used.

427 	if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))  in __wake_q_add()
434 head->lastp = &node->next; in __wake_q_add()
489 node = node->next; in wake_up_q()
490 task->wake_q.next = NULL; in wake_up_q()
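The first group (lines 427-490) is the lockless wake queue: __wake_q_add() claims a node by cmpxchg()ing its next pointer from NULL to the WAKE_Q_TAIL sentinel, appends it through head->lastp, and wake_up_q() walks the chain, clearing each wake_q.next before the wakeup so the task can be re-queued immediately. Below is a minimal userspace sketch of that claim-and-append pattern, with C11 atomics standing in for the kernel's cmpxchg_relaxed; the struct layouts are simplified stand-ins, not the kernel's definitions.

#include <stdatomic.h>
#include <stdio.h>

#define WAKE_Q_TAIL ((struct wake_q_node *)0x01)    /* end-of-chain sentinel */

struct wake_q_node {
    struct wake_q_node *_Atomic next;
};

struct wake_q_head {
    struct wake_q_node *_Atomic first;
    struct wake_q_node *_Atomic *lastp;    /* points at the last ->next */
};

/* Mirrors the cmpxchg at line 427: only the caller that moves next
 * from NULL to WAKE_Q_TAIL owns the node; everyone else backs off. */
static int wake_q_add(struct wake_q_head *head, struct wake_q_node *node)
{
    struct wake_q_node *expected = NULL;

    if (!atomic_compare_exchange_strong_explicit(&node->next, &expected,
            WAKE_Q_TAIL, memory_order_relaxed, memory_order_relaxed))
        return 0;                  /* already queued elsewhere */

    *head->lastp = node;           /* append ...                         */
    head->lastp = &node->next;     /* ... line 434's lastp update        */
    return 1;
}

/* Mirrors the walk at lines 489-490: advance first, then clear next. */
static void wake_q_run(struct wake_q_head *head)
{
    struct wake_q_node *node = head->first;

    while (node != WAKE_Q_TAIL) {
        struct wake_q_node *next = atomic_load(&node->next);

        atomic_store(&node->next, NULL);    /* node may be reused now */
        printf("wake %p\n", (void *)node);  /* kernel: wake_up_process() */
        node = next;
    }
}

int main(void)
{
    struct wake_q_head head = { WAKE_Q_TAIL, &head.first };
    struct wake_q_node a = { NULL }, b = { NULL };

    wake_q_add(&head, &a);
    wake_q_add(&head, &b);
    wake_q_add(&head, &a);    /* no-op: a is already queued */
    wake_q_run(&head);
    return 0;
}

The third wake_q_add() returning 0 is the point of the cmpxchg: a task can be on at most one wake queue, so concurrent wakers race once on the node's next pointer and the losers simply do nothing.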
3041 struct task_struct *next) in __fire_sched_out_preempt_notifiers() argument
3046 notifier->ops->sched_out(notifier, next); in __fire_sched_out_preempt_notifiers()
3051 struct task_struct *next) in fire_sched_out_preempt_notifiers() argument
3054 __fire_sched_out_preempt_notifiers(curr, next); in fire_sched_out_preempt_notifiers()
3065 struct task_struct *next) in fire_sched_out_preempt_notifiers() argument
3071 static inline void prepare_task(struct task_struct *next) in prepare_task() argument
3078 next->on_cpu = 1; in prepare_task()
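fire_sched_out_preempt_notifiers() shows up twice (lines 3051 and 3065) because the kernel carries two definitions: the real one, which walks the outgoing task's notifier list and hands each callback the incoming next task, and an empty stub for builds without CONFIG_PREEMPT_NOTIFIERS. prepare_task() at line 3078 then claims next as running by setting next->on_cpu = 1 before the switch. A compilable sketch of the notifier walk, using a plain singly linked list in place of the kernel's hlist; the struct layout here is invented for the example (the real struct preempt_ops also carries a sched_in hook):

#include <stdio.h>

struct task_struct { const char *comm; };    /* just a name here */

struct preempt_notifier;

struct preempt_ops {
    /* called when the registering task is about to be scheduled out */
    void (*sched_out)(struct preempt_notifier *notifier,
                      struct task_struct *next);
};

struct preempt_notifier {
    struct preempt_notifier *link;
    const struct preempt_ops *ops;
};

/* The loop from line 3046: every registered notifier learns which
 * task is coming in next. */
static void fire_sched_out(struct preempt_notifier *list,
                           struct task_struct *next)
{
    for (struct preempt_notifier *n = list; n; n = n->link)
        n->ops->sched_out(n, next);
}

static void my_sched_out(struct preempt_notifier *n, struct task_struct *next)
{
    (void)n;
    printf("being preempted; next is %s\n", next->comm);
}

int main(void)
{
    static const struct preempt_ops ops = { .sched_out = my_sched_out };
    struct preempt_notifier pn = { .link = NULL, .ops = &ops };
    struct task_struct next = { .comm = "kworker/0:1" };

    fire_sched_out(&pn, &next);
    return 0;
}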
3100 prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf) in prepare_lock_switch() argument
3112 rq->lock.owner = next; in prepare_lock_switch()
3132 # define prepare_arch_switch(next) do { } while (0) argument
3154 struct task_struct *next) in prepare_task_switch() argument
3157 sched_info_switch(rq, prev, next); in prepare_task_switch()
3158 perf_event_task_sched_out(prev, next); in prepare_task_switch()
3160 fire_sched_out_preempt_notifiers(prev, next); in prepare_task_switch()
3161 prepare_task(next); in prepare_task_switch()
3162 prepare_arch_switch(next); in prepare_task_switch()
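Lines 3100-3162 are the pre-switch bookkeeping. prepare_lock_switch() hands rq->lock ownership to next (line 3112) because it is next, not prev, that will release the lock on the other side of the switch; prepare_task_switch() then runs its hooks in a fixed order, ending with prepare_task() and the prepare_arch_switch() macro, which line 3132 shows defaulting to a no-op. A stub sketch of that ordering, with every body reduced to a trace line (all the bodies here are placeholders, only the call order is taken from the listing):

#include <stdio.h>

/* Placeholder hooks; each stands in for the same-named call above. */
static void sched_info_switch(void)          { puts("1. delay accounting"); }
static void perf_event_task_sched_out(void)  { puts("2. perf sched-out"); }
static void fire_sched_out_notifiers(void)   { puts("3. preempt notifiers"); }
static void prepare_task(void)               { puts("4. next->on_cpu = 1"); }
#define prepare_arch_switch() do { } while (0)    /* line 3132's default */

static void prepare_task_switch(void)
{
    sched_info_switch();
    perf_event_task_sched_out();
    fire_sched_out_notifiers();
    prepare_task();
    prepare_arch_switch();
}

int main(void)
{
    prepare_task_switch();
    return 0;
}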
3270 struct callback_head *head, *next; in __balance_callback() local
3279 next = head->next; in __balance_callback()
3280 head->next = NULL; in __balance_callback()
3281 head = next; in __balance_callback()
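__balance_callback() (lines 3270-3281) drains a singly linked list of struct callback_head, detaching each node before invoking it so a callback is free to re-queue itself without corrupting the list being drained. The same pop-and-run loop, runnable in userspace:

#include <stdio.h>
#include <stddef.h>

/* Same shape as the kernel's struct callback_head. */
struct callback_head {
    struct callback_head *next;
    void (*func)(struct callback_head *head);
};

/* The loop from lines 3279-3281: save next, clear the link, then call. */
static void run_callbacks(struct callback_head *head)
{
    while (head) {
        struct callback_head *next = head->next;

        head->next = NULL;    /* detach before calling */
        head->func(head);
        head = next;
    }
}

static void hello(struct callback_head *h)
{
    printf("callback %p ran\n", (void *)h);
}

int main(void)
{
    struct callback_head b = { NULL, hello };
    struct callback_head a = { &b, hello };

    run_callbacks(&a);
    return 0;
}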
3335 struct task_struct *next, struct rq_flags *rf) in context_switch() argument
3337 prepare_task_switch(rq, prev, next); in context_switch()
3353 if (!next->mm) { // to kernel in context_switch()
3354 enter_lazy_tlb(prev->active_mm, next); in context_switch()
3356 next->active_mm = prev->active_mm; in context_switch()
3362 membarrier_switch_mm(rq, prev->active_mm, next->mm); in context_switch()
3371 switch_mm_irqs_off(prev->active_mm, next->mm, next); in context_switch()
3382 prepare_lock_switch(rq, next, rf); in context_switch()
3385 switch_to(prev, next, prev); in context_switch()
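The context_switch() lines (3335-3385) carry the mm hand-off: a kernel thread has next->mm == NULL, so it borrows prev->active_mm under enter_lazy_tlb() instead of paying for a page-table switch, while a user task gets membarrier_switch_mm() plus switch_mm_irqs_off() before switch_to() swaps register state. A toy model of just that branch; the structs are cut down to the two fields the decision needs:

#include <stdio.h>
#include <stddef.h>

struct mm_struct { const char *name; };

struct task_struct {
    struct mm_struct *mm;         /* NULL for kernel threads          */
    struct mm_struct *active_mm;  /* address space the CPU runs on    */
};

/* The branch from lines 3353-3371: kernel threads borrow the old
 * address space (lazy TLB); user tasks switch for real. */
static void switch_mm_sketch(struct task_struct *prev,
                             struct task_struct *next)
{
    if (!next->mm) {                              /* to kernel */
        next->active_mm = prev->active_mm;        /* borrow it */
        printf("lazy TLB, borrowing %s\n", next->active_mm->name);
    } else {                                      /* to user   */
        next->active_mm = next->mm;
        printf("switch_mm to %s\n", next->mm->name);
    }
}

int main(void)
{
    struct mm_struct user_mm = { "bash's mm" };
    struct task_struct bash    = { &user_mm, &user_mm };
    struct task_struct kthread = { NULL, NULL };

    switch_mm_sketch(&bash, &kthread);    /* user -> kernel thread */
    switch_mm_sketch(&kthread, &bash);    /* kernel thread -> user */
    return 0;
}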
4001 struct task_struct *prev, *next; in __schedule() local
4049 next = pick_next_task(rq, prev, &rf); in __schedule()
4053 if (likely(prev != next)) { in __schedule()
4059 RCU_INIT_POINTER(rq->curr, next); in __schedule()
4076 trace_sched_switch(preempt, prev, next); in __schedule()
4079 rq = context_switch(rq, prev, next, &rf); in __schedule()
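__schedule() itself (lines 4001-4079) reduces to: pick, compare, switch. pick_next_task() may hand back the task that is already running, in which case the likely(prev != next) test at line 4053 skips the switch entirely; otherwise rq->curr is published with RCU_INIT_POINTER() before context_switch() runs. A toy pick-and-compare loop showing why the prev != next test pays off; the priority pick here is a stand-in, not the kernel's class-based pick_next_task():

#include <stdio.h>

struct task { const char *comm; int prio; };

/* Stand-in for pick_next_task(): highest priority runnable wins. */
static struct task *pick_next(struct task *tasks, int n)
{
    struct task *best = &tasks[0];

    for (int i = 1; i < n; i++)
        if (tasks[i].prio > best->prio)
            best = &tasks[i];
    return best;
}

int main(void)
{
    struct task tasks[] = { { "idle", 0 }, { "worker", 5 } };
    struct task *prev = &tasks[1];    /* worker is already running */
    struct task *next = pick_next(tasks, 2);

    if (prev != next)    /* the likely() test at line 4053 */
        printf("switch %s -> %s\n", prev->comm, next->comm);
    else
        printf("%s keeps the CPU, no switch\n", prev->comm);
    return 0;
}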
6211 struct task_struct *next; in __pick_migrate_task() local
6214 next = class->pick_next_task(rq, NULL, NULL); in __pick_migrate_task()
6215 if (next) { in __pick_migrate_task()
6216 next->sched_class->put_prev_task(rq, next); in __pick_migrate_task()
6217 return next; in __pick_migrate_task()
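__pick_migrate_task() (lines 6211-6217) asks each scheduling class in priority order for a task and takes the first non-NULL answer, calling put_prev_task() on it so it is in a consistent state for migration; if every class comes up empty the kernel BUG()s, since the idle class should always have a runnable task. The class walk as a table of function pointers; the two stub classes are illustrative, not the kernel's:

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

struct task { const char *comm; };

struct sched_class {
    struct task *(*pick_next_task)(void);
};

static struct task *pick_rt(void)   { return NULL; }    /* no RT tasks */
static struct task fair_task = { "worker" };
static struct task *pick_fair(void) { return &fair_task; }

/* Classes in priority order, like the kernel's for_each_class(). */
static const struct sched_class classes[] = {
    { .pick_next_task = pick_rt },
    { .pick_next_task = pick_fair },
};

/* The walk from lines 6214-6217: first class with a task wins. */
static struct task *pick_migrate_task(void)
{
    for (size_t i = 0; i < sizeof(classes) / sizeof(classes[0]); i++) {
        struct task *next = classes[i].pick_next_task();

        if (next)
            return next;
    }
    abort();    /* kernel BUG()s here: idle should always be runnable */
}

int main(void)
{
    printf("migrating %s\n", pick_migrate_task()->comm);
    return 0;
}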
6236 struct task_struct *next, *stop = rq->stop; in migrate_tasks() local
6266 next = __pick_migrate_task(rq); in migrate_tasks()
6278 raw_spin_lock(&next->pi_lock); in migrate_tasks()
6286 if (WARN_ON(task_rq(next) != rq || !task_on_rq_queued(next))) { in migrate_tasks()
6287 raw_spin_unlock(&next->pi_lock); in migrate_tasks()
6292 dest_cpu = select_fallback_rq(dead_rq->cpu, next); in migrate_tasks()
6293 rq = __migrate_task(rq, rf, next, dest_cpu); in migrate_tasks()
6300 raw_spin_unlock(&next->pi_lock); in migrate_tasks()
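Finally, migrate_tasks() (lines 6236-6300) empties a dying CPU's runqueue. The interesting part is the lock dance around lines 6278-6287: the lock order is pi_lock before rq->lock, so rq->lock must be dropped and retaken to acquire next->pi_lock, and by the time both are held the task may already have moved, hence the WARN_ON recheck of task_rq(next) and task_on_rq_queued() before select_fallback_rq() picks a destination. The drop-retake-recheck shape, sketched with pthreads (compile with -lpthread; the field names are stand-ins for the kernel state they model):

#include <pthread.h>
#include <stdio.h>

struct rq { pthread_mutex_t lock; };

struct task {
    pthread_mutex_t pi_lock;
    struct rq *rq;    /* which runqueue the task is on */
    int on_rq;        /* still queued there?           */
};

/*
 * Called with rq->lock held. Lock order is pi_lock -> rq->lock, so the
 * rq lock has to be released before pi_lock can be taken; the recheck
 * afterwards mirrors the WARN_ON at line 6286.
 */
static int lock_and_recheck(struct rq *rq, struct task *next)
{
    pthread_mutex_unlock(&rq->lock);
    pthread_mutex_lock(&next->pi_lock);    /* outer lock first     */
    pthread_mutex_lock(&rq->lock);         /* then retake rq->lock */

    if (next->rq != rq || !next->on_rq) {  /* task moved meanwhile? */
        pthread_mutex_unlock(&next->pi_lock);
        return 0;                          /* caller restarts loop  */
    }
    return 1;    /* both locks held, task pinned: safe to migrate */
}

int main(void)
{
    struct rq rq = { PTHREAD_MUTEX_INITIALIZER };
    struct task t = { PTHREAD_MUTEX_INITIALIZER, &rq, 1 };

    pthread_mutex_lock(&rq.lock);
    if (lock_and_recheck(&rq, &t)) {
        printf("task pinned; pick fallback CPU and migrate\n");
        pthread_mutex_unlock(&t.pi_lock);
    }
    pthread_mutex_unlock(&rq.lock);
    return 0;
}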