Lines Matching refs:next

420 	struct sched_entity *curr, *next, *last;  member
865 # define prepare_arch_switch(next) do { } while (0) argument
882 static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next) in prepare_lock_switch() argument
912 static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next) in prepare_lock_switch() argument
920 next->oncpu = 1; in prepare_lock_switch()
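
Note: the two prepare_lock_switch() definitions at 882 and 912 are almost certainly config-gated variants of the same hook, and the hit at 920 belongs to the SMP flavor that marks the incoming task as on-CPU before the low-level switch runs. A minimal sketch of that flavor; the config guard and the early unlock are assumptions from context, not taken from these hits:

    static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
    {
    #ifdef CONFIG_SMP
            /* Line 920: publish that 'next' now owns this CPU, before
             * the low-level register switch actually happens. */
            next->oncpu = 1;
    #endif
            /* Assumed: this variant drops the runqueue lock early so the
             * context switch itself can run unlocked. */
            spin_unlock(&rq->lock);
    }
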
1380 struct task_struct *(*next)(void *); member
1667 for (class = sched_class_highest; class; class = class->next)
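
Note: this hit, together with the later ones at 3043, 3091 and 4597, walks the scheduler-class list: a NULL-terminated singly linked list headed by sched_class_highest, ordered from highest to lowest priority. A sketch of the pattern; the pick_next_task hook is used here purely as an example callback:

    const struct sched_class *class;
    struct task_struct *p;

    /* Highest-priority class first; each class links to the next
     * lower one, and the list ends in NULL. */
    for (class = sched_class_highest; class; class = class->next) {
            p = class->pick_next_task(rq);
            if (p)
                    return p;
    }
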
1851 (&p->se == cfs_rq_of(&p->se)->next || in task_hot()
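
Note: the hits at 420 and 1851 are connected: next (and last) at 420 are the CFS buddy hints on the runqueue, and task_hot() treats a task that currently is the next/last buddy as cache-hot so the load balancer leaves it alone. Assumed shape of the check around 1851 (the surrounding feature gate is omitted):

    if (&p->se == cfs_rq_of(&p->se)->next ||
        &p->se == cfs_rq_of(&p->se)->last)
            return 1;       /* buddy tasks count as cache-hot */
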
2146 } while (group = group->next, group != sd->groups); in find_idlest_group()
2526 struct task_struct *next) in fire_sched_out_preempt_notifiers() argument
2532 notifier->ops->sched_out(notifier, next); in fire_sched_out_preempt_notifiers()
2543 struct task_struct *next) in fire_sched_out_preempt_notifiers() argument
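
Note: the duplicate fire_sched_out_preempt_notifiers() signatures at 2526 and 2543 are the usual real-implementation/empty-stub pair, presumably gated on CONFIG_PREEMPT_NOTIFIERS; 2532 shows the real body invoking each notifier's sched_out hook with the incoming task. A sketch of that shape; the hlist traversal details are assumptions:

    #ifdef CONFIG_PREEMPT_NOTIFIERS
    static void fire_sched_out_preempt_notifiers(struct task_struct *curr,
                                                 struct task_struct *next)
    {
            struct preempt_notifier *notifier;
            struct hlist_node *node;

            /* Line 2532: tell each registered notifier which task is
             * about to run in our place. */
            hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
                    notifier->ops->sched_out(notifier, next);
    }
    #else
    static void fire_sched_out_preempt_notifiers(struct task_struct *curr,
                                                 struct task_struct *next)
    {
    }
    #endif
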
2564 struct task_struct *next) in prepare_task_switch() argument
2566 fire_sched_out_preempt_notifiers(prev, next); in prepare_task_switch()
2567 prepare_lock_switch(rq, next); in prepare_task_switch()
2568 prepare_arch_switch(next); in prepare_task_switch()
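
Note: lines 2564-2568 give the whole switch-preparation sequence: notifiers fire first, then the runqueue-lock handoff is prepared, then the architecture hook (a no-op by default, per 865) runs. Reassembled directly from the hits above:

    static inline void prepare_task_switch(struct rq *rq, struct task_struct *prev,
                                           struct task_struct *next)
    {
            fire_sched_out_preempt_notifiers(prev, next);
            prepare_lock_switch(rq, next);
            prepare_arch_switch(next);
    }
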
2645 void qemu_trace_cs(struct task_struct *next);
2654 struct task_struct *next) in context_switch() argument
2658 prepare_task_switch(rq, prev, next); in context_switch()
2659 trace_sched_switch(rq, prev, next); in context_switch()
2660 mm = next->mm; in context_switch()
2670 next->active_mm = oldmm; in context_switch()
2672 enter_lazy_tlb(oldmm, next); in context_switch()
2674 switch_mm(oldmm, mm, next); in context_switch()
2692 qemu_trace_cs(next); in context_switch()
2696 switch_to(prev, next, prev); in context_switch()
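
Note: the context_switch() hits at 2660-2674 show the lazy-TLB decision: a kernel thread has no mm of its own, so it borrows the previous task's active_mm and only enters lazy-TLB mode; a normal task gets a real switch_mm(). (The qemu_trace_cs() hits at 2645 and 2692 appear to be an out-of-tree tracing hook and are left as-is.) A sketch of the branch, with the reference counting filled in as an assumption:

    mm = next->mm;
    oldmm = prev->active_mm;

    if (!mm) {
            /* Kernel thread: borrow the old address space (line 2670)
             * and skip the TLB flush (line 2672). */
            next->active_mm = oldmm;
            atomic_inc(&oldmm->mm_count);   /* assumed: pin the borrowed mm */
            enter_lazy_tlb(oldmm, next);
    } else {
            /* User task: actually switch address spaces (line 2674). */
            switch_mm(oldmm, mm, next);
    }
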
2985 next: in balance_tasks()
2991 p = iterator->next(iterator->arg); in balance_tasks()
2992 goto next; in balance_tasks()
3005 p = iterator->next(iterator->arg); in balance_tasks()
3006 goto next; in balance_tasks()
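
Note: balance_tasks() (2985-3006) drives migration through the iterator whose ->next member appears at 1380: ->start/->next hand back candidate tasks one at a time, and anything that should not move is skipped by advancing the iterator and jumping back to the next: label. A condensed sketch; can_migrate_task(), pull_task(), the load bookkeeping and max_loops are assumed stand-ins from context:

    p = iterator->start(iterator->arg);
    next:
            if (!p || loops++ > max_loops)
                    goto out;
            if (!can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned)) {
                    p = iterator->next(iterator->arg);      /* line 2991 */
                    goto next;
            }
            pull_task(busiest, p, this_rq, this_cpu);
            pulled++;
            p = iterator->next(iterator->arg);              /* line 3005 */
            goto next;
    out:
            return pulled;
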
3043 class = class->next; in move_tasks()
3073 p = iterator->next(iterator->arg); in iter_move_one_task()
3091 for (class = sched_class_highest; class; class = class->next) in move_one_task()
3291 group = group->next; in find_busiest_group()
4597 class = class->next; in pick_next_task()
4606 struct task_struct *prev, *next; in schedule() local
4648 next = pick_next_task(rq, prev); in schedule()
4650 if (likely(prev != next)) { in schedule()
4651 sched_info_switch(prev, next); in schedule()
4654 rq->curr = next; in schedule()
4657 context_switch(rq, prev, next); /* unlocks the rq */ in schedule()
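
Note: the schedule() hits at 4648-4657 are the heart of the scheduler: pick the next task, and only if it differs from the current one do the switch bookkeeping and the actual context switch. Reassembled from the lines above, with the else branch assumed from context:

    next = pick_next_task(rq, prev);

    if (likely(prev != next)) {
            sched_info_switch(prev, next);
            rq->curr = next;
            context_switch(rq, prev, next);         /* unlocks the rq */
    } else {
            /* Assumed: nothing to switch to, so release the lock that
             * context_switch() would otherwise have dropped. */
            spin_unlock_irq(&rq->lock);
    }
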
4756 wait_queue_t *curr, *next; in __wake_up_common() local
4758 list_for_each_entry_safe(curr, next, &q->task_list, task_list) { in __wake_up_common()
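
Note: __wake_up_common() (4756-4758) is why the safe list iterator needs its extra next cursor: a wakeup callback may unlink curr from the wait queue, so the iterator caches the following node before invoking it. A sketch of the shape, with the stop condition hedged:

    wait_queue_t *curr, *next;

    /* 'next' is fetched before curr's callback runs, so the loop
     * survives curr removing itself from q->task_list. */
    list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
            if (curr->func(curr, mode, sync, key))
                    break;  /* assumed: stop once enough tasks have woken */
    }
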
6290 req = list_entry(head->next, struct migration_req, list); in migration_thread()
6291 list_del_init(head->next); in migration_thread()
6478 struct task_struct *next; in migrate_dead_tasks() local
6484 next = pick_next_task(rq, rq->curr); in migrate_dead_tasks()
6485 if (!next) in migrate_dead_tasks()
6487 next->sched_class->put_prev_task(rq, next); in migrate_dead_tasks()
6488 migrate_dead(dead_cpu, next); in migrate_dead_tasks()
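
Note: migrate_dead_tasks() (6478-6488) drains a dead CPU by repeatedly asking the scheduler for its next task and shipping each one elsewhere; put_prev_task() is called first because pick_next_task() handed the task out as if it were about to run. A sketch, with the loop frame assumed:

    struct task_struct *next;

    for (;;) {
            next = pick_next_task(rq, rq->curr);
            if (!next)
                    break;          /* line 6485: queue is empty */
            /* Undo the 'about to run' state before migrating. */
            next->sched_class->put_prev_task(rq, next);
            migrate_dead(dead_cpu, next);
    }
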
6775 req = list_entry(rq->migration_queue.next, in migration_call()
6891 group = group->next; in sched_domain_debug_one()
6948 if (sd->groups != sd->groups->next) in sd_degenerate()
6977 if (parent->groups == parent->groups->next) { in sd_parent_degenerate()
7188 last->next = sg; in init_sched_build_groups()
7191 last->next = first; in init_sched_build_groups()
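
Note: init_sched_build_groups() (7188-7191) builds the per-domain group list as a ring: each new group is appended through last->next, and the final last->next = first closes the cycle. That invariant is what the degenerate-domain tests at 6948 and 6977 rely on (a single-group ring points back at itself), and why the walks at 2146, 3291, 6891 and 7497 advance with group = group->next until they return to sd->groups. A hypothetical helper showing the construction:

    /* link_groups() is illustrative, not a kernel function. */
    static void link_groups(struct sched_group **groups, int n)
    {
            struct sched_group *first = groups[0], *last = groups[0];
            int i;

            for (i = 1; i < n; i++) {
                    last->next = groups[i];     /* line 7188: append */
                    last = groups[i];
            }
            last->next = first;                 /* line 7191: close the ring */
    }
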
7402 sg = sg->next; in init_numa_sched_groups_power()
7430 sg = sg->next; in free_sched_groups()
7433 sg = sg->next; in free_sched_groups()
7497 group = group->next; in init_sched_groups_power()
7771 sg->next = sg; in __build_sched_domains()
7798 sg->next = prev->next; in __build_sched_domains()
7800 prev->next = sg; in __build_sched_domains()
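
Note: __build_sched_domains() (7771-7800) shows the two ring operations needed for incremental construction: a lone group becomes a ring of one by linking to itself, and later groups are spliced in after a known predecessor:

    sg->next = sg;              /* line 7771: singleton ring */

    /* Splice sg into the ring right after prev (lines 7798-7800). */
    sg->next = prev->next;
    prev->next = sg;
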