Lines Matching refs:prev

Symbol cross-reference hits for prev in the Linux scheduler; the functions
involved (schedule(), context_switch(), __build_sched_domains()) place them in
a 2.6-era kernel/sched.c. The leading number on each hit is the line in that
file, and the trailing "argument"/"local" tag is the role the cross-referencer
assigned to prev at that point.

868 # define finish_arch_switch(prev)	do { } while (0)  argument
886 static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) in finish_lock_switch() argument
929 static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) in finish_lock_switch() argument
938 prev->oncpu = 0; in finish_lock_switch()
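
The two finish_lock_switch() definitions at 886 and 929 are the two sides of
the scheduler's locking split: architectures that context-switch with the
runqueue lock held versus those that define __ARCH_WANT_UNLOCKED_CTXSW. A
minimal sketch of the second variant, the one that clears prev->oncpu at 938;
only that store comes from the hits above, while the barrier and ifdef
structure are reconstructed from memory of 2.6-era kernels:

static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_SMP
        /*
         * Once ->oncpu is cleared, prev may be picked up by another CPU,
         * so the store must not be visible before the switch is fully
         * done on this one.
         */
        smp_wmb();
        prev->oncpu = 0;                        /* hit at 938 */
#endif
#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
        local_irq_enable();
#endif
}
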
2563 prepare_task_switch(struct rq *rq, struct task_struct *prev, in prepare_task_switch() argument
2566 fire_sched_out_preempt_notifiers(prev, next); in prepare_task_switch()
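
prepare_task_switch() runs with the runqueue lock held, just before the
switch; the only refs:prev hit is the preempt-notifier callout at 2566. A
sketch of the whole function, assuming the usual prepare_lock_switch() and
prepare_arch_switch() tail of that era (neither appears in the hits):

static inline void
prepare_task_switch(struct rq *rq, struct task_struct *prev,
                    struct task_struct *next)
{
        /* tell preempt notifiers that prev is being scheduled out */
        fire_sched_out_preempt_notifiers(prev, next);   /* hit at 2566 */
        prepare_lock_switch(rq, next);
        prepare_arch_switch(next);
}
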
2586 static void finish_task_switch(struct rq *rq, struct task_struct *prev) in finish_task_switch() argument
2605 prev_state = prev->state; in finish_task_switch()
2606 finish_arch_switch(prev); in finish_task_switch()
2607 finish_lock_switch(rq, prev); in finish_task_switch()
2621 kprobe_flush_task(prev); in finish_task_switch()
2622 put_task_struct(prev); in finish_task_switch()
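
finish_task_switch() is where prev is finally let go. prev->state has to be
sampled (2605) before finish_lock_switch() drops the runqueue lock (2607):
once the lock is released, a dying prev could be rescheduled on another CPU
and freed there first, and its reference would then be dropped twice. A
sketch stitching the hits together; the rq->prev_mm/mmdrop() handling and
the TASK_DEAD test are reconstructed from memory of 2.6-era kernels:

static void finish_task_switch(struct rq *rq, struct task_struct *prev)
{
        struct mm_struct *mm = rq->prev_mm;     /* parked by context_switch() */
        long prev_state;

        rq->prev_mm = NULL;

        /* sample the state while the runqueue lock still pins prev */
        prev_state = prev->state;               /* hit at 2605 */
        finish_arch_switch(prev);               /* hit at 2606 */
        finish_lock_switch(rq, prev);           /* hit at 2607: drops rq->lock */

        fire_sched_in_preempt_notifiers(current);
        if (mm)
                mmdrop(mm);
        if (unlikely(prev_state == TASK_DEAD)) {
                /* prev ran its last schedule(); drop its "current" reference */
                kprobe_flush_task(prev);        /* hit at 2621 */
                put_task_struct(prev);          /* hit at 2622 */
        }
}
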
2630 asmlinkage void schedule_tail(struct task_struct *prev) in schedule_tail() argument
2635 finish_task_switch(rq, prev); in schedule_tail()
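
schedule_tail() is the first code a newly forked task runs: the parent did
the prepare half of the switch, so the child must complete the finish half
(2635). A sketch, assuming the usual preempt/set_child_tid tail (not in the
hits):

asmlinkage void schedule_tail(struct task_struct *prev)
{
        struct rq *rq = this_rq();

        finish_task_switch(rq, prev);           /* hit at 2635 */
#ifdef __ARCH_WANT_UNLOCKED_CTXSW
        /* finish_task_switch() did not re-enable preemption in this mode */
        preempt_enable();
#endif
        if (current->set_child_tid)
                put_user(task_pid_vnr(current), current->set_child_tid);
}
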
2653 context_switch(struct rq *rq, struct task_struct *prev, in context_switch() argument
2658 prepare_task_switch(rq, prev, next); in context_switch()
2659 trace_sched_switch(rq, prev, next); in context_switch()
2661 oldmm = prev->active_mm; in context_switch()
2676 if (unlikely(!prev->mm)) { in context_switch()
2677 prev->active_mm = NULL; in context_switch()
2696 switch_to(prev, next, prev); in context_switch()
2704 finish_task_switch(this_rq(), prev); in context_switch()
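
context_switch() shows the active_mm protocol around prev: a kernel thread
has no mm of its own, so it borrows the outgoing task's active_mm; when the
borrower is itself switched out (!prev->mm, 2676), the borrowed reference is
parked in rq->prev_mm (2677 onward) and dropped later in finish_task_switch().
Note the this_rq() at 2704: after switch_to() (2696), prev may have migrated,
so the rq captured before the switch can be stale. A condensed sketch; the
switch_mm()/enter_lazy_tlb() branch is reconstructed from memory, not from
the hits:

static inline void
context_switch(struct rq *rq, struct task_struct *prev,
               struct task_struct *next)
{
        struct mm_struct *mm, *oldmm;

        prepare_task_switch(rq, prev, next);    /* hit at 2658 */
        trace_sched_switch(rq, prev, next);     /* hit at 2659 */
        mm = next->mm;
        oldmm = prev->active_mm;                /* hit at 2661 */

        if (unlikely(!mm)) {                    /* next is a kernel thread */
                next->active_mm = oldmm;
                atomic_inc(&oldmm->mm_count);
                enter_lazy_tlb(oldmm, next);
        } else
                switch_mm(oldmm, mm, next);

        if (unlikely(!prev->mm)) {              /* hit at 2676 */
                prev->active_mm = NULL;         /* hit at 2677 */
                rq->prev_mm = oldmm;            /* dropped in finish_task_switch() */
        }

        /* switch register state and stack */
        switch_to(prev, next, prev);            /* hit at 2696 */

        barrier();
        /* prev may have moved CPUs; the old rq pointer is unusable */
        finish_task_switch(this_rq(), prev);    /* hit at 2704 */
}
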
4527 static noinline void __schedule_bug(struct task_struct *prev) in __schedule_bug() argument
4532 prev->comm, prev->pid, preempt_count()); in __schedule_bug()
4534 debug_show_held_locks(prev); in __schedule_bug()
4537 print_irqtrace_events(prev); in __schedule_bug()
4548 static inline void schedule_debug(struct task_struct *prev) in schedule_debug() argument
4555 if (unlikely(in_atomic_preempt_off() && !prev->exit_state)) in schedule_debug()
4556 __schedule_bug(prev); in schedule_debug()
4562 if (unlikely(prev->lock_depth >= 0)) { in schedule_debug()
4564 schedstat_inc(prev, sched_info.bkl_count); in schedule_debug()
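
__schedule_bug() and schedule_debug() are the "scheduling while atomic"
diagnostics. Exiting tasks are exempt (4555) because do_exit() legitimately
calls schedule() with preemption disabled; a reschedule while holding the
Big Kernel Lock (prev->lock_depth >= 0, 4562) is merely counted in the
schedstats. A sketch of both; the printk format, dump_stack() and
profile_hit() calls are assumed from memory of 2.6-era kernels:

static noinline void __schedule_bug(struct task_struct *prev)
{
        printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
                prev->comm, prev->pid, preempt_count());  /* hit at 4532 */

        debug_show_held_locks(prev);            /* hit at 4534 */
        if (irqs_disabled())
                print_irqtrace_events(prev);    /* hit at 4537 */
        dump_stack();
}

static inline void schedule_debug(struct task_struct *prev)
{
        /* do_exit() may call schedule() atomically; exempt exiting tasks */
        if (unlikely(in_atomic_preempt_off() && !prev->exit_state))  /* 4555 */
                __schedule_bug(prev);           /* hit at 4556 */

        profile_hit(SCHED_PROFILING, __builtin_return_address(0));

#ifdef CONFIG_SCHEDSTATS
        if (unlikely(prev->lock_depth >= 0))    /* hit at 4562: BKL held */
                schedstat_inc(prev, sched_info.bkl_count);  /* hit at 4564 */
#endif
}
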
4573 pick_next_task(struct rq *rq, struct task_struct *prev) in pick_next_task() argument
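
pick_next_task() takes prev only in its prototype here (4573); in this era
the body never uses it, which is why the signature is the lone hit. A sketch
of the class walk with the CFS fast path, reconstructed from memory of
2.6-era kernels:

static inline struct task_struct *
pick_next_task(struct rq *rq, struct task_struct *prev)
{
        const struct sched_class *class;
        struct task_struct *p;

        /*
         * Fast path: if all runnable tasks are in the fair class, ask it
         * directly instead of walking every scheduling class.
         */
        if (likely(rq->nr_running == rq->cfs.nr_running)) {
                p = fair_sched_class.pick_next_task(rq);
                if (likely(p))
                        return p;
        }

        class = sched_class_highest;
        for (;;) {
                p = class->pick_next_task(rq);
                if (p)
                        return p;
                /* never NULL-terminates: the idle class always has a task */
                class = class->next;
        }
}
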
4606 struct task_struct *prev, *next; in schedule() local
4616 prev = rq->curr; in schedule()
4617 switch_count = &prev->nivcsw; in schedule()
4619 release_kernel_lock(prev); in schedule()
4622 schedule_debug(prev); in schedule()
4629 clear_tsk_need_resched(prev); in schedule()
4631 if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { in schedule()
4632 if (unlikely(signal_pending_state(prev->state, prev))) in schedule()
4633 prev->state = TASK_RUNNING; in schedule()
4635 deactivate_task(rq, prev, 1); in schedule()
4636 switch_count = &prev->nvcsw; in schedule()
4640 if (prev->sched_class->pre_schedule) in schedule()
4641 prev->sched_class->pre_schedule(rq, prev); in schedule()
4647 prev->sched_class->put_prev_task(rq, prev); in schedule()
4648 next = pick_next_task(rq, prev); in schedule()
4650 if (likely(prev != next)) { in schedule()
4651 sched_info_switch(prev, next); in schedule()
4657 context_switch(rq, prev, next); /* unlocks the rq */ in schedule()
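
The hits from 4606 to 4657 are the core of schedule() itself. switch_count
starts at the involuntary counter nivcsw (4617) and is repointed at nvcsw
(4636) only when prev genuinely blocks; a pending signal vetoes the
deactivation by flipping prev back to TASK_RUNNING (4632-4633). A fragment
stitching the hits in order; the locking, the else branch and the rq->curr
update between them are assumptions from memory:

        prev = rq->curr;                        /* hit at 4616 */
        switch_count = &prev->nivcsw;           /* hit at 4617: involuntary */

        release_kernel_lock(prev);              /* hit at 4619: drop the BKL */
        schedule_debug(prev);                   /* hit at 4622 */

        spin_lock_irq(&rq->lock);
        clear_tsk_need_resched(prev);           /* hit at 4629 */

        if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {  /* 4631 */
                if (unlikely(signal_pending_state(prev->state, prev)))
                        prev->state = TASK_RUNNING;     /* hits at 4632-4633 */
                else
                        deactivate_task(rq, prev, 1);   /* hit at 4635 */
                switch_count = &prev->nvcsw;            /* hit at 4636 */
        }

        if (prev->sched_class->pre_schedule)            /* hits at 4640-4641 */
                prev->sched_class->pre_schedule(rq, prev);

        prev->sched_class->put_prev_task(rq, prev);     /* hit at 4647 */
        next = pick_next_task(rq, prev);                /* hit at 4648 */

        if (likely(prev != next)) {                     /* hit at 4650 */
                sched_info_switch(prev, next);          /* hit at 4651 */
                rq->curr = next;
                ++*switch_count;
                context_switch(rq, prev, next);  /* hit at 4657: unlocks rq */
        } else
                spin_unlock_irq(&rq->lock);
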
7742 struct sched_group *sg, *prev; in __build_sched_domains() local
7773 prev = sg; in __build_sched_domains()
7798 sg->next = prev->next; in __build_sched_domains()
7800 prev->next = sg; in __build_sched_domains()
7801 prev = sg; in __build_sched_domains()
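
The final cluster is a different prev entirely: in __build_sched_domains()
it is a struct sched_group * cursor (7742), seeded at 7773 and used to
splice each newly built group into a circular ->next list. The splice is
the three hits at 7798-7801:

        /* sg inherits prev's old ->next, so the ring stays closed */
        sg->next = prev->next;                  /* hit at 7798 */
        prev->next = sg;                        /* hit at 7800 */
        prev = sg;                              /* hit at 7801 */
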