Lines Matching refs:state (kernel/sched/core.c)

Each entry below gives the source line number, the matching line, and the enclosing function; a trailing "local" or "member" tag marks a variable or struct member named state rather than the task's ->state field.

1682 if (task_running(rq, p) || p->state == TASK_WAKING) { in __set_cpus_allowed_ptr()
1714 WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING && in set_task_cpu()
1722 WARN_ON_ONCE(p->state == TASK_RUNNING && in set_task_cpu()
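
The first hits are migration sanity checks: __set_cpus_allowed_ptr() (1682) treats a running or waking task specially (it has to be moved via the migration stopper rather than simply requeued), and set_task_cpu() (1714/1722) WARNs unless the task is TASK_RUNNING/TASK_WAKING or fully dequeued, and, for a queued running task, unless it has been explicitly marked as migrating. A rough userspace model of those two assertions, with hypothetical names and the kernel's extra conditions elided:

    /* Model of the set_task_cpu() checks at 1714/1722; hypothetical
     * userspace code, not the kernel API.  Compile: cc -std=c11 model.c */
    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>

    enum task_state { TASK_RUNNING, TASK_WAKING, TASK_INTERRUPTIBLE };

    struct task {
            enum task_state state;
            bool on_rq;       /* queued on a runqueue               */
            bool migrating;   /* marked as being moved deliberately */
            int cpu;
    };

    static void model_set_task_cpu(struct task *p, int new_cpu)
    {
            /* Only running/waking or fully dequeued tasks may change CPU. */
            assert(p->state == TASK_RUNNING || p->state == TASK_WAKING ||
                   !p->on_rq);
            /* A queued TASK_RUNNING task must be flagged as migrating. */
            assert(!(p->state == TASK_RUNNING && p->on_rq && !p->migrating));
            p->cpu = new_cpu;
    }

    int main(void)
    {
            struct task t = { .state = TASK_WAKING, .cpu = 0 };
            model_set_task_cpu(&t, 3);
            printf("moved to cpu %d\n", t.cpu);
            return 0;
    }
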
1920 if (match_state && unlikely(p->state != match_state)) in wait_task_inactive()
1935 if (!match_state || p->state == match_state) in wait_task_inactive()
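
wait_task_inactive() (1920/1935) takes an optional match_state: zero means "any state", while a non-zero value makes the wait succeed only while the task is still in exactly that state, so a caller can detect the task having changed state underneath it. The predicate reduces to one line, sketched here as a hypothetical userspace model:

    /* Sketch of the wait_task_inactive() match_state predicate;
     * names hypothetical. */
    #include <stdbool.h>
    #include <stdio.h>

    static bool state_matches(unsigned int task_state, unsigned int match_state)
    {
            /* 1920: if (match_state && unlikely(p->state != match_state))
             * 1935: if (!match_state || p->state == match_state)          */
            return !match_state || task_state == match_state;
    }

    int main(void)
    {
            printf("%d\n", state_matches(0x1, 0));    /* 1: zero is a wildcard */
            printf("%d\n", state_matches(0x1, 0x2));  /* 0: state changed      */
            return 0;
    }
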
2035 enum { cpuset, possible, fail } state = cpuset; in select_fallback_rq() local
2065 switch (state) { in select_fallback_rq()
2069 state = possible; in select_fallback_rq()
2075 state = fail; in select_fallback_rq()
2085 if (state != cpuset) { in select_fallback_rq()
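
select_fallback_rq() (2035-2085) drives a widening search with a local three-valued state: try the task's cpuset first, then every possible CPU, then fail; the final state != cpuset test is what triggers the rate-limited warning that the task is no longer affine to its allowed CPUs. A compilable sketch of that ladder, as a userspace model with hypothetical helpers standing in for the cpumask scans:

    /* Sketch of the select_fallback_rq() widening-search ladder;
     * hypothetical userspace model. */
    #include <stdio.h>

    enum fb_state { FB_CPUSET, FB_POSSIBLE, FB_FAIL };

    /* Stand-ins for "is any CPU usable in this mask?"; hypothetical. */
    static int cpuset_has_cpu(void)   { return 0; }  /* cpuset exhausted   */
    static int possible_has_cpu(void) { return 1; }  /* a possible CPU up  */

    static int select_fallback_cpu(void)
    {
            enum fb_state state = FB_CPUSET;

            for (;;) {
                    switch (state) {
                    case FB_CPUSET:
                            if (cpuset_has_cpu())
                                    goto out;
                            state = FB_POSSIBLE;  /* widen: ignore cpuset */
                            break;
                    case FB_POSSIBLE:
                            if (possible_has_cpu())
                                    goto out;
                            state = FB_FAIL;
                            break;
                    case FB_FAIL:
                            return -1;            /* the kernel BUG()s here */
                    }
            }
    out:
            if (state != FB_CPUSET)
                    fprintf(stderr, "task escaped its cpuset\n");
            return 0;
    }

    int main(void) { return select_fallback_cpu() < 0; }
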
2221 p->state = TASK_RUNNING; in ttwu_do_wakeup()
2517 try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) in try_to_wake_up() argument
2535 if (!(p->state & state)) in try_to_wake_up()
2541 p->state = TASK_RUNNING; in try_to_wake_up()
2554 if (!(p->state & state)) in try_to_wake_up()
2621 p->state = TASK_WAKING; in try_to_wake_up()
2672 int wake_up_state(struct task_struct *p, unsigned int state) in wake_up_state() argument
2674 return try_to_wake_up(p, state, 0); in wake_up_state()
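
The wakeup core (2221-2674): try_to_wake_up() refuses the wakeup unless the task's current state falls inside the caller's mask, testing p->state & state twice (2535 on the p == current fast path, 2554 again under the pi_lock); on success the task passes through TASK_WAKING (2621) and ttwu_do_wakeup() lands it in TASK_RUNNING (2221). wake_up_state() is a thin wrapper forwarding the mask. A C11 model of the mask gate and transitions, with hypothetical names and no locking or enqueueing shown; the flag values follow the kernel's convention:

    /* Model of the try_to_wake_up() state-mask gate; hypothetical
     * userspace code. The real function tests the mask twice around
     * its locking; the model tests it once. */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define TASK_RUNNING         0x0000
    #define TASK_INTERRUPTIBLE   0x0001
    #define TASK_UNINTERRUPTIBLE 0x0002
    #define TASK_WAKING          0x0200

    struct task { _Atomic unsigned int state; };

    static bool model_try_to_wake_up(struct task *p, unsigned int state_mask)
    {
            unsigned int cur = atomic_load(&p->state);

            if (!(cur & state_mask))                /* 2535/2554 */
                    return false;

            atomic_store(&p->state, TASK_WAKING);   /* 2621 */
            /* ...select a CPU, enqueue... then ttwu_do_wakeup(): */
            atomic_store(&p->state, TASK_RUNNING);  /* 2221 */
            return true;
    }

    static bool model_wake_up_state(struct task *p, unsigned int state_mask)
    {
            return model_try_to_wake_up(p, state_mask);  /* 2672-2674 */
    }

    int main(void)
    {
            struct task t;
            atomic_init(&t.state, TASK_UNINTERRUPTIBLE);

            /* Mask excludes the task's state: wakeup refused. */
            printf("%d\n", model_wake_up_state(&t, TASK_INTERRUPTIBLE));
            /* Mask matches: task wakes to TASK_RUNNING. */
            printf("%d\n", model_wake_up_state(&t, TASK_UNINTERRUPTIBLE));
            return 0;
    }
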
2743 int state = static_branch_likely(&sched_numa_balancing); in sysctl_numa_balancing() local
2749 t.data = &state; in sysctl_numa_balancing()
2754 set_numabalancing_state(state); in sysctl_numa_balancing()
2818 int state = static_branch_likely(&sched_schedstats); in sysctl_schedstats() local
2824 t.data = &state; in sysctl_schedstats()
2829 set_schedstats(state); in sysctl_schedstats()
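
Both sysctl handlers (2743-2829) share one shape: snapshot the static branch into a local int state, point the table copy's data at that local so the proc handler reads or rewrites it, and push the value back through the setter only after a successful write. A hedged userspace sketch of the pattern, with stand-ins for the static branch and proc_dointvec_minmax():

    /* Sketch of the sysctl_numa_balancing()/sysctl_schedstats() pattern;
     * userspace stand-ins, hypothetical names. */
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    static bool feature_enabled;                 /* plays the static branch */
    static void set_feature_state(bool on) { feature_enabled = on; }

    /* Stand-in for proc_dointvec_minmax(): on write, parse into *data. */
    static int fake_proc_handler(int *data, bool write, const char *buf)
    {
            if (!write) {
                    printf("%d\n", *data);       /* read: report snapshot */
                    return 0;
            }
            *data = atoi(buf) ? 1 : 0;
            return 0;
    }

    static int sysctl_feature(bool write, const char *buf)
    {
            int state = feature_enabled;         /* snapshot into a local */
            int err = fake_proc_handler(&state, write, buf);

            if (err)
                    return err;
            if (write)
                    set_feature_state(state);    /* apply only on write */
            return 0;
    }

    int main(void)
    {
            sysctl_feature(true, "1");   /* echo 1 > /proc/sys/... */
            sysctl_feature(false, NULL); /* cat /proc/sys/...      */
            return 0;
    }
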
2850 p->state = TASK_NEW; in sched_fork()
2950 p->state = TASK_RUNNING; in wake_up_new_task()
3220 prev_state = prev->state; in finish_task_switch()
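
Lines 2850-3220 trace the task lifecycle: sched_fork() parks the child in TASK_NEW, where it exists but cannot yet run, wake_up_new_task() performs the only transition out of TASK_NEW into TASK_RUNNING, and finish_task_switch() samples prev->state early so a task that set TASK_DEAD can be torn down safely after the switch. A minimal model of the NEW-to-RUNNING hand-off, as hypothetical userspace code:

    /* Sketch of the TASK_NEW -> TASK_RUNNING hand-off; hypothetical model. */
    #include <assert.h>
    #include <stdio.h>

    enum task_state { TASK_NEW, TASK_RUNNING, TASK_DEAD };

    struct task { enum task_state state; };

    static void model_sched_fork(struct task *p)
    {
            p->state = TASK_NEW;     /* visible, but not runnable yet */
    }

    static void model_wake_up_new_task(struct task *p)
    {
            assert(p->state == TASK_NEW);
            p->state = TASK_RUNNING; /* the only exit from TASK_NEW */
    }

    int main(void)
    {
            struct task child;
            model_sched_fork(&child);
            model_wake_up_new_task(&child);
            printf("child running: %d\n", child.state == TASK_RUNNING);
            return 0;
    }
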
3620 atomic_t state; member
3699 os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING); in sched_tick_remote()
3716 os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING); in sched_tick_start()
3738 os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING); in sched_tick_stop()
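
The remote-tick hits (3620-3738) form a small lock-free state machine over an atomic_t state member: sched_tick_start() atomic_xchg()s to RUNNING and queues the worker only on an OFFLINE-to-RUNNING edge, sched_tick_stop() xchg()s to OFFLINING, and the worker's atomic_fetch_add_unless(..., -1, RUNNING) acknowledges the stop by stepping OFFLINING down to OFFLINE (the enum is laid out so RUNNING is OFFLINING + 1). A C11 model follows; since stdatomic.h has no fetch_add_unless, it is rebuilt from a CAS loop:

    /* C11 model of the TICK_SCHED_REMOTE_* state machine; hypothetical
     * userspace code. */
    #include <stdatomic.h>
    #include <stdio.h>

    enum { REMOTE_OFFLINE, REMOTE_OFFLINING, REMOTE_RUNNING };

    /* Rebuild of atomic_fetch_add_unless(): add unless *v == unless. */
    static int fetch_add_unless(_Atomic int *v, int add, int unless)
    {
            int old = atomic_load(v);
            while (old != unless &&
                   !atomic_compare_exchange_weak(v, &old, old + add))
                    ;
            return old;
    }

    static _Atomic int tick_state = REMOTE_OFFLINE;

    static void model_tick_start(void)      /* 3716 */
    {
            int os = atomic_exchange(&tick_state, REMOTE_RUNNING);
            if (os == REMOTE_OFFLINE)
                    printf("queue the remote tick worker\n");
    }

    static void model_tick_stop(void)       /* 3738 */
    {
            int os = atomic_exchange(&tick_state, REMOTE_OFFLINING);
            if (os == REMOTE_OFFLINE)
                    printf("WARN: stopping a tick that never ran\n");
    }

    static void model_tick_worker(void)     /* 3699 */
    {
            /* Decrement unless still RUNNING: OFFLINING -> OFFLINE. */
            int os = fetch_add_unless(&tick_state, -1, REMOTE_RUNNING);
            if (os == REMOTE_RUNNING)
                    printf("still running: requeue worker\n");
            else
                    printf("offlining acknowledged: worker exits\n");
    }

    int main(void)
    {
            model_tick_start();
            model_tick_worker();  /* requeues                          */
            model_tick_stop();
            model_tick_worker();  /* drops OFFLINING to OFFLINE, exits */
            return 0;
    }
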
3883 if (!preempt && prev->state && prev->non_block_count) { in schedule_debug()
4035 if (!preempt && prev->state) { in __schedule()
4036 if (signal_pending_state(prev->state, prev)) { in __schedule()
4037 prev->state = TASK_RUNNING; in __schedule()
4106 if (!tsk->state) in sched_submit_work()
4172 WARN_ON_ONCE(current->state); in schedule_idle()
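
The __schedule() cluster (3883-4172) is the deactivation decision: only a voluntary switch (!preempt) with a non-zero prev->state dequeues the task, and signal_pending_state() can veto the sleep, snapping the state back to TASK_RUNNING so a signalled task services the signal instead of blocking. schedule_debug()'s extra non_block_count test catches sleeping inside a non-blocking section, sched_submit_work() skips its unplug work when the task is staying runnable, and schedule_idle() asserts the idle task never carries a sleep state. A hedged model of just the signal veto (userspace, hypothetical names; only the interruptible case of signal_pending_state() is modelled):

    /* Sketch of the __schedule() block-or-stay-runnable decision at
     * 4035-4037; hypothetical userspace model. */
    #include <stdbool.h>
    #include <stdio.h>

    #define TASK_RUNNING       0x0000
    #define TASK_INTERRUPTIBLE 0x0001

    struct task { unsigned int state; bool signal_pending; };

    /* Roughly signal_pending_state(): interruptible sleeps honour
     * signals; the TASK_WAKEKILL/fatal-signal case is elided. */
    static bool model_signal_pending_state(unsigned int state,
                                           const struct task *p)
    {
            return (state & TASK_INTERRUPTIBLE) && p->signal_pending;
    }

    static void model_schedule_tail(struct task *prev, bool preempt)
    {
            if (!preempt && prev->state) {       /* voluntary sleep */
                    if (model_signal_pending_state(prev->state, prev))
                            prev->state = TASK_RUNNING;  /* signal wins */
                    else
                            printf("deactivate task (dequeue)\n");
            }
    }

    int main(void)
    {
            struct task t = { TASK_INTERRUPTIBLE, true };
            model_schedule_tail(&t, false);
            printf("stayed runnable: %d\n", t.state == TASK_RUNNING);
            return 0;
    }
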
5727 if (task_running(p_rq, p) || p->state) in yield_to()
5930 if (p->state == TASK_RUNNING) in sched_show_task()
5958 if (!(p->state & state_filter)) in state_filter_match()
5965 if (state_filter == TASK_UNINTERRUPTIBLE && p->state == TASK_IDLE) in state_filter_match()
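
state_filter_match() (5958-5965) is a bitmask test with one carve-out: TASK_IDLE is TASK_UNINTERRUPTIBLE | TASK_NOLOAD, so idle kthreads would otherwise appear in a plain D-state dump; the explicit comparison hides them. A compilable sketch using the kernel's flag values:

    /* Sketch of state_filter_match(); flag values follow the kernel's. */
    #include <stdbool.h>
    #include <stdio.h>

    #define TASK_UNINTERRUPTIBLE 0x0002
    #define TASK_NOLOAD          0x0400
    #define TASK_IDLE            (TASK_UNINTERRUPTIBLE | TASK_NOLOAD)

    static bool model_state_filter_match(unsigned int filter,
                                         unsigned int state)
    {
            if (!filter)
                    return true;              /* no filter: dump all */
            if (!(state & filter))            /* 5958 */
                    return false;
            /* 5965: idle kthreads carry TASK_UNINTERRUPTIBLE too;
             * hide them from a plain D-state dump. */
            if (filter == TASK_UNINTERRUPTIBLE && state == TASK_IDLE)
                    return false;
            return true;
    }

    int main(void)
    {
            printf("D task:       %d\n",
                   model_state_filter_match(TASK_UNINTERRUPTIBLE,
                                            TASK_UNINTERRUPTIBLE));
            printf("idle kthread: %d\n",
                   model_state_filter_match(TASK_UNINTERRUPTIBLE,
                                            TASK_IDLE));
            return 0;
    }
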
6028 idle->state = TASK_RUNNING; in init_idle()
6738 WARN_ONCE(current->state != TASK_RUNNING && current->task_state_change, in __might_sleep()
6741 current->state, in __might_sleep()
7153 if (task->state == TASK_NEW) in cpu_cgroup_can_attach()
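
The last hits close the loop: init_idle() pins the idle task at TASK_RUNNING, __might_sleep() (6738-6741) warns when a blocking call is made after the caller has already set a non-running state (the sleep would silently clobber it; task_state_change records where the state was set), and cpu_cgroup_can_attach() refuses tasks still in TASK_NEW, which are not fully set up yet. A sketch of the __might_sleep() check, as a userspace model with hypothetical names:

    /* Sketch of the __might_sleep() state check at 6738-6741;
     * hypothetical userspace model. */
    #include <stdio.h>

    #define TASK_RUNNING       0x0000
    #define TASK_INTERRUPTIBLE 0x0001

    struct task { unsigned int state; const char *task_state_change; };

    static void model_might_sleep(const struct task *cur, const char *site)
    {
            if (cur->state != TASK_RUNNING && cur->task_state_change)
                    fprintf(stderr,
                            "do not call blocking ops when !TASK_RUNNING; "
                            "state=%u set at %s, might_sleep at %s\n",
                            cur->state, cur->task_state_change, site);
    }

    int main(void)
    {
            struct task t = { TASK_INTERRUPTIBLE, "prepare_to_wait()" };
            model_might_sleep(&t, "a GFP_KERNEL allocation");
            return 0;
    }
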