
Searched refs:state (Results 1 – 25 of 72) sorted by relevance


/kernel/power/
suspend.c
105 static bool valid_state(suspend_state_t state) in valid_state() argument
112 return suspend_ops && suspend_ops->valid && suspend_ops->valid(state); in valid_state()
173 int suspend_valid_only_mem(suspend_state_t state) in suspend_valid_only_mem() argument
175 return state == PM_SUSPEND_MEM; in suspend_valid_only_mem()
179 static bool sleep_state_supported(suspend_state_t state) in sleep_state_supported() argument
181 return state == PM_SUSPEND_FREEZE || (suspend_ops && suspend_ops->enter); in sleep_state_supported()
184 static int platform_suspend_prepare(suspend_state_t state) in platform_suspend_prepare() argument
186 return state != PM_SUSPEND_FREEZE && suspend_ops->prepare ? in platform_suspend_prepare()
190 static int platform_suspend_prepare_late(suspend_state_t state) in platform_suspend_prepare_late() argument
192 return state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->prepare ? in platform_suspend_prepare_late()
[all …]
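
Note: the suspend.c hits above are the core's side of the platform_suspend_ops contract; valid_state() and sleep_state_supported() only pass a suspend_state_t down if the platform registered hooks for it. A minimal sketch of the driver side follows, assuming a hypothetical platform (my_plat_enter, my_plat_suspend_ops and my_plat_pm_init are illustrative names, not from the listing):

#include <linux/init.h>
#include <linux/suspend.h>

/* Hypothetical platform hook; called with the state that .valid accepted. */
static int my_plat_enter(suspend_state_t state)
{
	/* state == PM_SUSPEND_MEM here, because .valid rejects everything else. */
	return 0;
}

static const struct platform_suspend_ops my_plat_suspend_ops = {
	.valid = suspend_valid_only_mem,	/* the helper shown in suspend.c above */
	.enter = my_plat_enter,
};

static int __init my_plat_pm_init(void)
{
	suspend_set_ops(&my_plat_suspend_ops);
	return 0;
}
late_initcall(my_plat_pm_init);
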
suspend_test.c
63 static void __init test_wakealarm(struct rtc_device *rtc, suspend_state_t state) in test_wakealarm() argument
97 if (state == PM_SUSPEND_MEM) { in test_wakealarm()
98 printk(info_test, pm_states[state]); in test_wakealarm()
99 status = pm_suspend(state); in test_wakealarm()
101 state = PM_SUSPEND_STANDBY; in test_wakealarm()
103 if (state == PM_SUSPEND_STANDBY) { in test_wakealarm()
104 printk(info_test, pm_states[state]); in test_wakealarm()
105 status = pm_suspend(state); in test_wakealarm()
107 state = PM_SUSPEND_FREEZE; in test_wakealarm()
109 if (state == PM_SUSPEND_FREEZE) { in test_wakealarm()
[all …]
main.c
331 suspend_state_t state; in decode_state() local
344 for (state = PM_SUSPEND_MIN; state < PM_SUSPEND_MAX; state++) { in decode_state()
345 const char *label = pm_states[state]; in decode_state()
348 return state; in decode_state()
358 suspend_state_t state; in state_store() local
370 state = decode_state(buf, n); in state_store()
371 if (state < PM_SUSPEND_MAX) in state_store()
372 error = pm_suspend(state); in state_store()
373 else if (state == PM_SUSPEND_MAX) in state_store()
383 power_attr(state);
[all …]
autosleep.c
89 int pm_autosleep_set_state(suspend_state_t state) in pm_autosleep_set_state() argument
93 if (state >= PM_SUSPEND_MAX) in pm_autosleep_set_state()
101 autosleep_state = state; in pm_autosleep_set_state()
105 if (state > PM_SUSPEND_ON) { in pm_autosleep_set_state()
power.h
195 extern int suspend_devices_and_enter(suspend_state_t state);
197 static inline int suspend_devices_and_enter(suspend_state_t state) in suspend_devices_and_enter() argument
292 extern int pm_autosleep_set_state(suspend_state_t state);
/kernel/
cpu.c
49 enum cpuhp_state state; member
103 static bool cpuhp_is_ap_state(enum cpuhp_state state) in cpuhp_is_ap_state() argument
109 return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU; in cpuhp_is_ap_state()
112 static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state) in cpuhp_get_step() argument
116 sp = cpuhp_is_ap_state(state) ? cpuhp_ap_states : cpuhp_bp_states; in cpuhp_get_step()
117 return sp + state; in cpuhp_get_step()
128 static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state, in cpuhp_invoke_callback() argument
132 struct cpuhp_step *step = cpuhp_get_step(state); in cpuhp_invoke_callback()
141 trace_cpuhp_enter(cpu, st->target, state, cb); in cpuhp_invoke_callback()
143 trace_cpuhp_exit(cpu, st->state, state, ret); in cpuhp_invoke_callback()
[all …]
cgroup_freezer.c
45 unsigned int state; member
70 ret = task_freezer(task)->state & CGROUP_FREEZING; in cgroup_freezing()
76 static const char *freezer_state_strs(unsigned int state) in freezer_state_strs() argument
78 if (state & CGROUP_FROZEN) in freezer_state_strs()
80 if (state & CGROUP_FREEZING) in freezer_state_strs()
112 freezer->state |= CGROUP_FREEZER_ONLINE; in freezer_css_online()
114 if (parent && (parent->state & CGROUP_FREEZING)) { in freezer_css_online()
115 freezer->state |= CGROUP_FREEZING_PARENT | CGROUP_FROZEN; in freezer_css_online()
136 if (freezer->state & CGROUP_FREEZING) in freezer_css_offline()
139 freezer->state = 0; in freezer_css_offline()
[all …]
context_tracking.c
61 void __context_tracking_enter(enum ctx_state state) in __context_tracking_enter() argument
69 if ( __this_cpu_read(context_tracking.state) != state) { in __context_tracking_enter()
78 if (state == CONTEXT_USER) { in __context_tracking_enter()
97 __this_cpu_write(context_tracking.state, state); in __context_tracking_enter()
104 void context_tracking_enter(enum ctx_state state) in context_tracking_enter() argument
120 __context_tracking_enter(state); in context_tracking_enter()
144 void __context_tracking_exit(enum ctx_state state) in __context_tracking_exit() argument
149 if (__this_cpu_read(context_tracking.state) == state) { in __context_tracking_exit()
156 if (state == CONTEXT_USER) { in __context_tracking_exit()
161 __this_cpu_write(context_tracking.state, CONTEXT_KERNEL); in __context_tracking_exit()
[all …]
softirq.c
76 if (tsk && tsk->state != TASK_RUNNING) in wakeup_softirqd()
88 return tsk && (tsk->state == TASK_RUNNING); in ksoftirqd_running()
517 &t->state)) in tasklet_action()
553 &t->state)) in tasklet_hi_action()
575 t->state = 0; in tasklet_init()
587 while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) { in tasklet_kill()
590 } while (test_bit(TASKLET_STATE_SCHED, &t->state)); in tasklet_kill()
593 clear_bit(TASKLET_STATE_SCHED, &t->state); in tasklet_kill()
699 BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state)); in tasklet_kill_immediate()
701 if (!test_bit(TASKLET_STATE_SCHED, &t->state)) in tasklet_kill_immediate()
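
Note: the TASKLET_STATE_SCHED / TASKLET_STATE_RUN bits that softirq.c manipulates above belong to the tasklet API. A minimal sketch of typical driver-side usage, assuming the pre-5.9 tasklet callback signature (my_tasklet_fn, my_dev_tasklet and the surrounding helpers are illustrative names):

#include <linux/interrupt.h>

/* Runs in softirq context after tasklet_action() has cleared TASKLET_STATE_SCHED. */
static void my_tasklet_fn(unsigned long data)
{
	/* deferred bottom-half work goes here */
}

static DECLARE_TASKLET(my_dev_tasklet, my_tasklet_fn, 0);

/* Typically called from the interrupt handler's top half. */
static void my_dev_irq_kick(void)
{
	tasklet_schedule(&my_dev_tasklet);
}

/* tasklet_kill() waits for the SCHED/RUN bits shown above to clear before teardown. */
static void my_dev_teardown(void)
{
	tasklet_kill(&my_dev_tasklet);
}
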
auditsc.c
443 enum audit_state *state, in audit_filter_rules() argument
700 *state = AUDIT_DISABLED; in audit_filter_rules()
703 *state = AUDIT_RECORD_CONTEXT; in audit_filter_rules()
716 enum audit_state state; in audit_filter_task() local
721 &state, true)) { in audit_filter_task()
722 if (state == AUDIT_RECORD_CONTEXT) in audit_filter_task()
725 return state; in audit_filter_task()
758 enum audit_state state; in audit_filter_syscall() local
768 &state, false)) { in audit_filter_syscall()
770 ctx->current_state = state; in audit_filter_syscall()
[all …]
panic.c
47 static long no_blink(int state) in no_blink() argument
53 long (*panic_blink)(int state);
135 int state = 0; in panic() local
257 i += panic_blink(state ^= 1); in panic()
292 i += panic_blink(state ^= 1); in panic()
module.c
320 BUG_ON(mod && mod->state == MODULE_STATE_UNFORMED); in strong_try_module_get()
321 if (mod && mod->state == MODULE_STATE_COMING) in strong_try_module_get()
479 if (mod->state == MODULE_STATE_UNFORMED) in each_symbol_section()
598 if (!even_unformed && mod->state == MODULE_STATE_UNFORMED) in find_module_all()
680 if (mod->state == MODULE_STATE_UNFORMED) in is_module_percpu_address()
908 mod->state = MODULE_STATE_GOING; in try_stop_module()
961 if (mod->state != MODULE_STATE_LIVE) { in SYSCALL_DEFINE2()
1165 const char *state = "unknown"; in show_initstate() local
1167 switch (mk->mod->state) { in show_initstate()
1169 state = "live"; in show_initstate()
[all …]
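
Note: the MODULE_STATE_* values compared in module.c above are also what a module notifier receives; this is how subsystems such as ftrace and livepatch track modules as they load and unload. A small sketch of a hypothetical subscriber (my_module_cb, my_module_nb and my_watch_init are illustrative names):

#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/printk.h>

/* val is one of the MODULE_STATE_* values seen above; data is the module itself. */
static int my_module_cb(struct notifier_block *nb, unsigned long val, void *data)
{
	struct module *mod = data;

	if (val == MODULE_STATE_COMING)
		pr_info("module %s is coming\n", mod->name);
	else if (val == MODULE_STATE_GOING)
		pr_info("module %s is going\n", mod->name);
	return NOTIFY_OK;
}

static struct notifier_block my_module_nb = {
	.notifier_call = my_module_cb,
};

static int __init my_watch_init(void)
{
	return register_module_notifier(&my_module_nb);
}
module_init(my_watch_init);
MODULE_LICENSE("GPL");
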
/kernel/livepatch/
core.c
352 if (WARN_ON(func->state != KLP_ENABLED)) in klp_disable_func()
378 func->state = KLP_DISABLED; in klp_disable_func()
389 if (WARN_ON(func->state != KLP_DISABLED)) in klp_enable_func()
437 func->state = KLP_ENABLED; in klp_enable_func()
453 if (func->state == KLP_ENABLED) in klp_disable_object()
456 obj->state = KLP_DISABLED; in klp_disable_object()
464 if (WARN_ON(obj->state != KLP_DISABLED)) in klp_enable_object()
477 obj->state = KLP_ENABLED; in klp_enable_object()
488 list_next_entry(patch, list)->state == KLP_ENABLED) in __klp_disable_patch()
494 if (obj->state == KLP_ENABLED) in __klp_disable_patch()
[all …]
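
Note: the KLP_ENABLED/KLP_DISABLED transitions above are driven from a livepatch module's registration calls. A condensed sketch along the lines of samples/livepatch/livepatch-sample.c, assuming a kernel of this era with separate klp_register_patch()/klp_enable_patch() calls; the patched symbol cmdline_proc_show and the my_livepatch_init name are illustrative:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/livepatch.h>
#include <linux/seq_file.h>

/* Replacement for the vmlinux function named in .old_name below. */
static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%s\n", "this has been live patched");
	return 0;
}

static struct klp_func funcs[] = {
	{ .old_name = "cmdline_proc_show", .new_func = livepatch_cmdline_proc_show, },
	{ }
};

static struct klp_object objs[] = {
	{ .funcs = funcs, },	/* .name == NULL means vmlinux */
	{ }
};

static struct klp_patch patch = {
	.mod = THIS_MODULE,
	.objs = objs,
};

static int __init my_livepatch_init(void)
{
	int ret = klp_register_patch(&patch);

	if (ret)
		return ret;
	ret = klp_enable_patch(&patch);	/* flips func->state to KLP_ENABLED */
	if (ret)
		WARN_ON(klp_unregister_patch(&patch));
	return ret;
}
module_init(my_livepatch_init);
MODULE_LICENSE("GPL");
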
/kernel/sched/
completion.c
62 long (*action)(long), long timeout, int state) in do_wait_for_common() argument
69 if (signal_pending_state(state, current)) { in do_wait_for_common()
73 __set_current_state(state); in do_wait_for_common()
88 long (*action)(long), long timeout, int state) in __wait_for_common() argument
93 timeout = do_wait_for_common(x, action, timeout, state); in __wait_for_common()
99 wait_for_common(struct completion *x, long timeout, int state) in wait_for_common() argument
101 return __wait_for_common(x, schedule_timeout, timeout, state); in wait_for_common()
105 wait_for_common_io(struct completion *x, long timeout, int state) in wait_for_common_io() argument
107 return __wait_for_common(x, io_schedule_timeout, timeout, state); in wait_for_common_io()
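
Note: the do_wait_for_common()/wait_for_common() pair above is the sleeping side of the completion API; the state argument is the task state the waiter sleeps in. A minimal usage sketch (my_done, waiter_thread and producer_path are illustrative names):

#include <linux/completion.h>
#include <linux/errno.h>

static DECLARE_COMPLETION(my_done);

/* Sleeps via wait_for_common(..., TASK_INTERRUPTIBLE) shown above. */
static int waiter_thread(void *unused)
{
	if (wait_for_completion_interruptible(&my_done))
		return -ERESTARTSYS;
	/* the event has happened */
	return 0;
}

/* Called from whichever context produces the event; wakes one waiter. */
static void producer_path(void)
{
	complete(&my_done);
}
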
swait.c
82 void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int state) in prepare_to_swait() argument
88 set_current_state(state); in prepare_to_swait()
93 long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state) in prepare_to_swait_event() argument
95 if (signal_pending_state(state, current)) in prepare_to_swait_event()
98 prepare_to_swait(q, wait, state); in prepare_to_swait_event()
wait.c
172 prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state) in prepare_to_wait() argument
180 set_current_state(state); in prepare_to_wait()
186 prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state) in prepare_to_wait_exclusive() argument
194 set_current_state(state); in prepare_to_wait_exclusive()
208 long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state) in prepare_to_wait_event() argument
214 if (unlikely(signal_pending_state(state, current))) { in prepare_to_wait_event()
236 set_current_state(state); in prepare_to_wait_event()
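
Note: prepare_to_wait() and prepare_to_wait_event() above take the task state that an open-coded wait loop will sleep in. A minimal sketch of that loop, assuming a hypothetical queue and condition flag (my_wq, my_condition, wait_for_my_condition are illustrative names):

#include <linux/sched.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(my_wq);
static int my_condition;

static void wait_for_my_condition(void)
{
	DEFINE_WAIT(wait);

	for (;;) {
		/* enqueue and switch to TASK_INTERRUPTIBLE in one step */
		prepare_to_wait(&my_wq, &wait, TASK_INTERRUPTIBLE);
		if (my_condition)
			break;
		schedule();
	}
	/* back to TASK_RUNNING and off the queue */
	finish_wait(&my_wq, &wait);
}

The waking side would set my_condition and call wake_up(&my_wq).
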
/kernel/locking/
qspinlock_paravirt.h
54 u8 state; member
266 return READ_ONCE(prev->state) != vcpu_running; in pv_wait_early()
279 pn->state = vcpu_running; in pv_init_node()
314 smp_store_mb(pn->state, vcpu_halted); in pv_wait_node()
319 pv_wait(&pn->state, vcpu_halted); in pv_wait_node()
327 cmpxchg(&pn->state, vcpu_halted, vcpu_running); in pv_wait_node()
365 if (cmpxchg(&pn->state, vcpu_halted, vcpu_hashed) != vcpu_halted) in pv_kick_node()
399 if (READ_ONCE(pn->state) == vcpu_hashed) in pv_wait_head_or_lock()
412 WRITE_ONCE(pn->state, vcpu_running); in pv_wait_head_or_lock()
452 WRITE_ONCE(pn->state, vcpu_hashed); in pv_wait_head_or_lock()
rtmutex.c
1192 __rt_mutex_slowlock(struct rt_mutex *lock, int state, in __rt_mutex_slowlock() argument
1207 if (unlikely(state == TASK_INTERRUPTIBLE)) { in __rt_mutex_slowlock()
1224 set_current_state(state); in __rt_mutex_slowlock()
1255 rt_mutex_slowlock(struct rt_mutex *lock, int state, in rt_mutex_slowlock() argument
1283 set_current_state(state); in rt_mutex_slowlock()
1293 ret = __rt_mutex_slowlock(lock, state, timeout, &waiter); in rt_mutex_slowlock()
1430 rt_mutex_fastlock(struct rt_mutex *lock, int state, in rt_mutex_fastlock() argument
1431 int (*slowfn)(struct rt_mutex *lock, int state, in rt_mutex_fastlock()
1439 return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK); in rt_mutex_fastlock()
1443 rt_mutex_timed_fastlock(struct rt_mutex *lock, int state, in rt_mutex_timed_fastlock() argument
[all …]
/kernel/debug/
debug_core.c
113 [0 ... KGDB_MAX_BREAKPOINTS-1] = { .state = BP_UNDEFINED }
256 if (kgdb_break[i].state != BP_SET) in dbg_activate_sw_breakpoints()
268 kgdb_break[i].state = BP_ACTIVE; in dbg_activate_sw_breakpoints()
283 if ((kgdb_break[i].state == BP_SET) && in dbg_set_sw_break()
288 if (kgdb_break[i].state == BP_REMOVED && in dbg_set_sw_break()
297 if (kgdb_break[i].state == BP_UNDEFINED) { in dbg_set_sw_break()
307 kgdb_break[breakno].state = BP_SET; in dbg_set_sw_break()
321 if (kgdb_break[i].state != BP_ACTIVE) in dbg_deactivate_sw_breakpoints()
331 kgdb_break[i].state = BP_SET; in dbg_deactivate_sw_breakpoints()
341 if ((kgdb_break[i].state == BP_SET) && in dbg_remove_sw_break()
[all …]
/kernel/debug/kdb/
kdb_support.c
624 char state; in kdb_task_state_char() local
631 state = (p->state == 0) ? 'R' : in kdb_task_state_char()
632 (p->state < 0) ? 'U' : in kdb_task_state_char()
633 (p->state & TASK_UNINTERRUPTIBLE) ? 'D' : in kdb_task_state_char()
634 (p->state & TASK_STOPPED) ? 'T' : in kdb_task_state_char()
635 (p->state & TASK_TRACED) ? 'C' : in kdb_task_state_char()
638 (p->state & TASK_INTERRUPTIBLE) ? 'S' : '?'; in kdb_task_state_char()
644 state = 'I'; /* idle task */ in kdb_task_state_char()
646 } else if (!p->mm && state == 'S') { in kdb_task_state_char()
647 state = 'M'; /* sleeping system daemon */ in kdb_task_state_char()
[all …]
/kernel/bpf/
verifier.c
193 static void print_verifier_state(struct bpf_verifier_state *state) in print_verifier_state() argument
200 reg = &state->regs[i]; in print_verifier_state()
227 if (state->stack_slot_type[i] == STACK_SPILL) in print_verifier_state()
229 reg_type_str[state->spilled_regs[i / BPF_REG_SIZE].type]); in print_verifier_state()
543 static int check_stack_write(struct bpf_verifier_state *state, int off, in check_stack_write() argument
552 is_spillable_regtype(state->regs[value_regno].type)) { in check_stack_write()
561 state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE] = in check_stack_write()
562 state->regs[value_regno]; in check_stack_write()
565 state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_SPILL; in check_stack_write()
568 state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE] = in check_stack_write()
[all …]
/kernel/time/
hrtimer.c
335 static bool hrtimer_fixup_init(void *addr, enum debug_obj_state state) in hrtimer_fixup_init() argument
339 switch (state) { in hrtimer_fixup_init()
354 static bool hrtimer_fixup_activate(void *addr, enum debug_obj_state state) in hrtimer_fixup_activate() argument
356 switch (state) { in hrtimer_fixup_activate()
369 static bool hrtimer_fixup_free(void *addr, enum debug_obj_state state) in hrtimer_fixup_free() argument
373 switch (state) { in hrtimer_fixup_free()
833 if (WARN_ON(timer->state & HRTIMER_STATE_ENQUEUED)) in hrtimer_forward()
873 timer->state = HRTIMER_STATE_ENQUEUED; in enqueue_hrtimer()
893 u8 state = timer->state; in __remove_hrtimer() local
895 timer->state = newstate; in __remove_hrtimer()
[all …]
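
Note: HRTIMER_STATE_ENQUEUED above is internal bookkeeping in enqueue_hrtimer()/__remove_hrtimer(); users only see it indirectly through the normal hrtimer calls. A short usage sketch (my_timer, my_timer_fn and the 100 ms expiry are illustrative, not from the listing):

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer my_timer;

/* Expiry callback; by the time it runs the core has already moved the timer
 * out of HRTIMER_STATE_ENQUEUED. */
static enum hrtimer_restart my_timer_fn(struct hrtimer *t)
{
	return HRTIMER_NORESTART;
}

static void my_timer_setup(void)
{
	hrtimer_init(&my_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	my_timer.function = my_timer_fn;
	hrtimer_start(&my_timer, ms_to_ktime(100), HRTIMER_MODE_REL);
}
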
tick-internal.h
45 enum clock_event_state state) in clockevent_set_state() argument
47 dev->state_use_accessors = state; in clockevent_set_state()
54 enum clock_event_state state);
tick-sched.h
76 extern int __tick_broadcast_oneshot_control(enum tick_broadcast_state state);
79 __tick_broadcast_oneshot_control(enum tick_broadcast_state state) in __tick_broadcast_oneshot_control() argument
/kernel/events/
core.c
881 if (sub->state >= PERF_EVENT_STATE_INACTIVE) { in perf_cgroup_mark_enabled()
1417 if (event->state < PERF_EVENT_STATE_INACTIVE || in update_event_times()
1418 event->group_leader->state < PERF_EVENT_STATE_INACTIVE) in update_event_times()
1440 if (event->state == PERF_EVENT_STATE_INACTIVE) in update_event_times()
1511 event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF : in perf_event__state_init()
1695 if (event->state > PERF_EVENT_STATE_OFF) in list_del_event()
1696 event->state = PERF_EVENT_STATE_OFF; in list_del_event()
1753 return event->state == PERF_EVENT_STATE_DEAD; in is_orphaned_event()
1807 if (event->state == PERF_EVENT_STATE_INACTIVE && in event_sched_out()
1814 if (event->state != PERF_EVENT_STATE_ACTIVE) in event_sched_out()
[all …]
