/kernel/power/ |
D | suspend.c |
    162  static bool valid_state(suspend_state_t state)  in valid_state()  argument
    169  return suspend_ops && suspend_ops->valid && suspend_ops->valid(state);  in valid_state()
    186  suspend_state_t state;  in mem_sleep_default_setup()  local
    188  for (state = PM_SUSPEND_TO_IDLE; state <= PM_SUSPEND_MEM; state++)  in mem_sleep_default_setup()
    189  if (mem_sleep_labels[state] &&  in mem_sleep_default_setup()
    190  !strcmp(str, mem_sleep_labels[state])) {  in mem_sleep_default_setup()
    191  mem_sleep_default = state;  in mem_sleep_default_setup()
    232  int suspend_valid_only_mem(suspend_state_t state)  in suspend_valid_only_mem()  argument
    234  return state == PM_SUSPEND_MEM;  in suspend_valid_only_mem()
    238  static bool sleep_state_supported(suspend_state_t state)  in sleep_state_supported()  argument
    [all …]
|
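For context on the suspend.c hits: valid_state() only succeeds once a platform has registered a struct platform_suspend_ops whose ->valid callback accepts the requested state, and suspend_valid_only_mem() (line 232 above) is the stock helper for platforms that support only suspend-to-RAM. A minimal sketch of that registration, assuming a hypothetical platform driver (the myplat_* names are illustrative, not from the listing):

```c
#include <linux/init.h>
#include <linux/suspend.h>

static int myplat_suspend_enter(suspend_state_t state)
{
	/* put the SoC into its memory-retention low-power mode here */
	return 0;
}

static const struct platform_suspend_ops myplat_suspend_ops = {
	.valid = suspend_valid_only_mem,	/* accept only PM_SUSPEND_MEM */
	.enter = myplat_suspend_enter,
};

static int __init myplat_pm_init(void)
{
	/* after this, valid_state(PM_SUSPEND_MEM) returns true */
	suspend_set_ops(&myplat_suspend_ops);
	return 0;
}
late_initcall(myplat_pm_init);
```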
D | main.c |
    149  suspend_state_t state;  in decode_suspend_state()  local
    156  for (state = PM_SUSPEND_MIN; state < PM_SUSPEND_MAX; state++) {  in decode_suspend_state()
    157  const char *label = mem_sleep_states[state];  in decode_suspend_state()
    160  return state;  in decode_suspend_state()
    169  suspend_state_t state;  in mem_sleep_store()  local
    181  state = decode_suspend_state(buf, n);  in mem_sleep_store()
    182  if (state < PM_SUSPEND_MAX && state > PM_SUSPEND_ON)  in mem_sleep_store()
    183  mem_sleep_current = state;  in mem_sleep_store()
    575  suspend_state_t state;  in decode_state()  local
    588  for (state = PM_SUSPEND_MIN; state < PM_SUSPEND_MAX; state++) {  in decode_state()
    [all …]
|
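The mem_sleep_store() and decode_state() hits are the kernel side of the /sys/power interface. A minimal userspace sketch of driving it, assuming the documented sysfs strings ("deep", "mem") and omitting error handling for brevity:

```c
#include <stdio.h>

int main(void)
{
	FILE *f;

	/* parsed by mem_sleep_store() -> decode_suspend_state() */
	f = fopen("/sys/power/mem_sleep", "w");
	if (f) {
		fputs("deep", f);	/* prefer suspend-to-RAM when "mem" is written */
		fclose(f);
	}

	/* parsed by decode_state(), which then drives pm_suspend() */
	f = fopen("/sys/power/state", "w");
	if (f) {
		fputs("mem", f);
		fclose(f);
	}
	return 0;
}
```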
D | suspend_test.c |
    62   static void __init test_wakealarm(struct rtc_device *rtc, suspend_state_t state)  in test_wakealarm()  argument
    96   if (state == PM_SUSPEND_MEM) {  in test_wakealarm()
    97   printk(info_test, pm_states[state]);  in test_wakealarm()
    98   status = pm_suspend(state);  in test_wakealarm()
    100  state = PM_SUSPEND_STANDBY;  in test_wakealarm()
    102  if (state == PM_SUSPEND_STANDBY) {  in test_wakealarm()
    103  printk(info_test, pm_states[state]);  in test_wakealarm()
    104  status = pm_suspend(state);  in test_wakealarm()
    106  state = PM_SUSPEND_TO_IDLE;  in test_wakealarm()
    108  if (state == PM_SUSPEND_TO_IDLE) {  in test_wakealarm()
    [all …]
|
D | autosleep.c |
    90   int pm_autosleep_set_state(suspend_state_t state)  in pm_autosleep_set_state()  argument
    94   if (state >= PM_SUSPEND_MAX)  in pm_autosleep_set_state()
    102  autosleep_state = state;  in pm_autosleep_set_state()
    106  if (state > PM_SUSPEND_ON) {  in pm_autosleep_set_state()
|
/kernel/ |
D | cpu.c |
    56   enum cpuhp_state state;  member
    132  static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)  in cpuhp_get_step()  argument
    134  return cpuhp_hp_states + state;  in cpuhp_get_step()
    147  static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,  in cpuhp_invoke_callback()  argument
    152  struct cpuhp_step *step = cpuhp_get_step(state);  in cpuhp_invoke_callback()
    157  if (st->fail == state) {  in cpuhp_invoke_callback()
    171  trace_cpuhp_enter(cpu, st->target, state, cb);  in cpuhp_invoke_callback()
    173  trace_cpuhp_exit(cpu, st->state, state, ret);  in cpuhp_invoke_callback()
    183  trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);  in cpuhp_invoke_callback()
    185  trace_cpuhp_exit(cpu, st->state, state, ret);  in cpuhp_invoke_callback()
    [all …]
|
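cpuhp_invoke_callback() runs the per-state callbacks that subsystems register against the hotplug state machine. A minimal sketch of the registration side, using the public cpuhp_setup_state() API with a dynamically allocated state (the mydrv_* names are illustrative only):

```c
#include <linux/cpuhotplug.h>

static int mydrv_cpu_online(unsigned int cpu)
{
	/* allocate/enable per-CPU resources for @cpu */
	return 0;
}

static int mydrv_cpu_offline(unsigned int cpu)
{
	/* quiesce and release per-CPU resources for @cpu */
	return 0;
}

static int __init mydrv_init(void)
{
	int ret;

	/* callbacks are invoked through cpuhp_invoke_callback() on every
	 * CPU that comes online or goes offline from now on */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mydrv:online",
				mydrv_cpu_online, mydrv_cpu_offline);
	return ret < 0 ? ret : 0;	/* >= 0 is the dynamically allocated state */
}
```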
D | context_tracking.c |
    62   void __context_tracking_enter(enum ctx_state state)  in __context_tracking_enter()  argument
    70   if ( __this_cpu_read(context_tracking.state) != state) {  in __context_tracking_enter()
    79   if (state == CONTEXT_USER) {  in __context_tracking_enter()
    98   __this_cpu_write(context_tracking.state, state);  in __context_tracking_enter()
    105  void context_tracking_enter(enum ctx_state state)  in context_tracking_enter()  argument
    121  __context_tracking_enter(state);  in context_tracking_enter()
    145  void __context_tracking_exit(enum ctx_state state)  in __context_tracking_exit()  argument
    150  if (__this_cpu_read(context_tracking.state) == state) {  in __context_tracking_exit()
    157  if (state == CONTEXT_USER) {  in __context_tracking_exit()
    162  __this_cpu_write(context_tracking.state, CONTEXT_KERNEL);  in __context_tracking_exit()
    [all …]
|
D | stackleak.c |
    26   int state = !static_branch_unlikely(&stack_erasing_bypass);  in stack_erasing_sysctl()  local
    27   int prev_state = state;  in stack_erasing_sysctl()
    29   table->data = &state;  in stack_erasing_sysctl()
    32   state = !!state;  in stack_erasing_sysctl()
    33   if (ret || !write || state == prev_state)  in stack_erasing_sysctl()
    36   if (state)  in stack_erasing_sysctl()
    42   state ? "enabled" : "disabled");  in stack_erasing_sysctl()
|
D | softirq.c |
    75   if (tsk && tsk->state != TASK_RUNNING)  in wakeup_softirqd()
    91   return tsk && (tsk->state == TASK_RUNNING) &&  in ksoftirqd_running()
    521  &t->state))  in tasklet_action_common()
    553  t->state = 0;  in tasklet_init()
    565  while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {  in tasklet_kill()
    568  } while (test_bit(TASKLET_STATE_SCHED, &t->state));  in tasklet_kill()
    571  clear_bit(TASKLET_STATE_SCHED, &t->state);  in tasklet_kill()
    626  BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));  in tasklet_kill_immediate()
    628  if (!test_bit(TASKLET_STATE_SCHED, &t->state))  in tasklet_kill_immediate()
|
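The TASKLET_STATE_SCHED/TASKLET_STATE_RUN bits manipulated above are driven by the ordinary tasklet API: tasklet_schedule() sets the SCHED bit, tasklet_kill() waits for it to clear. A minimal sketch of that usage with the legacy tasklet_init() form that matches the t->state = 0 hit at line 553 (the my_* names are illustrative):

```c
#include <linux/interrupt.h>

static void my_tasklet_fn(unsigned long data)
{
	/* deferred work, runs in softirq context */
}

static struct tasklet_struct my_tasklet;

static void my_bottom_half_setup(void)
{
	tasklet_init(&my_tasklet, my_tasklet_fn, 0);
}

static irqreturn_t my_irq_handler(int irq, void *dev)
{
	tasklet_schedule(&my_tasklet);	/* test_and_set_bit(TASKLET_STATE_SCHED, ...) */
	return IRQ_HANDLED;
}

static void my_teardown(void)
{
	tasklet_kill(&my_tasklet);	/* spins until the SCHED/RUN state bits clear */
}
```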
D | auditsc.c |
    442  enum audit_state *state,  in audit_filter_rules()  argument
    727  *state = AUDIT_DISABLED;  in audit_filter_rules()
    730  *state = AUDIT_RECORD_CONTEXT;  in audit_filter_rules()
    743  enum audit_state state;  in audit_filter_task()  local
    748  &state, true)) {  in audit_filter_task()
    749  if (state == AUDIT_RECORD_CONTEXT)  in audit_filter_task()
    752  return state;  in audit_filter_task()
    785  enum audit_state state;  in audit_filter_syscall()  local
    794  &state, false)) {  in audit_filter_syscall()
    796  ctx->current_state = state;  in audit_filter_syscall()
    [all …]
|
D | panic.c |
    63   static long no_blink(int state)  in no_blink()  argument
    69   long (*panic_blink)(int state);
    172  int state = 0;  in panic()  local
    311  i += panic_blink(state ^= 1);  in panic()
    347  i += panic_blink(state ^= 1);  in panic()
|
D | module.c |
    315   BUG_ON(mod && mod->state == MODULE_STATE_UNFORMED);  in strong_try_module_get()
    316   if (mod && mod->state == MODULE_STATE_COMING)  in strong_try_module_get()
    474   if (mod->state == MODULE_STATE_UNFORMED)  in each_symbol_section()
    620   if (!even_unformed && mod->state == MODULE_STATE_UNFORMED)  in find_module_all()
    693   if (mod->state == MODULE_STATE_UNFORMED)  in __is_module_percpu_address()
    944   mod->state = MODULE_STATE_GOING;  in try_stop_module()
    999   if (mod->state != MODULE_STATE_LIVE) {  in SYSCALL_DEFINE2()
    1194  const char *state = "unknown";  in show_initstate()  local
    1196  switch (mk->mod->state) {  in show_initstate()
    1198  state = "live";  in show_initstate()
    [all …]
|
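The mod->state transitions listed above (UNFORMED, COMING, LIVE, GOING) can be observed from other kernel code through the module notifier chain. A minimal sketch, assuming an illustrative my_module_cb callback:

```c
#include <linux/module.h>
#include <linux/notifier.h>

static int my_module_cb(struct notifier_block *nb, unsigned long action, void *data)
{
	struct module *mod = data;

	switch (action) {
	case MODULE_STATE_COMING:	/* set before init runs; see strong_try_module_get() */
		pr_info("%s is coming\n", mod->name);
		break;
	case MODULE_STATE_GOING:	/* set in try_stop_module() before removal */
		pr_info("%s is going\n", mod->name);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block my_module_nb = {
	.notifier_call = my_module_cb,
};

static int __init my_init(void)
{
	return register_module_notifier(&my_module_nb);
}
```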
/kernel/cgroup/ |
D | legacy_freezer.c |
    45   unsigned int state;  member
    70   ret = task_freezer(task)->state & CGROUP_FREEZING;  in cgroup_freezing()
    76   static const char *freezer_state_strs(unsigned int state)  in freezer_state_strs()  argument
    78   if (state & CGROUP_FROZEN)  in freezer_state_strs()
    80   if (state & CGROUP_FREEZING)  in freezer_state_strs()
    112  freezer->state |= CGROUP_FREEZER_ONLINE;  in freezer_css_online()
    114  if (parent && (parent->state & CGROUP_FREEZING)) {  in freezer_css_online()
    115  freezer->state |= CGROUP_FREEZING_PARENT | CGROUP_FROZEN;  in freezer_css_online()
    136  if (freezer->state & CGROUP_FREEZING)  in freezer_css_offline()
    139  freezer->state = 0;  in freezer_css_offline()
    [all …]
|
/kernel/bpf/ |
D | verifier.c |
    434  const struct bpf_func_state *state)  in print_verifier_state()  argument
    440  if (state->frameno)  in print_verifier_state()
    441  verbose(env, " frame%d:", state->frameno);  in print_verifier_state()
    443  reg = &state->regs[i];  in print_verifier_state()
    501  for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {  in print_verifier_state()
    507  if (state->stack[i].slot_type[j] != STACK_INVALID)  in print_verifier_state()
    510  state->stack[i].slot_type[j]];  in print_verifier_state()
    516  print_liveness(env, state->stack[i].spilled_ptr.live);  in print_verifier_state()
    517  if (state->stack[i].slot_type[0] == STACK_SPILL) {  in print_verifier_state()
    518  reg = &state->stack[i].spilled_ptr;  in print_verifier_state()
    [all …]
|
/kernel/sched/ |
D | completion.c |
    70   long (*action)(long), long timeout, int state)  in do_wait_for_common()  argument
    77   if (signal_pending_state(state, current)) {  in do_wait_for_common()
    81   __set_current_state(state);  in do_wait_for_common()
    97   long (*action)(long), long timeout, int state)  in __wait_for_common()  argument
    104  timeout = do_wait_for_common(x, action, timeout, state);  in __wait_for_common()
    113  wait_for_common(struct completion *x, long timeout, int state)  in wait_for_common()  argument
    115  return __wait_for_common(x, schedule_timeout, timeout, state);  in wait_for_common()
    119  wait_for_common_io(struct completion *x, long timeout, int state)  in wait_for_common_io()  argument
    121  return __wait_for_common(x, io_schedule_timeout, timeout, state);  in wait_for_common_io()
|
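wait_for_common() is the internal funnel behind the public completion API: the caller sleeps in the given task state until complete() is called. A minimal sketch of the public-facing usage (my_done/my_worker are illustrative):

```c
#include <linux/completion.h>
#include <linux/kthread.h>

static DECLARE_COMPLETION(my_done);

static int my_worker(void *arg)
{
	/* ... do the work ... */
	complete(&my_done);	/* wakes the waiter sleeping in do_wait_for_common() */
	return 0;
}

static int my_wait(void)
{
	kthread_run(my_worker, NULL, "my_worker");

	/* sleeps in TASK_UNINTERRUPTIBLE; the _interruptible variant passes
	 * TASK_INTERRUPTIBLE as the state argument instead */
	wait_for_completion(&my_done);
	return 0;
}
```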
D | swait.c |
    79   void prepare_to_swait_exclusive(struct swait_queue_head *q, struct swait_queue *wait, int state)  in prepare_to_swait_exclusive()  argument
    85   set_current_state(state);  in prepare_to_swait_exclusive()
    90   long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state)  in prepare_to_swait_event()  argument
    96   if (signal_pending_state(state, current)) {  in prepare_to_swait_event()
    105  set_current_state(state);  in prepare_to_swait_event()
|
D | psi.c |
    216   static bool test_state(unsigned int *tasks, enum psi_states state)  in test_state()  argument
    218   switch (state) {  in test_state()
    497   group->total[PSI_POLL][t->state], 0);  in init_triggers()
    517   if (group->polling_total[t->state] == total[t->state])  in update_triggers()
    529   growth = window_update(&t->win, now, total[t->state]);  in update_triggers()
    1011  enum psi_states state;  in psi_trigger_create()  local
    1019  state = PSI_IO_SOME + res * 2;  in psi_trigger_create()
    1021  state = PSI_IO_FULL + res * 2;  in psi_trigger_create()
    1025  if (state >= PSI_NONIDLE)  in psi_trigger_create()
    1041  t->state = state;  in psi_trigger_create()
    [all …]
|
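psi_trigger_create() parses trigger strings of the form "<some|full> <threshold us> <window us>" written to /proc/pressure/<resource>, and the armed trigger is consumed from userspace via poll(). A minimal userspace sketch along the lines of the kernel's PSI documentation (the 150 ms / 1 s values are only examples):

```c
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char trig[] = "some 150000 1000000";	/* 150 ms of stall per 1 s window */
	struct pollfd fds;

	fds.fd = open("/proc/pressure/memory", O_RDWR | O_NONBLOCK);
	if (fds.fd < 0)
		return 1;
	/* handled by psi_trigger_create(); include the terminating NUL */
	if (write(fds.fd, trig, strlen(trig) + 1) < 0)
		return 1;

	fds.events = POLLPRI;
	while (poll(&fds, 1, -1) > 0) {
		if (fds.revents & POLLPRI)
			printf("memory pressure threshold crossed\n");
	}
	return 0;
}
```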
D | wait.c |
    222  prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)  in prepare_to_wait()  argument
    230  set_current_state(state);  in prepare_to_wait()
    236  …re_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)  in prepare_to_wait_exclusive()  argument
    244  set_current_state(state);  in prepare_to_wait_exclusive()
    258  …repare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)  in prepare_to_wait_event()  argument
    264  if (signal_pending_state(state, current)) {  in prepare_to_wait_event()
    286  set_current_state(state);  in prepare_to_wait_event()
|
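prepare_to_wait()/finish_wait() exist to support the classic open-coded wait loop (the wait_event_*() macros generate the same shape through prepare_to_wait_event()). A minimal sketch of that loop, assuming an illustrative wait queue my_wq and condition my_cond:

```c
#include <linux/errno.h>
#include <linux/sched/signal.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(my_wq);
static bool my_cond;

static int my_wait_for_cond(void)
{
	DEFINE_WAIT(wait);

	for (;;) {
		/* queues the entry and sets the task state in one step */
		prepare_to_wait(&my_wq, &wait, TASK_INTERRUPTIBLE);
		if (my_cond)
			break;
		if (signal_pending(current))
			break;
		schedule();
	}
	finish_wait(&my_wq, &wait);	/* back to TASK_RUNNING, dequeued */
	return my_cond ? 0 : -ERESTARTSYS;
}

static void my_signal_cond(void)
{
	my_cond = true;
	wake_up(&my_wq);
}
```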
D | cputime.c |
    715  WARN_ON_ONCE(vtime->state == VTIME_INACTIVE);  in get_vtime_delta()
    763  vtime->state = VTIME_USER;  in vtime_user_enter()
    777  vtime->state = VTIME_SYS;  in vtime_user_exit()
    819  vtime->state = VTIME_INACTIVE;  in arch_vtime_task_switch()
    825  vtime->state = VTIME_SYS;  in arch_vtime_task_switch()
    837  vtime->state = VTIME_SYS;  in vtime_init_idle()
    856  if (vtime->state == VTIME_SYS && t->flags & PF_VCPU)  in task_gtime()
    888  if (vtime->state == VTIME_INACTIVE || is_idle_task(t))  in task_cputime()
    897  if (vtime->state == VTIME_USER || t->flags & PF_VCPU)  in task_cputime()
    899  else if (vtime->state == VTIME_SYS)  in task_cputime()
|
D | core.c |
    1682  if (task_running(rq, p) || p->state == TASK_WAKING) {  in __set_cpus_allowed_ptr()
    1714  WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&  in set_task_cpu()
    1722  WARN_ON_ONCE(p->state == TASK_RUNNING &&  in set_task_cpu()
    1920  if (match_state && unlikely(p->state != match_state))  in wait_task_inactive()
    1935  if (!match_state || p->state == match_state)  in wait_task_inactive()
    2035  enum { cpuset, possible, fail } state = cpuset;  in select_fallback_rq()  local
    2065  switch (state) {  in select_fallback_rq()
    2069  state = possible;  in select_fallback_rq()
    2075  state = fail;  in select_fallback_rq()
    2085  if (state != cpuset) {  in select_fallback_rq()
    [all …]
|
/kernel/locking/ |
D | qspinlock_paravirt.h |
    53   u8 state;  member
    272  return READ_ONCE(prev->state) != vcpu_running;  in pv_wait_early()
    285  pn->state = vcpu_running;  in pv_init_node()
    320  smp_store_mb(pn->state, vcpu_halted);  in pv_wait_node()
    325  pv_wait(&pn->state, vcpu_halted);  in pv_wait_node()
    333  cmpxchg(&pn->state, vcpu_halted, vcpu_running);  in pv_wait_node()
    380  if (cmpxchg_relaxed(&pn->state, vcpu_halted, vcpu_hashed)  in pv_kick_node()
    414  if (READ_ONCE(pn->state) == vcpu_hashed)  in pv_wait_head_or_lock()
    427  WRITE_ONCE(pn->state, vcpu_running);  in pv_wait_head_or_lock()
    467  WRITE_ONCE(pn->state, vcpu_hashed);  in pv_wait_head_or_lock()
|
D | rwsem.c |
    722   enum owner_state state;  in rwsem_spin_on_owner()  local
    725   state = rwsem_owner_state(owner, flags, nonspinnable);  in rwsem_spin_on_owner()
    726   if (state != OWNER_WRITER)  in rwsem_spin_on_owner()
    727   return state;  in rwsem_spin_on_owner()
    739   state = rwsem_owner_state(new, new_flags, nonspinnable);  in rwsem_spin_on_owner()
    752   state = OWNER_NONSPINNABLE;  in rwsem_spin_on_owner()
    760   return state;  in rwsem_spin_on_owner()
    995   rwsem_down_read_slowpath(struct rw_semaphore *sem, int state)  in rwsem_down_read_slowpath()  argument
    1089  set_current_state(state);  in rwsem_down_read_slowpath()
    1094  if (signal_pending_state(state, current)) {  in rwsem_down_read_slowpath()
    [all …]
|
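rwsem_down_read_slowpath() is only reached when a reader hits contention; the common callers are the ordinary down_read()/up_read() and down_write()/up_write() pairs. A minimal sketch (my_rwsem/my_data are illustrative):

```c
#include <linux/rwsem.h>

static DECLARE_RWSEM(my_rwsem);
static int my_data;

static int my_read(void)
{
	int v;

	down_read(&my_rwsem);	/* contended case ends up in rwsem_down_read_slowpath() */
	v = my_data;
	up_read(&my_rwsem);
	return v;
}

static void my_write(int v)
{
	down_write(&my_rwsem);
	my_data = v;
	up_write(&my_rwsem);
}
```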
D | rtmutex.c |
    1167  __rt_mutex_slowlock(struct rt_mutex *lock, int state,  in __rt_mutex_slowlock()  argument
    1182  if (likely(state == TASK_INTERRUPTIBLE)) {  in __rt_mutex_slowlock()
    1199  set_current_state(state);  in __rt_mutex_slowlock()
    1230  rt_mutex_slowlock(struct rt_mutex *lock, int state,  in rt_mutex_slowlock()  argument
    1256  set_current_state(state);  in rt_mutex_slowlock()
    1266  ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);  in rt_mutex_slowlock()
    1406  rt_mutex_fastlock(struct rt_mutex *lock, int state,  in rt_mutex_fastlock()  argument
    1407  int (*slowfn)(struct rt_mutex *lock, int state,  in rt_mutex_fastlock()
    1414  return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK);  in rt_mutex_fastlock()
    1418  rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,  in rt_mutex_timed_fastlock()  argument
    [all …]
|
/kernel/debug/kdb/ |
D | kdb_support.c |
    624  char state;  in kdb_task_state_char()  local
    631  state = (p->state == 0) ? 'R' :  in kdb_task_state_char()
    632  (p->state < 0) ? 'U' :  in kdb_task_state_char()
    633  (p->state & TASK_UNINTERRUPTIBLE) ? 'D' :  in kdb_task_state_char()
    634  (p->state & TASK_STOPPED) ? 'T' :  in kdb_task_state_char()
    635  (p->state & TASK_TRACED) ? 'C' :  in kdb_task_state_char()
    638  (p->state & TASK_INTERRUPTIBLE) ? 'S' : '?';  in kdb_task_state_char()
    644  state = 'I'; /* idle task */  in kdb_task_state_char()
    646  } else if (!p->mm && state == 'S') {  in kdb_task_state_char()
    647  state = 'M'; /* sleeping system daemon */  in kdb_task_state_char()
    [all …]
|
/kernel/debug/ |
D | debug_core.c |
    115  [0 ... KGDB_MAX_BREAKPOINTS-1] = { .state = BP_UNDEFINED }
    314  if (kgdb_break[i].state != BP_SET)  in dbg_activate_sw_breakpoints()
    326  kgdb_break[i].state = BP_ACTIVE;  in dbg_activate_sw_breakpoints()
    341  if ((kgdb_break[i].state == BP_SET) &&  in dbg_set_sw_break()
    346  if (kgdb_break[i].state == BP_REMOVED &&  in dbg_set_sw_break()
    355  if (kgdb_break[i].state == BP_UNDEFINED) {  in dbg_set_sw_break()
    365  kgdb_break[breakno].state = BP_SET;  in dbg_set_sw_break()
    379  if (kgdb_break[i].state != BP_ACTIVE)  in dbg_deactivate_sw_breakpoints()
    389  kgdb_break[i].state = BP_SET;  in dbg_deactivate_sw_breakpoints()
    399  if ((kgdb_break[i].state == BP_SET) &&  in dbg_remove_sw_break()
    [all …]
|
/kernel/time/ |
D | hrtimer.c |
    352  static bool hrtimer_fixup_init(void *addr, enum debug_obj_state state)  in hrtimer_fixup_init()  argument
    356  switch (state) {  in hrtimer_fixup_init()
    371  static bool hrtimer_fixup_activate(void *addr, enum debug_obj_state state)  in hrtimer_fixup_activate()  argument
    373  switch (state) {  in hrtimer_fixup_activate()
    386  static bool hrtimer_fixup_free(void *addr, enum debug_obj_state state)  in hrtimer_fixup_free()  argument
    390  switch (state) {  in hrtimer_fixup_free()
    928  if (WARN_ON(timer->state & HRTIMER_STATE_ENQUEUED))  in hrtimer_forward()
    970  WRITE_ONCE(timer->state, HRTIMER_STATE_ENQUEUED);  in enqueue_hrtimer()
    990  u8 state = timer->state;  in __remove_hrtimer()  local
    993  WRITE_ONCE(timer->state, newstate);  in __remove_hrtimer()
    [all …]
|
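timer->state is flipped to HRTIMER_STATE_ENQUEUED by enqueue_hrtimer() and cleared again in __remove_hrtimer(); clients only see this indirectly through the public hrtimer API. A minimal sketch of that client side (my_* names are illustrative):

```c
#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer my_timer;

static enum hrtimer_restart my_timer_fn(struct hrtimer *t)
{
	/* runs in hard-irq context by default; push the expiry forward
	 * and ask the core to re-enqueue the timer */
	hrtimer_forward_now(t, ms_to_ktime(100));
	return HRTIMER_RESTART;
}

static void my_timer_setup(void)
{
	hrtimer_init(&my_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	my_timer.function = my_timer_fn;
	hrtimer_start(&my_timer, ms_to_ktime(100), HRTIMER_MODE_REL);	/* -> enqueue_hrtimer() */
}

static void my_timer_teardown(void)
{
	hrtimer_cancel(&my_timer);	/* waits for a running callback to finish */
}
```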