/kernel/power/

D | suspend.c |
    167  static bool valid_state(suspend_state_t state)  in valid_state()  argument
    175  return suspend_ops && suspend_ops->valid && suspend_ops->valid(state) &&  in valid_state()
    193  suspend_state_t state;  in mem_sleep_default_setup()  local
    195  for (state = PM_SUSPEND_TO_IDLE; state <= PM_SUSPEND_MEM; state++)  in mem_sleep_default_setup()
    196  if (mem_sleep_labels[state] &&  in mem_sleep_default_setup()
    197  !strcmp(str, mem_sleep_labels[state])) {  in mem_sleep_default_setup()
    198  mem_sleep_default = state;  in mem_sleep_default_setup()
    242  int suspend_valid_only_mem(suspend_state_t state)  in suspend_valid_only_mem()  argument
    244  return state == PM_SUSPEND_MEM;  in suspend_valid_only_mem()
    248  static bool sleep_state_supported(suspend_state_t state)  in sleep_state_supported()  argument
    [all …]
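The mem_sleep_default_setup() hits above show the idiom of scanning a label table indexed by the suspend_state_t enum to turn a boot-parameter string into a state value. Below is a minimal userspace sketch of that lookup; only the loop shape and the names mem_sleep_labels/mem_sleep_default come from the listing, and the label strings and enum values are illustrative stand-ins, not the kernel's definitions:

    #include <stdio.h>
    #include <string.h>

    /* Illustrative stand-ins for the kernel's suspend_state_t values. */
    enum { PM_SUSPEND_TO_IDLE = 1, PM_SUSPEND_STANDBY = 2, PM_SUSPEND_MEM = 3 };

    /* Hypothetical label table; the kernel keeps its own in suspend.c. */
    static const char *mem_sleep_labels[] = {
            [PM_SUSPEND_TO_IDLE] = "s2idle",
            [PM_SUSPEND_STANDBY] = "shallow",
            [PM_SUSPEND_MEM]     = "deep",
    };

    static int mem_sleep_default = PM_SUSPEND_TO_IDLE;

    /* Same shape as lines 195-198: scan the labels, adopt the match. */
    static void mem_sleep_default_setup(const char *str)
    {
            int state;

            for (state = PM_SUSPEND_TO_IDLE; state <= PM_SUSPEND_MEM; state++)
                    if (mem_sleep_labels[state] &&
                        !strcmp(str, mem_sleep_labels[state]))
                            mem_sleep_default = state;
    }

    int main(void)
    {
            mem_sleep_default_setup("deep");
            printf("mem_sleep_default = %d\n", mem_sleep_default); /* prints 3 */
            return 0;
    }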
D | main.c |
    155  suspend_state_t state;  in decode_suspend_state()  local
    162  for (state = PM_SUSPEND_MIN; state < PM_SUSPEND_MAX; state++) {  in decode_suspend_state()
    163  const char *label = mem_sleep_states[state];  in decode_suspend_state()
    166  return state;  in decode_suspend_state()
    175  suspend_state_t state;  in mem_sleep_store()  local
    187  state = decode_suspend_state(buf, n);  in mem_sleep_store()
    188  if (state < PM_SUSPEND_MAX && state > PM_SUSPEND_ON)  in mem_sleep_store()
    189  mem_sleep_current = state;  in mem_sleep_store()
    595  suspend_state_t state;  in decode_state()  local
    608  for (state = PM_SUSPEND_MIN; state < PM_SUSPEND_MAX; state++) {  in decode_state()
    [all …]
D | suspend_test.c |
    62   static void __init test_wakealarm(struct rtc_device *rtc, suspend_state_t state)  in test_wakealarm()  argument
    96   if (state == PM_SUSPEND_MEM) {  in test_wakealarm()
    97   printk(info_test, pm_states[state]);  in test_wakealarm()
    98   status = pm_suspend(state);  in test_wakealarm()
    100  state = PM_SUSPEND_STANDBY;  in test_wakealarm()
    102  if (state == PM_SUSPEND_STANDBY) {  in test_wakealarm()
    103  printk(info_test, pm_states[state]);  in test_wakealarm()
    104  status = pm_suspend(state);  in test_wakealarm()
    106  state = PM_SUSPEND_TO_IDLE;  in test_wakealarm()
    108  if (state == PM_SUSPEND_TO_IDLE) {  in test_wakealarm()
    [all …]
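The test_wakealarm() hits trace a fallback cascade: try PM_SUSPEND_MEM, and when it fails step down to STANDBY, then TO_IDLE. A runnable sketch of that control flow follows, with pm_suspend() stubbed out and the pm_states[] labels assumed rather than taken from the kernel:

    #include <stdio.h>

    enum { PM_SUSPEND_TO_IDLE = 1, PM_SUSPEND_STANDBY = 2, PM_SUSPEND_MEM = 3 };

    /* Hypothetical label table; the kernel's pm_states[] lives in main.c. */
    static const char *pm_states[] = { "", "freeze", "standby", "mem" };

    /* Stub: pretend only suspend-to-idle succeeds on this machine. */
    static int pm_suspend(int state)
    {
            return state == PM_SUSPEND_TO_IDLE ? 0 : -1;
    }

    /* Same cascade as lines 96-108: try the deepest state first, fall
     * back to progressively shallower ones when pm_suspend() fails. */
    static void test_wakealarm(int state)
    {
            int status = -1;

            if (state == PM_SUSPEND_MEM) {
                    printf("testing %s\n", pm_states[state]);
                    status = pm_suspend(state);
                    if (status < 0)
                            state = PM_SUSPEND_STANDBY;
            }
            if (state == PM_SUSPEND_STANDBY) {
                    printf("testing %s\n", pm_states[state]);
                    status = pm_suspend(state);
                    if (status < 0)
                            state = PM_SUSPEND_TO_IDLE;
            }
            if (state == PM_SUSPEND_TO_IDLE) {
                    printf("testing %s\n", pm_states[state]);
                    status = pm_suspend(state);
            }
            printf("final status: %d\n", status);
    }

    int main(void)
    {
            test_wakealarm(PM_SUSPEND_MEM);
            return 0;
    }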
D | autosleep.c |
    90   int pm_autosleep_set_state(suspend_state_t state)  in pm_autosleep_set_state()  argument
    94   if (state >= PM_SUSPEND_MAX)  in pm_autosleep_set_state()
    102  autosleep_state = state;  in pm_autosleep_set_state()
    106  if (state > PM_SUSPEND_ON) {  in pm_autosleep_set_state()
/kernel/cgroup/

D | legacy_freezer.c |
    46   unsigned int state;  member
    69   unsigned int state;  in cgroup_freezing()  local
    76   state = task_freezer(task)->state;  in cgroup_freezing()
    77   ret = (state & CGROUP_FREEZING) && !(state & CGROUP_FROZEN);  in cgroup_freezing()
    83   static const char *freezer_state_strs(unsigned int state)  in freezer_state_strs()  argument
    85   if (state & CGROUP_FROZEN)  in freezer_state_strs()
    87   if (state & CGROUP_FREEZING)  in freezer_state_strs()
    120  freezer->state |= CGROUP_FREEZER_ONLINE;  in freezer_css_online()
    122  if (parent && (parent->state & CGROUP_FREEZING)) {  in freezer_css_online()
    123  freezer->state |= CGROUP_FREEZING_PARENT | CGROUP_FROZEN;  in freezer_css_online()
    [all …]
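cgroup_freezing() at line 77 combines two flag tests: a task counts as freezing only while the CGROUP_FROZEN bit is still clear. A self-contained sketch of that bitmask test, with assumed flag values (the kernel defines the real ones in the legacy freezer):

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative flag values, not the kernel's. */
    #define CGROUP_FREEZING_SELF   (1 << 1)
    #define CGROUP_FREEZING_PARENT (1 << 2)
    #define CGROUP_FROZEN          (1 << 3)
    #define CGROUP_FREEZING        (CGROUP_FREEZING_SELF | CGROUP_FREEZING_PARENT)

    /* Line 77's test: "freezing" means the freeze was requested but the
     * cgroup has not yet reached the fully frozen state. */
    static bool cgroup_freezing(unsigned int state)
    {
            return (state & CGROUP_FREEZING) && !(state & CGROUP_FROZEN);
    }

    int main(void)
    {
            printf("%d\n", cgroup_freezing(CGROUP_FREEZING_SELF));                 /* 1 */
            printf("%d\n", cgroup_freezing(CGROUP_FREEZING_SELF | CGROUP_FROZEN)); /* 0 */
            return 0;
    }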
/kernel/

D | cpu.c |
    66   enum cpuhp_state state;  member
    145  static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)  in cpuhp_get_step()  argument
    147  return cpuhp_hp_states + state;  in cpuhp_get_step()
    167  static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,  in cpuhp_invoke_callback()  argument
    172  struct cpuhp_step *step = cpuhp_get_step(state);  in cpuhp_invoke_callback()
    177  if (st->fail == state) {  in cpuhp_invoke_callback()
    191  trace_cpuhp_enter(cpu, st->target, state, cb);  in cpuhp_invoke_callback()
    193  trace_cpuhp_exit(cpu, st->state, state, ret);  in cpuhp_invoke_callback()
    201  trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);  in cpuhp_invoke_callback()
    203  trace_cpuhp_exit(cpu, st->state, state, ret);  in cpuhp_invoke_callback()
    [all …]
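cpuhp_get_step() at line 147 is plain pointer arithmetic: the cpuhp_state enum value doubles as a direct index into the global step table. A minimal mock-up of that design (the enum subset, step layout, and callback are invented for the demo):

    #include <stdio.h>

    /* Invented subset of the hotplug states; the real enum is much larger. */
    enum cpuhp_state { CPUHP_OFFLINE, CPUHP_BRINGUP_CPU, CPUHP_ONLINE, CPUHP_MAX };

    struct cpuhp_step {
            const char *name;
            int (*startup)(unsigned int cpu);
    };

    static int bringup(unsigned int cpu)
    {
            printf("bringing up cpu%u\n", cpu);
            return 0;
    }

    static struct cpuhp_step cpuhp_hp_states[CPUHP_MAX] = {
            [CPUHP_BRINGUP_CPU] = { .name = "cpu:bringup", .startup = bringup },
    };

    /* Line 147's idiom: the enum value is a direct offset into the table. */
    static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
    {
            return cpuhp_hp_states + state;
    }

    int main(void)
    {
            struct cpuhp_step *step = cpuhp_get_step(CPUHP_BRINGUP_CPU);

            return step->startup ? step->startup(0) : 0;
    }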
D | context_tracking.c |
    34   .state = ATOMIC_INIT(RCU_DYNTICKS_IDX),
    145  instrument_atomic_write(&ct->state, sizeof(ct->state));  in ct_kernel_exit()
    183  instrument_atomic_write(&ct->state, sizeof(ct->state));  in ct_kernel_enter()
    235  instrument_atomic_write(&ct->state, sizeof(ct->state));  in ct_nmi_exit()
    285  instrument_atomic_read(&ct->state, sizeof(ct->state));  in ct_nmi_enter()
    287  instrument_atomic_write(&ct->state, sizeof(ct->state));  in ct_nmi_enter()
    466  void noinstr __ct_user_enter(enum ctx_state state)  in __ct_user_enter()  argument
    477  if (__ct_state() != state) {  in __ct_user_enter()
    486  if (state == CONTEXT_USER) {  in __ct_user_enter()
    505  ct_kernel_exit(true, RCU_DYNTICKS_IDX + state);  in __ct_user_enter()
    [all …]
D | freezer.c |
    67   unsigned int state = get_current_state();  in __refrigerator()  local
    72   WARN_ON_ONCE(state && !(state & TASK_NORMAL));  in __refrigerator()
    114  unsigned int state = READ_ONCE(p->__state);  in __set_task_frozen()  local
    122  if (!(state & (TASK_FREEZABLE | __TASK_STOPPED | __TASK_TRACED)))  in __set_task_frozen()
    129  if (state & TASK_FREEZABLE)  in __set_task_frozen()
    130  WARN_ON_ONCE(!(state & TASK_NORMAL));  in __set_task_frozen()
    136  if (!(state & __TASK_FREEZABLE_UNSAFE))  in __set_task_frozen()
    190  unsigned int state = p->saved_state;  in __restore_freezer_state()  local
    192  if (state != TASK_RUNNING) {  in __restore_freezer_state()
    193  WRITE_ONCE(p->__state, state);  in __restore_freezer_state()
D | stackleak.c |
    28   int state = !static_branch_unlikely(&stack_erasing_bypass);  in stack_erasing_sysctl()  local
    29   int prev_state = state;  in stack_erasing_sysctl()
    31   table->data = &state;  in stack_erasing_sysctl()
    34   state = !!state;  in stack_erasing_sysctl()
    35   if (ret || !write || state == prev_state)  in stack_erasing_sysctl()
    38   if (state)  in stack_erasing_sysctl()
    44   state ? "enabled" : "disabled");  in stack_erasing_sysctl()
D | softirq.c |
    815  if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) {  in tasklet_clear_sched()
    816  wake_up_var(&t->state);  in tasklet_clear_sched()
    886  t->state = 0;  in tasklet_setup()
    898  t->state = 0;  in tasklet_init()
    913  while (test_bit(TASKLET_STATE_RUN, &(t)->state)) {  in tasklet_unlock_spin_wait()
    937  while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state))  in tasklet_kill()
    938  wait_var_event(&t->state, !test_bit(TASKLET_STATE_SCHED, &t->state));  in tasklet_kill()
    949  clear_bit(TASKLET_STATE_RUN, &t->state);  in tasklet_unlock()
    951  wake_up_var(&t->state);  in tasklet_unlock()
    957  wait_var_event(&t->state, !test_bit(TASKLET_STATE_RUN, &t->state));  in tasklet_unlock_wait()
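tasklet_unlock() and tasklet_unlock_wait() show the bit-state rendezvous: one side clears a state bit and calls wake_up_var(), the other sleeps in wait_var_event() until the bit is observed clear. A userspace approximation using a pthread mutex/condvar in place of the kernel's var-wait machinery (all names and the bit value are illustrative):

    #include <pthread.h>
    #include <stdio.h>

    #define TASKLET_STATE_RUN 1UL  /* illustrative bit value */

    static unsigned long state = TASKLET_STATE_RUN;
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;

    /* Stands in for tasklet_unlock() (lines 949-951): clear the RUN bit,
     * then wake anyone parked on the state word. */
    static void *runner(void *arg)
    {
            (void)arg;
            pthread_mutex_lock(&lock);
            state &= ~TASKLET_STATE_RUN;   /* clear_bit(TASKLET_STATE_RUN, ...) */
            pthread_cond_broadcast(&cond); /* wake_up_var(&t->state) */
            pthread_mutex_unlock(&lock);
            return NULL;
    }

    int main(void)
    {
            pthread_t th;

            pthread_create(&th, NULL, runner, NULL);

            /* Stands in for tasklet_unlock_wait() (line 957): sleep until
             * the RUN bit is observed clear. */
            pthread_mutex_lock(&lock);
            while (state & TASKLET_STATE_RUN)
                    pthread_cond_wait(&cond, &lock);
            pthread_mutex_unlock(&lock);

            pthread_join(th, NULL);
            puts("tasklet finished");
            return 0;
    }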
/kernel/livepatch/

D | state.c |
    15   #define klp_for_each_state(patch, state) \  argument
    16   for (state = patch->states; state && state->id; state++)
    33   struct klp_state *state;  in klp_get_state()  local
    35   klp_for_each_state(patch, state) {  in klp_get_state()
    36   if (state->id == id)  in klp_get_state()
    37   return state;  in klp_get_state()
    67   struct klp_state *state, *last_state = NULL;  in klp_get_prev_state()  local
    76   state = klp_get_state(patch, id);  in klp_get_prev_state()
    77   if (state)  in klp_get_prev_state()
    78   last_state = state;  in klp_get_prev_state()
    [all …]
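The macro at lines 15-16 is a zero-terminated-array iterator that also tolerates a NULL states pointer, so callers never need a separate guard. A standalone re-creation of that shape (the klp_state fields beyond id are made up for the demo):

    #include <stdio.h>

    /* Invented fields apart from id; the real struct klp_state differs. */
    struct klp_state {
            unsigned long id;      /* id == 0 terminates the array */
            const char *name;
    };

    struct klp_patch {
            struct klp_state *states;
    };

    /* Same shape as lines 15-16: walk a zero-terminated array, and accept
     * a NULL states pointer without a check at each call site. */
    #define klp_for_each_state(patch, state) \
            for (state = (patch)->states; state && state->id; state++)

    int main(void)
    {
            struct klp_state states[] = { { 1, "one" }, { 2, "two" }, { 0, NULL } };
            struct klp_patch patch = { .states = states };
            struct klp_state *state;

            klp_for_each_state(&patch, state)
                    printf("state %lu: %s\n", state->id, state->name);
            return 0;
    }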
/kernel/locking/

D | rwbase_rt.c |
    69   unsigned int state)  in __rwbase_read_lock()  argument
    121  ret = rwbase_rtmutex_slowlock_locked(rtm, state);  in __rwbase_read_lock()
    141  unsigned int state)  in rwbase_read_lock()  argument
    146  return __rwbase_read_lock(rwb, state);  in rwbase_read_lock()
    150  unsigned int state)  in __rwbase_read_unlock()  argument
    165  rt_mutex_wake_q_add_task(&wqh, owner, state);  in __rwbase_read_unlock()
    174  unsigned int state)  in rwbase_read_unlock()  argument
    183  __rwbase_read_unlock(rwb, state);  in rwbase_read_unlock()
    237  unsigned int state)  in rwbase_write_lock()  argument
    243  if (rwbase_rtmutex_lock_state(rtm, state))  in rwbase_write_lock()
    [all …]
D | qspinlock_paravirt.h |
    53   u8 state;  member
    272  return READ_ONCE(prev->state) != vcpu_running;  in pv_wait_early()
    285  pn->state = vcpu_running;  in pv_init_node()
    320  smp_store_mb(pn->state, vcpu_halted);  in pv_wait_node()
    325  pv_wait(&pn->state, vcpu_halted);  in pv_wait_node()
    333  cmpxchg(&pn->state, vcpu_halted, vcpu_running);  in pv_wait_node()
    380  if (cmpxchg_relaxed(&pn->state, vcpu_halted, vcpu_hashed)  in pv_kick_node()
    414  if (READ_ONCE(pn->state) == vcpu_hashed)  in pv_wait_head_or_lock()
    427  WRITE_ONCE(pn->state, vcpu_running);  in pv_wait_head_or_lock()
    467  WRITE_ONCE(pn->state, vcpu_hashed);  in pv_wait_head_or_lock()
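The cmpxchg() at line 333 guards a state transition: only a halted vCPU may be moved back to running here, and a racing transition to vcpu_hashed wins and is left intact. A C11-atomics sketch of that guarded transition (the state names come from the listing; everything else is assumed):

    #include <stdatomic.h>
    #include <stdio.h>

    enum vcpu_state { vcpu_running, vcpu_halted, vcpu_hashed };

    static _Atomic int state = vcpu_halted;

    int main(void)
    {
            int expected = vcpu_halted;

            /* Line 333's idiom: only halted -> running may happen here; if
             * another CPU already moved the state to vcpu_hashed, the
             * compare-exchange fails and the hashed state is preserved. */
            if (atomic_compare_exchange_strong(&state, &expected, vcpu_running))
                    puts("halted -> running");
            else
                    printf("lost the race; state is %d\n", expected);
            return 0;
    }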
D | rwsem.c |
    762   enum owner_state state;  in rwsem_spin_on_owner()  local
    769   state = rwsem_owner_state(owner, flags);  in rwsem_spin_on_owner()
    770   if (state != OWNER_WRITER)  in rwsem_spin_on_owner()
    771   return state;  in rwsem_spin_on_owner()
    785   state = rwsem_owner_state(new, new_flags);  in rwsem_spin_on_owner()
    800   state = OWNER_NONSPINNABLE;  in rwsem_spin_on_owner()
    807   return state;  in rwsem_spin_on_owner()
    1021  rwsem_down_read_slowpath(struct rw_semaphore *sem, long count, unsigned int state)  in rwsem_down_read_slowpath()  argument
    1108  set_current_state(state);  in rwsem_down_read_slowpath()
    1113  if (signal_pending_state(state, current)) {  in rwsem_down_read_slowpath()
    [all …]
D | semaphore.c |
    209  static inline int __sched ___down_common(struct semaphore *sem, long state,  in ___down_common()  argument
    219  if (signal_pending_state(state, current))  in ___down_common()
    223  __set_current_state(state);  in ___down_common()
    240  static inline int __sched __down_common(struct semaphore *sem, long state,  in __down_common()  argument
    246  ret = ___down_common(sem, state, timeout);  in __down_common()
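___down_common() is the canonical kernel sleep loop, and the completion.c, rwsem.c, and swait.c hits in this listing all share the same shape: check for a signal, check the timeout, set the task state, schedule, and re-test the wake condition. A heavily simplified, runnable sketch of that loop; the real code parks the caller on sem->wait_list and is woken by up(), both of which are stubbed here, and the error constants are just mnemonic comments:

    #include <stdio.h>

    /* Stubs so the loop shape is runnable; the kernel versions sleep. */
    static int signal_pending_state(long state) { (void)state; return 0; }
    static long schedule_timeout(long timeout) { return timeout - 1; }

    struct waiter { int up; };

    static int down_common(struct waiter *w, long state, long timeout)
    {
            for (;;) {
                    if (signal_pending_state(state))
                            return -4;            /* -EINTR */
                    if (timeout <= 0)
                            return -62;           /* -ETIME */
                    /* __set_current_state(state): record how we sleep */
                    timeout = schedule_timeout(timeout);
                    if (w->up)                    /* up() granted the semaphore */
                            return 0;
            }
    }

    int main(void)
    {
            struct waiter w = { .up = 0 };

            /* 0x1 stands in for TASK_INTERRUPTIBLE. */
            printf("%d\n", down_common(&w, 0x1, 3)); /* prints -62 */
            return 0;
    }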
/kernel/sched/

D | cputime.c |
    697  WARN_ON_ONCE(vtime->state == VTIME_INACTIVE);  in get_vtime_delta()
    727  if (vtime->state == VTIME_GUEST)  in __vtime_account_kernel()
    751  vtime->state = VTIME_USER;  in vtime_user_enter()
    765  vtime->state = VTIME_SYS;  in vtime_user_exit()
    782  vtime->state = VTIME_GUEST;  in vtime_guest_enter()
    794  vtime->state = VTIME_SYS;  in vtime_guest_exit()
    809  if (vtime->state == VTIME_IDLE)  in vtime_task_switch_generic()
    813  vtime->state = VTIME_INACTIVE;  in vtime_task_switch_generic()
    821  vtime->state = VTIME_IDLE;  in vtime_task_switch_generic()
    823  vtime->state = VTIME_GUEST;  in vtime_task_switch_generic()
    [all …]
D | completion.c |
    72   long (*action)(long), long timeout, int state)  in do_wait_for_common()  argument
    78   if (signal_pending_state(state, current)) {  in do_wait_for_common()
    83   __set_current_state(state);  in do_wait_for_common()
    99   long (*action)(long), long timeout, int state)  in __wait_for_common()  argument
    106  timeout = do_wait_for_common(x, action, timeout, state);  in __wait_for_common()
    115  wait_for_common(struct completion *x, long timeout, int state)  in wait_for_common()  argument
    117  return __wait_for_common(x, schedule_timeout, timeout, state);  in wait_for_common()
    121  wait_for_common_io(struct completion *x, long timeout, int state)  in wait_for_common_io()  argument
    123  return __wait_for_common(x, io_schedule_timeout, timeout, state);  in wait_for_common_io()
    252  int __sched wait_for_completion_state(struct completion *x, unsigned int state)  in wait_for_completion_state()  argument
    [all …]
D | swait.c |
    91   void prepare_to_swait_exclusive(struct swait_queue_head *q, struct swait_queue *wait, int state)  in prepare_to_swait_exclusive()  argument
    97   set_current_state(state);  in prepare_to_swait_exclusive()
    102  long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state)  in prepare_to_swait_event()  argument
    108  if (signal_pending_state(state, current)) {  in prepare_to_swait_event()
    117  set_current_state(state);  in prepare_to_swait_event()
D | psi.c |
    217   static bool test_state(unsigned int *tasks, enum psi_states state, bool oncpu)  in test_state()  argument
    219   switch (state) {  in test_state()
    501   group->total[PSI_POLL][t->state], 0);  in init_triggers()
    521   new_stall = group->polling_total[t->state] != total[t->state];  in update_triggers()
    542   growth = window_update(&t->win, now, total[t->state]);  in update_triggers()
    1235  enum psi_states state;  in psi_trigger_create()  local
    1243  state = PSI_IO_SOME + res * 2;  in psi_trigger_create()
    1245  state = PSI_IO_FULL + res * 2;  in psi_trigger_create()
    1250  if (res == PSI_IRQ && --state != PSI_IRQ_FULL)  in psi_trigger_create()
    1254  if (state >= PSI_NONIDLE)  in psi_trigger_create()
    [all …]
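psi_trigger_create() turns a (resource, some|full) pair into a psi_states index with plain arithmetic at lines 1243-1245, which only works because SOME/FULL entries sit consecutively per resource. A sketch with an assumed enum layout that has that property:

    #include <stdio.h>

    /* Assumed layout: SOME/FULL pairs are consecutive per resource, which
     * is the property the arithmetic at lines 1243-1245 relies on. */
    enum psi_res { PSI_IO, PSI_MEM, PSI_CPU };
    enum psi_states {
            PSI_IO_SOME, PSI_IO_FULL,
            PSI_MEM_SOME, PSI_MEM_FULL,
            PSI_CPU_SOME, PSI_CPU_FULL,
    };

    int main(void)
    {
            enum psi_res res = PSI_MEM;
            int full = 1;
            int state = full ? PSI_IO_FULL + res * 2 : PSI_IO_SOME + res * 2;

            printf("state = %d (PSI_MEM_FULL = %d)\n", state, PSI_MEM_FULL);
            return 0;
    }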
/kernel/bpf/

D | verifier.c |
    228  static void bpf_map_key_store(struct bpf_insn_aux_data *aux, u64 state)  in bpf_map_key_store()  argument
    232  aux->map_key_state = state | BPF_MAP_KEY_SEEN |  in bpf_map_key_store()
    627  static bool is_spi_bounds_valid(struct bpf_func_state *state, int spi, int nr_slots)  in is_spi_bounds_valid()  argument
    629  int allocated_slots = state->allocated_stack / BPF_REG_SIZE;  in is_spi_bounds_valid()
    712  struct bpf_func_state *state = func(env, reg);  in mark_stack_slots_dynptr()  local
    718  if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS))  in mark_stack_slots_dynptr()
    722  state->stack[spi].slot_type[i] = STACK_DYNPTR;  in mark_stack_slots_dynptr()
    723  state->stack[spi - 1].slot_type[i] = STACK_DYNPTR;  in mark_stack_slots_dynptr()
    730  state->stack[spi].spilled_ptr.dynptr.first_slot = true;  in mark_stack_slots_dynptr()
    731  state->stack[spi].spilled_ptr.dynptr.type = type;  in mark_stack_slots_dynptr()
    [all …]
D | bpf_struct_ops.c |
    23   enum bpf_struct_ops_state state
    251  enum bpf_struct_ops_state state;  in bpf_struct_ops_map_sys_lookup_elem()  local
    258  state = smp_load_acquire(&kvalue->state);  in bpf_struct_ops_map_sys_lookup_elem()
    259  if (state == BPF_STRUCT_OPS_STATE_INIT) {  in bpf_struct_ops_map_sys_lookup_elem()
    269  uvalue->state = state;  in bpf_struct_ops_map_sys_lookup_elem()
    381  if (uvalue->state || refcount_read(&uvalue->refcnt))  in bpf_struct_ops_map_update_elem()
    393  if (kvalue->state != BPF_STRUCT_OPS_STATE_INIT) {  in bpf_struct_ops_map_update_elem()
    505  smp_store_release(&kvalue->state, BPF_STRUCT_OPS_STATE_INUSE);  in bpf_struct_ops_map_update_elem()
    534  prev_state = cmpxchg(&st_map->kvalue.state,  in bpf_struct_ops_map_delete_elem()
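The lookup at line 258 acquire-loads kvalue->state, pairing with the release store at line 505: flipping the state to INUSE is what publishes the fully initialized value to readers. The same pairing in portable C11 atomics (the structure and field names are stand-ins for the kernel's):

    #include <stdatomic.h>
    #include <stdio.h>

    /* Illustrative states from the listing. */
    enum { BPF_STRUCT_OPS_STATE_INIT, BPF_STRUCT_OPS_STATE_INUSE };

    struct kvalue {
            int data;
            _Atomic int state;
    };

    static struct kvalue kv;

    /* Writer: fill in data, then release-store the state (line 505). */
    static void publish(void)
    {
            kv.data = 42;
            atomic_store_explicit(&kv.state, BPF_STRUCT_OPS_STATE_INUSE,
                                  memory_order_release);
    }

    /* Reader: acquire-load the state before trusting data (line 258). */
    static int lookup(void)
    {
            if (atomic_load_explicit(&kv.state, memory_order_acquire) ==
                BPF_STRUCT_OPS_STATE_INIT)
                    return -1;   /* not published yet */
            return kv.data;      /* safe: acquire pairs with the release */
    }

    int main(void)
    {
            publish();
            printf("%d\n", lookup());
            return 0;
    }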
/kernel/debug/kdb/

D | kdb_support.c |
    488  char state;  in kdb_task_state_char()  local
    495  state = task_state_to_char((struct task_struct *) p);  in kdb_task_state_char()
    503  state = '-'; /* idle task */  in kdb_task_state_char()
    505  } else if (!p->mm && strchr("IMS", state)) {  in kdb_task_state_char()
    506  state = tolower(state); /* sleeping system daemon */  in kdb_task_state_char()
    508  return state;  in kdb_task_state_char()
    524  char state = kdb_task_state_char(p);  in kdb_task_state()  local
    531  return !strchr("-ims", state);  in kdb_task_state()
    537  return strchr(mask, state);  in kdb_task_state()
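kdb_task_state_char() classifies tasks by a single state letter, lowercasing 'I', 'M', or 'S' for kernel threads with no mm; kdb_task_state() then matches that letter against a caller-supplied mask with strchr(). A compact demo of the classification step (the has_mm flag stands in for the p->mm test):

    #include <ctype.h>
    #include <stdio.h>
    #include <string.h>

    /* Line 505's idiom: single-character state codes classified with
     * strchr(), lowercased for "sleeping system daemon" tasks. */
    static char classify(char state, int has_mm)
    {
            if (!has_mm && strchr("IMS", state))
                    state = (char)tolower(state);
            return state;
    }

    int main(void)
    {
            printf("%c %c\n", classify('S', 1), classify('S', 0)); /* S s */
            return 0;
    }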
/kernel/trace/

D | trace_events_synth.c |
    1732  struct synth_event_trace_state state;  in synth_event_trace()  local
    1736  ret = __synth_event_trace_init(file, &state);  in synth_event_trace()
    1743  if (state.event->n_dynamic_fields) {  in synth_event_trace()
    1746  for (i = 0; i < state.event->n_fields; i++) {  in synth_event_trace()
    1749  if (state.event->fields[i]->is_string &&  in synth_event_trace()
    1750  state.event->fields[i]->is_dynamic) {  in synth_event_trace()
    1760  ret = __synth_event_trace_start(file, &state, data_size);  in synth_event_trace()
    1764  if (n_vals != state.event->n_fields) {  in synth_event_trace()
    1772  for (i = 0, n_u64 = 0; i < state.event->n_fields; i++) {  in synth_event_trace()
    1777  if (state.event->fields[i]->is_string) {  in synth_event_trace()
    [all …]
/kernel/debug/

D | debug_core.c |
    102  [0 ... KGDB_MAX_BREAKPOINTS-1] = { .state = BP_UNDEFINED }
    300  if (kgdb_break[i].state != BP_SET)  in dbg_activate_sw_breakpoints()
    312  kgdb_break[i].state = BP_ACTIVE;  in dbg_activate_sw_breakpoints()
    328  if ((kgdb_break[i].state == BP_SET) &&  in dbg_set_sw_break()
    333  if (kgdb_break[i].state == BP_REMOVED &&  in dbg_set_sw_break()
    342  if (kgdb_break[i].state == BP_UNDEFINED) {  in dbg_set_sw_break()
    352  kgdb_break[breakno].state = BP_SET;  in dbg_set_sw_break()
    366  if (kgdb_break[i].state != BP_ACTIVE)  in dbg_deactivate_sw_breakpoints()
    376  kgdb_break[i].state = BP_SET;  in dbg_deactivate_sw_breakpoints()
    387  if ((kgdb_break[i].state == BP_SET) &&  in dbg_remove_sw_break()
    [all …]
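The breakpoint table walks a small state machine: slots start BP_UNDEFINED, become BP_SET when claimed, BP_ACTIVE while patched in, and BP_REMOVED when freed, and dbg_set_sw_break() prefers recycling a REMOVED slot for the same address over claiming a fresh one. A sketch of that claim path; the table size and helper are invented, and the [0 ... N-1] range initializer at line 102 is a GCC/Clang extension:

    #include <stdio.h>

    enum bp_state { BP_UNDEFINED, BP_SET, BP_ACTIVE, BP_REMOVED };

    #define KGDB_MAX_BREAKPOINTS 4

    struct kgdb_bkpt {
            unsigned long addr;
            enum bp_state state;
    };

    /* Line 102's idiom: a designated-initializer range fills the table
     * (GCC/Clang extension, as used in the kernel). */
    static struct kgdb_bkpt kgdb_break[KGDB_MAX_BREAKPOINTS] = {
            [0 ... KGDB_MAX_BREAKPOINTS - 1] = { .state = BP_UNDEFINED }
    };

    /* Approximates dbg_set_sw_break(): reuse a REMOVED slot for the same
     * address before claiming an UNDEFINED one. */
    static int set_sw_break(unsigned long addr)
    {
            int i;

            for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++)
                    if (kgdb_break[i].state == BP_REMOVED &&
                        kgdb_break[i].addr == addr)
                            goto found;
            for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++)
                    if (kgdb_break[i].state == BP_UNDEFINED)
                            goto found;
            return -1;
    found:
            kgdb_break[i].addr = addr;
            kgdb_break[i].state = BP_SET;
            return i;
    }

    int main(void)
    {
            printf("slot %d\n", set_sw_break(0x1000));
            return 0;
    }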
/kernel/entry/

D | syscall_user_dispatch.c |
    37   char state;  in syscall_user_dispatch()  local
    50   if (unlikely(__get_user(state, sd->selector))) {  in syscall_user_dispatch()
    55   if (likely(state == SYSCALL_DISPATCH_FILTER_ALLOW))  in syscall_user_dispatch()
    58   if (state != SYSCALL_DISPATCH_FILTER_BLOCK) {  in syscall_user_dispatch()