/drivers/cpuidle/ |
D | cpuidle-psci-domain.c |
    34  struct genpd_power_state *state = &pd->states[pd->state_idx];  in psci_pd_power_off()
    50  static int psci_pd_parse_state_nodes(struct genpd_power_state *states,  in psci_pd_parse_state_nodes() argument
    57  ret = psci_dt_parse_state_node(to_of_node(states[i].fwnode),  in psci_pd_parse_state_nodes()
    68  states[i].data = psci_state_buf;  in psci_pd_parse_state_nodes()
    76  kfree(states[i].data);  in psci_pd_parse_state_nodes()
    81  struct genpd_power_state **states, int *state_count)  in psci_pd_parse_states() argument
    86  ret = of_genpd_parse_idle_states(np, states, state_count);  in psci_pd_parse_states()
    91  ret = psci_pd_parse_state_nodes(*states, *state_count);  in psci_pd_parse_states()
    93  kfree(*states);  in psci_pd_parse_states()
    98  static void psci_pd_free_states(struct genpd_power_state *states,  in psci_pd_free_states() argument
    [all …]
|
D | cpuidle-mvebu-v7.c |
    36  if (drv->states[index].flags & MVEBU_V7_FLAG_DEEP_IDLE)  in mvebu_v7_enter_idle()
    50  .states[0] = ARM_CPUIDLE_WFI_STATE,
    51  .states[1] = {
    59  .states[2] = {
    73  .states[0] = ARM_CPUIDLE_WFI_STATE,
    74  .states[1] = {
    88  .states[0] = ARM_CPUIDLE_WFI_STATE,
    89  .states[1] = {
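The mvebu-v7 entry above (and the big_little, at91 and kirkwood entries further down) all follow the same pattern: states[0] is the architected WFI state and the deeper states are filled in statically in the driver definition. A minimal sketch of that pattern follows; the driver name, latency numbers and enter callback are illustrative and not taken from any of these files.

#include <linux/cpuidle.h>
#include <asm/cpuidle.h>                /* ARM_CPUIDLE_WFI_STATE */

/* Illustrative enter callback; a real driver would program the platform's
 * power controller before idling the CPU. */
static int example_enter_deep_idle(struct cpuidle_device *dev,
                                   struct cpuidle_driver *drv, int index)
{
        cpu_do_idle();                  /* stand-in for the platform sequence */
        return index;                   /* report the state actually entered */
}

static struct cpuidle_driver example_idle_driver = {
        .name                   = "example_idle",      /* hypothetical */
        .states[0]              = ARM_CPUIDLE_WFI_STATE,
        .states[1]              = {
                .enter                  = example_enter_deep_idle,
                .exit_latency           = 100,          /* us, made-up */
                .target_residency       = 1000,         /* us, made-up */
                .name                   = "DEEP",
                .desc                   = "example deep idle state",
        },
        .state_count = 2,
};

/* Registered once at probe time, e.g. cpuidle_register(&example_idle_driver, NULL); */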
|
D | cpuidle-psci.c |
    63  u32 *states = data->psci_states;  in __psci_enter_domain_idle_state() local
    86  state = states[idx];  in __psci_enter_domain_idle_state()
    251  drv->states[state_count - 1].enter = psci_enter_domain_idle_state;  in psci_dt_cpu_init_topology()
    252  drv->states[state_count - 1].enter_s2idle = psci_enter_s2idle_domain_idle_state;  in psci_dt_cpu_init_topology()
    367  drv->states[0].enter = psci_enter_idle_state;  in psci_idle_init_cpu()
    368  drv->states[0].exit_latency = 1;  in psci_idle_init_cpu()
    369  drv->states[0].target_residency = 1;  in psci_idle_init_cpu()
    370  drv->states[0].power_usage = UINT_MAX;  in psci_idle_init_cpu()
    371  strcpy(drv->states[0].name, "WFI");  in psci_idle_init_cpu()
    372  strcpy(drv->states[0].desc, "ARM WFI");  in psci_idle_init_cpu()
|
D | cpuidle.c |
    72  if (drv->states[i].enter_dead)  in cpuidle_play_dead()
    73  return drv->states[i].enter_dead(dev, i);  in cpuidle_play_dead()
    88  struct cpuidle_state *s = &drv->states[i];  in find_deepest_state()
    142  struct cpuidle_state *target_state = &drv->states[index];  in enter_s2idle_proper()
    218  target_state = &drv->states[index];  in cpuidle_enter_state()
    233  target_state = &drv->states[index];  in cpuidle_enter_state()
    273  s64 diff, delay = drv->states[entered_state].exit_latency_ns;  in cpuidle_enter_state()
    287  if (diff < drv->states[entered_state].target_residency_ns) {  in cpuidle_enter_state()
    305  if (diff - delay >= drv->states[i].target_residency_ns)  in cpuidle_enter_state()
    423  state_limit = drv->states[i].target_residency_ns;  in cpuidle_poll_time()
    [all …]
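The cpuidle core hits above treat drv->states[] as an ordered table: index i+1 is assumed to be deeper (larger exit_latency_ns / target_residency_ns) than index i, which is why find_deepest_state() and cpuidle_poll_time() can simply scan it. A rough, simplified sketch of such a scan follows; this is not the actual find_deepest_state(), which additionally handles s2idle and further constraints.

#include <linux/cpuidle.h>

/* Hedged sketch: pick the deepest enabled state whose exit latency fits
 * within a given limit, relying on states[] being ordered shallow to deep. */
static int example_pick_deepest(struct cpuidle_driver *drv,
                                struct cpuidle_device *dev,
                                u64 latency_limit_ns)
{
        int i, deepest = 0;

        for (i = 1; i < drv->state_count; i++) {
                struct cpuidle_state *s = &drv->states[i];

                if (dev->states_usage[i].disable)
                        continue;
                if (s->exit_latency_ns > latency_limit_ns)
                        break;          /* deeper states are costlier still */
                deepest = i;
        }
        return deepest;
}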
|
D | cpuidle-big_little.c |
    62  .states[0] = ARM_CPUIDLE_WFI_STATE,
    63  .states[1] = {
    83  .states[0] = ARM_CPUIDLE_WFI_STATE,
    84  .states[1] = {
|
D | cpuidle-at91.c |
    40  .states[0] = ARM_CPUIDLE_WFI_STATE,
    41  .states[1] = {
|
D | cpuidle-kirkwood.c |
    44  .states[0] = ARM_CPUIDLE_WFI_STATE,
    45  .states[1] = {
|
/drivers/regulator/ |
D | gpio-regulator.c |
    39  struct gpio_regulator_state *states;  member
    51  if (data->states[ptr].gpios == data->state)  in gpio_regulator_get_value()
    52  return data->states[ptr].value;  in gpio_regulator_get_value()
    65  if (data->states[ptr].value < best_val &&  in gpio_regulator_set_voltage()
    66  data->states[ptr].value >= min_uV &&  in gpio_regulator_set_voltage()
    67  data->states[ptr].value <= max_uV) {  in gpio_regulator_set_voltage()
    68  target = data->states[ptr].gpios;  in gpio_regulator_set_voltage()
    69  best_val = data->states[ptr].value;  in gpio_regulator_set_voltage()
    94  return data->states[selector].value;  in gpio_regulator_list_voltage()
    104  if (data->states[ptr].value > best_val &&  in gpio_regulator_set_current_limit()
    [all …]
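In gpio-regulator.c the states[] table pairs a GPIO bit pattern (.gpios) with the voltage or current it produces (.value): get_value() searches by pattern, set_voltage() searches for the lowest value inside the requested window. The selection loop at lines 65-69 above boils down to the following sketch; the struct and function names are illustrative, not the driver's.

#include <linux/limits.h>       /* INT_MAX */

/* Illustrative state table entry: a GPIO setting and the value it yields. */
struct example_gpio_state {
        int gpios;      /* bit pattern to drive on the control GPIOs */
        int value;      /* resulting voltage in uV (or current in uA) */
};

/* Pick the lowest value that still falls inside [min_uV, max_uV];
 * returns the GPIO pattern to apply, or -1 if nothing fits. */
static int example_pick_state(const struct example_gpio_state *states,
                              int nr_states, int min_uV, int max_uV)
{
        int ptr, target = -1, best_val = INT_MAX;

        for (ptr = 0; ptr < nr_states; ptr++) {
                if (states[ptr].value < best_val &&
                    states[ptr].value >= min_uV &&
                    states[ptr].value <= max_uV) {
                        target = states[ptr].gpios;
                        best_val = states[ptr].value;
                }
        }
        return target;
}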
|
D | irq_helpers.c |
    107  stat = &rid->states[i];  in regulator_notifier_isr_work()
    132  stat = &rid->states[i];  in regulator_notifier_isr_work()
    214  rdev = rid->states[i].rdev;  in regulator_notifier_isr()
    240  stat = &rid->states[i];  in regulator_notifier_isr()
    286  h->rdata.states = devm_kzalloc(dev, sizeof(*h->rdata.states) *  in init_rdev_state()
    288  if (!h->rdata.states)  in init_rdev_state()
    295  h->rdata.states[i].possible_errs = common_err;  in init_rdev_state()
    297  h->rdata.states[i].possible_errs |= *rdev_err++;  in init_rdev_state()
    298  h->rdata.states[i].rdev = *rdev++;  in init_rdev_state()
    309  if (h->rdata.states[i].possible_errs)  in init_rdev_errors()
    [all …]
|
/drivers/cpuidle/governors/ |
D | ladder.c |
    40  struct ladder_device_state states[CPUIDLE_STATE_MAX];  member
    55  ldev->states[old_idx].stats.promotion_count = 0;  in ladder_do_selection()
    56  ldev->states[old_idx].stats.demotion_count = 0;  in ladder_do_selection()
    72  int first_idx = drv->states[0].flags & CPUIDLE_FLAG_POLLING ? 1 : 0;  in ladder_select_state()
    82  last_state = &ldev->states[last_idx];  in ladder_select_state()
    84  last_residency = dev->last_residency_ns - drv->states[last_idx].exit_latency_ns;  in ladder_select_state()
    90  drv->states[last_idx + 1].exit_latency_ns <= latency_req) {  in ladder_select_state()
    102  drv->states[last_idx].exit_latency_ns > latency_req)) {  in ladder_select_state()
    106  if (drv->states[i].exit_latency_ns <= latency_req)  in ladder_select_state()
    136  int first_idx = drv->states[0].flags & CPUIDLE_FLAG_POLLING ? 1 : 0;  in ladder_enable_device()
    [all …]
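The ladder governor keeps per-state promotion/demotion counters (the stats.promotion_count hits above) and moves one rung at a time: if the last measured residency exceeded a promotion threshold often enough, step deeper; if it fell short, step shallower. A condensed sketch of that decision follows; the fields of the illustrative struct are made up, only the counters visible above are taken from the file.

#include <linux/types.h>

/* Illustrative per-rung bookkeeping, in the spirit of the ladder governor. */
struct example_rung {
        u64 promotion_time_ns;  /* residency above this argues for going deeper */
        u64 demotion_time_ns;   /* residency below this argues for going shallower */
        int promotion_count;    /* consecutive "slept long enough" observations */
        int demotion_count;     /* consecutive "woke too early" observations */
        int required_count;     /* observations needed before actually moving */
};

static int example_ladder_step(struct example_rung *rung, int cur_idx,
                               int max_idx, u64 last_residency_ns)
{
        if (last_residency_ns > rung->promotion_time_ns) {
                rung->demotion_count = 0;
                if (++rung->promotion_count >= rung->required_count &&
                    cur_idx < max_idx)
                        return cur_idx + 1;     /* promote one rung */
        } else if (last_residency_ns < rung->demotion_time_ns) {
                rung->promotion_count = 0;
                if (++rung->demotion_count >= rung->required_count &&
                    cur_idx > 0)
                        return cur_idx - 1;     /* demote one rung */
        }
        return cur_idx;                         /* stay put */
}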
|
D | menu.c |
    294  ((data->next_timer_ns < drv->states[1].target_residency_ns ||  in menu_select()
    295  latency_req < drv->states[1].exit_latency_ns) &&  in menu_select()
    302  *stop_tick = !(drv->states[0].flags & CPUIDLE_FLAG_POLLING);  in menu_select()
    344  struct cpuidle_state *s = &drv->states[i];  in menu_select()
    357  if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) &&  in menu_select()
    374  predicted_ns = drv->states[idx].target_residency_ns;  in menu_select()
    384  if (drv->states[idx].target_residency_ns < TICK_NSEC &&  in menu_select()
    403  if (((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) ||  in menu_select()
    407  if (idx > 0 && drv->states[idx].target_residency_ns > delta_tick) {  in menu_select()
    419  if (drv->states[i].target_residency_ns <= delta_tick)  in menu_select()
    [all …]
|
D | teo.c |
    174  u64 lat_ns = drv->states[dev->last_state_idx].exit_latency_ns;  in teo_update()
    204  s64 target_residency_ns = drv->states[i].target_residency_ns;  in teo_update()
    251  return (drv->states[idx].target_residency_ns +  in teo_middle_of_bin()
    252  drv->states[idx+1].target_residency_ns) / 2;  in teo_middle_of_bin()
    273  if (drv->states[i].target_residency_ns <= duration_ns)  in teo_find_shallower_state()
    320  if (drv->states[1].target_residency_ns > duration_ns)  in teo_select()
    333  struct cpuidle_state *s = &drv->states[i];  in teo_select()
    460  if (((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) ||  in teo_select()
    471  drv->states[idx].target_residency_ns > delta_tick)  in teo_select()
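teo buckets observed idle durations into bins bounded by consecutive target residencies: teo_middle_of_bin() above is just the midpoint between two adjacent entries, and teo_find_shallower_state() walks back down the table until a state's target residency fits the expected duration. Both reduce to a few lines over the same ordered states[] array; the sketches below are simplified (the real helpers also skip disabled states).

#include <linux/cpuidle.h>

/* Sketch: midpoint of the "bin" between state idx and the next deeper one. */
static u64 example_middle_of_bin(struct cpuidle_driver *drv, int idx)
{
        return (drv->states[idx].target_residency_ns +
                drv->states[idx + 1].target_residency_ns) / 2;
}

/* Sketch: walk toward shallower states until one fits the expected sleep. */
static int example_find_shallower(struct cpuidle_driver *drv, int idx,
                                  u64 duration_ns)
{
        int i;

        for (i = idx; i > 0; i--) {
                if (drv->states[i].target_residency_ns <= duration_ns)
                        break;
        }
        return i;
}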
|
/drivers/cpufreq/ |
D | ia64-acpi-cpufreq.c |
    97  if (value == data->acpi_data.states[i].status)  in extract_clock()
    98  return data->acpi_data.states[i].core_frequency;  in extract_clock()
    100  return data->acpi_data.states[i-1].core_frequency;  in extract_clock()
    159  value = (u32) data->acpi_data.states[state].control;  in processor_set_freq()
    254  if ((data->acpi_data.states[i].transition_latency * 1000) >  in acpi_cpufreq_cpu_init()
    257  data->acpi_data.states[i].transition_latency * 1000;  in acpi_cpufreq_cpu_init()
    266  data->acpi_data.states[i].core_frequency * 1000;  in acpi_cpufreq_cpu_init()
    282  (u32) data->acpi_data.states[i].core_frequency,  in acpi_cpufreq_cpu_init()
    283  (u32) data->acpi_data.states[i].power,  in acpi_cpufreq_cpu_init()
    284  (u32) data->acpi_data.states[i].transition_latency,  in acpi_cpufreq_cpu_init()
    [all …]
|
D | acpi-cpufreq.c |
    204  if (value == perf->states[i].status)  in extract_io()
    226  if (msr == perf->states[pos->driver_data].status)  in extract_msr()
    443  drv_write(data, mask, perf->states[next_perf_state].control);  in acpi_cpufreq_target()
    488  perf->states[next_perf_state].control);  in acpi_cpufreq_fast_switch()
    503  unsigned long freqn = perf->states[0].core_frequency * 1000;  in acpi_cpufreq_guess_freq()
    507  freqn = perf->states[i+1].core_frequency * 1000;  in acpi_cpufreq_guess_freq()
    518  return perf->states[0].core_frequency * 1000;  in acpi_cpufreq_guess_freq()
    805  if ((perf->states[i].transition_latency * 1000) >  in acpi_cpufreq_cpu_init()
    808  perf->states[i].transition_latency * 1000;  in acpi_cpufreq_cpu_init()
    820  if (i > 0 && perf->states[i].core_frequency >=  in acpi_cpufreq_cpu_init()
    [all …]
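In both ACPI cpufreq drivers, perf->states[] is the parsed _PSS package: core_frequency is in MHz, transition_latency in microseconds, and control/status are the values written to and read back from the platform. The cpu_init() hits above are essentially building a cpufreq frequency table from that array; a hedged sketch of the conversion follows (the helper name is hypothetical).

#include <linux/cpufreq.h>
#include <acpi/processor.h>

/* Convert an ACPI _PSS state array into a cpufreq frequency table.
 * core_frequency is in MHz, cpufreq wants kHz, hence the * 1000.
 * The caller must provide state_count + 1 table entries. */
static unsigned int
example_fill_freq_table(struct acpi_processor_performance *perf,
                        struct cpufreq_frequency_table *table)
{
        u64 max_latency_us = 0;
        unsigned int i;

        for (i = 0; i < perf->state_count; i++) {
                if (perf->states[i].transition_latency > max_latency_us)
                        max_latency_us = perf->states[i].transition_latency;

                table[i].driver_data = i;       /* index back into perf->states[] */
                table[i].frequency = perf->states[i].core_frequency * 1000;
        }
        table[i].frequency = CPUFREQ_TABLE_END;

        return max_latency_us * 1000;   /* policy->cpuinfo wants nanoseconds */
}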
|
/drivers/acpi/ |
D | processor_idle.c |
    165  return cx - pr->power.states >= pr->power.timer_broadcast_on_state;  in lapic_timer_needs_broadcast()
    215  pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;  in acpi_processor_get_power_info_fadt()
    216  pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;  in acpi_processor_get_power_info_fadt()
    229  pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;  in acpi_processor_get_power_info_fadt()
    230  pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;  in acpi_processor_get_power_info_fadt()
    233  pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.c2_latency;  in acpi_processor_get_power_info_fadt()
    234  pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.c3_latency;  in acpi_processor_get_power_info_fadt()
    244  pr->power.states[ACPI_STATE_C2].address = 0;  in acpi_processor_get_power_info_fadt()
    255  pr->power.states[ACPI_STATE_C3].address = 0;  in acpi_processor_get_power_info_fadt()
    259  pr->power.states[ACPI_STATE_C2].address,  in acpi_processor_get_power_info_fadt()
    [all …]
|
D | processor_perflib.c |
    98  qos_value = pr->performance->states[index].core_frequency * 1000;  in acpi_processor_get_platform_limit()
    160  *limit = pr->performance->states[pr->performance_platform_limit].  in acpi_processor_get_bios_limit()
    335  pr->performance->states =  in acpi_processor_get_performance_states()
    339  if (!pr->performance->states) {  in acpi_processor_get_performance_states()
    346  struct acpi_processor_px *px = &(pr->performance->states[i]);  in acpi_processor_get_performance_states()
    359  kfree(pr->performance->states);  in acpi_processor_get_performance_states()
    390  memcpy(&(pr->performance->states[last_invalid]),  in acpi_processor_get_performance_states()
    401  kfree(pr->performance->states);  in acpi_processor_get_performance_states()
    402  pr->performance->states = NULL;  in acpi_processor_get_performance_states()
    792  kfree(pr->performance->states);  in acpi_processor_unregister_performance()
|
/drivers/gpu/drm/ |
D | drm_blend.c |
    448  struct drm_plane_state **states;  in drm_atomic_helper_crtc_normalize_zpos() local
    456  states = kmalloc_array(total_planes, sizeof(*states), GFP_KERNEL);  in drm_atomic_helper_crtc_normalize_zpos()
    457  if (!states)  in drm_atomic_helper_crtc_normalize_zpos()
    471  states[n++] = plane_state;  in drm_atomic_helper_crtc_normalize_zpos()
    477  sort(states, n, sizeof(*states), drm_atomic_state_zpos_cmp, NULL);  in drm_atomic_helper_crtc_normalize_zpos()
    480  plane = states[i]->plane;  in drm_atomic_helper_crtc_normalize_zpos()
    482  states[i]->normalized_zpos = i;  in drm_atomic_helper_crtc_normalize_zpos()
    489  kfree(states);  in drm_atomic_helper_crtc_normalize_zpos()
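The normalize-zpos helper collects the plane states touched by a commit into a temporary states[] array, sorts it by the user-requested zpos, and rewrites normalized_zpos as the sort index. The same collect/sort/renumber shape, reduced to a sketch; the function names below are made up, and the real comparator also breaks ties by plane object id.

#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/string.h>
#include <drm/drm_plane.h>

static int example_zpos_cmp(const void *a, const void *b)
{
        const struct drm_plane_state *sa = *(const struct drm_plane_state **)a;
        const struct drm_plane_state *sb = *(const struct drm_plane_state **)b;

        if (sa->zpos != sb->zpos)
                return sa->zpos < sb->zpos ? -1 : 1;
        return 0;
}

/* Sketch: sort plane states by zpos and renumber them 0..n-1. */
static int example_normalize_zpos(struct drm_plane_state **collected, int n)
{
        struct drm_plane_state **states;
        int i;

        states = kmalloc_array(n, sizeof(*states), GFP_KERNEL);
        if (!states)
                return -ENOMEM;

        memcpy(states, collected, n * sizeof(*states));
        sort(states, n, sizeof(*states), example_zpos_cmp, NULL);

        for (i = 0; i < n; i++)
                states[i]->normalized_zpos = i; /* lowest zpos ends up at 0 */

        kfree(states);
        return 0;
}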
|
/drivers/i2c/muxes/ |
D | i2c-mux-pinctrl.c |
    19  struct pinctrl_state *states[];  member
    26  return pinctrl_select_state(mux->pinctrl, mux->states[chan]);  in i2c_mux_pinctrl_select()
    96  struct_size(mux, states, num_names),  in i2c_mux_pinctrl_probe()
    121  mux->states[i] = pinctrl_lookup_state(mux->pinctrl, name);  in i2c_mux_pinctrl_probe()
    122  if (IS_ERR(mux->states[i])) {  in i2c_mux_pinctrl_probe()
    123  ret = PTR_ERR(mux->states[i]);  in i2c_mux_pinctrl_probe()
    144  if (root != i2c_mux_pinctrl_root_adapter(mux->states[i])) {  in i2c_mux_pinctrl_probe()
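Here states[] is a C99 flexible array member sized at probe time with struct_size(), one pinctrl state per mux channel; selecting a channel is then a single pinctrl_select_state() call. A sketch of that allocation pattern follows; the structure and function names are illustrative, not the driver's.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/overflow.h>     /* struct_size() */
#include <linux/pinctrl/consumer.h>
#include <linux/slab.h>

struct example_pinctrl_mux {
        struct pinctrl *pinctrl;
        struct pinctrl_state *states[]; /* flexible array, one per channel */
};

static struct example_pinctrl_mux *
example_mux_alloc(struct device *dev, const char **names, int num_names)
{
        struct example_pinctrl_mux *mux;
        int i;

        /* struct_size() computes sizeof(*mux) + num_names * sizeof(mux->states[0])
         * with overflow checking. */
        mux = devm_kzalloc(dev, struct_size(mux, states, num_names), GFP_KERNEL);
        if (!mux)
                return ERR_PTR(-ENOMEM);

        mux->pinctrl = devm_pinctrl_get(dev);
        if (IS_ERR(mux->pinctrl))
                return ERR_CAST(mux->pinctrl);

        for (i = 0; i < num_names; i++) {
                mux->states[i] = pinctrl_lookup_state(mux->pinctrl, names[i]);
                if (IS_ERR(mux->states[i]))
                        return ERR_CAST(mux->states[i]);
        }
        return mux;
}

/* Switching channel 'chan' is then just:
 *      pinctrl_select_state(mux->pinctrl, mux->states[chan]);
 */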
|
/drivers/base/power/ |
D | domain_governor.c |
    163  min_sleep_ns = genpd->states[state].power_off_latency_ns +  in next_wakeup_allows_state()
    164  genpd->states[state].residency_ns;  in next_wakeup_allows_state()
    185  off_on_time_ns = genpd->states[state].power_off_latency_ns +  in __default_power_down_ok()
    186  genpd->states[state].power_on_latency_ns;  in __default_power_down_ok()
    255  genpd->states[state].power_on_latency_ns;  in __default_power_down_ok()
    386  if (idle_duration_ns >= (genpd->states[i].residency_ns +  in cpu_power_down_ok()
    387  genpd->states[i].power_off_latency_ns)) {  in cpu_power_down_ok()
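The genpd governor's test is a break-even check against the per-state costs kept in genpd->states[]: a domain state is only worth entering if the expected idle time covers its minimum residency plus the time spent powering off. Roughly, as condensed from the cpu_power_down_ok() hits above; the surrounding QoS and next-wakeup bookkeeping is omitted, and the ordering assumption is stated in the comment.

#include <linux/pm_domain.h>

/* Hedged sketch: pick the deepest genpd state whose cost is amortised by
 * the expected idle duration, assuming states[] is ordered shallow to deep. */
static int example_pick_domain_state(const struct genpd_power_state *states,
                                     int state_count, s64 idle_duration_ns)
{
        int i;

        for (i = state_count - 1; i >= 0; i--) {
                if (idle_duration_ns >= (states[i].residency_ns +
                                         states[i].power_off_latency_ns))
                        return i;
        }
        return -1;      /* not worth powering the domain off at all */
}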
|
/drivers/xen/ |
D | xen-acpi-processor.c |
    70  cx = &_pr->power.states[i];  in push_cxx_to_hypervisor()
    110  set_xen_guest_handle(op.u.set_pminfo.power.states, dst_cx_states);  in push_cxx_to_hypervisor()
    118  cx = &_pr->power.states[i];  in push_cxx_to_hypervisor()
    153  memcpy(&(dst_states[i]), &(_pr->performance->states[i]),  in xen_copy_pss_data()
    227  set_xen_guest_handle(dst_perf->states, dst_states);  in push_pxx_to_hypervisor()
    252  (u32) perf->states[i].core_frequency,  in push_pxx_to_hypervisor()
    253  (u32) perf->states[i].power,  in push_pxx_to_hypervisor()
    254  (u32) perf->states[i].transition_latency);  in push_pxx_to_hypervisor()
    280  if (_pr->performance && _pr->performance->states)  in upload_pm_data()
|
/drivers/mux/ |
D | gpio.c |
    73  mux_chip->mux->states = BIT(pins);  in mux_gpio_probe()
    77  if (idle_state < 0 || idle_state >= mux_chip->mux->states) {  in mux_gpio_probe()
    90  mux_chip->mux->states);  in mux_gpio_probe()
|
D | adgs1408.c |
    83  mux->states = 8;  in adgs1408_probe()
    85  mux->states = 4;  in adgs1408_probe()
    92  if (idle_state < mux->states) {  in adgs1408_probe()
|
/drivers/thermal/ |
D | thermal_sysfs.c |
    736  int i, states = stats->max_states;  in reset_store() local
    743  states * states * sizeof(*stats->trans_table));  in reset_store()
    819  unsigned long states;  in cooling_device_stats_setup() local
    831  if (cdev->ops->get_max_state(cdev, &states))  in cooling_device_stats_setup()
    834  states++; /* Total number of states is highest state + 1 */  in cooling_device_stats_setup()
    837  var += sizeof(*stats->time_in_state) * states;  in cooling_device_stats_setup()
    838  var += sizeof(*stats->trans_table) * states * states;  in cooling_device_stats_setup()
    845  stats->trans_table = (unsigned int *)(stats->time_in_state + states);  in cooling_device_stats_setup()
    848  stats->max_states = states;  in cooling_device_stats_setup()
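cooling_device_stats_setup() sizes one allocation for both statistics arrays: states entries of time_in_state followed by a states x states transition matrix, with trans_table simply pointing past the end of time_in_state (hit 845). A sketch of that layout, and of how a transition would be recorded, follows; the struct and function names are illustrative, not the thermal core's.

#include <linux/ktime.h>
#include <linux/slab.h>

struct example_cdev_stats {
        ktime_t *time_in_state;         /* [states] */
        unsigned int *trans_table;      /* [states][states], flattened */
        unsigned long max_states;
        unsigned long current_state;
};

static struct example_cdev_stats *example_stats_alloc(unsigned long max_state)
{
        unsigned long states = max_state + 1;   /* highest state + 1 entries */
        struct example_cdev_stats *stats;
        size_t var;

        var = sizeof(*stats);
        var += sizeof(*stats->time_in_state) * states;
        var += sizeof(*stats->trans_table) * states * states;

        stats = kzalloc(var, GFP_KERNEL);
        if (!stats)
                return NULL;

        /* Carve both arrays out of the single allocation. */
        stats->time_in_state = (ktime_t *)(stats + 1);
        stats->trans_table = (unsigned int *)(stats->time_in_state + states);
        stats->max_states = states;
        return stats;
}

static void example_record_transition(struct example_cdev_stats *stats,
                                      unsigned long new_state)
{
        /* Row = state being left, column = state being entered. */
        stats->trans_table[stats->current_state * stats->max_states + new_state]++;
        stats->current_state = new_state;
}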
|
/drivers/tty/serial/ |
D | st-asc.c |
    41  struct pinctrl_state *states[2];  member
    564  ascport->states[DEFAULT]);  in asc_set_termios()
    568  if (!ascport->rts && ascport->states[NO_HW_FLOWCTRL]) {  in asc_set_termios()
    570  ascport->states[NO_HW_FLOWCTRL]);  in asc_set_termios()
    758  ascport->states[DEFAULT] =  in asc_init_port()
    760  if (IS_ERR(ascport->states[DEFAULT])) {  in asc_init_port()
    761  ret = PTR_ERR(ascport->states[DEFAULT]);  in asc_init_port()
    768  ascport->states[NO_HW_FLOWCTRL] =  in asc_init_port()
    770  if (IS_ERR(ascport->states[NO_HW_FLOWCTRL])) in asc_init_port()
    771  ascport->states[NO_HW_FLOWCTRL] = NULL;  in asc_init_port()
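st-asc keeps exactly two pinctrl states: a mandatory default one and an optional one used when hardware flow control is off; an IS_ERR() result on the optional lookup is downgraded to NULL (hit 771) so later code can just test the pointer. A sketch of that optional-state idiom follows; the enum, function name and the "no-flowctrl" state name are hypothetical.

#include <linux/err.h>
#include <linux/pinctrl/consumer.h>

enum { EXAMPLE_DEFAULT, EXAMPLE_NO_FLOWCTRL, EXAMPLE_NR_STATES };

static int example_get_pinctrl_states(struct device *dev,
                                      struct pinctrl **pinctrl,
                                      struct pinctrl_state *states[EXAMPLE_NR_STATES])
{
        *pinctrl = devm_pinctrl_get(dev);
        if (IS_ERR(*pinctrl))
                return PTR_ERR(*pinctrl);

        /* The default state is mandatory: fail the probe if it is missing. */
        states[EXAMPLE_DEFAULT] =
                pinctrl_lookup_state(*pinctrl, PINCTRL_STATE_DEFAULT);
        if (IS_ERR(states[EXAMPLE_DEFAULT]))
                return PTR_ERR(states[EXAMPLE_DEFAULT]);

        /* The second state is optional: remember NULL instead of an error
         * so callers can simply check the pointer before selecting it. */
        states[EXAMPLE_NO_FLOWCTRL] =
                pinctrl_lookup_state(*pinctrl, "no-flowctrl");  /* hypothetical name */
        if (IS_ERR(states[EXAMPLE_NO_FLOWCTRL]))
                states[EXAMPLE_NO_FLOWCTRL] = NULL;

        return 0;
}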
|
/drivers/input/ |
D | ff-memless.c |
    49  struct ml_effect_state states[FF_MEMLESS_EFFECTS];  member
    125  state = &ml->states[i];  in ml_schedule_timer()
    341  state = &ml->states[i];  in ml_get_combo_effect()
    427  __clear_bit(FF_EFFECT_PLAYING, &ml->states[i].flags);  in ml_ff_set_gain()
    438  struct ml_effect_state *state = &ml->states[effect_id];  in ml_ff_playback()
    469  struct ml_effect_state *state = &ml->states[effect->id];  in ml_ff_upload()
    552  ml->states[i].effect = &ff->effects[i];  in input_ff_create_memless()
|