/drivers/base/power/

generic_ops.c
    24: const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;   (in pm_generic_runtime_suspend(), local)
    27: ret = pm && pm->runtime_suspend ? pm->runtime_suspend(dev) : 0;   (in pm_generic_runtime_suspend())
    43: const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;   (in pm_generic_runtime_resume(), local)
    46: ret = pm && pm->runtime_resume ? pm->runtime_resume(dev) : 0;   (in pm_generic_runtime_resume())
    65: if (drv && drv->pm && drv->pm->prepare)   (in pm_generic_prepare())
    66: ret = drv->pm->prepare(dev);   (in pm_generic_prepare())
    77: const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;   (in pm_generic_suspend_noirq(), local)
    79: return pm && pm->suspend_noirq ? pm->suspend_noirq(dev) : 0;   (in pm_generic_suspend_noirq())
    89: const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;   (in pm_generic_suspend_late(), local)
    91: return pm && pm->suspend_late ? pm->suspend_late(dev) : 0;   (in pm_generic_suspend_late())
    [all …]

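Taken together, the generic_ops.c hits show one pattern: look up the bound driver's dev_pm_ops table and invoke a hook only when both the table and that specific hook exist, treating every missing piece as success. Below is a minimal standalone C sketch of that guard; the struct layouts and names are illustrative stand-ins, not the kernel's definitions.

```c
#include <stdio.h>

struct device;

/* Illustrative stand-in for the kernel's dev_pm_ops table. */
struct dev_pm_ops_sketch {
	int (*runtime_suspend)(struct device *dev);
	int (*runtime_resume)(struct device *dev);
};

struct driver_sketch {
	const struct dev_pm_ops_sketch *pm;	/* may be NULL */
};

struct device {
	struct driver_sketch *driver;		/* may be NULL if unbound */
};

/* Mirror of the guard used in pm_generic_runtime_suspend(): any missing
 * piece (no driver, no ops table, no hook) is treated as success. */
static int generic_runtime_suspend_sketch(struct device *dev)
{
	const struct dev_pm_ops_sketch *pm = dev->driver ? dev->driver->pm : NULL;

	return pm && pm->runtime_suspend ? pm->runtime_suspend(dev) : 0;
}

static int demo_suspend(struct device *dev)
{
	(void)dev;
	puts("driver runtime_suspend called");
	return 0;
}

int main(void)
{
	static const struct dev_pm_ops_sketch ops = { .runtime_suspend = demo_suspend };
	struct driver_sketch drv = { .pm = &ops };
	struct device with_hook = { .driver = &drv };
	struct device unbound = { .driver = NULL };

	printf("with hook: %d\n", generic_runtime_suspend_sketch(&with_hook));
	printf("unbound:   %d\n", generic_runtime_suspend_sketch(&unbound));
	return 0;
}
```

Returning 0 for a missing callback is what lets these generic helpers be used as defaults for devices whose drivers implement only a subset of the PM hooks.
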
main.c
    494: } else if (dev->type && dev->type->pm) {   (in device_resume_noirq())
    496: callback = pm_noirq_op(dev->type->pm, state);   (in device_resume_noirq())
    497: } else if (dev->class && dev->class->pm) {   (in device_resume_noirq())
    499: callback = pm_noirq_op(dev->class->pm, state);   (in device_resume_noirq())
    500: } else if (dev->bus && dev->bus->pm) {   (in device_resume_noirq())
    502: callback = pm_noirq_op(dev->bus->pm, state);   (in device_resume_noirq())
    505: if (!callback && dev->driver && dev->driver->pm) {   (in device_resume_noirq())
    507: callback = pm_noirq_op(dev->driver->pm, state);   (in device_resume_noirq())
    623: } else if (dev->type && dev->type->pm) {   (in device_resume_early())
    625: callback = pm_late_early_op(dev->type->pm, state);   (in device_resume_early())
    [all …]

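The device_resume_noirq() hits show the order in which the PM core picks a callback for this phase: the first dev_pm_ops supplied by the device's type, class, or bus wins, and the driver's own table is consulted only when none of those produced a hook. A simplified standalone sketch of that lookup, with invented field names in place of the real dev->type/class/bus/driver pointers:

```c
#include <stdio.h>
#include <stddef.h>

struct device;
typedef int (*pm_callback_t)(struct device *dev);

/* Illustrative ops table; the kernel's dev_pm_ops has one hook per phase. */
struct ops_sketch {
	pm_callback_t resume_noirq;
};

/* Stand-ins for dev->type->pm, dev->class->pm, dev->bus->pm, dev->driver->pm. */
struct device {
	const struct ops_sketch *type_pm;
	const struct ops_sketch *class_pm;
	const struct ops_sketch *bus_pm;
	const struct ops_sketch *driver_pm;
};

/* Mirrors the lookup order visible in device_resume_noirq(): the first
 * subsystem-level table (type, then class, then bus) is chosen, and the
 * driver's own table is used only when no subsystem table supplied a hook. */
static pm_callback_t pick_resume_noirq(const struct device *dev)
{
	pm_callback_t cb = NULL;

	if (dev->type_pm)
		cb = dev->type_pm->resume_noirq;
	else if (dev->class_pm)
		cb = dev->class_pm->resume_noirq;
	else if (dev->bus_pm)
		cb = dev->bus_pm->resume_noirq;

	if (!cb && dev->driver_pm)
		cb = dev->driver_pm->resume_noirq;

	return cb;
}

static int driver_resume_noirq(struct device *dev) { (void)dev; return 0; }

int main(void)
{
	const struct ops_sketch drv_ops = { .resume_noirq = driver_resume_noirq };
	struct device dev = { .driver_pm = &drv_ops };	/* no subsystem ops */
	pm_callback_t cb = pick_resume_noirq(&dev);

	printf("callback %s\n", cb ? "found (driver fallback)" : "missing");
	return cb ? cb(&dev) : 0;
}
```
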
/drivers/input/touchscreen/

penmount.c
    54: struct pm {   (struct)
    63: void (*parse_packet)(struct pm *);   (argument)
    70: static void pm_mtevent(struct pm *pm, struct input_dev *input)   (in pm_mtevent(), argument)
    74: for (i = 0; i < pm->maxcontacts; ++i) {   (in pm_mtevent())
    77: pm->slots[i].active);   (in pm_mtevent())
    78: if (pm->slots[i].active) {   (in pm_mtevent())
    79: input_event(input, EV_ABS, ABS_MT_POSITION_X, pm->slots[i].x);   (in pm_mtevent())
    80: input_event(input, EV_ABS, ABS_MT_POSITION_Y, pm->slots[i].y);   (in pm_mtevent())
    103: static void pm_parse_9000(struct pm *pm)   (in pm_parse_9000(), argument)
    105: struct input_dev *dev = pm->dev;   (in pm_parse_9000())
    [all …]

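The pm_mtevent() hits iterate every contact slot and report coordinates only for active contacts. The fragment below is a condensed, kernel-style sketch of that loop using the common multitouch helpers from <linux/input/mt.h>; the slot bookkeeping struct is illustrative, and closing the frame with input_mt_sync_frame() is this sketch's choice, not necessarily the driver's exact sequence.

```c
#include <linux/input.h>
#include <linux/input/mt.h>

struct pm_slot_sketch {
	bool active;
	int x, y;
};

/* Condensed sketch of the reporting loop seen in pm_mtevent(): select each
 * slot, report whether a finger is present, and send coordinates only for
 * active contacts before closing the frame. */
static void pm_mtevent_sketch(struct input_dev *input,
			      const struct pm_slot_sketch *slots, int maxcontacts)
{
	int i;

	for (i = 0; i < maxcontacts; i++) {
		input_mt_slot(input, i);
		input_mt_report_slot_state(input, MT_TOOL_FINGER, slots[i].active);
		if (slots[i].active) {
			input_event(input, EV_ABS, ABS_MT_POSITION_X, slots[i].x);
			input_event(input, EV_ABS, ABS_MT_POSITION_Y, slots[i].y);
		}
	}

	input_mt_sync_frame(input);
	input_sync(input);
}
```
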
/drivers/input/misc/

powermate.c
    87: struct powermate_device *pm = urb->context;   (in powermate_irq(), local)
    88: struct device *dev = &pm->intf->dev;   (in powermate_irq())
    109: input_report_key(pm->input, BTN_0, pm->data[0] & 0x01);   (in powermate_irq())
    110: input_report_rel(pm->input, REL_DIAL, pm->data[1]);   (in powermate_irq())
    111: input_sync(pm->input);   (in powermate_irq())
    121: static void powermate_sync_state(struct powermate_device *pm)   (in powermate_sync_state(), argument)
    123: if (pm->requires_update == 0)   (in powermate_sync_state())
    125: if (pm->config->status == -EINPROGRESS)   (in powermate_sync_state())
    128: if (pm->requires_update & UPDATE_PULSE_ASLEEP){   (in powermate_sync_state())
    129: pm->configcr->wValue = cpu_to_le16( SET_PULSE_ASLEEP );   (in powermate_sync_state())
    [all …]

/drivers/gpu/drm/radeon/

radeon_pm.c
    57: for (i = 0; i < rdev->pm.num_power_states; i++) {   (in radeon_pm_get_type_index())
    58: if (rdev->pm.power_state[i].type == ps_type) {   (in radeon_pm_get_type_index())
    65: return rdev->pm.default_power_state_index;   (in radeon_pm_get_type_index())
    70: if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {   (in radeon_pm_acpi_event_handler())
    71: mutex_lock(&rdev->pm.mutex);   (in radeon_pm_acpi_event_handler())
    73: rdev->pm.dpm.ac_power = true;   (in radeon_pm_acpi_event_handler())
    75: rdev->pm.dpm.ac_power = false;   (in radeon_pm_acpi_event_handler())
    78: radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);   (in radeon_pm_acpi_event_handler())
    80: mutex_unlock(&rdev->pm.mutex);   (in radeon_pm_acpi_event_handler())
    81: } else if (rdev->pm.pm_method == PM_METHOD_PROFILE) {   (in radeon_pm_acpi_event_handler())
    [all …]

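The radeon_pm_acpi_event_handler() hits show the AC-power flag being flipped and propagated only while the PM mutex is held. A simplified standalone sketch of that locking shape, with a pthread mutex standing in for the kernel mutex and an invented stand-in for radeon_dpm_enable_bapm():

```c
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the rdev->pm.dpm state touched in the excerpt. */
struct dpm_state_sketch {
	pthread_mutex_t lock;
	bool dpm_enabled;
	bool ac_power;
};

static void enable_bapm_sketch(struct dpm_state_sketch *s, bool ac)
{
	/* Stand-in for radeon_dpm_enable_bapm(): react to the new AC state. */
	(void)s;
	printf("bapm reconfigured for %s power\n", ac ? "AC" : "battery");
}

/* Mirrors the shape of radeon_pm_acpi_event_handler(): the flag is only
 * changed and propagated while the PM lock is held. */
static void acpi_ac_event_sketch(struct dpm_state_sketch *s, bool on_ac)
{
	if (!s->dpm_enabled)
		return;

	pthread_mutex_lock(&s->lock);
	s->ac_power = on_ac;
	enable_bapm_sketch(s, s->ac_power);
	pthread_mutex_unlock(&s->lock);
}

int main(void)
{
	struct dpm_state_sketch s = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.dpm_enabled = true,
	};

	acpi_ac_event_sketch(&s, true);		/* plugged in */
	acpi_ac_event_sketch(&s, false);	/* on battery */
	return 0;
}
```
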
r600.c
    286: rdev->pm.dynpm_can_upclock = true;   (in r600_pm_get_dynpm_state())
    287: rdev->pm.dynpm_can_downclock = true;   (in r600_pm_get_dynpm_state())
    293: if (rdev->pm.num_power_states > 2)   (in r600_pm_get_dynpm_state())
    296: switch (rdev->pm.dynpm_planned_action) {   (in r600_pm_get_dynpm_state())
    298: rdev->pm.requested_power_state_index = min_power_state_index;   (in r600_pm_get_dynpm_state())
    299: rdev->pm.requested_clock_mode_index = 0;   (in r600_pm_get_dynpm_state())
    300: rdev->pm.dynpm_can_downclock = false;   (in r600_pm_get_dynpm_state())
    303: if (rdev->pm.current_power_state_index == min_power_state_index) {   (in r600_pm_get_dynpm_state())
    304: rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;   (in r600_pm_get_dynpm_state())
    305: rdev->pm.dynpm_can_downclock = false;   (in r600_pm_get_dynpm_state())
    [all …]

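r600_pm_get_dynpm_state() chooses a requested power-state index from the planned dynpm action and clears the matching "can go further" flag once it is already at the lowest or highest usable state. The standalone sketch below mirrors that decision structure; the enum names and the assumption that state 0 is a reserved default are illustrative.

```c
#include <stdbool.h>
#include <stdio.h>

/* Illustrative action names; the driver uses its own dynpm action enum. */
enum dynpm_action_sketch { ACTION_MINIMUM, ACTION_DOWNCLOCK, ACTION_UPCLOCK, ACTION_DEFAULT };

struct dynpm_sketch {
	int num_power_states;
	int current_index;
	int default_index;
	int requested_index;
	bool can_upclock;
	bool can_downclock;
};

/* Mirrors the decision structure visible in r600_pm_get_dynpm_state():
 * start optimistic, then clamp at the lowest/highest state and clear the
 * matching "can go further" flag. */
static void get_dynpm_state_sketch(struct dynpm_sketch *pm, enum dynpm_action_sketch action)
{
	/* Assumption for this sketch: state 0 is the default/boot state, so the
	 * lowest selectable state is 1 when more than two states exist. */
	int min_index = pm->num_power_states > 2 ? 1 : 0;
	int max_index = pm->num_power_states - 1;

	pm->can_upclock = true;
	pm->can_downclock = true;

	switch (action) {
	case ACTION_MINIMUM:
		pm->requested_index = min_index;
		pm->can_downclock = false;
		break;
	case ACTION_DOWNCLOCK:
		if (pm->current_index == min_index) {
			pm->requested_index = pm->current_index;
			pm->can_downclock = false;
		} else {
			pm->requested_index = pm->current_index - 1;
		}
		break;
	case ACTION_UPCLOCK:
		if (pm->current_index == max_index) {
			pm->requested_index = pm->current_index;
			pm->can_upclock = false;
		} else {
			pm->requested_index = pm->current_index + 1;
		}
		break;
	case ACTION_DEFAULT:
	default:
		pm->requested_index = pm->default_index;
		break;
	}
}

int main(void)
{
	struct dynpm_sketch pm = { .num_power_states = 4, .current_index = 1, .default_index = 3 };

	get_dynpm_state_sketch(&pm, ACTION_DOWNCLOCK);
	printf("requested %d, can_downclock=%d\n", pm.requested_index, pm.can_downclock);
	return 0;
}
```
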
r600_dpm.c
    145: if (rps == rdev->pm.dpm.current_ps)   (in r600_dpm_print_ps_status())
    147: if (rps == rdev->pm.dpm.requested_ps)   (in r600_dpm_print_ps_status())
    149: if (rps == rdev->pm.dpm.boot_ps)   (in r600_dpm_print_ps_status())
    756: rdev->pm.dpm.thermal.min_temp = low_temp;   (in r600_set_thermal_temperature_range())
    757: rdev->pm.dpm.thermal.max_temp = high_temp;   (in r600_set_thermal_temperature_range())
    790: r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {   (in r600_dpm_late_enable())
    855: rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);   (in r600_get_platform_caps())
    856: rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);   (in r600_get_platform_caps())
    857: rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);   (in r600_get_platform_caps())
    892: rdev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;   (in r600_parse_extended_power_table())
    [all …]

r420.c
    42: rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;   (in r420_pm_init_profile())
    43: rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;   (in r420_pm_init_profile())
    44: rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;   (in r420_pm_init_profile())
    45: rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;   (in r420_pm_init_profile())
    47: rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;   (in r420_pm_init_profile())
    48: rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;   (in r420_pm_init_profile())
    49: rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;   (in r420_pm_init_profile())
    50: rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;   (in r420_pm_init_profile())
    52: rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;   (in r420_pm_init_profile())
    53: rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;   (in r420_pm_init_profile())
    [all …]

rs690.c
    80: rdev->pm.igp_sideport_mclk.full = dfixed_const(le32_to_cpu(info->info.ulBootUpMemoryClock));   (in rs690_pm_info())
    81: rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp);   (in rs690_pm_info())
    83: rdev->pm.igp_system_mclk.full = dfixed_const(le16_to_cpu(info->info.usK8MemoryClock));   (in rs690_pm_info())
    85: rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk);   (in rs690_pm_info())
    86: rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);   (in rs690_pm_info())
    88: rdev->pm.igp_system_mclk.full = dfixed_const(400);   (in rs690_pm_info())
    89: rdev->pm.igp_ht_link_clk.full = dfixed_const(le16_to_cpu(info->info.usFSBClock));   (in rs690_pm_info())
    90: rdev->pm.igp_ht_link_width.full = dfixed_const(info->info.ucHTLinkWidth);   (in rs690_pm_info())
    94: rdev->pm.igp_sideport_mclk.full = dfixed_const(le32_to_cpu(info->info_v2.ulBootUpSidePortClock));   (in rs690_pm_info())
    95: rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp);   (in rs690_pm_info())
    [all …]

radeon_atombios.c
    2021: rdev->pm.power_state[state_index].misc = misc;   (in radeon_atombios_parse_misc_flags_1_3())
    2022: rdev->pm.power_state[state_index].misc2 = misc2;   (in radeon_atombios_parse_misc_flags_1_3())
    2025: rdev->pm.power_state[state_index].type =   (in radeon_atombios_parse_misc_flags_1_3())
    2028: rdev->pm.power_state[state_index].type =   (in radeon_atombios_parse_misc_flags_1_3())
    2031: rdev->pm.power_state[state_index].type =   (in radeon_atombios_parse_misc_flags_1_3())
    2034: rdev->pm.power_state[state_index].type =   (in radeon_atombios_parse_misc_flags_1_3())
    2037: rdev->pm.power_state[state_index].type =   (in radeon_atombios_parse_misc_flags_1_3())
    2039: rdev->pm.power_state[state_index].flags &=   (in radeon_atombios_parse_misc_flags_1_3())
    2043: rdev->pm.power_state[state_index].type =   (in radeon_atombios_parse_misc_flags_1_3())
    2046: rdev->pm.power_state[state_index].type =   (in radeon_atombios_parse_misc_flags_1_3())
    [all …]

btc_dpm.c
    1231: return btc_find_valid_clock(&rdev->pm.dpm.dyn_state.valid_mclk_values,   (in btc_get_valid_mclk())
    1238: return btc_find_valid_clock(&rdev->pm.dpm.dyn_state.valid_sclk_values,   (in btc_get_valid_sclk())
    1281: if (((pl->mclk + (pl->sclk - 1)) / pl->sclk) > rdev->pm.dpm.dyn_state.mclk_sclk_ratio)   (in btc_adjust_clock_combinations())
    1285: (rdev->pm.dpm.dyn_state.mclk_sclk_ratio - 1)) /   (in btc_adjust_clock_combinations())
    1286: rdev->pm.dpm.dyn_state.mclk_sclk_ratio);   (in btc_adjust_clock_combinations())
    1288: if ((pl->sclk - pl->mclk) > rdev->pm.dpm.dyn_state.sclk_mclk_delta)   (in btc_adjust_clock_combinations())
    1292: rdev->pm.dpm.dyn_state.sclk_mclk_delta);   (in btc_adjust_clock_combinations())
    1319: if ((*vddc - *vddci) > rdev->pm.dpm.dyn_state.vddc_vddci_delta) {   (in btc_apply_voltage_delta_rules())
    1321: (*vddc - rdev->pm.dpm.dyn_state.vddc_vddci_delta));   (in btc_apply_voltage_delta_rules())
    1325: if ((*vddci - *vddc) > rdev->pm.dpm.dyn_state.vddc_vddci_delta) {   (in btc_apply_voltage_delta_rules())
    [all …]

ci_dpm.c
    189: struct ci_power_info *pi = rdev->pm.dpm.priv;   (in ci_get_pi())
    271: if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries == NULL)   (in ci_populate_bapm_vddc_vid_sidd())
    273: if (rdev->pm.dpm.dyn_state.cac_leakage_table.count > 8)   (in ci_populate_bapm_vddc_vid_sidd())
    275: if (rdev->pm.dpm.dyn_state.cac_leakage_table.count !=   (in ci_populate_bapm_vddc_vid_sidd())
    276: rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count)   (in ci_populate_bapm_vddc_vid_sidd())
    279: for (i = 0; i < rdev->pm.dpm.dyn_state.cac_leakage_table.count; i++) {   (in ci_populate_bapm_vddc_vid_sidd())
    280: if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {   (in ci_populate_bapm_vddc_vid_sidd())
    281: lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1);   (in ci_populate_bapm_vddc_vid_sidd())
    282: hi_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2);   (in ci_populate_bapm_vddc_vid_sidd())
    283: hi2_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3);   (in ci_populate_bapm_vddc_vid_sidd())
    [all …]

si_dpm.c
    1761: struct si_power_info *pi = rdev->pm.dpm.priv;   (in si_get_pi())
    1835: u32 p_limit1 = rdev->pm.dpm.tdp_limit;   (in si_update_dte_from_pl2())
    1836: u32 p_limit2 = rdev->pm.dpm.near_tdp_limit;   (in si_update_dte_from_pl2())
    2125: if (tdp_adjustment > (u32)rdev->pm.dpm.tdp_od_limit)   (in si_calculate_adjusted_tdp_limits())
    2128: max_tdp_limit = ((100 + 100) * rdev->pm.dpm.tdp_limit) / 100;   (in si_calculate_adjusted_tdp_limits())
    2131: *tdp_limit = ((100 + tdp_adjustment) * rdev->pm.dpm.tdp_limit) / 100;   (in si_calculate_adjusted_tdp_limits())
    2132: *near_tdp_limit = rdev->pm.dpm.near_tdp_limit_adjusted + (*tdp_limit - rdev->pm.dpm.tdp_limit);   (in si_calculate_adjusted_tdp_limits())
    2134: *tdp_limit = ((100 - tdp_adjustment) * rdev->pm.dpm.tdp_limit) / 100;   (in si_calculate_adjusted_tdp_limits())
    2135: adjustment_delta = rdev->pm.dpm.tdp_limit - *tdp_limit;   (in si_calculate_adjusted_tdp_limits())
    2136: if (adjustment_delta < rdev->pm.dpm.near_tdp_limit_adjusted)   (in si_calculate_adjusted_tdp_limits())
    [all …]

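The si_calculate_adjusted_tdp_limits() lines apply a percentage over- or under-drive to the TDP limit and move the near-TDP limit by the same absolute delta. A tiny standalone sketch of that arithmetic; the parameter names and the clamp-to-zero in the downward case are illustrative.

```c
#include <stdbool.h>
#include <stdio.h>

/* Percentage scaling as seen in si_calculate_adjusted_tdp_limits(): the
 * near-TDP limit moves by the same absolute delta as the TDP limit. */
static void adjust_tdp_sketch(bool adjust_up, unsigned adjustment_pct,
                              unsigned tdp, unsigned near_tdp,
                              unsigned *tdp_out, unsigned *near_tdp_out)
{
	if (adjust_up) {
		*tdp_out = (100 + adjustment_pct) * tdp / 100;
		*near_tdp_out = near_tdp + (*tdp_out - tdp);
	} else {
		unsigned delta;

		*tdp_out = (100 - adjustment_pct) * tdp / 100;
		delta = tdp - *tdp_out;
		*near_tdp_out = delta < near_tdp ? near_tdp - delta : 0;
	}
}

int main(void)
{
	unsigned tdp_out, near_out;

	adjust_tdp_sketch(true, 10, 180, 160, &tdp_out, &near_out);
	printf("+10%%: tdp=%u, near-tdp=%u\n", tdp_out, near_out);	/* 198, 178 */
	return 0;
}
```
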
r100.c
    210: rdev->pm.dynpm_can_upclock = true;   (in r100_pm_get_dynpm_state())
    211: rdev->pm.dynpm_can_downclock = true;   (in r100_pm_get_dynpm_state())
    213: switch (rdev->pm.dynpm_planned_action) {   (in r100_pm_get_dynpm_state())
    215: rdev->pm.requested_power_state_index = 0;   (in r100_pm_get_dynpm_state())
    216: rdev->pm.dynpm_can_downclock = false;   (in r100_pm_get_dynpm_state())
    219: if (rdev->pm.current_power_state_index == 0) {   (in r100_pm_get_dynpm_state())
    220: rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;   (in r100_pm_get_dynpm_state())
    221: rdev->pm.dynpm_can_downclock = false;   (in r100_pm_get_dynpm_state())
    223: if (rdev->pm.active_crtc_count > 1) {   (in r100_pm_get_dynpm_state())
    224: for (i = 0; i < rdev->pm.num_power_states; i++) {   (in r100_pm_get_dynpm_state())
    [all …]

ni_dpm.c
    728: struct ni_power_info *pi = rdev->pm.dpm.priv;   (in ni_get_pi())
    795: if ((rdev->pm.dpm.new_active_crtc_count > 1) ||   (in ni_apply_state_adjust_rules())
    801: if (rdev->pm.dpm.ac_power)   (in ni_apply_state_adjust_rules())
    802: max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;   (in ni_apply_state_adjust_rules())
    804: max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;   (in ni_apply_state_adjust_rules())
    806: if (rdev->pm.dpm.ac_power == false) {   (in ni_apply_state_adjust_rules())
    873: btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,   (in ni_apply_state_adjust_rules())
    876: btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,   (in ni_apply_state_adjust_rules())
    879: btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,   (in ni_apply_state_adjust_rules())
    882: btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk,   (in ni_apply_state_adjust_rules())
    [all …]

/drivers/hid/

hid-prodikeys.c
    43: struct pcmidi_snd *pm; /* pcmidi device context */   (member)
    48: struct pcmidi_snd *pm;   (member)
    109: dbg_hid("pcmidi sysfs read channel=%u\n", pk->pm->midi_channel);   (in show_channel())
    111: return sprintf(buf, "%u (min:%u, max:%u)\n", pk->pm->midi_channel,   (in show_channel())
    126: pk->pm->midi_channel = channel;   (in store_channel())
    146: dbg_hid("pcmidi sysfs read sustain=%u\n", pk->pm->midi_sustain);   (in show_sustain())
    148: return sprintf(buf, "%u (off:%u, max:%u (ms))\n", pk->pm->midi_sustain,   (in show_sustain())
    163: pk->pm->midi_sustain = sustain;   (in store_sustain())
    164: pk->pm->midi_sustain_mode =   (in store_sustain())
    165: (0 == sustain || !pk->pm->midi_mode) ? 0 : 1;   (in store_sustain())
    [all …]

/drivers/media/platform/s5p-mfc/

s5p_mfc_pm.c
    29: static struct s5p_mfc_pm *pm;   (variable)
    40: pm = &dev->pm;   (in s5p_mfc_init_pm())
    42: pm->clock_gate = clk_get(&dev->plat_dev->dev, MFC_GATE_CLK_NAME);   (in s5p_mfc_init_pm())
    43: if (IS_ERR(pm->clock_gate)) {   (in s5p_mfc_init_pm())
    45: ret = PTR_ERR(pm->clock_gate);   (in s5p_mfc_init_pm())
    49: ret = clk_prepare(pm->clock_gate);   (in s5p_mfc_init_pm())
    56: pm->clock = clk_get(&dev->plat_dev->dev, MFC_SCLK_NAME);   (in s5p_mfc_init_pm())
    57: if (IS_ERR(pm->clock)) {   (in s5p_mfc_init_pm())
    60: clk_set_rate(pm->clock, MFC_SCLK_RATE);   (in s5p_mfc_init_pm())
    61: ret = clk_prepare_enable(pm->clock);   (in s5p_mfc_init_pm())
    [all …]

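s5p_mfc_init_pm() follows the usual two-clock bring-up: look up a clock, bail out with PTR_ERR() on failure, prepare the gate clock, then set the rate and enable the second clock. The kernel-style sketch below reproduces that sequence with explicit error unwinding; the clock names and the containing struct are placeholders rather than the driver's own.

```c
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

struct mfc_pm_sketch {
	struct clk *clock_gate;
	struct clk *clock;
};

/* Sketch of the clk_get()/clk_prepare()/clk_set_rate()/clk_prepare_enable()
 * sequence seen in s5p_mfc_init_pm(), unwinding on each failure. */
static int mfc_init_pm_sketch(struct device *dev, struct mfc_pm_sketch *pm,
			      unsigned long sclk_rate)
{
	int ret;

	pm->clock_gate = clk_get(dev, "mfc");		/* placeholder clock name */
	if (IS_ERR(pm->clock_gate))
		return PTR_ERR(pm->clock_gate);

	ret = clk_prepare(pm->clock_gate);
	if (ret)
		goto err_put_gate;

	pm->clock = clk_get(dev, "sclk_mfc");		/* placeholder clock name */
	if (IS_ERR(pm->clock)) {
		ret = PTR_ERR(pm->clock);
		goto err_unprepare_gate;
	}

	clk_set_rate(pm->clock, sclk_rate);
	ret = clk_prepare_enable(pm->clock);
	if (ret)
		goto err_put_clock;

	return 0;

err_put_clock:
	clk_put(pm->clock);
err_unprepare_gate:
	clk_unprepare(pm->clock_gate);
err_put_gate:
	clk_put(pm->clock_gate);
	return ret;
}
```
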
/drivers/scsi/

scsi_pm.c
    21: static int do_scsi_suspend(struct device *dev, const struct dev_pm_ops *pm)   (in do_scsi_suspend(), argument)
    23: return pm && pm->suspend ? pm->suspend(dev) : 0;   (in do_scsi_suspend())
    26: static int do_scsi_freeze(struct device *dev, const struct dev_pm_ops *pm)   (in do_scsi_freeze(), argument)
    28: return pm && pm->freeze ? pm->freeze(dev) : 0;   (in do_scsi_freeze())
    31: static int do_scsi_poweroff(struct device *dev, const struct dev_pm_ops *pm)   (in do_scsi_poweroff(), argument)
    33: return pm && pm->poweroff ? pm->poweroff(dev) : 0;   (in do_scsi_poweroff())
    36: static int do_scsi_resume(struct device *dev, const struct dev_pm_ops *pm)   (in do_scsi_resume(), argument)
    38: return pm && pm->resume ? pm->resume(dev) : 0;   (in do_scsi_resume())
    41: static int do_scsi_thaw(struct device *dev, const struct dev_pm_ops *pm)   (in do_scsi_thaw(), argument)
    43: return pm && pm->thaw ? pm->thaw(dev) : 0;   (in do_scsi_thaw())
    [all …]

/drivers/pci/

pci-driver.c
    645: WARN(ret && drv->driver.pm, "driver %s device %04x:%04x\n",   (in pci_has_legacy_pm_support())
    665: if (drv && drv->pm && drv->pm->prepare)   (in pci_pm_prepare())
    666: error = drv->pm->prepare(dev);   (in pci_pm_prepare())
    683: const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;   (in pci_pm_suspend(), local)
    688: if (!pm) {   (in pci_pm_suspend())
    704: if (pm->suspend) {   (in pci_pm_suspend())
    708: error = pm->suspend(dev);   (in pci_pm_suspend())
    709: suspend_report_result(pm->suspend, error);   (in pci_pm_suspend())
    717: pm->suspend);   (in pci_pm_suspend())
    730: const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;   (in pci_pm_suspend_noirq(), local)
    [all …]

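The pci_pm_suspend() hits show the driver model's transition pattern: prefer the new-style dev_pm_ops table when the bound driver has one, and fall back to the legacy suspend entry point otherwise, with a missing callback counting as success. A simplified standalone sketch of that choice; error reporting via suspend_report_result() and the PCI-specific state handling are omitted, and the struct names are invented.

```c
#include <stdio.h>

struct device;

struct pm_ops_sketch {
	int (*suspend)(struct device *dev);
};

/* Illustrative driver with both a new-style ops table and a legacy hook. */
struct pci_driver_sketch {
	const struct pm_ops_sketch *pm;			/* preferred, may be NULL */
	int (*legacy_suspend)(struct device *dev);	/* old-style fallback */
};

struct device {
	struct pci_driver_sketch *driver;
};

/* Mirrors the shape of pci_pm_suspend(): use the dev_pm_ops table when the
 * bound driver provides one, otherwise fall back to the legacy callback,
 * and treat "no callback at all" as success. */
static int pci_suspend_sketch(struct device *dev)
{
	const struct pm_ops_sketch *pm = dev->driver ? dev->driver->pm : NULL;

	if (!pm) {
		if (dev->driver && dev->driver->legacy_suspend)
			return dev->driver->legacy_suspend(dev);
		return 0;
	}

	return pm->suspend ? pm->suspend(dev) : 0;
}

static int new_style_suspend(struct device *dev) { (void)dev; puts("dev_pm_ops suspend"); return 0; }

int main(void)
{
	static const struct pm_ops_sketch ops = { .suspend = new_style_suspend };
	struct pci_driver_sketch drv = { .pm = &ops };
	struct device dev = { .driver = &drv };

	return pci_suspend_sketch(&dev);
}
```

Keeping both paths in the core, rather than in every driver, is what the pci_has_legacy_pm_support() warning at line 645 polices: a driver should not mix the legacy hooks with a dev_pm_ops table.
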
/drivers/ide/

ide-pm.c
    83: struct request_pm_state *pm = rq->special;   (in ide_complete_power_step(), local)
    87: drive->name, pm->pm_step);   (in ide_complete_power_step())
    92: switch (pm->pm_step) {   (in ide_complete_power_step())
    94: if (pm->pm_state == PM_EVENT_FREEZE)   (in ide_complete_power_step())
    95: pm->pm_step = IDE_PM_COMPLETED;   (in ide_complete_power_step())
    97: pm->pm_step = IDE_PM_STANDBY;   (in ide_complete_power_step())
    100: pm->pm_step = IDE_PM_COMPLETED;   (in ide_complete_power_step())
    103: pm->pm_step = IDE_PM_IDLE;   (in ide_complete_power_step())
    106: pm->pm_step = IDE_PM_RESTORE_DMA;   (in ide_complete_power_step())
    113: struct request_pm_state *pm = rq->special;   (in ide_start_power_step(), local)
    [all …]

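ide_complete_power_step() advances a small per-request state machine: each completed step selects the next one, and a freeze event skips the standby command. A standalone sketch of that progression with illustrative step and event names (the IDE core's own IDE_PM_* values differ):

```c
#include <stdio.h>

/* Illustrative step/event names; the IDE core has its own IDE_PM_* values. */
enum pm_step_sketch  { STEP_FLUSH_CACHE, STEP_STANDBY, STEP_RESTORE_PIO,
		       STEP_IDLE, STEP_RESTORE_DMA, STEP_COMPLETED };
enum pm_event_sketch { EVENT_FREEZE, EVENT_SUSPEND, EVENT_RESUME };

/* Mirrors the shape of ide_complete_power_step(): each completed step picks
 * the next one, and a freeze skips the standby command entirely. */
static enum pm_step_sketch next_power_step(enum pm_step_sketch step,
					   enum pm_event_sketch event)
{
	switch (step) {
	case STEP_FLUSH_CACHE:
		return event == EVENT_FREEZE ? STEP_COMPLETED : STEP_STANDBY;
	case STEP_STANDBY:
		return STEP_COMPLETED;
	case STEP_RESTORE_PIO:
		return STEP_IDLE;
	case STEP_IDLE:
		return STEP_RESTORE_DMA;
	default:
		return STEP_COMPLETED;
	}
}

int main(void)
{
	enum pm_step_sketch step = STEP_FLUSH_CACHE;

	/* Walk a suspend sequence until it reports completion. */
	while (step != STEP_COMPLETED) {
		printf("step %d done\n", step);
		step = next_power_step(step, EVENT_SUSPEND);
	}
	return 0;
}
```
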
/drivers/net/wireless/cw1200/

pm.c
    99: int cw1200_pm_init(struct cw1200_pm_state *pm,   (in cw1200_pm_init(), argument)
    102: spin_lock_init(&pm->lock);   (in cw1200_pm_init())
    104: init_timer(&pm->stay_awake);   (in cw1200_pm_init())
    105: pm->stay_awake.data = (unsigned long)pm;   (in cw1200_pm_init())
    106: pm->stay_awake.function = cw1200_pm_stay_awake_tmo;   (in cw1200_pm_init())
    111: void cw1200_pm_deinit(struct cw1200_pm_state *pm)   (in cw1200_pm_deinit(), argument)
    113: del_timer_sync(&pm->stay_awake);   (in cw1200_pm_deinit())
    116: void cw1200_pm_stay_awake(struct cw1200_pm_state *pm,   (in cw1200_pm_stay_awake(), argument)
    120: spin_lock_bh(&pm->lock);   (in cw1200_pm_stay_awake())
    121: cur_tmo = pm->stay_awake.expires - jiffies;   (in cw1200_pm_stay_awake())
    [all …]

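The cw1200 pm.c hits use the old-style timer API of this kernel generation (init_timer() plus the .data/.function fields) and only ever push the stay-awake deadline further out. A kernel-style sketch of both pieces; the struct layout is trimmed to what the excerpt shows, and the "extend only if longer" comparison is an assumption about the truncated body of cw1200_pm_stay_awake().

```c
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/timer.h>

struct pm_state_sketch {
	spinlock_t lock;
	struct timer_list stay_awake;
};

static void stay_awake_tmo_sketch(unsigned long arg)
{
	/* Timer expired: the device may be allowed to doze again. */
	(void)arg;
}

/* Old-style timer setup, as in the cw1200_pm_init() excerpt (this kernel
 * generation passed the callback context through timer_list.data). */
static void pm_init_sketch(struct pm_state_sketch *pm)
{
	spin_lock_init(&pm->lock);
	init_timer(&pm->stay_awake);
	pm->stay_awake.data = (unsigned long)pm;
	pm->stay_awake.function = stay_awake_tmo_sketch;
}

/* Sketch of cw1200_pm_stay_awake(): only push the wakeup deadline out,
 * never pull it in, so concurrent callers cannot shorten the grace time. */
static void pm_stay_awake_sketch(struct pm_state_sketch *pm, unsigned long tmo)
{
	long cur_tmo;

	spin_lock_bh(&pm->lock);
	cur_tmo = pm->stay_awake.expires - jiffies;
	if (!timer_pending(&pm->stay_awake) || cur_tmo < (long)tmo)
		mod_timer(&pm->stay_awake, jiffies + tmo);
	spin_unlock_bh(&pm->lock);
}
```
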
pm.h
    29: int cw1200_pm_init(struct cw1200_pm_state *pm,
    31: void cw1200_pm_deinit(struct cw1200_pm_state *pm);
    36: void cw1200_pm_stay_awake(struct cw1200_pm_state *pm,
    39: static inline void cw1200_pm_stay_awake(struct cw1200_pm_state *pm,   (in cw1200_pm_stay_awake(), argument)

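pm.h suggests the usual declaration-or-stub layout: real prototypes when power management is compiled in, and a static inline no-op otherwise, so callers never need #ifdefs of their own. The sketch below assumes a CONFIG_PM guard; since the listing truncates the parameter lists, the second arguments shown here are assumptions as well.

```c
/* Sketch of the declaration-or-stub layout suggested by pm.h: real prototypes
 * when power management is built in, empty inline stubs otherwise. The guard
 * macro and the second parameters are assumptions; the listing truncates them. */
struct cw1200_pm_state;
struct cw1200_common;	/* assumed context type for the truncated argument */

#ifdef CONFIG_PM
int cw1200_pm_init(struct cw1200_pm_state *pm, struct cw1200_common *priv);
void cw1200_pm_deinit(struct cw1200_pm_state *pm);
void cw1200_pm_stay_awake(struct cw1200_pm_state *pm, unsigned long tmo);
#else
static inline void cw1200_pm_stay_awake(struct cw1200_pm_state *pm,
					unsigned long tmo)
{
	/* Nothing to keep awake when PM support is compiled out. */
}
#endif
```
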
/drivers/gpio/

gpio-amd8111.c
    72: void __iomem *pm;   (member)
    84: agp->orig[offset] = ioread8(agp->pm + AMD_REG_GPIO(offset)) &   (in amd_gpio_request())
    98: iowrite8(agp->orig[offset], agp->pm + AMD_REG_GPIO(offset));   (in amd_gpio_free())
    108: temp = ioread8(agp->pm + AMD_REG_GPIO(offset));   (in amd_gpio_set())
    110: iowrite8(temp, agp->pm + AMD_REG_GPIO(offset));   (in amd_gpio_set())
    121: temp = ioread8(agp->pm + AMD_REG_GPIO(offset));   (in amd_gpio_get())
    135: temp = ioread8(agp->pm + AMD_REG_GPIO(offset));   (in amd_gpio_dirout())
    137: iowrite8(temp, agp->pm + AMD_REG_GPIO(offset));   (in amd_gpio_dirout())
    152: temp = ioread8(agp->pm + AMD_REG_GPIO(offset));   (in amd_gpio_dirin())
    154: iowrite8(temp, agp->pm + AMD_REG_GPIO(offset));   (in amd_gpio_dirin())
    [all …]

gpio-pch.c
    43: u32 pm;   (member)
    137: u32 pm;   (in pch_gpio_direction_output(), local)
    150: pm = ioread32(&chip->reg->pm) & ((1 << gpio_pins[chip->ioh]) - 1);   (in pch_gpio_direction_output())
    151: pm |= (1 << nr);   (in pch_gpio_direction_output())
    152: iowrite32(pm, &chip->reg->pm);   (in pch_gpio_direction_output())
    162: u32 pm;   (in pch_gpio_direction_input(), local)
    166: pm = ioread32(&chip->reg->pm) & ((1 << gpio_pins[chip->ioh]) - 1);   (in pch_gpio_direction_input())
    167: pm &= ~(1 << nr);   (in pch_gpio_direction_input())
    168: iowrite32(pm, &chip->reg->pm);   (in pch_gpio_direction_input())
    183: chip->pch_gpio_reg.pm_reg = ioread32(&chip->reg->pm);   (in pch_gpio_save_reg_conf())
    [all …]

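Both GPIO drivers program pin direction by read-modify-write on a "pm" register: read it, mask it to the pins the chip variant actually has, set or clear the bit for one line, and write it back. A kernel-style sketch of that sequence as it appears in pch_gpio_direction_output()/pch_gpio_direction_input(); locking and the per-variant pin-count table are simplified assumptions here.

```c
#include <linux/io.h>

/* Sketch of the read-modify-write seen in the pch_gpio direction helpers:
 * mask the register down to the pins this IOH variant provides, then set or
 * clear the direction bit for one GPIO. Callers are assumed to hold the
 * chip's spinlock, as the real driver does around these accesses. */
static void pch_set_direction_sketch(void __iomem *pm_reg, unsigned int nr,
				     unsigned int num_pins, bool output)
{
	u32 pm;

	pm = ioread32(pm_reg) & ((1U << num_pins) - 1);	/* num_pins < 32 assumed */
	if (output)
		pm |= 1U << nr;		/* 1 = output */
	else
		pm &= ~(1U << nr);	/* 0 = input */
	iowrite32(pm, pm_reg);
}
```
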
/drivers/net/fddi/skfp/

smtdef.c
    167: struct fddi_mib_p *pm ;   (in smt_init_mib(), local)
    253: pm = mib->p ;   (in smt_init_mib())
    263: pm->fddiPORTIndex = port+INDEX_PORT ;   (in smt_init_mib())
    264: pm->fddiPORTHardwarePresent = TRUE ;   (in smt_init_mib())
    266: pm->fddiPORTLer_Alarm = DEFAULT_LEM_ALARM ;   (in smt_init_mib())
    267: pm->fddiPORTLer_Cutoff = DEFAULT_LEM_CUTOFF ;   (in smt_init_mib())
    273: pm->fddiPORTRequestedPaths[1] = 0 ;   (in smt_init_mib())
    274: pm->fddiPORTRequestedPaths[2] = 0 ;   (in smt_init_mib())
    275: pm->fddiPORTRequestedPaths[3] = 0 ;   (in smt_init_mib())
    276: pm->fddiPORTAvailablePaths = MIB_PATH_P ;   (in smt_init_mib())
    [all …]
