/drivers/clk/sunxi/ |
D | clk-sun9i-cpus.c |
      55  struct sun9i_a80_cpus_clk *cpus = to_sun9i_a80_cpus_clk(hw);  in sun9i_a80_cpus_clk_recalc_rate() local
      60  reg = readl(cpus->reg);  in sun9i_a80_cpus_clk_recalc_rate()
     155  struct sun9i_a80_cpus_clk *cpus = to_sun9i_a80_cpus_clk(hw);  in sun9i_a80_cpus_clk_set_rate() local
     162  reg = readl(cpus->reg);  in sun9i_a80_cpus_clk_set_rate()
     170  writel(reg, cpus->reg);  in sun9i_a80_cpus_clk_set_rate()
     188  struct sun9i_a80_cpus_clk *cpus;  in sun9i_a80_cpus_setup() local
     193  cpus = kzalloc(sizeof(*cpus), GFP_KERNEL);  in sun9i_a80_cpus_setup()
     194  if (!cpus)  in sun9i_a80_cpus_setup()
     197  cpus->reg = of_io_request_and_map(node, 0, of_node_full_name(node));  in sun9i_a80_cpus_setup()
     198  if (IS_ERR(cpus->reg))  in sun9i_a80_cpus_setup()
    [all …]
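Note: the fragments above are the usual map-then-read-modify-write shape for a memory-mapped clock register. A minimal sketch of the pattern (struct and helper names hypothetical, not the driver's code):

    #include <linux/err.h>
    #include <linux/io.h>
    #include <linux/of.h>
    #include <linux/of_address.h>
    #include <linux/slab.h>

    /* Hypothetical private struct mirroring the driver's reg field. */
    struct demo_clk {
            void __iomem *reg;
    };

    /* Map register region 0 of @node and remember it, as the setup code does. */
    static struct demo_clk *demo_clk_setup(struct device_node *node)
    {
            struct demo_clk *clk = kzalloc(sizeof(*clk), GFP_KERNEL);

            if (!clk)
                    return ERR_PTR(-ENOMEM);

            clk->reg = of_io_request_and_map(node, 0, of_node_full_name(node));
            if (IS_ERR(clk->reg)) {
                    struct demo_clk *err = ERR_CAST(clk->reg);

                    kfree(clk);
                    return err;
            }
            return clk;
    }

    /* Read-modify-write, the shape of the set_rate path. */
    static void demo_clk_update(struct demo_clk *clk, u32 clear, u32 set)
    {
            u32 reg = readl(clk->reg);      /* read the current register value */

            reg &= ~clear;                  /* clear the divider/mux field */
            reg |= set;                     /* install the new setting */
            writel(reg, clk->reg);          /* write it back */
    }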
|
/drivers/cpufreq/ |
D | cpufreq-dt.c |
      30  cpumask_var_t cpus;  member
      50  if (cpumask_test_cpu(cpu, priv->cpus))  in cpufreq_dt_find_data()
     119  cpumask_copy(policy->cpus, priv->cpus);  in cpufreq_init()
     138  if (!dev_pm_opp_of_cpumask_add_table(policy->cpus))  in cpufreq_init()
     180  dev_pm_opp_of_register_em(cpu_dev, policy->cpus);  in cpufreq_init()
     188  dev_pm_opp_of_cpumask_remove_table(policy->cpus);  in cpufreq_init()
     254  if (!alloc_cpumask_var(&priv->cpus, GFP_KERNEL))  in dt_cpufreq_early_init()
     287  ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, priv->cpus);  in dt_cpufreq_early_init()
     297  if (dev_pm_opp_get_sharing_cpus(cpu_dev, priv->cpus)) {  in dt_cpufreq_early_init()
     298  cpumask_setall(priv->cpus);  in dt_cpufreq_early_init()
    [all …]
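Note: these fragments trace the standard cpumask_var_t lifecycle: allocate, populate from OPP sharing info with cpumask_setall() as the all-CPUs fallback, test membership, and free. A minimal sketch of the same lifecycle (struct and names hypothetical):

    #include <linux/cpumask.h>
    #include <linux/slab.h>

    struct demo_priv {
            cpumask_var_t cpus;     /* CPUs sharing one clock/voltage domain */
    };

    static struct demo_priv *demo_priv_alloc(void)
    {
            struct demo_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

            if (!priv)
                    return NULL;

            /* With CONFIG_CPUMASK_OFFSTACK this really allocates;
             * otherwise it is a cheap no-op. */
            if (!alloc_cpumask_var(&priv->cpus, GFP_KERNEL)) {
                    kfree(priv);
                    return NULL;
            }

            /* Fallback used when no sharing info is available: assume
             * every CPU belongs to the domain. */
            cpumask_setall(priv->cpus);
            return priv;
    }

    static bool demo_priv_covers(struct demo_priv *priv, unsigned int cpu)
    {
            return cpumask_test_cpu(cpu, priv->cpus);
    }

    static void demo_priv_free(struct demo_priv *priv)
    {
            free_cpumask_var(priv->cpus);
            kfree(priv);
    }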
|
D | tegra186-cpufreq.c |
      22  int cpus[4];  member
      31  .cpus = { 1, 2, NO_CPU, NO_CPU },
      37  .cpus = { 0, 3, 4, 5 },
      67  for (core = 0; core < ARRAY_SIZE(info->cpus); core++) {  in tegra186_cpufreq_init()
      68  if (info->cpus[core] == policy->cpu)  in tegra186_cpufreq_init()
      71  if (core == ARRAY_SIZE(info->cpus))  in tegra186_cpufreq_init()
     116  for (core = 0; core < ARRAY_SIZE(cluster->info->cpus); core++) {  in tegra186_cpufreq_get()
     117  if (cluster->info->cpus[core] != policy->cpu)  in tegra186_cpufreq_get()
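Note: this driver tracks cluster membership with a fixed four-slot CPU table padded by a NO_CPU sentinel rather than a cpumask; running off the end of the loop (core == ARRAY_SIZE(...)) means the CPU is not in that cluster. A standalone sketch of the lookup, kernel context assumed (table values mirror the fragments above, NO_CPU redefined here for the sketch):

    #include <linux/kernel.h>       /* ARRAY_SIZE() */

    #define NO_CPU  -1              /* unused slot in the cluster table */

    struct demo_cluster_info {
            int cpus[4];            /* logical CPU ids owned by this cluster */
    };

    /* Illustrative layout taken from the initializers above. */
    static const struct demo_cluster_info demo_clusters[] = {
            { .cpus = { 1, 2, NO_CPU, NO_CPU } },
            { .cpus = { 0, 3, 4, 5 } },
    };

    /* Return @cpu's core slot inside @info, or -1 if it is not there. */
    static int demo_find_core(const struct demo_cluster_info *info, int cpu)
    {
            unsigned int core;

            for (core = 0; core < ARRAY_SIZE(info->cpus); core++)
                    if (info->cpus[core] == cpu)
                            return core;

            return -1;      /* loop ran off the end: not in this cluster */
    }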
|
D | armada-8k-cpufreq.c |
     128  struct cpumask cpus;  in armada_8k_cpufreq_init() local
     141  cpumask_copy(&cpus, cpu_possible_mask);  in armada_8k_cpufreq_init()
     148  for_each_cpu(cpu, &cpus) {  in armada_8k_cpufreq_init()
     178  cpumask_andnot(&cpus, &cpus, &shared_cpus);  in armada_8k_cpufreq_init()
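Note: the init function partitions the possible CPUs into frequency domains by peeling: copy cpu_possible_mask, handle the domain of the first remaining CPU, then subtract that domain with cpumask_andnot() so each one is visited exactly once. A sketch of the loop (the sharing query is a hypothetical stand-in for the driver's per-CPU lookup):

    #include <linux/cpumask.h>

    /* Hypothetical stand-in: pretend each CPU sits alone in its domain.
     * The real driver derives this from the clock topology. */
    static void demo_shared_cpus(unsigned int cpu, struct cpumask *shared)
    {
            cpumask_clear(shared);
            cpumask_set_cpu(cpu, shared);
    }

    static void demo_partition_domains(void)
    {
            struct cpumask cpus, shared_cpus;
            unsigned int cpu;

            /* Work on a copy: the loop consumes it domain by domain. */
            cpumask_copy(&cpus, cpu_possible_mask);

            for_each_cpu(cpu, &cpus) {
                    demo_shared_cpus(cpu, &shared_cpus);

                    /* ... set up one frequency domain for shared_cpus ... */

                    /* Drop the handled CPUs before the next iteration. */
                    cpumask_andnot(&cpus, &cpus, &shared_cpus);
            }
    }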
|
D | mediatek-cpufreq.c |
      36  struct cpumask cpus;  member
      56  if (cpumask_test_cpu(cpu, &info->cpus))  in mtk_cpu_dvfs_info_lookup()
     360  ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, &info->cpus);  in mtk_cpu_dvfs_info_init()
     367  ret = dev_pm_opp_of_cpumask_add_table(&info->cpus);  in mtk_cpu_dvfs_info_init()
     399  dev_pm_opp_of_cpumask_remove_table(&info->cpus);  in mtk_cpu_dvfs_info_init()
     425  dev_pm_opp_of_cpumask_remove_table(&info->cpus);  in mtk_cpu_dvfs_info_release()
     448  cpumask_copy(policy->cpus, &info->cpus);  in mtk_cpufreq_init()
     453  dev_pm_opp_of_register_em(info->cpu_dev, policy->cpus);  in mtk_cpufreq_init()
|
D | cpufreq.c |
     183  cpumask_setall(policy->cpus);  in cpufreq_generic_init()
     191  return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;  in cpufreq_cpu_get_raw()
     383  cpumask_pr_args(policy->cpus));  in cpufreq_notify_transition()
     385  for_each_cpu(cpu, policy->cpus)  in cpufreq_notify_transition()
     886  return cpufreq_show_cpus(policy->cpus, buf);  in show_affected_cpus()
    1129  if (cpumask_test_cpu(cpu, policy->cpus))  in cpufreq_add_policy_cpu()
    1136  cpumask_set_cpu(cpu, policy->cpus);  in cpufreq_add_policy_cpu()
    1221  if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))  in cpufreq_policy_alloc()
    1253  ret, cpumask_pr_args(policy->cpus));  in cpufreq_policy_alloc()
    1261  ret, cpumask_pr_args(policy->cpus));  in cpufreq_policy_alloc()
    [all …]
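Note: the cpumask_pr_args() fragments pair with the %*pb/%*pbl printk specifiers; that is how a policy's CPU set gets printed. A minimal example of the idiom (helper name hypothetical):

    #include <linux/cpumask.h>
    #include <linux/printk.h>

    static void demo_print_mask(const struct cpumask *cpus)
    {
            /* %*pbl prints a ranged list such as "0,3-5"; %*pb prints
             * the raw hex bitmap. cpumask_pr_args() supplies the
             * (nr_cpu_ids, bits) pair the specifier consumes. */
            pr_info("CPUs in policy: %*pbl\n", cpumask_pr_args(cpus));
    }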
|
D | speedstep-ich.c |
     263  policy_cpu = cpumask_any_and(policy->cpus, cpu_online_mask);  in speedstep_target()
     296  cpumask_copy(policy->cpus, topology_sibling_cpumask(policy->cpu));  in speedstep_cpu_init()
     298  policy_cpu = cpumask_any_and(policy->cpus, cpu_online_mask);  in speedstep_cpu_init()
|
D | scpi-cpufreq.c |
     111  ret = scpi_get_sharing_cpus(cpu_dev, policy->cpus);  in scpi_cpufreq_init()
     117  ret = dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);  in scpi_cpufreq_init()
     166  dev_pm_opp_of_register_em(cpu_dev, policy->cpus);  in scpi_cpufreq_init()
|
D | cpufreq_governor.c |
     102  for_each_cpu(j, policy_dbs->policy->cpus) {  in gov_update_cpu_data()
     137  for_each_cpu(j, policy->cpus) {  in dbs_update()
     331  for_each_cpu(cpu, policy->cpus) {  in gov_set_update_util()
     343  for_each_cpu(i, policy->cpus)  in gov_clear_update_util()
     523  for_each_cpu(j, policy->cpus) {  in cpufreq_dbs_governor_start()
|
D | scmi-cpufreq.c |
     141  ret = scmi_get_sharing_cpus(cpu_dev, policy->cpus);  in scmi_cpufreq_init()
     147  ret = dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);  in scmi_cpufreq_init()
     192  em_dev_register_perf_domain(cpu_dev, nr_opp, &em_cb, policy->cpus,  in scmi_cpufreq_init()
|
D | acpi-cpufreq.c |
     132  on_each_cpu_mask(policy->cpus, boost_set_msr_each,  in set_boost()
     135  cpumask_pr_args(policy->cpus), val ? "en" : "dis");  in set_boost()
     441  cpumask_of(policy->cpu) : policy->cpus;  in acpi_cpufreq_target()
     721  cpumask_copy(policy->cpus, perf->shared_cpu_map);  in acpi_cpufreq_cpu_init()
     729  cpumask_copy(policy->cpus, topology_core_cpumask(cpu));  in acpi_cpufreq_cpu_init()
     734  cpumask_clear(policy->cpus);  in acpi_cpufreq_cpu_init()
     735  cpumask_set_cpu(cpu, policy->cpus);  in acpi_cpufreq_cpu_init()
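Note: set_boost() pushes the MSR update to every CPU in the policy with on_each_cpu_mask(). A minimal sketch of that cross-call pattern (callback body and names illustrative):

    #include <linux/cpumask.h>
    #include <linux/smp.h>

    /* Runs on each target CPU, in IPI context: keep it short and atomic. */
    static void demo_per_cpu_work(void *info)
    {
            bool enable = *(bool *)info;

            /* e.g. read-modify-write a per-CPU boost MSR here */
            (void)enable;
    }

    static void demo_apply_on_policy_cpus(const struct cpumask *cpus, bool enable)
    {
            /* wait=true: return only after every CPU has run the callback,
             * so &enable may safely live on this stack. */
            on_each_cpu_mask(cpus, demo_per_cpu_work, &enable, true);
    }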
|
D | qcom-cpufreq-hw.c |
     227  dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);  in qcom_cpufreq_hw_read_lut()
     344  qcom_get_related_cpus(index, policy->cpus);  in qcom_cpufreq_hw_cpu_init()
     345  if (!cpumask_weight(policy->cpus)) {  in qcom_cpufreq_hw_cpu_init()
     366  dev_pm_opp_of_register_em(cpu_dev, policy->cpus);  in qcom_cpufreq_hw_cpu_init()
|
D | vexpress-spc-cpufreq.c |
     429  dev_pm_opp_get_sharing_cpus(cpu_dev, policy->cpus);  in ve_spc_cpufreq_init()
     431  for_each_cpu(cpu, policy->cpus)  in ve_spc_cpufreq_init()
     438  ret = get_cluster_clk_and_freq_table(cpu_dev, policy->cpus);  in ve_spc_cpufreq_init()
     445  dev_pm_opp_of_register_em(cpu_dev, policy->cpus);  in ve_spc_cpufreq_init()
|
D | p4-clockmod.c |
     111  for_each_cpu(i, policy->cpus)  in cpufreq_p4_target()
     162  cpumask_copy(policy->cpus, topology_sibling_cpumask(policy->cpu));  in cpufreq_p4_cpu_init()
|
/drivers/irqchip/ |
D | irq-bcm7038-l1.c |
      45  struct bcm7038_l1_cpu *cpus[NR_CPUS];  member
     131  cpu = intc->cpus[cpu_logical_map(smp_processor_id())];  in bcm7038_l1_irq_handle()
     133  cpu = intc->cpus[0];  in bcm7038_l1_irq_handle()
     163  intc->cpus[cpu_idx]->mask_cache[word] &= ~mask;  in __bcm7038_l1_unmask()
     164  l1_writel(mask, intc->cpus[cpu_idx]->map_base +  in __bcm7038_l1_unmask()
     174  intc->cpus[cpu_idx]->mask_cache[word] |= mask;  in __bcm7038_l1_mask()
     175  l1_writel(mask, intc->cpus[cpu_idx]->map_base +  in __bcm7038_l1_mask()
     213  was_disabled = !!(intc->cpus[intc->affinity[hw]]->mask_cache[word] &  in bcm7038_l1_set_affinity()
     283  cpu = intc->cpus[idx] = kzalloc(sizeof(*cpu) + n_words * sizeof(u32),  in bcm7038_l1_init_one()
     343  intc->cpus[boot_cpu]->map_base + reg_mask_set(intc, word));  in bcm7038_l1_suspend()
    [all …]
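Note: both BCM L1 drivers keep a per-CPU software copy of each interrupt-mask word next to that CPU's MMIO window, allocated in a single kzalloc() with a trailing flexible array, so an unmask updates the cache and the hardware together. A sketch of that layout (struct names and the register offset invented):

    #include <linux/io.h>
    #include <linux/slab.h>

    struct demo_l1_cpu {
            void __iomem *map_base;     /* this CPU's register window */
            u32 mask_cache[];           /* one cached word per 32 interrupts */
    };

    /* One allocation covers the struct plus n_words cache entries, the
     * same shape as kzalloc(sizeof(*cpu) + n_words * sizeof(u32), ...). */
    static struct demo_l1_cpu *demo_l1_cpu_alloc(unsigned int n_words,
                                                 void __iomem *base)
    {
            struct demo_l1_cpu *cpu;

            cpu = kzalloc(sizeof(*cpu) + n_words * sizeof(u32), GFP_KERNEL);
            if (cpu)
                    cpu->map_base = base;
            return cpu;
    }

    #define DEMO_REG_MASK_CLEAR(word)  (0x10 + (word) * 4)  /* invented offset */

    /* Unmask: clear the bit in the software cache, then tell the hardware. */
    static void demo_l1_unmask(struct demo_l1_cpu *cpu, unsigned int word, u32 mask)
    {
            cpu->mask_cache[word] &= ~mask;
            writel(mask, cpu->map_base + DEMO_REG_MASK_CLEAR(word));
    }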
|
D | irq-bcm6345-l1.c |
      81  struct bcm6345_l1_cpu *cpus[NR_CPUS];  member
     154  intc->cpus[cpu_idx]->enable_cache[word] |= mask;  in __bcm6345_l1_unmask()
     155  __raw_writel(intc->cpus[cpu_idx]->enable_cache[word],  in __bcm6345_l1_unmask()
     156  intc->cpus[cpu_idx]->map_base + reg_enable(intc, word));  in __bcm6345_l1_unmask()
     166  intc->cpus[cpu_idx]->enable_cache[word] &= ~mask;  in __bcm6345_l1_mask()
     167  __raw_writel(intc->cpus[cpu_idx]->enable_cache[word],  in __bcm6345_l1_mask()
     168  intc->cpus[cpu_idx]->map_base + reg_enable(intc, word));  in __bcm6345_l1_mask()
     215  enabled = intc->cpus[old_cpu]->enable_cache[word] & mask;  in bcm6345_l1_set_affinity()
     250  cpu = intc->cpus[idx] = kzalloc(sizeof(*cpu) + n_words * sizeof(u32),  in bcm6345_l1_init_one()
     336  struct bcm6345_l1_cpu *cpu = intc->cpus[idx];  in bcm6345_l1_of_init()
    [all …]
|
/drivers/powercap/ |
D | dtpm_cpu.c |
      77  struct cpumask cpus;  in set_pd_power_limit() local
      84  cpumask_and(&cpus, cpu_online_mask, to_cpumask(pd->cpus));  in set_pd_power_limit()
      86  nr_cpus = cpumask_weight(&cpus);  in set_pd_power_limit()
     110  struct cpumask cpus;  in get_pd_power_uw() local
     116  cpumask_and(&cpus, cpu_online_mask, to_cpumask(pd->cpus));  in get_pd_power_uw()
     117  nr_cpus = cpumask_weight(&cpus);  in get_pd_power_uw()
     166  if (cpumask_weight(policy->cpus) != 1)  in cpuhp_dtpm_cpu_offline()
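Note: both power callbacks shrink the performance-domain mask to its online subset before splitting the power budget across CPUs. The idiom, as a self-contained helper (name hypothetical):

    #include <linux/cpumask.h>

    static unsigned int demo_online_count(const struct cpumask *domain)
    {
            struct cpumask online;

            /* Intersect the domain with the currently online CPUs... */
            cpumask_and(&online, cpu_online_mask, domain);
            /* ...and count how many survive. */
            return cpumask_weight(&online);
    }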
|
/drivers/hwmon/ |
D | amd_energy.c |
     210  int i, num_siblings, cpus, sockets;  in amd_create_sensor() local
     223  cpus = num_present_cpus() / num_siblings;  in amd_create_sensor()
     225  s_config = devm_kcalloc(dev, cpus + sockets + 1,  in amd_create_sensor()
     230  accums = devm_kcalloc(dev, cpus + sockets,  in amd_create_sensor()
     236  label_l = devm_kcalloc(dev, cpus + sockets,  in amd_create_sensor()
     244  data->nr_cpus = cpus;  in amd_create_sensor()
     249  for (i = 0; i < cpus + sockets; i++) {  in amd_create_sensor()
     251  if (i < cpus)  in amd_create_sensor()
     254  scnprintf(label_l[i], 10, "Esocket%u", (i - cpus));  in amd_create_sensor()
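Note: here cpus is the physical core count, present CPUs divided by SMT siblings per core, and the arrays hold one sensor per core plus one per socket. Illustrative numbers, not from the driver: 128 present CPUs with 2-way SMT give cpus = 128 / 2 = 64, so a 2-socket box allocates 64 + 2 accumulators and labels, and 64 + 2 + 1 s_config slots, the extra zeroed slot serving as the terminator the hwmon core expects.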
|
/drivers/leds/trigger/ |
D | ledtrig-activity.c |
      44  int cpus;  in led_activity_function() local
      56  cpus = 0;  in led_activity_function()
      69  cpus++;  in led_activity_function()
      77  curr_boot = ktime_get_boottime_ns() * cpus;  in led_activity_function()
     134  target = (cpus > 1) ? (100 / cpus) : 50;  in led_activity_function()
|
/drivers/crypto/caam/ |
D | qi.c |
     403  const cpumask_t *cpus = qman_affine_cpus();  in caam_drv_ctx_init() local
     435  if (!cpumask_test_cpu(*cpu, cpus)) {  in caam_drv_ctx_init()
     438  *pcpu = cpumask_next(*pcpu, cpus);  in caam_drv_ctx_init()
     440  *pcpu = cpumask_first(cpus);  in caam_drv_ctx_init()
     515  const cpumask_t *cpus = qman_affine_cpus();  in caam_qi_shutdown() local
     517  for_each_cpu(i, cpus) {  in caam_qi_shutdown()
     694  const cpumask_t *cpus = qman_affine_cpus();  in alloc_rsp_fqs() local
     697  for_each_cpu(i, cpus) {  in alloc_rsp_fqs()
     711  const cpumask_t *cpus = qman_affine_cpus();  in free_rsp_fqs() local
     713  for_each_cpu(i, cpus)  in free_rsp_fqs()
    [all …]
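Note: caam_drv_ctx_init() hands contexts out round-robin over the QMan-affine CPUs, advancing with cpumask_next() and wrapping via cpumask_first(). The rotation, as a self-contained helper (name and cursor handling hypothetical; start the cursor at -1 so the first call yields the first set bit):

    #include <linux/cpumask.h>

    /* Pick the next CPU from @cpus, wrapping at the end of the mask. */
    static int demo_next_cpu(const struct cpumask *cpus, int *cursor)
    {
            int cpu = cpumask_next(*cursor, cpus);

            /* cpumask_next() returns >= nr_cpu_ids once no later bit is set. */
            if (cpu >= nr_cpu_ids)
                    cpu = cpumask_first(cpus);

            *cursor = cpu;
            return cpu;
    }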
|
/drivers/cpuidle/ |
D | coupled.c |
     442  cpumask_t cpus;  in cpuidle_coupled_any_pokes_pending() local
     445  cpumask_and(&cpus, cpu_online_mask, &coupled->coupled_cpus);  in cpuidle_coupled_any_pokes_pending()
     446  ret = cpumask_and(&cpus, &cpuidle_coupled_poke_pending, &cpus);  in cpuidle_coupled_any_pokes_pending()
     629  cpumask_t cpus;  in cpuidle_coupled_update_online_cpus() local
     630  cpumask_and(&cpus, cpu_online_mask, &coupled->coupled_cpus);  in cpuidle_coupled_update_online_cpus()
     631  coupled->online_count = cpumask_weight(&cpus);  in cpuidle_coupled_update_online_cpus()
|
/drivers/misc/sgi-gru/ |
D | grutlbpurge.c |
     297  int cpus, shift = 0, n;  in gru_tgh_flush_init() local
     299  cpus = uv_blade_nr_possible_cpus(gru->gs_blade_id);  in gru_tgh_flush_init()
     302  if (cpus) {  in gru_tgh_flush_init()
     303  n = 1 << fls(cpus - 1);  in gru_tgh_flush_init()
     316  gru->gs_tgh_first_remote = (cpus + (1 << shift) - 1) >> shift;  in gru_tgh_flush_init()
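Note: two bit tricks here deserve spelling out: 1 << fls(cpus - 1) rounds cpus up to the next power of two (fls() is "find last set", 1-based), and (cpus + (1 << shift) - 1) >> shift is a ceiling division by 2^shift. A worked check (values illustrative):

    #include <linux/bitops.h>       /* fls() */

    static void demo_round_and_divide(void)
    {
            int cpus = 12;

            /* fls(11) = 4, so 1 << 4 = 16: the next power of two >= 12. */
            int n = 1 << fls(cpus - 1);

            /* Ceiling division: with shift = 2, (12 + 3) >> 2 = 3 groups of 4. */
            int shift = 2;
            int groups = (cpus + (1 << shift) - 1) >> shift;

            (void)n;
            (void)groups;
    }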
|
/drivers/base/ |
D | arch_topology.c |
      31  __weak bool arch_freq_counters_available(const struct cpumask *cpus)  in arch_freq_counters_available() argument
      38  void topology_set_freq_scale(const struct cpumask *cpus, unsigned long cur_freq,  in topology_set_freq_scale() argument
      52  if (arch_freq_counters_available(cpus))  in topology_set_freq_scale()
      57  trace_android_vh_arch_set_freq_scale(cpus, cur_freq, max_freq, &scale);  in topology_set_freq_scale()
      59  for_each_cpu(i, cpus)  in topology_set_freq_scale()
      74  void topology_set_thermal_pressure(const struct cpumask *cpus,  in topology_set_thermal_pressure()
      79  for_each_cpu(cpu, cpus)  in topology_set_thermal_pressure()
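Note: topology_set_freq_scale() publishes one scale factor per CPU in the mask; in mainline the factor is the current frequency as a fraction of the maximum on the SCHED_CAPACITY_SCALE (1024) fixed-point scale. A sketch of that computation (per-CPU variable renamed for the sketch, not the file's exact code):

    #include <linux/cpumask.h>
    #include <linux/percpu.h>
    #include <linux/sched/topology.h>   /* SCHED_CAPACITY_SHIFT/SCALE */

    static DEFINE_PER_CPU(unsigned long, demo_freq_scale) = SCHED_CAPACITY_SCALE;

    static void demo_set_freq_scale(const struct cpumask *cpus,
                                    unsigned long cur_freq, unsigned long max_freq)
    {
            /* e.g. 1200 MHz of 2000 MHz: (1200 << 10) / 2000 = 614 of 1024. */
            unsigned long scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq;
            int i;

            for_each_cpu(i, cpus)
                    per_cpu(demo_freq_scale, i) = scale;
    }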
|
/drivers/thermal/ |
D | cpufreq_cooling.c |
     278  num_cpus = cpumask_weight(cpufreq_cdev->policy->cpus);  in cpufreq_state2power()
     433  struct cpumask *cpus;  in cpufreq_set_cur_state() local
     451  cpus = cpufreq_cdev->policy->related_cpus;  in cpufreq_set_cur_state()
     452  max_capacity = arch_scale_cpu_capacity(cpumask_first(cpus));  in cpufreq_set_cur_state()
     455  arch_set_thermal_pressure(cpus, max_capacity - capacity);  in cpufreq_set_cur_state()
|
/drivers/firmware/ |
D | qcom_scm.c |
     278  int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus)  in qcom_scm_set_warm_boot_addr() argument
     293  for_each_cpu(cpu, cpus) {  in qcom_scm_set_warm_boot_addr()
     308  for_each_cpu(cpu, cpus)  in qcom_scm_set_warm_boot_addr()
     324  int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)  in qcom_scm_set_cold_boot_addr() argument
     341  if (!cpus || (cpus && cpumask_empty(cpus)))  in qcom_scm_set_cold_boot_addr()
     344  for_each_cpu(cpu, cpus) {  in qcom_scm_set_cold_boot_addr()
|