• Home
  • Raw
  • Download

Lines Matching +full:boost +full:- +full:low +full:- +full:freq

1 // SPDX-License-Identifier: GPL-2.0-only
67 FIE_UNSET = -1,
94 * cppc_scale_freq_workfn - CPPC arch_freq_scale updater for frequency invariance
106 * reach here from hard-irq context), which then schedules a normal work item
119 cpu_data = cppc_fi->cpu_data; in cppc_scale_freq_workfn()
121 if (cppc_get_perf_ctrs(cppc_fi->cpu, &fb_ctrs)) { in cppc_scale_freq_workfn()
126 perf = cppc_perf_from_fbctrs(cpu_data, &cppc_fi->prev_perf_fb_ctrs, in cppc_scale_freq_workfn()
128 cppc_fi->prev_perf_fb_ctrs = fb_ctrs; in cppc_scale_freq_workfn()
131 local_freq_scale = div64_u64(perf, cpu_data->perf_caps.highest_perf); in cppc_scale_freq_workfn()
137 per_cpu(arch_freq_scale, cppc_fi->cpu) = local_freq_scale; in cppc_scale_freq_workfn()
145 kthread_queue_work(kworker_fie, &cppc_fi->work); in cppc_irq_work()
156 irq_work_queue(&cppc_fi->irq_work); in cppc_scale_freq_tick()
172 for_each_cpu(cpu, policy->cpus) { in cppc_cpufreq_cpu_fie_init()
174 cppc_fi->cpu = cpu; in cppc_cpufreq_cpu_fie_init()
175 cppc_fi->cpu_data = policy->driver_data; in cppc_cpufreq_cpu_fie_init()
176 kthread_init_work(&cppc_fi->work, cppc_scale_freq_workfn); in cppc_cpufreq_cpu_fie_init()
177 init_irq_work(&cppc_fi->irq_work, cppc_irq_work); in cppc_cpufreq_cpu_fie_init()
179 ret = cppc_get_perf_ctrs(cpu, &cppc_fi->prev_perf_fb_ctrs); in cppc_cpufreq_cpu_fie_init()
193 /* Register for freq-invariance */ in cppc_cpufreq_cpu_fie_init()
194 topology_set_scale_freq_source(&cppc_sftd, policy->cpus); in cppc_cpufreq_cpu_fie_init()
199 * irq-work are per-cpu and the hotplug core takes care of flushing the pending
200 * irq-works (hint: smpcfd_dying_cpu()) on CPU hotplug. Even if the kthread-work
203 * We just need to make sure to remove them all on policy->exit().
213 /* policy->cpus will be empty here, use related_cpus instead */ in cppc_cpufreq_cpu_fie_exit()
214 topology_clear_scale_freq_source(SCALE_FREQ_SOURCE_CPPC, policy->related_cpus); in cppc_cpufreq_cpu_fie_exit()
216 for_each_cpu(cpu, policy->related_cpus) { in cppc_cpufreq_cpu_fie_exit()
218 irq_work_sync(&cppc_fi->irq_work); in cppc_cpufreq_cpu_fie_exit()
219 kthread_cancel_work_sync(&cppc_fi->work); in cppc_cpufreq_cpu_fie_exit()
259 ret = sched_setattr_nocheck(kworker_fie->task, &attr); in cppc_freq_invariance_init()
300 if (dm->type == DMI_ENTRY_PROCESSOR && in cppc_find_dmi_mhz()
301 dm->length >= DMI_ENTRY_PROCESSOR_MIN_LENGTH) { in cppc_find_dmi_mhz()
326 * use them to convert perf to freq and vice versa. The conversion is
328 * - (Low perf, Low freq)
329 * - (Nominal perf, Nominal freq)
334 struct cppc_perf_caps *caps = &cpu_data->perf_caps; in cppc_cpufreq_perf_to_khz()
339 if (caps->lowest_freq && caps->nominal_freq) { in cppc_cpufreq_perf_to_khz()
340 mul = caps->nominal_freq - caps->lowest_freq; in cppc_cpufreq_perf_to_khz()
341 div = caps->nominal_perf - caps->lowest_perf; in cppc_cpufreq_perf_to_khz()
342 offset = caps->nominal_freq - div64_u64(caps->nominal_perf * mul, div); in cppc_cpufreq_perf_to_khz()
347 div = caps->highest_perf; in cppc_cpufreq_perf_to_khz()
357 unsigned int freq) in cppc_cpufreq_khz_to_perf() argument
359 struct cppc_perf_caps *caps = &cpu_data->perf_caps; in cppc_cpufreq_khz_to_perf()
364 if (caps->lowest_freq && caps->nominal_freq) { in cppc_cpufreq_khz_to_perf()
365 mul = caps->nominal_perf - caps->lowest_perf; in cppc_cpufreq_khz_to_perf()
366 div = caps->nominal_freq - caps->lowest_freq; in cppc_cpufreq_khz_to_perf()
367 offset = caps->nominal_perf - div64_u64(caps->nominal_freq * mul, div); in cppc_cpufreq_khz_to_perf()
371 mul = caps->highest_perf; in cppc_cpufreq_khz_to_perf()
375 retval = offset + div64_u64(freq * mul, div); in cppc_cpufreq_khz_to_perf()
386 struct cppc_cpudata *cpu_data = policy->driver_data; in cppc_cpufreq_set_target()
387 unsigned int cpu = policy->cpu; in cppc_cpufreq_set_target()
394 if (desired_perf == cpu_data->perf_ctrls.desired_perf) in cppc_cpufreq_set_target()
397 cpu_data->perf_ctrls.desired_perf = desired_perf; in cppc_cpufreq_set_target()
398 freqs.old = policy->cur; in cppc_cpufreq_set_target()
402 ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls); in cppc_cpufreq_set_target()
415 struct cppc_cpudata *cpu_data = policy->driver_data; in cppc_cpufreq_fast_switch()
416 unsigned int cpu = policy->cpu; in cppc_cpufreq_fast_switch()
421 cpu_data->perf_ctrls.desired_perf = desired_perf; in cppc_cpufreq_fast_switch()
422 ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls); in cppc_cpufreq_fast_switch()
441 * on the shared PCC channel (including READs which do not count towards freq
488 int cpu = policy->cpu; in get_perf_level_count()
490 cpu_data = policy->driver_data; in get_perf_level_count()
491 perf_caps = &cpu_data->perf_caps; in get_perf_level_count()
493 min_cap = div_u64((u64)max_cap * perf_caps->lowest_perf, in get_perf_level_count()
494 perf_caps->highest_perf); in get_perf_level_count()
497 return 1 + max_cap / CPPC_EM_CAP_STEP - min_cap / CPPC_EM_CAP_STEP; in get_perf_level_count()
522 policy = cpufreq_cpu_get_raw(cpu_dev->id); in cppc_get_cpu_power()
523 cpu_data = policy->driver_data; in cppc_get_cpu_power()
524 perf_caps = &cpu_data->perf_caps; in cppc_get_cpu_power()
525 max_cap = arch_scale_cpu_capacity(cpu_dev->id); in cppc_get_cpu_power()
526 min_cap = div_u64((u64)max_cap * perf_caps->lowest_perf, in cppc_get_cpu_power()
527 perf_caps->highest_perf); in cppc_get_cpu_power()
528 perf_step = div_u64((u64)CPPC_EM_CAP_STEP * perf_caps->highest_perf, in cppc_get_cpu_power()
537 return -EINVAL; in cppc_get_cpu_power()
541 perf = perf_caps->highest_perf; in cppc_get_cpu_power()
544 perf = perf_caps->lowest_perf; in cppc_get_cpu_power()
548 perf = perf_caps->highest_perf; in cppc_get_cpu_power()
574 *power = compute_cost(cpu_dev->id, step); in cppc_get_cpu_power()
589 policy = cpufreq_cpu_get_raw(cpu_dev->id); in cppc_get_cpu_cost()
590 cpu_data = policy->driver_data; in cppc_get_cpu_cost()
591 perf_caps = &cpu_data->perf_caps; in cppc_get_cpu_cost()
592 max_cap = arch_scale_cpu_capacity(cpu_dev->id); in cppc_get_cpu_cost()
595 perf_step = CPPC_EM_CAP_STEP * perf_caps->highest_perf / max_cap; in cppc_get_cpu_cost()
598 *cost = compute_cost(cpu_dev->id, step); in cppc_get_cpu_cost()
611 class = gicc->efficiency_class; in populate_efficiency_class()
618 return -EINVAL; in populate_efficiency_class()
622 * Squeeze efficiency class values on [0:#efficiency_class-1]. in populate_efficiency_class()
629 if (gicc->efficiency_class == class) in populate_efficiency_class()
645 cpu_data = policy->driver_data; in cppc_cpufreq_register_em()
646 em_dev_register_perf_domain(get_cpu_device(policy->cpu), in cppc_cpufreq_register_em()
648 cpu_data->shared_cpu_map, 0); in cppc_cpufreq_register_em()
667 if (!zalloc_cpumask_var(&cpu_data->shared_cpu_map, GFP_KERNEL)) in cppc_cpufreq_get_cpu_data()
676 ret = cppc_get_perf_caps(cpu, &cpu_data->perf_caps); in cppc_cpufreq_get_cpu_data()
682 /* Convert the lowest and nominal freq from MHz to KHz */ in cppc_cpufreq_get_cpu_data()
683 cpu_data->perf_caps.lowest_freq *= 1000; in cppc_cpufreq_get_cpu_data()
684 cpu_data->perf_caps.nominal_freq *= 1000; in cppc_cpufreq_get_cpu_data()
686 list_add(&cpu_data->node, &cpu_data_list); in cppc_cpufreq_get_cpu_data()
691 free_cpumask_var(cpu_data->shared_cpu_map); in cppc_cpufreq_get_cpu_data()
700 struct cppc_cpudata *cpu_data = policy->driver_data; in cppc_cpufreq_put_cpu_data()
702 list_del(&cpu_data->node); in cppc_cpufreq_put_cpu_data()
703 free_cpumask_var(cpu_data->shared_cpu_map); in cppc_cpufreq_put_cpu_data()
705 policy->driver_data = NULL; in cppc_cpufreq_put_cpu_data()
710 unsigned int cpu = policy->cpu; in cppc_cpufreq_cpu_init()
718 return -ENODEV; in cppc_cpufreq_cpu_init()
720 caps = &cpu_data->perf_caps; in cppc_cpufreq_cpu_init()
721 policy->driver_data = cpu_data; in cppc_cpufreq_cpu_init()
727 policy->min = cppc_cpufreq_perf_to_khz(cpu_data, in cppc_cpufreq_cpu_init()
728 caps->lowest_nonlinear_perf); in cppc_cpufreq_cpu_init()
729 policy->max = cppc_cpufreq_perf_to_khz(cpu_data, in cppc_cpufreq_cpu_init()
730 caps->nominal_perf); in cppc_cpufreq_cpu_init()
737 policy->cpuinfo.min_freq = cppc_cpufreq_perf_to_khz(cpu_data, in cppc_cpufreq_cpu_init()
738 caps->lowest_perf); in cppc_cpufreq_cpu_init()
739 policy->cpuinfo.max_freq = cppc_cpufreq_perf_to_khz(cpu_data, in cppc_cpufreq_cpu_init()
740 caps->nominal_perf); in cppc_cpufreq_cpu_init()
742 policy->transition_delay_us = cppc_cpufreq_get_transition_delay_us(cpu); in cppc_cpufreq_cpu_init()
743 policy->shared_type = cpu_data->shared_type; in cppc_cpufreq_cpu_init()
745 switch (policy->shared_type) { in cppc_cpufreq_cpu_init()
748 /* Nothing to be done - we'll have a policy for each CPU */ in cppc_cpufreq_cpu_init()
754 * in policy->driver_data. in cppc_cpufreq_cpu_init()
756 cpumask_copy(policy->cpus, cpu_data->shared_cpu_map); in cppc_cpufreq_cpu_init()
759 pr_debug("Unsupported CPU co-ord type: %d\n", in cppc_cpufreq_cpu_init()
760 policy->shared_type); in cppc_cpufreq_cpu_init()
761 ret = -EFAULT; in cppc_cpufreq_cpu_init()
765 policy->fast_switch_possible = cppc_allow_fast_switch(); in cppc_cpufreq_cpu_init()
766 policy->dvfs_possible_from_any_cpu = true; in cppc_cpufreq_cpu_init()
769 * If 'highest_perf' is greater than 'nominal_perf', we assume CPU Boost in cppc_cpufreq_cpu_init()
772 if (caps->highest_perf > caps->nominal_perf) in cppc_cpufreq_cpu_init()
775 /* Set policy->cur to max now. The governors will adjust later. */ in cppc_cpufreq_cpu_init()
776 policy->cur = cppc_cpufreq_perf_to_khz(cpu_data, caps->highest_perf); in cppc_cpufreq_cpu_init()
777 cpu_data->perf_ctrls.desired_perf = caps->highest_perf; in cppc_cpufreq_cpu_init()
779 ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls); in cppc_cpufreq_cpu_init()
782 caps->highest_perf, cpu, ret); in cppc_cpufreq_cpu_init()
796 struct cppc_cpudata *cpu_data = policy->driver_data; in cppc_cpufreq_cpu_exit()
797 struct cppc_perf_caps *caps = &cpu_data->perf_caps; in cppc_cpufreq_cpu_exit()
798 unsigned int cpu = policy->cpu; in cppc_cpufreq_cpu_exit()
803 cpu_data->perf_ctrls.desired_perf = caps->lowest_perf; in cppc_cpufreq_cpu_exit()
805 ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls); in cppc_cpufreq_cpu_exit()
808 caps->lowest_perf, cpu, ret); in cppc_cpufreq_cpu_exit()
817 return t1 - t0; in get_delta()
819 return (u32)t1 - (u32)t0; in get_delta()
829 reference_perf = fb_ctrs_t0->reference_perf; in cppc_perf_from_fbctrs()
831 delta_reference = get_delta(fb_ctrs_t1->reference, in cppc_perf_from_fbctrs()
832 fb_ctrs_t0->reference); in cppc_perf_from_fbctrs()
833 delta_delivered = get_delta(fb_ctrs_t1->delivered, in cppc_perf_from_fbctrs()
834 fb_ctrs_t0->delivered); in cppc_perf_from_fbctrs()
836 /* Check to avoid divide-by zero and invalid delivered_perf */ in cppc_perf_from_fbctrs()
838 return cpu_data->perf_ctrls.desired_perf; in cppc_perf_from_fbctrs()
847 struct cppc_cpudata *cpu_data = policy->driver_data; in cppc_cpufreq_get_rate()
871 struct cppc_cpudata *cpu_data = policy->driver_data; in cppc_cpufreq_set_boost()
872 struct cppc_perf_caps *caps = &cpu_data->perf_caps; in cppc_cpufreq_set_boost()
876 pr_err("BOOST not supported by CPU or firmware\n"); in cppc_cpufreq_set_boost()
877 return -EINVAL; in cppc_cpufreq_set_boost()
881 policy->max = cppc_cpufreq_perf_to_khz(cpu_data, in cppc_cpufreq_set_boost()
882 caps->highest_perf); in cppc_cpufreq_set_boost()
884 policy->max = cppc_cpufreq_perf_to_khz(cpu_data, in cppc_cpufreq_set_boost()
885 caps->nominal_perf); in cppc_cpufreq_set_boost()
886 policy->cpuinfo.max_freq = policy->max; in cppc_cpufreq_set_boost()
888 ret = freq_qos_update_request(policy->max_freq_req, policy->max); in cppc_cpufreq_set_boost()
897 struct cppc_cpudata *cpu_data = policy->driver_data; in show_freqdomain_cpus()
899 return cpufreq_show_cpus(cpu_data->shared_cpu_map, buf); in show_freqdomain_cpus()
930 struct cppc_cpudata *cpu_data = policy->driver_data; in hisi_cppc_cpufreq_get_rate()
938 return -EIO; in hisi_cppc_cpufreq_get_rate()
954 if (!memcmp(wa_info[i].oem_id, tbl->oem_id, ACPI_OEM_ID_SIZE) && in cppc_check_hisi_workaround()
955 !memcmp(wa_info[i].oem_table_id, tbl->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) && in cppc_check_hisi_workaround()
956 wa_info[i].oem_revision == tbl->oem_revision) { in cppc_check_hisi_workaround()
972 return -ENODEV; in cppc_cpufreq_init()
990 free_cpumask_var(iter->shared_cpu_map); in free_cpu_data()
991 list_del(&iter->node); in free_cpu_data()