Lines Matching +full:cluster +full:- +full:cpufreq (drivers/base/arch_topology.c)

1 // SPDX-License-Identifier: GPL-2.0
12 #include <linux/cpufreq.h>
49 * either cpufreq or counter driven. If the support status changes as in update_scale_freq_invariant()
67 * supported by cpufreq. in topology_set_scale_freq_source()
78 if (!sfd || sfd->source != SCALE_FREQ_SOURCE_ARCH) { in topology_set_scale_freq_source()
101 if (sfd && sfd->source == source) { in topology_clear_scale_freq_source()
111 * use-after-free races. in topology_clear_scale_freq_source()
124 sfd->set_freq_scale(); in topology_scale_freq_tick()
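
The fragments above (lines 49-124) are from the frequency-invariance plumbing: an architecture can override the cpufreq-driven frequency scale by registering its own scale-freq source, and topology_scale_freq_tick() then invokes that source's callback on each tick. A minimal sketch of such a registration, assuming only the arch_topology.h interface shown here; the callback and variable names are illustrative, not taken from the listing:

	#include <linux/arch_topology.h>
	#include <linux/cpumask.h>

	/* Illustrative tick callback: recompute this CPU's frequency scale
	 * from architecture-specific counters.  Body intentionally empty. */
	static void example_counter_freq_tick(void)
	{
	}

	static struct scale_freq_data example_sfd = {
		.source		= SCALE_FREQ_SOURCE_ARCH,
		.set_freq_scale	= example_counter_freq_tick,
	};

	static void example_enable(const struct cpumask *cpus)
	{
		/* Arch sources take precedence over the cpufreq-driven one for
		 * @cpus; topology_scale_freq_tick() will now call the callback
		 * above on those CPUs. */
		topology_set_scale_freq_source(&example_sfd, cpus);
	}

	static void example_disable(const struct cpumask *cpus)
	{
		topology_clear_scale_freq_source(SCALE_FREQ_SOURCE_ARCH, cpus);
	}
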
141 * want to update the scale factor with information from CPUFREQ. in topology_set_freq_scale()
164 * topology_update_thermal_pressure() - Update thermal pressure for CPUs
170 * operating on stale data when hot-plug is used for some CPUs. The
200 th_pressure = max_capacity - capacity; in topology_update_thermal_pressure()
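
Lines 164-200 are from topology_update_thermal_pressure(), which turns a thermal frequency cap into "pressure" subtracted from the CPU's maximum capacity. A hedged sketch of the caller's side, assuming the cap is reported in kHz as cpufreq-based cooling does; everything except topology_update_thermal_pressure() itself is illustrative:

	#include <linux/arch_topology.h>
	#include <linux/cpumask.h>

	/* Illustrative cooling-driver call site: report that @cpus are capped
	 * at @capped_khz.  Internally the helper computes
	 * th_pressure = max_capacity - capacity_at_capped_freq (line 200). */
	static void example_report_thermal_cap(const struct cpumask *cpus,
					       unsigned long capped_khz)
	{
		topology_update_thermal_pressure(cpus, capped_khz);
	}
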
215 return sysfs_emit(buf, "%lu\n", topology_get_cpu_scale(cpu->dev.id)); in cpu_capacity_show()
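
Line 215 is the sysfs show routine behind the per-CPU cpu_capacity attribute. A small user-space check of the exported value; the path assumes a system where this driver registers the attribute (e.g. a DT-based arm64 machine):

	#include <stdio.h>

	int main(void)
	{
		/* cpu_capacity is normalized so the biggest CPU reads 1024
		 * (SCHED_CAPACITY_SCALE). */
		FILE *f = fopen("/sys/devices/system/cpu/cpu0/cpu_capacity", "r");
		unsigned long cap;

		if (f && fscanf(f, "%lu", &cap) == 1)
			printf("cpu0 capacity = %lu\n", cap);
		if (f)
			fclose(f);
		return 0;
	}
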
250 * Updating the sched_domains can't be done directly from cpufreq callbacks
307 ret = of_property_read_u32(cpu_node, "capacity-dmips-mhz", in topology_parse_cpu_capacity()
325 * For non-clk CPU DVFS mechanism, there's no way to get the in topology_parse_cpu_capacity()
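
Lines 307-325 are from topology_parse_cpu_capacity(): the raw per-CPU "capacity-dmips-mhz" value is read from the device tree and only later normalized to the 0..1024 capacity scale. A condensed sketch of that two-step flow; the helper names and static array are illustrative, and the per-CPU maximum-frequency factor the real code also folds in is omitted here:

	#include <linux/arch_topology.h>
	#include <linux/math64.h>
	#include <linux/of.h>
	#include <linux/sched.h>

	static u32 example_raw_cap[NR_CPUS];

	/* Read the raw DT value for one CPU node, as line 307 does. */
	static int example_parse_capacity(struct device_node *cpu_node, int cpu)
	{
		return of_property_read_u32(cpu_node, "capacity-dmips-mhz",
					    &example_raw_cap[cpu]);
	}

	/* Scale every CPU relative to the largest raw value so the biggest
	 * CPU ends up at SCHED_CAPACITY_SCALE (1024). */
	static void example_normalize(u32 max_raw)
	{
		int cpu;

		for_each_possible_cpu(cpu)
			topology_set_cpu_scale(cpu,
				div_u64((u64)example_raw_cap[cpu] *
					SCHED_CAPACITY_SCALE, max_raw));
	}
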
408 cpumask_pr_args(policy->related_cpus), in init_cpu_capacity_callback()
411 cpumask_andnot(cpus_to_visit, cpus_to_visit, policy->related_cpus); in init_cpu_capacity_callback()
413 for_each_cpu(cpu, policy->related_cpus) in init_cpu_capacity_callback()
414 per_cpu(freq_factor, cpu) = policy->cpuinfo.max_freq / 1000; in init_cpu_capacity_callback()
436 * On ACPI-based systems skip registering cpufreq notifier as cpufreq in register_cpufreq_notifier()
440 return -EINVAL; in register_cpufreq_notifier()
443 return -ENOMEM; in register_cpufreq_notifier()
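
Lines 408-443 belong to the cpufreq policy notifier that finishes capacity setup once every CPU's policy has appeared: each CREATE_POLICY event clears policy->related_cpus from the set still to visit and records cpuinfo.max_freq, and the real register_cpufreq_notifier() bails out early with -EINVAL on ACPI systems. A reduced sketch of the notifier pattern, with illustrative names for everything except the cpufreq API:

	#include <linux/cpufreq.h>
	#include <linux/cpumask.h>

	static int example_policy_cb(struct notifier_block *nb,
				     unsigned long val, void *data)
	{
		struct cpufreq_policy *policy = data;

		if (val != CPUFREQ_CREATE_POLICY)
			return 0;

		pr_debug("capacity init for CPUs [%*pbl]\n",
			 cpumask_pr_args(policy->related_cpus));
		/* Record policy->cpuinfo.max_freq for policy->related_cpus and,
		 * once all CPUs have been seen, normalize the capacities. */
		return 0;
	}

	static struct notifier_block example_policy_nb = {
		.notifier_call = example_policy_cb,
	};

	static int __init example_register(void)
	{
		return cpufreq_register_notifier(&example_policy_nb,
						 CPUFREQ_POLICY_NOTIFIER);
	}
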
473 * (2) -ENODEV when the device tree(DT) node is valid and found in the DT but
477 * (3) -1 if the node does not exist in the device tree
486 return -1; in get_cpu_for_node()
519 } else if (cpu != -ENODEV) { in parse_core()
522 return -EINVAL; in parse_core()
534 return -EINVAL; in parse_core()
540 } else if (leaf && cpu != -ENODEV) { in parse_core()
542 return -EINVAL; in parse_core()
548 static int __init parse_cluster(struct device_node *cluster, int package_id, in parse_cluster() argument
565 snprintf(name, sizeof(name), "cluster%d", i); in parse_cluster()
566 c = of_get_child_by_name(cluster, name); in parse_cluster()
583 c = of_get_child_by_name(cluster, name); in parse_cluster()
588 pr_err("%pOF: cpu-map children should be clusters\n", in parse_cluster()
591 return -EINVAL; in parse_cluster()
598 pr_err("%pOF: Non-leaf cluster with core %s\n", in parse_cluster()
599 cluster, name); in parse_cluster()
600 ret = -EINVAL; in parse_cluster()
611 pr_warn("%pOF: empty cluster\n", cluster); in parse_cluster()
628 ret = parse_cluster(c, package_id, -1, 0); in parse_socket()
637 ret = parse_cluster(socket, 0, -1, 0); in parse_socket()
655 * When topology is provided cpu-map is essentially a root in parse_dt_topology()
656 * cluster with restricted subnodes. in parse_dt_topology()
658 map = of_get_child_by_name(cn, "cpu-map"); in parse_dt_topology()
674 ret = -EINVAL; in parse_dt_topology()
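
Lines 548-674 come from the cpu-map parser: parse_dt_topology() looks up the cpu-map node, and parse_socket()/parse_cluster()/parse_core() then walk children that must be named socketN, clusterN and coreN, with each core referencing a cpu phandle. A minimal sketch of the naming-convention walk that parse_cluster() performs around lines 565-591; the function name below is illustrative:

	#include <linux/kernel.h>
	#include <linux/of.h>

	/* Walk the "clusterN" children of a cpu-map (or nested cluster) node.
	 * Leaf clusters carry "coreN" children instead; anything else is
	 * rejected with -EINVAL, as lines 588-591 show. */
	static int example_walk_clusters(struct device_node *parent)
	{
		struct device_node *c;
		char name[20];
		int i = 0;

		do {
			snprintf(name, sizeof(name), "cluster%d", i);
			c = of_get_child_by_name(parent, name);
			if (c) {
				/* Recurse into the nested cluster here. */
				of_node_put(c);
			}
			i++;
		} while (c);

		return 0;
	}
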
708 * For systems with no shared cpu-side LLC but with clusters defined, in cpu_coregroup_mask()
738 if (ret && ret != -ENOENT) in update_siblings_masks()
746 cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling); in update_siblings_masks()
747 cpumask_set_cpu(cpuid, &cpu_topo->llc_sibling); in update_siblings_masks()
750 if (cpuid_topo->package_id != cpu_topo->package_id) in update_siblings_masks()
753 cpumask_set_cpu(cpuid, &cpu_topo->core_sibling); in update_siblings_masks()
754 cpumask_set_cpu(cpu, &cpuid_topo->core_sibling); in update_siblings_masks()
756 if (cpuid_topo->cluster_id != cpu_topo->cluster_id) in update_siblings_masks()
759 if (cpuid_topo->cluster_id >= 0) { in update_siblings_masks()
760 cpumask_set_cpu(cpu, &cpuid_topo->cluster_sibling); in update_siblings_masks()
761 cpumask_set_cpu(cpuid, &cpu_topo->cluster_sibling); in update_siblings_masks()
764 if (cpuid_topo->core_id != cpu_topo->core_id) in update_siblings_masks()
767 cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling); in update_siblings_masks()
768 cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling); in update_siblings_masks()
776 cpumask_clear(&cpu_topo->llc_sibling); in clear_cpu_topology()
777 cpumask_set_cpu(cpu, &cpu_topo->llc_sibling); in clear_cpu_topology()
779 cpumask_clear(&cpu_topo->cluster_sibling); in clear_cpu_topology()
780 cpumask_set_cpu(cpu, &cpu_topo->cluster_sibling); in clear_cpu_topology()
782 cpumask_clear(&cpu_topo->core_sibling); in clear_cpu_topology()
783 cpumask_set_cpu(cpu, &cpu_topo->core_sibling); in clear_cpu_topology()
784 cpumask_clear(&cpu_topo->thread_sibling); in clear_cpu_topology()
785 cpumask_set_cpu(cpu, &cpu_topo->thread_sibling); in clear_cpu_topology()
795 cpu_topo->thread_id = -1; in reset_cpu_topology()
796 cpu_topo->core_id = -1; in reset_cpu_topology()
797 cpu_topo->cluster_id = -1; in reset_cpu_topology()
798 cpu_topo->package_id = -1; in reset_cpu_topology()
839 * arch-specific early cache level detection a chance to run. in init_cpu_topology()
848 else if (ret != -ENOENT) in init_cpu_topology()
858 if (cpuid_topo->package_id != -1) in store_cpu_topology()
861 cpuid_topo->thread_id = -1; in store_cpu_topology()
862 cpuid_topo->core_id = cpuid; in store_cpu_topology()
863 cpuid_topo->package_id = cpu_to_node(cpuid); in store_cpu_topology()
866 cpuid, cpuid_topo->package_id, cpuid_topo->core_id, in store_cpu_topology()
867 cpuid_topo->thread_id); in store_cpu_topology()