/*
 * arch/arm64/kernel/topology.c
 *
 * Copyright (C) 2011,2013,2014 Linaro Limited.
 *
 * Based on the arm32 version written by Vincent Guittot in turn based on
 * arch/sh/kernel/topology.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/acpi.h>
#include <linux/arch_topology.h>
#include <linux/cacheinfo.h>
#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/percpu.h>

#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/topology.h>

#ifdef CONFIG_ACPI
static bool __init acpi_cpu_is_threaded(int cpu)
{
	int is_threaded = acpi_pptt_cpu_is_thread(cpu);

	/*
	 * if the PPTT doesn't have thread information, assume a homogeneous
	 * machine and return the current CPU's thread state.
	 */
	if (is_threaded < 0)
		is_threaded = read_cpuid_mpidr() & MPIDR_MT_BITMASK;

	return !!is_threaded;
}

/*
 * Propagate the topology information of the processor_topology_node tree to the
 * cpu_topology array.
 */
int __init parse_acpi_topology(void)
{
	int cpu, topology_id;

	if (acpi_disabled)
		return 0;

	for_each_possible_cpu(cpu) {
		int i, cache_id;

		topology_id = find_acpi_cpu_topology(cpu, 0);
		if (topology_id < 0)
			return topology_id;

		if (acpi_cpu_is_threaded(cpu)) {
			cpu_topology[cpu].thread_id = topology_id;
			topology_id = find_acpi_cpu_topology(cpu, 1);
			cpu_topology[cpu].core_id   = topology_id;
		} else {
			cpu_topology[cpu].thread_id  = -1;
			cpu_topology[cpu].core_id    = topology_id;
		}
		topology_id = find_acpi_cpu_topology_package(cpu);
		cpu_topology[cpu].package_id = topology_id;

		i = acpi_find_last_cache_level(cpu);

		if (i > 0) {
			/*
			 * this is the only part of cpu_topology that has
			 * a direct relationship with the cache topology
			 */
			cache_id = find_acpi_cpu_cache_topology(cpu, i);
			if (cache_id > 0)
				cpu_topology[cpu].llc_id = cache_id;
		}
	}

	return 0;
}
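
/*
 * Illustrative example (hypothetical IDs, not from real firmware): on a
 * single-package SMT-2 machine whose PPTT yields thread IDs 0/1, core
 * IDs 10/11 and package ID 100, the loop above would produce:
 *
 *	cpu_topology[0] = { .thread_id = 0, .core_id = 10, .package_id = 100 }
 *	cpu_topology[1] = { .thread_id = 1, .core_id = 10, .package_id = 100 }
 *	cpu_topology[2] = { .thread_id = 0, .core_id = 11, .package_id = 100 }
 *
 * On a non-threaded machine, thread_id stays -1 and the level-0 topology
 * ID is stored as core_id directly.
 */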
#endif

#ifdef CONFIG_ARM64_AMU_EXTN

#undef pr_fmt
#define pr_fmt(fmt) "AMU: " fmt

static DEFINE_PER_CPU_READ_MOSTLY(unsigned long, arch_max_freq_scale);
static DEFINE_PER_CPU(u64, arch_const_cycles_prev);
static DEFINE_PER_CPU(u64, arch_core_cycles_prev);
static cpumask_var_t amu_fie_cpus;

/* Initialize counter reference per-cpu variables for the current CPU */
void init_cpu_freq_invariance_counters(void)
{
	this_cpu_write(arch_core_cycles_prev,
		       read_sysreg_s(SYS_AMEVCNTR0_CORE_EL0));
	this_cpu_write(arch_const_cycles_prev,
		       read_sysreg_s(SYS_AMEVCNTR0_CONST_EL0));
}

static inline bool freq_counters_valid(int cpu)
{
	if (!cpu_has_amu_feat(cpu)) {
		pr_debug("CPU%d: counters are not supported.\n", cpu);
		return false;
	}

	if (unlikely(!per_cpu(arch_const_cycles_prev, cpu) ||
		     !per_cpu(arch_core_cycles_prev, cpu))) {
		pr_debug("CPU%d: cycle counters are not enabled.\n", cpu);
		return false;
	}

	return true;
}

static int freq_inv_set_max_ratio(int cpu, u64 max_rate, u64 ref_rate)
{
	u64 ratio;

	if (unlikely(!max_rate || !ref_rate)) {
		pr_debug("CPU%d: invalid maximum or reference frequency.\n",
			 cpu);
		return -EINVAL;
	}

	/*
	 * Pre-compute the fixed ratio between the frequency of the constant
	 * reference counter and the maximum frequency of the CPU.
	 *
	 *			    ref_rate
	 * arch_max_freq_scale =   ---------- * SCHED_CAPACITY_SCALE²
	 *			    max_rate
	 *
	 * We use a factor of 2 * SCHED_CAPACITY_SHIFT -> SCHED_CAPACITY_SCALE²
	 * in order to ensure a good resolution for arch_max_freq_scale for
	 * very low reference frequencies (down to the kHz range which should
	 * be unlikely).
	 */
	ratio = ref_rate << (2 * SCHED_CAPACITY_SHIFT);
	ratio = div64_u64(ratio, max_rate);
	if (!ratio) {
		WARN_ONCE(1, "Reference frequency too low.\n");
		return -EINVAL;
	}

	per_cpu(arch_max_freq_scale, cpu) = (unsigned long)ratio;

	return 0;
}
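
/*
 * Worked example (hypothetical rates, not from real hardware): with a
 * 100 MHz constant reference counter (ref_rate = 100000000), a 2 GHz
 * maximum CPU frequency (max_rate = 2000000000) and
 * SCHED_CAPACITY_SHIFT = 10:
 *
 *	ratio = (100000000 << 20) / 2000000000 = 52428
 *
 * which is ref_rate/max_rate = 0.05 scaled by SCHED_CAPACITY_SCALE², so
 * the shift back by SCHED_CAPACITY_SHIFT in topology_scale_freq_tick()
 * still yields SCHED_CAPACITY_SCALE when the CPU runs at max_rate.
 */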

static inline bool
enable_policy_freq_counters(int cpu, cpumask_var_t valid_cpus)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

	if (!policy) {
		pr_debug("CPU%d: No cpufreq policy found.\n", cpu);
		return false;
	}

	if (cpumask_subset(policy->related_cpus, valid_cpus))
		cpumask_or(amu_fie_cpus, policy->related_cpus,
			   amu_fie_cpus);

	cpufreq_cpu_put(policy);

	return true;
}

static DEFINE_STATIC_KEY_FALSE(amu_fie_key);
#define amu_freq_invariant() static_branch_unlikely(&amu_fie_key)

static int __init init_amu_fie(void)
{
	cpumask_var_t valid_cpus;
	bool have_policy = false;
	int ret = 0;
	int cpu;

	if (!zalloc_cpumask_var(&valid_cpus, GFP_KERNEL))
		return -ENOMEM;

	if (!zalloc_cpumask_var(&amu_fie_cpus, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto free_valid_mask;
	}

	for_each_present_cpu(cpu) {
		if (!freq_counters_valid(cpu) ||
		    freq_inv_set_max_ratio(cpu,
					   cpufreq_get_hw_max_freq(cpu) * 1000,
					   arch_timer_get_rate()))
			continue;

		cpumask_set_cpu(cpu, valid_cpus);
		have_policy |= enable_policy_freq_counters(cpu, valid_cpus);
	}

	/*
	 * If we are not restricted by cpufreq policies, we only enable
	 * the use of the AMU feature for FIE if all CPUs support AMU.
	 * Otherwise, enable_policy_freq_counters has already enabled
	 * policy cpus.
	 */
	if (!have_policy && cpumask_equal(valid_cpus, cpu_present_mask))
		cpumask_or(amu_fie_cpus, amu_fie_cpus, valid_cpus);

	if (!cpumask_empty(amu_fie_cpus)) {
		pr_info("CPUs[%*pbl]: counters will be used for FIE.",
			cpumask_pr_args(amu_fie_cpus));
		static_branch_enable(&amu_fie_key);
	}

	/*
	 * If the system is not fully invariant after AMU init, disable
	 * partial use of counters for frequency invariance.
	 */
	if (!topology_scale_freq_invariant())
		static_branch_disable(&amu_fie_key);

free_valid_mask:
	free_cpumask_var(valid_cpus);

	return ret;
}
late_initcall_sync(init_amu_fie);
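
/*
 * Illustrative summary of the two enablement paths above (CPU sets are
 * hypothetical): if CPUs 0-3 share a cpufreq policy and all four have
 * valid counters, enable_policy_freq_counters() adds the whole policy
 * mask to amu_fie_cpus. Absent any policy, amu_fie_cpus is populated
 * only when every present CPU has valid counters, since partial AMU
 * coverage cannot make the system frequency invariant on its own.
 */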

bool arch_freq_counters_available(const struct cpumask *cpus)
{
	return amu_freq_invariant() &&
	       cpumask_subset(cpus, amu_fie_cpus);
}

void topology_scale_freq_tick(void)
{
	u64 prev_core_cnt, prev_const_cnt;
	u64 core_cnt, const_cnt, scale;
	int cpu = smp_processor_id();

	if (!amu_freq_invariant())
		return;

	if (!cpumask_test_cpu(cpu, amu_fie_cpus))
		return;

	const_cnt = read_sysreg_s(SYS_AMEVCNTR0_CONST_EL0);
	core_cnt = read_sysreg_s(SYS_AMEVCNTR0_CORE_EL0);
	prev_const_cnt = this_cpu_read(arch_const_cycles_prev);
	prev_core_cnt = this_cpu_read(arch_core_cycles_prev);

	if (unlikely(core_cnt <= prev_core_cnt ||
		     const_cnt <= prev_const_cnt))
		goto store_and_exit;

	/*
	 *	    /\core    arch_max_freq_scale
	 * scale =  ------- * --------------------
	 *	    /\const   SCHED_CAPACITY_SCALE
	 *
	 * See freq_inv_set_max_ratio() for details on arch_max_freq_scale
	 * and the use of SCHED_CAPACITY_SHIFT.
	 */
	scale = core_cnt - prev_core_cnt;
	scale *= this_cpu_read(arch_max_freq_scale);
	scale = div64_u64(scale >> SCHED_CAPACITY_SHIFT,
			  const_cnt - prev_const_cnt);

	scale = min_t(unsigned long, scale, SCHED_CAPACITY_SCALE);
	this_cpu_write(freq_scale, (unsigned long)scale);

store_and_exit:
	this_cpu_write(arch_core_cycles_prev, core_cnt);
	this_cpu_write(arch_const_cycles_prev, const_cnt);
}
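
/*
 * Worked example (hypothetical counter deltas, reusing the rates from
 * the freq_inv_set_max_ratio() example where arch_max_freq_scale =
 * 52428): over one tick the 100 MHz reference counter advances by
 * 100000 while the core counter, with the CPU running at 1.5 GHz,
 * advances by 1500000:
 *
 *	scale = ((1500000 * 52428) >> 10) / 100000 = 767
 *
 * i.e. approximately (1.5 GHz / 2 GHz) * SCHED_CAPACITY_SCALE = 768,
 * with the off-by-one coming from integer truncation.
 */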
#endif /* CONFIG_ARM64_AMU_EXTN */