/*
 * arch/arm64/kernel/topology.c
 *
 * Copyright (C) 2011,2013,2014 Linaro Limited.
 *
 * Based on the arm32 version written by Vincent Guittot in turn based on
 * arch/sh/kernel/topology.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/acpi.h>
#include <linux/arch_topology.h>
#include <linux/cacheinfo.h>
#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/percpu.h>

#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/topology.h>

#ifdef CONFIG_ACPI
static bool __init acpi_cpu_is_threaded(int cpu)
{
	int is_threaded = acpi_pptt_cpu_is_thread(cpu);

	/*
	 * if the PPTT doesn't have thread information, assume a homogeneous
	 * machine and return the current CPU's thread state.
	 */
	if (is_threaded < 0)
		is_threaded = read_cpuid_mpidr() & MPIDR_MT_BITMASK;

	return !!is_threaded;
}

/*
 * Propagate the topology information of the processor_topology_node tree to the
 * cpu_topology array.
 */
int __init parse_acpi_topology(void)
{
	int cpu, topology_id;

	if (acpi_disabled)
		return 0;

	for_each_possible_cpu(cpu) {
		int i, cache_id;

		topology_id = find_acpi_cpu_topology(cpu, 0);
		if (topology_id < 0)
			return topology_id;

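		/*
		 * PPTT level 0 describes the CPU node itself. When the CPU
		 * is a thread of an SMT core, use that id as the thread id
		 * and take the core id from the parent node (level 1);
		 * otherwise the level-0 id already identifies the core.
		 */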
		if (acpi_cpu_is_threaded(cpu)) {
			cpu_topology[cpu].thread_id = topology_id;
			topology_id = find_acpi_cpu_topology(cpu, 1);
			cpu_topology[cpu].core_id = topology_id;
		} else {
			cpu_topology[cpu].thread_id = -1;
			cpu_topology[cpu].core_id = topology_id;
		}
		topology_id = find_acpi_cpu_topology_package(cpu);
		cpu_topology[cpu].package_id = topology_id;

		i = acpi_find_last_cache_level(cpu);

		if (i > 0) {
			/*
			 * this is the only part of cpu_topology that has
			 * a direct relationship with the cache topology
			 */
			cache_id = find_acpi_cpu_cache_topology(cpu, i);
			if (cache_id > 0)
				cpu_topology[cpu].llc_id = cache_id;
		}
	}

	return 0;
}
#endif

#ifdef CONFIG_ARM64_AMU_EXTN

#undef pr_fmt
#define pr_fmt(fmt) "AMU: " fmt

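/*
 * Per-CPU state for AMU-based frequency invariance: arch_max_freq_scale holds
 * the precomputed constant-counter/max-frequency ratio, the *_cycles_prev
 * variables snapshot the AMU constant and core cycle counters as seen at the
 * previous tick, and amu_fie_cpus collects the CPUs whose counters are
 * actually used for FIE.
 */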
static DEFINE_PER_CPU_READ_MOSTLY(unsigned long, arch_max_freq_scale);
static DEFINE_PER_CPU(u64, arch_const_cycles_prev);
static DEFINE_PER_CPU(u64, arch_core_cycles_prev);
static cpumask_var_t amu_fie_cpus;

/* Initialize counter reference per-cpu variables for the current CPU */
void init_cpu_freq_invariance_counters(void)
{
	this_cpu_write(arch_core_cycles_prev,
		       read_sysreg_s(SYS_AMEVCNTR0_CORE_EL0));
	this_cpu_write(arch_const_cycles_prev,
		       read_sysreg_s(SYS_AMEVCNTR0_CONST_EL0));
}

static int validate_cpu_freq_invariance_counters(int cpu)
{
	u64 max_freq_hz, ratio;

	if (!cpu_has_amu_feat(cpu)) {
		pr_debug("CPU%d: counters are not supported.\n", cpu);
		return -EINVAL;
	}

	if (unlikely(!per_cpu(arch_const_cycles_prev, cpu) ||
		     !per_cpu(arch_core_cycles_prev, cpu))) {
		pr_debug("CPU%d: cycle counters are not enabled.\n", cpu);
		return -EINVAL;
	}

	/* Convert maximum frequency from KHz to Hz and validate */
	max_freq_hz = cpufreq_get_hw_max_freq(cpu) * 1000ULL;
	if (unlikely(!max_freq_hz)) {
		pr_debug("CPU%d: invalid maximum frequency.\n", cpu);
		return -EINVAL;
	}

	/*
	 * Pre-compute the fixed ratio between the frequency of the constant
	 * counter and the maximum frequency of the CPU.
	 *
	 *                            const_freq
	 * arch_max_freq_scale =   ---------------- * SCHED_CAPACITY_SCALE²
	 *                         cpuinfo_max_freq
	 *
	 * We use a factor of 2 * SCHED_CAPACITY_SHIFT -> SCHED_CAPACITY_SCALE²
	 * in order to ensure a good resolution for arch_max_freq_scale for
	 * very low arch timer frequencies (down to the KHz range which should
	 * be unlikely).
	 */
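	/*
	 * Illustrative numbers (not taken from real hardware): with a 25 MHz
	 * constant counter, a 2 GHz cpuinfo_max_freq and
	 * SCHED_CAPACITY_SHIFT = 10, this computes
	 *
	 *   ratio = (25000000 << 20) / 2000000000 = 13107
	 *
	 * leaving roughly 13 bits of resolution in arch_max_freq_scale.
	 */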
	ratio = (u64)arch_timer_get_rate() << (2 * SCHED_CAPACITY_SHIFT);
	ratio = div64_u64(ratio, max_freq_hz);
	if (!ratio) {
		WARN_ONCE(1, "System timer frequency too low.\n");
		return -EINVAL;
	}

	per_cpu(arch_max_freq_scale, cpu) = (unsigned long)ratio;

	return 0;
}

static inline bool
enable_policy_freq_counters(int cpu, cpumask_var_t valid_cpus)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

	if (!policy) {
		pr_debug("CPU%d: No cpufreq policy found.\n", cpu);
		return false;
	}

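	/*
	 * Only once every CPU sharing this policy has validated counters do
	 * we commit the whole policy to amu_fie_cpus.
	 */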
	if (cpumask_subset(policy->related_cpus, valid_cpus))
		cpumask_or(amu_fie_cpus, policy->related_cpus,
			   amu_fie_cpus);

	cpufreq_cpu_put(policy);

	return true;
}

static DEFINE_STATIC_KEY_FALSE(amu_fie_key);
#define amu_freq_invariant() static_branch_unlikely(&amu_fie_key)

static int __init init_amu_fie(void)
{
	cpumask_var_t valid_cpus;
	bool have_policy = false;
	int ret = 0;
	int cpu;

	if (!zalloc_cpumask_var(&valid_cpus, GFP_KERNEL))
		return -ENOMEM;

	if (!zalloc_cpumask_var(&amu_fie_cpus, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto free_valid_mask;
	}

	for_each_present_cpu(cpu) {
		if (validate_cpu_freq_invariance_counters(cpu))
			continue;
		cpumask_set_cpu(cpu, valid_cpus);
		have_policy |= enable_policy_freq_counters(cpu, valid_cpus);
	}

	/*
	 * If we are not restricted by cpufreq policies, we only enable
	 * the use of the AMU feature for FIE if all CPUs support AMU.
	 * Otherwise, enable_policy_freq_counters has already enabled
	 * policy cpus.
	 */
	if (!have_policy && cpumask_equal(valid_cpus, cpu_present_mask))
		cpumask_or(amu_fie_cpus, amu_fie_cpus, valid_cpus);

	if (!cpumask_empty(amu_fie_cpus)) {
		pr_info("CPUs[%*pbl]: counters will be used for FIE.",
			cpumask_pr_args(amu_fie_cpus));
		static_branch_enable(&amu_fie_key);
	}

	/*
	 * If the system is not fully invariant after AMU init, disable
	 * partial use of counters for frequency invariance.
	 */
	if (!topology_scale_freq_invariant())
		static_branch_disable(&amu_fie_key);

free_valid_mask:
	free_cpumask_var(valid_cpus);

	return ret;
}
late_initcall_sync(init_amu_fie);

bool arch_freq_counters_available(const struct cpumask *cpus)
{
	return amu_freq_invariant() &&
	       cpumask_subset(cpus, amu_fie_cpus);
}

void topology_scale_freq_tick(void)
{
	u64 prev_core_cnt, prev_const_cnt;
	u64 core_cnt, const_cnt, scale;
	int cpu = smp_processor_id();

	if (!amu_freq_invariant())
		return;

	if (!cpumask_test_cpu(cpu, amu_fie_cpus))
		return;

	const_cnt = read_sysreg_s(SYS_AMEVCNTR0_CONST_EL0);
	core_cnt = read_sysreg_s(SYS_AMEVCNTR0_CORE_EL0);
	prev_const_cnt = this_cpu_read(arch_const_cycles_prev);
	prev_core_cnt = this_cpu_read(arch_core_cycles_prev);

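	/*
	 * The counters are expected to move forward between ticks; if either
	 * did not advance (or wrapped), skip the scale update but still
	 * refresh the stored snapshots.
	 */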
	if (unlikely(core_cnt <= prev_core_cnt ||
		     const_cnt <= prev_const_cnt))
		goto store_and_exit;

	/*
	 *          /\core    arch_max_freq_scale
	 * scale =  ------- * --------------------
	 *          /\const   SCHED_CAPACITY_SCALE
	 *
	 * See validate_cpu_freq_invariance_counters() for details on
	 * arch_max_freq_scale and the use of SCHED_CAPACITY_SHIFT.
	 */
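	/*
	 * Illustrative check: a CPU that ran at half of cpuinfo_max_freq
	 * between two ticks sees /\core over /\const equal to half of
	 * cpuinfo_max_freq/const_freq, so scale evaluates to roughly
	 * SCHED_CAPACITY_SCALE / 2 = 512 before the clamp below.
	 */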
	scale = core_cnt - prev_core_cnt;
	scale *= this_cpu_read(arch_max_freq_scale);
	scale = div64_u64(scale >> SCHED_CAPACITY_SHIFT,
			  const_cnt - prev_const_cnt);

	scale = min_t(unsigned long, scale, SCHED_CAPACITY_SCALE);
	this_cpu_write(freq_scale, (unsigned long)scale);

store_and_exit:
	this_cpu_write(arch_core_cycles_prev, core_cnt);
	this_cpu_write(arch_const_cycles_prev, const_cnt);
}
#endif /* CONFIG_ARM64_AMU_EXTN */