// SPDX-License-Identifier: GPL-2.0
/*
 * Arch specific cpu topology information
 *
 * Copyright (C) 2016, ARM Ltd.
 * Written by: Juri Lelli, ARM Ltd.
 */

#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/sched/topology.h>
#include <linux/cpuset.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/smp.h>

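/*
 * Per-CPU frequency-invariance state: freq_scale is a CPU's current
 * frequency expressed relative to its maximum, in SCHED_CAPACITY_SCALE
 * units; max_cpu_freq caches the maximum frequency last reported via
 * arch_set_freq_scale(); max_freq_scale reflects a policy's capped
 * maximum relative to that hardware maximum.
 */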
DEFINE_PER_CPU(unsigned long, freq_scale) = SCHED_CAPACITY_SCALE;
DEFINE_PER_CPU(unsigned long, max_cpu_freq);
DEFINE_PER_CPU(unsigned long, max_freq_scale) = SCHED_CAPACITY_SCALE;

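/*
 * Record the current frequency of each CPU in @cpus as the ratio of
 * @cur_freq to @max_freq, scaled to SCHED_CAPACITY_SCALE.
 */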
void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq,
			 unsigned long max_freq)
{
	unsigned long scale;
	int i;

	scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq;

	for_each_cpu(i, cpus) {
		per_cpu(freq_scale, i) = scale;
		per_cpu(max_cpu_freq, i) = max_freq;
	}
}

void arch_set_max_freq_scale(struct cpumask *cpus,
			     unsigned long policy_max_freq)
{
	unsigned long scale, max_freq;
	int cpu = cpumask_first(cpus);

	if (cpu > nr_cpu_ids)
		return;

	max_freq = per_cpu(max_cpu_freq, cpu);
	if (!max_freq)
		return;

	scale = (policy_max_freq << SCHED_CAPACITY_SHIFT) / max_freq;

	for_each_cpu(cpu, cpus)
		per_cpu(max_freq_scale, cpu) = scale;
}

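/*
 * cpu_scale holds each CPU's compute capacity relative to the most capable
 * CPU in the system, normalized to SCHED_CAPACITY_SCALE.
 */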
DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;

void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
{
	per_cpu(cpu_scale, cpu) = capacity;
}

static ssize_t cpu_capacity_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);

	return sprintf(buf, "%lu\n", topology_get_cpu_scale(cpu->dev.id));
}

static void update_topology_flags_workfn(struct work_struct *work);
static DECLARE_WORK(update_topology_flags_work, update_topology_flags_workfn);

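/* Expose the capacity as a read-only cpu_capacity attribute on each CPU device. */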
static DEVICE_ATTR_RO(cpu_capacity);

static int register_cpu_capacity_sysctl(void)
{
	int i;
	struct device *cpu;

	for_each_possible_cpu(i) {
		cpu = get_cpu_device(i);
		if (!cpu) {
			pr_err("%s: too early to get CPU%d device!\n",
			       __func__, i);
			continue;
		}
		device_create_file(cpu, &dev_attr_cpu_capacity);
	}

	return 0;
}
subsys_initcall(register_cpu_capacity_sysctl);

static int update_topology;

int topology_update_cpu_topology(void)
{
	return update_topology;
}

/*
 * Updating the sched_domains can't be done directly from cpufreq callbacks
 * due to locking, so queue the work for later.
 */
static void update_topology_flags_workfn(struct work_struct *work)
{
	update_topology = 1;
	rebuild_sched_domains();
	pr_debug("sched_domain hierarchy rebuilt, flags updated\n");
	update_topology = 0;
}

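/*
 * raw_capacity holds the capacity-dmips-mhz values read from the device
 * tree (later scaled by cpufreq maximum frequencies); capacity_scale
 * tracks the largest raw value seen so far and is the divisor used for
 * normalization.
 */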
static u32 capacity_scale;
static u32 *raw_capacity;

static int free_raw_capacity(void)
{
	kfree(raw_capacity);
	raw_capacity = NULL;

	return 0;
}

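/*
 * Normalize the raw capacities so the most capable CPU ends up at
 * SCHED_CAPACITY_SCALE and everything else is scaled relative to it.
 */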
void topology_normalize_cpu_scale(void)
{
	u64 capacity;
	int cpu;

	if (!raw_capacity)
		return;

	pr_debug("cpu_capacity: capacity_scale=%u\n", capacity_scale);
	for_each_possible_cpu(cpu) {
		pr_debug("cpu_capacity: cpu=%d raw_capacity=%u\n",
			 cpu, raw_capacity[cpu]);
		capacity = (raw_capacity[cpu] << SCHED_CAPACITY_SHIFT)
			/ capacity_scale;
		topology_set_cpu_scale(cpu, capacity);
		pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
			cpu, topology_get_cpu_scale(cpu));
	}
}

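/*
 * Read the "capacity-dmips-mhz" property of @cpu_node into raw_capacity.
 * Returns true if a value was found. If any CPU lacks the property,
 * parsing is abandoned and all CPUs fall back to the default capacity.
 */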
bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
{
	static bool cap_parsing_failed;
	int ret;
	u32 cpu_capacity;

	if (cap_parsing_failed)
		return false;

	ret = of_property_read_u32(cpu_node, "capacity-dmips-mhz",
				   &cpu_capacity);
	if (!ret) {
		if (!raw_capacity) {
			raw_capacity = kcalloc(num_possible_cpus(),
					       sizeof(*raw_capacity),
					       GFP_KERNEL);
			if (!raw_capacity) {
				cap_parsing_failed = true;
				return false;
			}
		}
		capacity_scale = max(cpu_capacity, capacity_scale);
		raw_capacity[cpu] = cpu_capacity;
		pr_debug("cpu_capacity: %pOF cpu_capacity=%u (raw)\n",
			cpu_node, raw_capacity[cpu]);
	} else {
		if (raw_capacity) {
			pr_err("cpu_capacity: missing %pOF raw capacity\n",
				cpu_node);
			pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
		}
		cap_parsing_failed = true;
		free_raw_capacity();
	}

	return !ret;
}

#ifdef CONFIG_CPU_FREQ
static cpumask_var_t cpus_to_visit;
static void parsing_done_workfn(struct work_struct *work);
static DECLARE_WORK(parsing_done_work, parsing_done_workfn);

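/*
 * cpufreq policy notifier: scale each CPU's raw DMIPS/MHz value by the
 * policy's maximum frequency, then renormalize once every possible CPU
 * has been covered by a policy.
 */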
static int
init_cpu_capacity_callback(struct notifier_block *nb,
			   unsigned long val,
			   void *data)
{
	struct cpufreq_policy *policy = data;
	int cpu;

	if (!raw_capacity)
		return 0;

	if (val != CPUFREQ_CREATE_POLICY)
		return 0;

	pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
		 cpumask_pr_args(policy->related_cpus),
		 cpumask_pr_args(cpus_to_visit));

	cpumask_andnot(cpus_to_visit, cpus_to_visit, policy->related_cpus);

	for_each_cpu(cpu, policy->related_cpus) {
		raw_capacity[cpu] = topology_get_cpu_scale(cpu) *
				    policy->cpuinfo.max_freq / 1000UL;
		capacity_scale = max(raw_capacity[cpu], capacity_scale);
	}

	if (cpumask_empty(cpus_to_visit)) {
		topology_normalize_cpu_scale();
		schedule_work(&update_topology_flags_work);
		free_raw_capacity();
		pr_debug("cpu_capacity: parsing done\n");
		schedule_work(&parsing_done_work);
	}

	return 0;
}

static struct notifier_block init_cpu_capacity_notifier = {
	.notifier_call = init_cpu_capacity_callback,
};

static int __init register_cpufreq_notifier(void)
{
	int ret;

	/*
	 * on ACPI-based systems we need to use the default cpu capacity
	 * until we have the necessary code to parse the cpu capacity, so
	 * skip registering cpufreq notifier.
	 */
	if (!acpi_disabled || !raw_capacity)
		return -EINVAL;

	if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(cpus_to_visit, cpu_possible_mask);

	ret = cpufreq_register_notifier(&init_cpu_capacity_notifier,
					CPUFREQ_POLICY_NOTIFIER);

	if (ret)
		free_cpumask_var(cpus_to_visit);

	return ret;
}
core_initcall(register_cpufreq_notifier);

static void parsing_done_workfn(struct work_struct *work)
{
	cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
					 CPUFREQ_POLICY_NOTIFIER);
	free_cpumask_var(cpus_to_visit);
}

#else
core_initcall(free_raw_capacity);
#endif

#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
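/*
 * Device-tree topology parsing: walk the /cpus/cpu-map node, which
 * describes the system as nested clusterN/coreN/threadN nodes, and fill
 * in cpu_topology[] accordingly.
 */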
static int __init get_cpu_for_node(struct device_node *node)
{
	struct device_node *cpu_node;
	int cpu;

	cpu_node = of_parse_phandle(node, "cpu", 0);
	if (!cpu_node)
		return -1;

	cpu = of_cpu_node_to_id(cpu_node);
	if (cpu >= 0)
		topology_parse_cpu_capacity(cpu_node, cpu);
	else
		pr_crit("Unable to find CPU node for %pOF\n", cpu_node);

	of_node_put(cpu_node);
	return cpu;
}

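/*
 * Parse a coreN node: either a leaf core backed directly by a CPU, or a
 * container of threadN nodes for SMT. Records package, core and thread
 * IDs for every CPU found.
 */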
static int __init parse_core(struct device_node *core, int package_id,
			     int core_id)
{
	char name[10];
	bool leaf = true;
	int i = 0;
	int cpu;
	struct device_node *t;

	do {
		snprintf(name, sizeof(name), "thread%d", i);
		t = of_get_child_by_name(core, name);
		if (t) {
			leaf = false;
			cpu = get_cpu_for_node(t);
			if (cpu >= 0) {
				cpu_topology[cpu].package_id = package_id;
				cpu_topology[cpu].core_id = core_id;
				cpu_topology[cpu].thread_id = i;
			} else {
				pr_err("%pOF: Can't get CPU for thread\n",
				       t);
				of_node_put(t);
				return -EINVAL;
			}
			of_node_put(t);
		}
		i++;
	} while (t);

	cpu = get_cpu_for_node(core);
	if (cpu >= 0) {
		if (!leaf) {
			pr_err("%pOF: Core has both threads and CPU\n",
			       core);
			return -EINVAL;
		}

		cpu_topology[cpu].package_id = package_id;
		cpu_topology[cpu].core_id = core_id;
	} else if (leaf) {
		pr_err("%pOF: Can't get CPU for leaf core\n", core);
		return -EINVAL;
	}

	return 0;
}

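/*
 * Parse a clusterN node, recursing into nested clusters and then into any
 * coreN children. Leaf clusters are assigned increasing package IDs.
 */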
static int __init parse_cluster(struct device_node *cluster, int depth)
{
	char name[10];
	bool leaf = true;
	bool has_cores = false;
	struct device_node *c;
	static int package_id __initdata;
	int core_id = 0;
	int i, ret;

	/*
	 * First check for child clusters; we currently ignore any
	 * information about the nesting of clusters and present the
	 * scheduler with a flat list of them.
	 */
	i = 0;
	do {
		snprintf(name, sizeof(name), "cluster%d", i);
		c = of_get_child_by_name(cluster, name);
		if (c) {
			leaf = false;
			ret = parse_cluster(c, depth + 1);
			of_node_put(c);
			if (ret != 0)
				return ret;
		}
		i++;
	} while (c);

	/* Now check for cores */
	i = 0;
	do {
		snprintf(name, sizeof(name), "core%d", i);
		c = of_get_child_by_name(cluster, name);
		if (c) {
			has_cores = true;

			if (depth == 0) {
				pr_err("%pOF: cpu-map children should be clusters\n",
				       c);
				of_node_put(c);
				return -EINVAL;
			}

			if (leaf) {
				ret = parse_core(c, package_id, core_id++);
			} else {
				pr_err("%pOF: Non-leaf cluster with core %s\n",
				       cluster, name);
				ret = -EINVAL;
			}

			of_node_put(c);
			if (ret != 0)
				return ret;
		}
		i++;
	} while (c);

	if (leaf && !has_cores)
		pr_warn("%pOF: empty cluster\n", cluster);

	if (leaf)
		package_id++;

	return 0;
}

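/*
 * Build the CPU topology from the /cpus/cpu-map device-tree node and
 * normalize CPU capacities. Returns an error if any possible CPU is left
 * without a package ID, so partial topologies are rejected by the caller.
 */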
static int __init parse_dt_topology(void)
{
	struct device_node *cn, *map;
	int ret = 0;
	int cpu;

	cn = of_find_node_by_path("/cpus");
	if (!cn) {
		pr_err("No CPU information found in DT\n");
		return 0;
	}

	/*
	 * When topology is provided cpu-map is essentially a root
	 * cluster with restricted subnodes.
	 */
	map = of_get_child_by_name(cn, "cpu-map");
	if (!map)
		goto out;

	ret = parse_cluster(map, 0);
	if (ret != 0)
		goto out_map;

	topology_normalize_cpu_scale();

	/*
	 * Check that all cores are in the topology; the SMP code will
	 * only mark cores described in the DT as possible.
	 */
	for_each_possible_cpu(cpu)
		if (cpu_topology[cpu].package_id == -1)
			ret = -EINVAL;

out_map:
	of_node_put(map);
out:
	of_node_put(cn);
	return ret;
}
#endif

/*
 * cpu topology table
 */
struct cpu_topology cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);

const struct cpumask *cpu_coregroup_mask(int cpu)
{
	const cpumask_t *core_mask = cpumask_of_node(cpu_to_node(cpu));

	/* Find the smaller of NUMA, core or LLC siblings */
	if (cpumask_subset(&cpu_topology[cpu].core_sibling, core_mask)) {
		/* not NUMA in package, let's use the package siblings */
		core_mask = &cpu_topology[cpu].core_sibling;
	}
	if (cpu_topology[cpu].llc_id != -1) {
		if (cpumask_subset(&cpu_topology[cpu].llc_sibling, core_mask))
			core_mask = &cpu_topology[cpu].llc_sibling;
	}

	return core_mask;
}

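/*
 * Fold @cpuid into the LLC, core and thread sibling masks of every online
 * CPU it shares them with, and vice versa.
 */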
void update_siblings_masks(unsigned int cpuid)
{
	struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
	int cpu;

	/* update core and thread sibling masks */
	for_each_online_cpu(cpu) {
		cpu_topo = &cpu_topology[cpu];

		if (cpuid_topo->llc_id == cpu_topo->llc_id) {
			cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling);
			cpumask_set_cpu(cpuid, &cpu_topo->llc_sibling);
		}

		if (cpuid_topo->package_id != cpu_topo->package_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
		cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);

		if (cpuid_topo->core_id != cpu_topo->core_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
		cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
	}
}

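/* Reset a CPU's sibling masks so each contains only the CPU itself. */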
static void clear_cpu_topology(int cpu)
{
	struct cpu_topology *cpu_topo = &cpu_topology[cpu];

	cpumask_clear(&cpu_topo->llc_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->llc_sibling);

	cpumask_clear(&cpu_topo->core_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
	cpumask_clear(&cpu_topo->thread_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
}

void __init reset_cpu_topology(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct cpu_topology *cpu_topo = &cpu_topology[cpu];

		cpu_topo->thread_id = -1;
		cpu_topo->core_id = -1;
		cpu_topo->package_id = -1;
		cpu_topo->llc_id = -1;

		clear_cpu_topology(cpu);
	}
}

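/*
 * Remove a departing CPU from the sibling masks of every CPU it was
 * grouped with, then reset its own masks.
 */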
void remove_cpu_topology(unsigned int cpu)
{
	int sibling;

	for_each_cpu(sibling, topology_core_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));
	for_each_cpu(sibling, topology_sibling_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
	for_each_cpu(sibling, topology_llc_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_llc_cpumask(sibling));

	clear_cpu_topology(cpu);
}

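/*
 * Weak default used when an architecture does not provide its own ACPI
 * topology parser; returning 0 means "nothing parsed, no error".
 */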
__weak int __init parse_acpi_topology(void)
{
	return 0;
}

#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
void __init init_cpu_topology(void)
{
	reset_cpu_topology();

	/*
	 * Discard anything that was parsed if we hit an error so we
	 * don't use partial information.
	 */
	if (parse_acpi_topology())
		reset_cpu_topology();
	else if (of_have_populated_dt() && parse_dt_topology())
		reset_cpu_topology();
}
#endif