#include <stdint.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <inttypes.h> /* PRIu32/PRIx32 format macros used in the log messages below */

#include <cpuinfo.h>
#include <arm/linux/api.h>
#if defined(__ANDROID__)
	#include <arm/android/api.h>
#endif
#include <arm/api.h>
#include <arm/midr.h>
#include <linux/api.h>
#include <cpuinfo/internal-api.h>
#include <cpuinfo/log.h>

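/*
 * Global ISA feature descriptor: zero-initialized here and filled in by
 * cpuinfo_arm_linux_init; the cpuinfo_has_arm_*() feature queries read it.
 */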
struct cpuinfo_arm_isa cpuinfo_isa = { 0 };

static struct cpuinfo_package package = { { 0 } };

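/* Returns true if every bit set in mask is also set in bitfield. */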
static inline bool bitmask_all(uint32_t bitfield, uint32_t mask) {
	return (bitfield & mask) == mask;
}

static inline uint32_t min(uint32_t a, uint32_t b) {
	return a < b ? a : b;
}

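/* Branchless three-way comparison: returns -1, 0, or +1 as a is less than, equal to, or greater than b. */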
static inline int cmp(uint32_t a, uint32_t b) {
	return (a > b) - (a < b);
}

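/*
 * Callback for cpuinfo_linux_detect_core_siblings: merges the processor and
 * every valid sibling in [siblings_start, siblings_end) into one cluster by
 * pointing them all at the smallest package_leader_id seen so far.
 */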
static bool cluster_siblings_parser(
	uint32_t processor, uint32_t siblings_start, uint32_t siblings_end,
	struct cpuinfo_arm_linux_processor* processors)
{
	processors[processor].flags |= CPUINFO_LINUX_FLAG_PACKAGE_CLUSTER;
	uint32_t package_leader_id = processors[processor].package_leader_id;

	for (uint32_t sibling = siblings_start; sibling < siblings_end; sibling++) {
		if (!bitmask_all(processors[sibling].flags, CPUINFO_LINUX_FLAG_VALID)) {
			cpuinfo_log_info("invalid processor %"PRIu32" reported as a sibling for processor %"PRIu32,
				sibling, processor);
			continue;
		}

		const uint32_t sibling_package_leader_id = processors[sibling].package_leader_id;
		if (sibling_package_leader_id < package_leader_id) {
			package_leader_id = sibling_package_leader_id;
		}

		processors[sibling].package_leader_id = package_leader_id;
		processors[sibling].flags |= CPUINFO_LINUX_FLAG_PACKAGE_CLUSTER;
	}

	processors[processor].package_leader_id = package_leader_id;

	return true;
}

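/*
 * qsort comparator that orders logical processors for reporting: usable
 * (valid) processors first, then higher-performance cores (by MIDR-based
 * score, then by maximum frequency), then by cluster leader id, and finally
 * by ascending system processor id as a tie-breaker.
 */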
static int cmp_arm_linux_processor(const void* ptr_a, const void* ptr_b) {
	const struct cpuinfo_arm_linux_processor* processor_a = (const struct cpuinfo_arm_linux_processor*) ptr_a;
	const struct cpuinfo_arm_linux_processor* processor_b = (const struct cpuinfo_arm_linux_processor*) ptr_b;

	/* Move usable processors towards the start of the array */
	const bool usable_a = bitmask_all(processor_a->flags, CPUINFO_LINUX_FLAG_VALID);
	const bool usable_b = bitmask_all(processor_b->flags, CPUINFO_LINUX_FLAG_VALID);
	if (usable_a != usable_b) {
		return (int) usable_b - (int) usable_a;
	}

	/* Compare based on core type: bigger cores sort first (e.g. Cortex-A57 before Cortex-A53) */
	const uint32_t midr_a = processor_a->midr;
	const uint32_t midr_b = processor_b->midr;
	if (midr_a != midr_b) {
		const uint32_t score_a = midr_score_core(midr_a);
		const uint32_t score_b = midr_score_core(midr_b);
		if (score_a != score_b) {
			return score_a > score_b ? -1 : 1;
		}
	}

	/* Compare based on core frequency: faster cores sort first (e.g. 2.0 GHz before 1.2 GHz) */
	const uint32_t frequency_a = processor_a->max_frequency;
	const uint32_t frequency_b = processor_b->max_frequency;
	if (frequency_a != frequency_b) {
		return frequency_a > frequency_b ? -1 : 1;
	}

	/* Compare based on cluster leader id: higher leader id sorts first (i.e. cluster 1 before cluster 0) */
	const uint32_t cluster_a = processor_a->package_leader_id;
	const uint32_t cluster_b = processor_b->package_leader_id;
	if (cluster_a != cluster_b) {
		return cluster_a > cluster_b ? -1 : 1;
	}

	/* Compare based on system processor id: lower id sorts first (i.e. processor 0 before processor 1) */
	const uint32_t id_a = processor_a->system_processor_id;
	const uint32_t id_b = processor_b->system_processor_id;
	return cmp(id_a, id_b);
}

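/*
 * Entry point for ARM/Linux (and Android) initialization. The function
 * proceeds in phases: enumerate processors from sysfs, parse /proc/cpuinfo,
 * detect the chipset and ISA, reconstruct the cluster topology, sort the
 * processors, then build and publish the public cpuinfo structures.
 */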
void cpuinfo_arm_linux_init(void) {
	struct cpuinfo_arm_linux_processor* arm_linux_processors = NULL;
	struct cpuinfo_processor* processors = NULL;
	struct cpuinfo_core* cores = NULL;
	struct cpuinfo_cluster* clusters = NULL;
	struct cpuinfo_uarch_info* uarchs = NULL;
	struct cpuinfo_cache* l1i = NULL;
	struct cpuinfo_cache* l1d = NULL;
	struct cpuinfo_cache* l2 = NULL;
	struct cpuinfo_cache* l3 = NULL;
	const struct cpuinfo_processor** linux_cpu_to_processor_map = NULL;
	const struct cpuinfo_core** linux_cpu_to_core_map = NULL;
	uint32_t* linux_cpu_to_uarch_index_map = NULL;

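	/*
	 * Determine how many logical processors to consider: take the system
	 * maximum and clamp it by the possible and present processor lists
	 * reported by the kernel.
	 */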
	const uint32_t max_processors_count = cpuinfo_linux_get_max_processors_count();
	cpuinfo_log_debug("system maximum processors count: %"PRIu32, max_processors_count);

	const uint32_t max_possible_processors_count = 1 +
		cpuinfo_linux_get_max_possible_processor(max_processors_count);
	cpuinfo_log_debug("maximum possible processors count: %"PRIu32, max_possible_processors_count);
	const uint32_t max_present_processors_count = 1 +
		cpuinfo_linux_get_max_present_processor(max_processors_count);
	cpuinfo_log_debug("maximum present processors count: %"PRIu32, max_present_processors_count);

	uint32_t valid_processor_mask = 0;
	uint32_t arm_linux_processors_count = max_processors_count;
	if (max_present_processors_count != 0) {
		arm_linux_processors_count = min(arm_linux_processors_count, max_present_processors_count);
		valid_processor_mask = CPUINFO_LINUX_FLAG_PRESENT;
	}
	if (max_possible_processors_count != 0) {
		arm_linux_processors_count = min(arm_linux_processors_count, max_possible_processors_count);
		valid_processor_mask |= CPUINFO_LINUX_FLAG_POSSIBLE;
	}
	if ((max_present_processors_count | max_possible_processors_count) == 0) {
		cpuinfo_log_error("failed to parse both lists of possible and present processors");
		return;
	}

	arm_linux_processors = calloc(arm_linux_processors_count, sizeof(struct cpuinfo_arm_linux_processor));
	if (arm_linux_processors == NULL) {
		cpuinfo_log_error(
			"failed to allocate %zu bytes for descriptions of %"PRIu32" ARM logical processors",
			arm_linux_processors_count * sizeof(struct cpuinfo_arm_linux_processor),
			arm_linux_processors_count);
		return;
	}

	if (max_possible_processors_count) {
		cpuinfo_linux_detect_possible_processors(
			arm_linux_processors_count, &arm_linux_processors->flags,
			sizeof(struct cpuinfo_arm_linux_processor),
			CPUINFO_LINUX_FLAG_POSSIBLE);
	}

	if (max_present_processors_count) {
		cpuinfo_linux_detect_present_processors(
			arm_linux_processors_count, &arm_linux_processors->flags,
			sizeof(struct cpuinfo_arm_linux_processor),
			CPUINFO_LINUX_FLAG_PRESENT);
	}

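	/*
	 * On Android, chipset detection additionally uses Android system
	 * properties, which are gathered up front; the /proc/cpuinfo Hardware
	 * string is captured on both targets.
	 */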
#if defined(__ANDROID__)
	struct cpuinfo_android_properties android_properties;
	cpuinfo_arm_android_parse_properties(&android_properties);
#else
	char proc_cpuinfo_hardware[CPUINFO_HARDWARE_VALUE_MAX];
#endif
	char proc_cpuinfo_revision[CPUINFO_REVISION_VALUE_MAX];

	if (!cpuinfo_arm_linux_parse_proc_cpuinfo(
#if defined(__ANDROID__)
			android_properties.proc_cpuinfo_hardware,
#else
			proc_cpuinfo_hardware,
#endif
			proc_cpuinfo_revision,
			arm_linux_processors_count,
			arm_linux_processors)) {
		cpuinfo_log_error("failed to parse processor information from /proc/cpuinfo");
		/* goto rather than return: arm_linux_processors is already allocated and must be freed */
		goto cleanup;
	}

	for (uint32_t i = 0; i < arm_linux_processors_count; i++) {
		if (bitmask_all(arm_linux_processors[i].flags, valid_processor_mask)) {
			arm_linux_processors[i].flags |= CPUINFO_LINUX_FLAG_VALID;
			cpuinfo_log_debug("parsed processor %"PRIu32" MIDR 0x%08"PRIx32,
				i, arm_linux_processors[i].midr);
		}
	}

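	/*
	 * Count usable processors and remember the most recently reported MIDR
	 * and architecture: the last seen values are used by the ISA detection
	 * below when per-processor records are incomplete.
	 */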
	uint32_t valid_processors = 0, last_midr = 0;
	#if CPUINFO_ARCH_ARM
	uint32_t last_architecture_version = 0, last_architecture_flags = 0;
	#endif
	for (uint32_t i = 0; i < arm_linux_processors_count; i++) {
		arm_linux_processors[i].system_processor_id = i;
		if (bitmask_all(arm_linux_processors[i].flags, CPUINFO_LINUX_FLAG_VALID)) {
			valid_processors += 1;

			if (!(arm_linux_processors[i].flags & CPUINFO_ARM_LINUX_VALID_PROCESSOR)) {
				/*
				 * Processor is in the possible and present lists, but not reported in /proc/cpuinfo.
				 * This is fairly common: high-index processors may not be reported if they are offline.
				 */
				cpuinfo_log_info("processor %"PRIu32" is not listed in /proc/cpuinfo", i);
			}

			if (bitmask_all(arm_linux_processors[i].flags, CPUINFO_ARM_LINUX_VALID_MIDR)) {
				last_midr = arm_linux_processors[i].midr;
			}
			#if CPUINFO_ARCH_ARM
				if (bitmask_all(arm_linux_processors[i].flags, CPUINFO_ARM_LINUX_VALID_ARCHITECTURE)) {
					last_architecture_version = arm_linux_processors[i].architecture_version;
					last_architecture_flags   = arm_linux_processors[i].architecture_flags;
				}
			#endif
		} else {
			/* Processor reported in /proc/cpuinfo, but absent from the possible and/or present lists: log and ignore */
			if (!(arm_linux_processors[i].flags & CPUINFO_ARM_LINUX_VALID_PROCESSOR)) {
				cpuinfo_log_warning("invalid processor %"PRIu32" reported in /proc/cpuinfo", i);
			}
		}
	}

#if defined(__ANDROID__)
	const struct cpuinfo_arm_chipset chipset =
		cpuinfo_arm_android_decode_chipset(&android_properties, valid_processors, 0);
#else
	const struct cpuinfo_arm_chipset chipset =
		cpuinfo_arm_linux_decode_chipset(proc_cpuinfo_hardware, proc_cpuinfo_revision, valid_processors, 0);
#endif

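	/*
	 * Detect ISA features from the kernel HWCAP bitmasks, with /proc/cpuinfo
	 * data as supplementary input.
	 */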
	#if CPUINFO_ARCH_ARM
		uint32_t isa_features = 0, isa_features2 = 0;
		#ifdef __ANDROID__
			/*
			 * On Android before API 20, libc.so does not provide the getauxval function.
			 * Thus, we dlopen libc.so and try to locate getauxval there; if that fails,
			 * we use two fallback mechanisms:
			 * 1. Parse the /proc/self/auxv procfs file.
			 * 2. Use the features reported in /proc/cpuinfo.
			 */
			if (!cpuinfo_arm_linux_hwcap_from_getauxval(&isa_features, &isa_features2)) {
				/* getauxval can't be used, fall back to parsing /proc/self/auxv */
				if (!cpuinfo_arm_linux_hwcap_from_procfs(&isa_features, &isa_features2)) {
					/*
					 * Reading /proc/self/auxv failed, probably due to file permissions.
					 * Use information from /proc/cpuinfo to detect the ISA.
					 *
					 * If different processors report different ISA features, take the intersection.
					 */
					uint32_t processors_with_features = 0;
					for (uint32_t i = 0; i < arm_linux_processors_count; i++) {
						if (bitmask_all(arm_linux_processors[i].flags, CPUINFO_LINUX_FLAG_VALID | CPUINFO_ARM_LINUX_VALID_FEATURES)) {
							if (processors_with_features == 0) {
								isa_features = arm_linux_processors[i].features;
								isa_features2 = arm_linux_processors[i].features2;
							} else {
								isa_features &= arm_linux_processors[i].features;
								isa_features2 &= arm_linux_processors[i].features2;
							}
							processors_with_features += 1;
						}
					}
				}
			}
		#else
			/* On GNU/Linux getauxval is always available */
			cpuinfo_arm_linux_hwcap_from_getauxval(&isa_features, &isa_features2);
		#endif
		cpuinfo_arm_linux_decode_isa_from_proc_cpuinfo(
			isa_features, isa_features2,
			last_midr, last_architecture_version, last_architecture_flags,
			&chipset, &cpuinfo_isa);
	#elif CPUINFO_ARCH_ARM64
		uint32_t isa_features = 0, isa_features2 = 0;
		/* getauxval is always available on ARM64 Linux (and Android) */
		cpuinfo_arm_linux_hwcap_from_getauxval(&isa_features, &isa_features2);
		cpuinfo_arm64_linux_decode_isa_from_proc_cpuinfo(
			isa_features, isa_features2, last_midr, &chipset, &cpuinfo_isa);
	#endif

	/* Detect min/max frequency and package ID */
	for (uint32_t i = 0; i < arm_linux_processors_count; i++) {
		if (bitmask_all(arm_linux_processors[i].flags, CPUINFO_LINUX_FLAG_VALID)) {
			const uint32_t max_frequency = cpuinfo_linux_get_processor_max_frequency(i);
			if (max_frequency != 0) {
				arm_linux_processors[i].max_frequency = max_frequency;
				arm_linux_processors[i].flags |= CPUINFO_LINUX_FLAG_MAX_FREQUENCY;
			}

			const uint32_t min_frequency = cpuinfo_linux_get_processor_min_frequency(i);
			if (min_frequency != 0) {
				arm_linux_processors[i].min_frequency = min_frequency;
				arm_linux_processors[i].flags |= CPUINFO_LINUX_FLAG_MIN_FREQUENCY;
			}

			if (cpuinfo_linux_get_processor_package_id(i, &arm_linux_processors[i].package_id)) {
				arm_linux_processors[i].flags |= CPUINFO_LINUX_FLAG_PACKAGE_ID;
			}
		}
	}

	/* Initialize topology group IDs */
	for (uint32_t i = 0; i < arm_linux_processors_count; i++) {
		arm_linux_processors[i].package_leader_id = i;
	}

	/* Propagate topology group IDs among siblings */
	for (uint32_t i = 0; i < arm_linux_processors_count; i++) {
		if (!bitmask_all(arm_linux_processors[i].flags, CPUINFO_LINUX_FLAG_VALID)) {
			continue;
		}

		if (arm_linux_processors[i].flags & CPUINFO_LINUX_FLAG_PACKAGE_ID) {
			cpuinfo_linux_detect_core_siblings(
				arm_linux_processors_count, i,
				(cpuinfo_siblings_callback) cluster_siblings_parser,
				arm_linux_processors);
		}
	}

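	/*
	 * The siblings parser only links processors pairwise; this forward pass
	 * path-compresses the links so that every clustered processor points
	 * directly at the first (lowest-id) processor of its cluster.
	 */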
	/* Propagate all cluster IDs */
	uint32_t clustered_processors = 0;
	for (uint32_t i = 0; i < arm_linux_processors_count; i++) {
		if (bitmask_all(arm_linux_processors[i].flags, CPUINFO_LINUX_FLAG_VALID | CPUINFO_LINUX_FLAG_PACKAGE_CLUSTER)) {
			clustered_processors += 1;

			const uint32_t package_leader_id = arm_linux_processors[i].package_leader_id;
			if (package_leader_id < i) {
				arm_linux_processors[i].package_leader_id = arm_linux_processors[package_leader_id].package_leader_id;
			}

			cpuinfo_log_debug("processor %"PRIu32" clustered with processor %"PRIu32" as inferred from system siblings lists",
				i, arm_linux_processors[i].package_leader_id);
		}
	}

	if (clustered_processors != valid_processors) {
		/*
		 * Topology information about some or all logical processors may be unavailable, for the following reasons:
		 * - The Linux kernel is too old, or is configured without support for topology information in sysfs.
		 * - A core is offline, and the Linux kernel is configured to not report topology for offline cores.
		 *
		 * In this case, we assign processors to clusters using two methods:
		 * - Try heuristic cluster configurations (e.g. a 6-core SoC usually has a 4+2 big.LITTLE configuration).
		 * - If the heuristic fails, assign processors to core clusters in a sequential scan.
		 */
		if (!cpuinfo_arm_linux_detect_core_clusters_by_heuristic(valid_processors, arm_linux_processors_count, arm_linux_processors)) {
			cpuinfo_arm_linux_detect_core_clusters_by_sequential_scan(arm_linux_processors_count, arm_linux_processors);
		}
	}

	cpuinfo_arm_linux_count_cluster_processors(arm_linux_processors_count, arm_linux_processors);

	const uint32_t cluster_count = cpuinfo_arm_linux_detect_cluster_midr(
		&chipset,
		arm_linux_processors_count, valid_processors, arm_linux_processors);

	/* Initialize core vendor, uarch, MIDR, and frequency for every logical processor */
	for (uint32_t i = 0; i < arm_linux_processors_count; i++) {
		if (bitmask_all(arm_linux_processors[i].flags, CPUINFO_LINUX_FLAG_VALID)) {
			const uint32_t cluster_leader = arm_linux_processors[i].package_leader_id;
			if (cluster_leader == i) {
				/* Cluster leader: decode core vendor and uarch */
				cpuinfo_arm_decode_vendor_uarch(
					arm_linux_processors[cluster_leader].midr,
#if CPUINFO_ARCH_ARM
					!!(arm_linux_processors[cluster_leader].features & CPUINFO_ARM_LINUX_FEATURE_VFPV4),
#endif
					&arm_linux_processors[cluster_leader].vendor,
					&arm_linux_processors[cluster_leader].uarch);
			} else {
				/* Cluster non-leader: copy vendor, uarch, MIDR, and frequency from the cluster leader */
				arm_linux_processors[i].flags |= arm_linux_processors[cluster_leader].flags &
					(CPUINFO_ARM_LINUX_VALID_MIDR | CPUINFO_LINUX_FLAG_MAX_FREQUENCY);
				arm_linux_processors[i].midr = arm_linux_processors[cluster_leader].midr;
				arm_linux_processors[i].vendor = arm_linux_processors[cluster_leader].vendor;
				arm_linux_processors[i].uarch = arm_linux_processors[cluster_leader].uarch;
				arm_linux_processors[i].max_frequency = arm_linux_processors[cluster_leader].max_frequency;
			}
		}
	}

	for (uint32_t i = 0; i < arm_linux_processors_count; i++) {
		if (bitmask_all(arm_linux_processors[i].flags, CPUINFO_LINUX_FLAG_VALID)) {
			cpuinfo_log_debug("post-analysis processor %"PRIu32": MIDR %08"PRIx32" frequency %"PRIu32,
				i, arm_linux_processors[i].midr, arm_linux_processors[i].max_frequency);
		}
	}

	qsort(arm_linux_processors, arm_linux_processors_count,
		sizeof(struct cpuinfo_arm_linux_processor), cmp_arm_linux_processor);

	for (uint32_t i = 0; i < arm_linux_processors_count; i++) {
		if (bitmask_all(arm_linux_processors[i].flags, CPUINFO_LINUX_FLAG_VALID)) {
			cpuinfo_log_debug("post-sort processor %"PRIu32": system id %"PRIu32" MIDR %08"PRIx32" frequency %"PRIu32,
				i, arm_linux_processors[i].system_processor_id, arm_linux_processors[i].midr, arm_linux_processors[i].max_frequency);
		}
	}

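	/*
	 * Count distinct microarchitectures; the run-length scan below relies on
	 * the sort above having placed processors with identical uarchs next to
	 * each other.
	 */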
	uint32_t uarchs_count = 0;
	enum cpuinfo_uarch last_uarch;
	for (uint32_t i = 0; i < arm_linux_processors_count; i++) {
		if (bitmask_all(arm_linux_processors[i].flags, CPUINFO_LINUX_FLAG_VALID)) {
			if (uarchs_count == 0 || arm_linux_processors[i].uarch != last_uarch) {
				last_uarch = arm_linux_processors[i].uarch;
				uarchs_count += 1;
			}
			arm_linux_processors[i].uarch_index = uarchs_count - 1;
		}
	}

	/*
	 * Assumptions:
	 * - No SMT (i.e. each core supports only one hardware thread).
	 * - Level 1 instruction and data caches are private to each core.
	 * - Level 2 and level 3 caches are shared between cores in the same cluster.
	 */
	cpuinfo_arm_chipset_to_string(&chipset, package.name);
	package.processor_count = valid_processors;
	package.core_count = valid_processors;
	package.cluster_count = cluster_count;

	processors = calloc(valid_processors, sizeof(struct cpuinfo_processor));
	if (processors == NULL) {
		cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" logical processors",
			valid_processors * sizeof(struct cpuinfo_processor), valid_processors);
		goto cleanup;
	}

	cores = calloc(valid_processors, sizeof(struct cpuinfo_core));
	if (cores == NULL) {
		cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" cores",
			valid_processors * sizeof(struct cpuinfo_core), valid_processors);
		goto cleanup;
	}

	clusters = calloc(cluster_count, sizeof(struct cpuinfo_cluster));
	if (clusters == NULL) {
		cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" core clusters",
			cluster_count * sizeof(struct cpuinfo_cluster), cluster_count);
		goto cleanup;
	}

	uarchs = calloc(uarchs_count, sizeof(struct cpuinfo_uarch_info));
	if (uarchs == NULL) {
		cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" microarchitectures",
			uarchs_count * sizeof(struct cpuinfo_uarch_info), uarchs_count);
		goto cleanup;
	}

	linux_cpu_to_processor_map = calloc(arm_linux_processors_count, sizeof(struct cpuinfo_processor*));
	if (linux_cpu_to_processor_map == NULL) {
		cpuinfo_log_error("failed to allocate %zu bytes for %"PRIu32" logical processor mapping entries",
			arm_linux_processors_count * sizeof(struct cpuinfo_processor*), arm_linux_processors_count);
		goto cleanup;
	}

	linux_cpu_to_core_map = calloc(arm_linux_processors_count, sizeof(struct cpuinfo_core*));
	if (linux_cpu_to_core_map == NULL) {
		cpuinfo_log_error("failed to allocate %zu bytes for %"PRIu32" core mapping entries",
			arm_linux_processors_count * sizeof(struct cpuinfo_core*), arm_linux_processors_count);
		goto cleanup;
	}

	if (uarchs_count > 1) {
		linux_cpu_to_uarch_index_map = calloc(arm_linux_processors_count, sizeof(uint32_t));
		if (linux_cpu_to_uarch_index_map == NULL) {
			cpuinfo_log_error("failed to allocate %zu bytes for %"PRIu32" uarch index mapping entries",
				arm_linux_processors_count * sizeof(uint32_t), arm_linux_processors_count);
			goto cleanup;
		}
	}

	l1i = calloc(valid_processors, sizeof(struct cpuinfo_cache));
	if (l1i == NULL) {
		cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" L1I caches",
			valid_processors * sizeof(struct cpuinfo_cache), valid_processors);
		goto cleanup;
	}

	l1d = calloc(valid_processors, sizeof(struct cpuinfo_cache));
	if (l1d == NULL) {
		cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" L1D caches",
			valid_processors * sizeof(struct cpuinfo_cache), valid_processors);
		goto cleanup;
	}

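	/*
	 * Second pass over the sorted processors: materialize one
	 * cpuinfo_uarch_info entry per run of identical microarchitectures and
	 * accumulate its processor/core counts.
	 */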
	uint32_t uarchs_index = 0;
	for (uint32_t i = 0; i < arm_linux_processors_count; i++) {
		if (bitmask_all(arm_linux_processors[i].flags, CPUINFO_LINUX_FLAG_VALID)) {
			if (uarchs_index == 0 || arm_linux_processors[i].uarch != last_uarch) {
				last_uarch = arm_linux_processors[i].uarch;
				uarchs[uarchs_index] = (struct cpuinfo_uarch_info) {
					.uarch = arm_linux_processors[i].uarch,
					.midr = arm_linux_processors[i].midr,
				};
				uarchs_index += 1;
			}
			uarchs[uarchs_index - 1].processor_count += 1;
			uarchs[uarchs_index - 1].core_count += 1;
		}
	}

	uint32_t l2_count = 0, l3_count = 0, big_l3_size = 0, cluster_id = UINT32_MAX;
	/* Indicates whether the L3 cache (if it exists) is shared between all cores */
	bool shared_l3 = true;
	/* Populate cluster/core/processor structures and L1 cache information in l1i, l1d */
	for (uint32_t i = 0; i < valid_processors; i++) {
		if (arm_linux_processors[i].package_leader_id == arm_linux_processors[i].system_processor_id) {
			cluster_id += 1;
			clusters[cluster_id] = (struct cpuinfo_cluster) {
				.processor_start = i,
				.processor_count = arm_linux_processors[i].package_processor_count,
				.core_start = i,
				.core_count = arm_linux_processors[i].package_processor_count,
				.cluster_id = cluster_id,
				.package = &package,
				.vendor = arm_linux_processors[i].vendor,
				.uarch = arm_linux_processors[i].uarch,
				.midr = arm_linux_processors[i].midr,
			};
		}

		processors[i].smt_id = 0;
		processors[i].core = cores + i;
		processors[i].cluster = clusters + cluster_id;
		processors[i].package = &package;
		processors[i].linux_id = (int) arm_linux_processors[i].system_processor_id;
		processors[i].cache.l1i = l1i + i;
		processors[i].cache.l1d = l1d + i;
		linux_cpu_to_processor_map[arm_linux_processors[i].system_processor_id] = &processors[i];

		cores[i].processor_start = i;
		cores[i].processor_count = 1;
		cores[i].core_id = i;
		cores[i].cluster = clusters + cluster_id;
		cores[i].package = &package;
		cores[i].vendor = arm_linux_processors[i].vendor;
		cores[i].uarch = arm_linux_processors[i].uarch;
		cores[i].midr = arm_linux_processors[i].midr;
		linux_cpu_to_core_map[arm_linux_processors[i].system_processor_id] = &cores[i];

		if (linux_cpu_to_uarch_index_map != NULL) {
			linux_cpu_to_uarch_index_map[arm_linux_processors[i].system_processor_id] =
				arm_linux_processors[i].uarch_index;
		}

		struct cpuinfo_cache temp_l2 = { 0 }, temp_l3 = { 0 };
		cpuinfo_arm_decode_cache(
			arm_linux_processors[i].uarch,
			arm_linux_processors[i].package_processor_count,
			arm_linux_processors[i].midr,
			&chipset,
			cluster_id,
			arm_linux_processors[i].architecture_version,
			&l1i[i], &l1d[i], &temp_l2, &temp_l3);
		l1i[i].processor_start = l1d[i].processor_start = i;
		l1i[i].processor_count = l1d[i].processor_count = 1;
		#if CPUINFO_ARCH_ARM
			/* L1I reported in /proc/cpuinfo overrides defaults */
			if (bitmask_all(arm_linux_processors[i].flags, CPUINFO_ARM_LINUX_VALID_ICACHE)) {
				l1i[i] = (struct cpuinfo_cache) {
					.size = arm_linux_processors[i].proc_cpuinfo_cache.i_size,
					.associativity = arm_linux_processors[i].proc_cpuinfo_cache.i_assoc,
					.sets = arm_linux_processors[i].proc_cpuinfo_cache.i_sets,
					.partitions = 1,
					.line_size = arm_linux_processors[i].proc_cpuinfo_cache.i_line_length
				};
			}
			/* L1D reported in /proc/cpuinfo overrides defaults */
			if (bitmask_all(arm_linux_processors[i].flags, CPUINFO_ARM_LINUX_VALID_DCACHE)) {
				l1d[i] = (struct cpuinfo_cache) {
					.size = arm_linux_processors[i].proc_cpuinfo_cache.d_size,
					.associativity = arm_linux_processors[i].proc_cpuinfo_cache.d_assoc,
					.sets = arm_linux_processors[i].proc_cpuinfo_cache.d_sets,
					.partitions = 1,
					.line_size = arm_linux_processors[i].proc_cpuinfo_cache.d_line_length
				};
			}
		#endif

		if (temp_l3.size != 0) {
			/*
			 * Assumptions:
			 * - L2 is private to each core.
			 * - L3 is shared by cores in the same cluster.
			 * - If cores in different clusters report the same L3, it is shared between all cores.
			 */
			l2_count += 1;
			if (arm_linux_processors[i].package_leader_id == arm_linux_processors[i].system_processor_id) {
				if (cluster_id == 0) {
					big_l3_size = temp_l3.size;
					l3_count = 1;
				} else if (temp_l3.size != big_l3_size) {
					/* If some cores have a different L3 size, L3 is not shared between all cores */
					shared_l3 = false;
					l3_count += 1;
				}
			}
		} else {
			/* If some cores don't have an L3 cache, L3 is not shared between all cores */
			shared_l3 = false;
			if (temp_l2.size != 0) {
				/* Assume L2 is shared by cores in the same cluster */
				if (arm_linux_processors[i].package_leader_id == arm_linux_processors[i].system_processor_id) {
					l2_count += 1;
				}
			}
		}
	}

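	/*
	 * Allocate the L2/L3 descriptor arrays using the counts collected above;
	 * an L3 array is only allocated when L2 caches exist.
	 */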
	if (l2_count != 0) {
		l2 = calloc(l2_count, sizeof(struct cpuinfo_cache));
		if (l2 == NULL) {
			cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" L2 caches",
				l2_count * sizeof(struct cpuinfo_cache), l2_count);
			goto cleanup;
		}

		if (l3_count != 0) {
			l3 = calloc(l3_count, sizeof(struct cpuinfo_cache));
			if (l3 == NULL) {
				cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" L3 caches",
					l3_count * sizeof(struct cpuinfo_cache), l3_count);
				goto cleanup;
			}
		}
	}

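	/*
	 * Second cache pass: re-run the cache decoding per processor to wire up
	 * the l2/l3 pointers now that the arrays are allocated (the L1 results
	 * are discarded into dummies this time).
	 */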
	cluster_id = UINT32_MAX;
	uint32_t l2_index = UINT32_MAX, l3_index = UINT32_MAX;
	for (uint32_t i = 0; i < valid_processors; i++) {
		if (arm_linux_processors[i].package_leader_id == arm_linux_processors[i].system_processor_id) {
			cluster_id++;
		}

		struct cpuinfo_cache dummy_l1i, dummy_l1d, temp_l2 = { 0 }, temp_l3 = { 0 };
		cpuinfo_arm_decode_cache(
			arm_linux_processors[i].uarch,
			arm_linux_processors[i].package_processor_count,
			arm_linux_processors[i].midr,
			&chipset,
			cluster_id,
			arm_linux_processors[i].architecture_version,
			&dummy_l1i, &dummy_l1d, &temp_l2, &temp_l3);

		if (temp_l3.size != 0) {
			/*
			 * Assumptions:
			 * - L2 is private to each core.
			 * - L3 is shared by cores in the same cluster.
			 * - If cores in different clusters report the same L3, it is shared between all cores.
			 */
			l2_index += 1;
			l2[l2_index] = (struct cpuinfo_cache) {
				.size            = temp_l2.size,
				.associativity   = temp_l2.associativity,
				.sets            = temp_l2.sets,
				.partitions      = 1,
				.line_size       = temp_l2.line_size,
				.flags           = temp_l2.flags,
				.processor_start = i,
				.processor_count = 1,
			};
			processors[i].cache.l2 = l2 + l2_index;
			if (arm_linux_processors[i].package_leader_id == arm_linux_processors[i].system_processor_id) {
				l3_index += 1;
				if (l3_index < l3_count) {
					l3[l3_index] = (struct cpuinfo_cache) {
						.size            = temp_l3.size,
						.associativity   = temp_l3.associativity,
						.sets            = temp_l3.sets,
						.partitions      = 1,
						.line_size       = temp_l3.line_size,
						.flags           = temp_l3.flags,
						.processor_start = i,
						.processor_count =
							shared_l3 ? valid_processors : arm_linux_processors[i].package_processor_count,
					};
				}
			}
			if (shared_l3) {
				processors[i].cache.l3 = l3;
			} else if (l3_index < l3_count) {
				processors[i].cache.l3 = l3 + l3_index;
			}
		} else if (temp_l2.size != 0) {
			/* Assume L2 is shared by cores in the same cluster */
			if (arm_linux_processors[i].package_leader_id == arm_linux_processors[i].system_processor_id) {
				l2_index += 1;
				l2[l2_index] = (struct cpuinfo_cache) {
					.size            = temp_l2.size,
					.associativity   = temp_l2.associativity,
					.sets            = temp_l2.sets,
					.partitions      = 1,
					.line_size       = temp_l2.line_size,
					.flags           = temp_l2.flags,
					.processor_start = i,
					.processor_count = arm_linux_processors[i].package_processor_count,
				};
			}
			processors[i].cache.l2 = l2 + l2_index;
		}
	}

	/* Commit */
	cpuinfo_processors = processors;
	cpuinfo_cores = cores;
	cpuinfo_clusters = clusters;
	cpuinfo_packages = &package;
	cpuinfo_uarchs = uarchs;
	cpuinfo_cache[cpuinfo_cache_level_1i] = l1i;
	cpuinfo_cache[cpuinfo_cache_level_1d] = l1d;
	cpuinfo_cache[cpuinfo_cache_level_2]  = l2;
	cpuinfo_cache[cpuinfo_cache_level_3]  = l3;

	cpuinfo_processors_count = valid_processors;
	cpuinfo_cores_count = valid_processors;
	cpuinfo_clusters_count = cluster_count;
	cpuinfo_packages_count = 1;
	cpuinfo_uarchs_count = uarchs_count;
	cpuinfo_cache_count[cpuinfo_cache_level_1i] = valid_processors;
	cpuinfo_cache_count[cpuinfo_cache_level_1d] = valid_processors;
	cpuinfo_cache_count[cpuinfo_cache_level_2]  = l2_count;
	cpuinfo_cache_count[cpuinfo_cache_level_3]  = l3_count;
	cpuinfo_max_cache_size = cpuinfo_arm_compute_max_cache_size(&processors[0]);

	cpuinfo_linux_cpu_max = arm_linux_processors_count;
	cpuinfo_linux_cpu_to_processor_map = linux_cpu_to_processor_map;
	cpuinfo_linux_cpu_to_core_map = linux_cpu_to_core_map;
	cpuinfo_linux_cpu_to_uarch_index_map = linux_cpu_to_uarch_index_map;

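	/* Full memory barrier: publish the structures above before the initialized flag becomes visible */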
	__sync_synchronize();

	cpuinfo_is_initialized = true;

	processors = NULL;
	cores = NULL;
	clusters = NULL;
	uarchs = NULL;
	l1i = l1d = l2 = l3 = NULL;
	linux_cpu_to_processor_map = NULL;
	linux_cpu_to_core_map = NULL;
	linux_cpu_to_uarch_index_map = NULL;

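	/*
	 * On success all pointers except arm_linux_processors were reset to NULL
	 * above, so the frees below are no-ops; on failure they release whatever
	 * was allocated before the error.
	 */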
cleanup:
	free(arm_linux_processors);
	free(processors);
	free(cores);
	free(clusters);
	free(uarchs);
	free(l1i);
	free(l1d);
	free(l2);
	free(l3);
	free(linux_cpu_to_processor_map);
	free(linux_cpu_to_core_map);
	free(linux_cpu_to_uarch_index_map);
}