1 #include <stdint.h>
2 #include <stddef.h>
3 #include <stdlib.h>
4 #include <string.h>
5
6 #include <cpuinfo.h>
7 #include <arm/linux/api.h>
8 #if defined(__ANDROID__)
9 #include <arm/android/api.h>
10 #endif
11 #include <arm/api.h>
12 #include <arm/midr.h>
13 #include <linux/api.h>
14 #include <cpuinfo/internal-api.h>
15 #include <cpuinfo/log.h>
16 #include <cpuinfo/common.h>
17
18
19 #define CLUSTERS_MAX 3
20
/* Returns true if every bit set in mask is also set in bitfield. */
static inline bool bitmask_all(uint32_t bitfield, uint32_t mask) {
	/* mask bits absent from bitfield show up in (~bitfield & mask) */
	return !(~bitfield & mask);
}
24
/* Description of core clusters configuration in a chipset (identified by series and model number) */
struct cluster_config {
	/* Number of cores (logical processors) */
	uint8_t cores;
	/* ARM chipset series (see cpuinfo_arm_chipset_series enum) */
	uint8_t series;
	/* Chipset model number (see cpuinfo_arm_chipset struct) */
	uint16_t model;
	/* Number of heterogenous clusters in the CPU package */
	uint8_t clusters;
	/*
	 * Number of cores in each cluster:
	 * - Symmetric configurations: [0] = # cores
	 * - big.LITTLE configurations: [0] = # LITTLE cores, [1] = # big cores
	 * - Max.Med.Min configurations: [0] = # Min cores, [1] = # Med cores, [2] = # Max cores
	 */
	uint8_t cluster_cores[CLUSTERS_MAX];
	/*
	 * MIDR of cores in each cluster:
	 * - Symmetric configurations: [0] = core MIDR
	 * - big.LITTLE configurations: [0] = LITTLE core MIDR, [1] = big core MIDR
	 * - Max.Med.Min configurations: [0] = Min core MIDR, [1] = Med core MIDR, [2] = Max core MIDR
	 */
	uint32_t cluster_midr[CLUSTERS_MAX];
};
50
51 /*
 * The list of chipsets where MIDR may not be unambiguously decoded at least on some devices.
 * The typical reasons for impossibility to decode MIDRs are buggy kernels, which either do not report all MIDR
54 * information (e.g. on ATM7029 kernel doesn't report CPU Part), or chipsets have more than one type of cores
55 * (i.e. 4x Cortex-A53 + 4x Cortex-A53 is out) and buggy kernels report MIDR information only about some cores
56 * in /proc/cpuinfo (either only online cores, or only the core that reads /proc/cpuinfo). On these kernels/chipsets,
57 * it is not possible to detect all core types by just parsing /proc/cpuinfo, so we use chipset name and this table to
58 * find their MIDR (and thus microarchitecture, cache, etc).
59 *
60 * Note: not all chipsets with heterogeneous multiprocessing need an entry in this table. The following HMP
61 * chipsets always list information about all cores in /proc/cpuinfo:
62 *
63 * - Snapdragon 660
64 * - Snapdragon 820 (MSM8996)
65 * - Snapdragon 821 (MSM8996PRO)
66 * - Snapdragon 835 (MSM8998)
67 * - Exynos 8895
68 * - Kirin 960
69 *
70 * As these are all new processors, there is hope that this table won't uncontrollably grow over time.
71 */
/* Tabulated cluster configurations, keyed by (series, model). MIDR values are full 32-bit hex encodings. */
static const struct cluster_config cluster_configs[] = {
#if CPUINFO_ARCH_ARM
	{
		/*
		 * MSM8916 (Snapdragon 410): 4x Cortex-A53
		 * Some AArch32 phones use non-standard /proc/cpuinfo format.
		 */
		.cores = 4,
		.series = cpuinfo_arm_chipset_series_qualcomm_msm,
		.model = UINT16_C(8916),
		.clusters = 1,
		.cluster_cores = {
			[0] = 4,
		},
		.cluster_midr = {
			[0] = UINT32_C(0x410FD030),
		},
	},
	{
		/*
		 * MSM8939 (Snapdragon 615): 4x Cortex-A53 + 4x Cortex-A53
		 * Some AArch32 phones use non-standard /proc/cpuinfo format.
		 */
		.cores = 8,
		.series = cpuinfo_arm_chipset_series_qualcomm_msm,
		.model = UINT16_C(8939),
		.clusters = 2,
		.cluster_cores = {
			[0] = 4,
			[1] = 4,
		},
		.cluster_midr = {
			[0] = UINT32_C(0x410FD034),
			[1] = UINT32_C(0x410FD034),
		},
	},
#endif
	{
		/* MSM8956 (Snapdragon 650): 2x Cortex-A72 + 4x Cortex-A53 */
		.cores = 6,
		.series = cpuinfo_arm_chipset_series_qualcomm_msm,
		.model = UINT16_C(8956),
		.clusters = 2,
		.cluster_cores = {
			[0] = 4,
			[1] = 2,
		},
		.cluster_midr = {
			[0] = UINT32_C(0x410FD034),
			[1] = UINT32_C(0x410FD080),
		},
	},
	{
		/* MSM8976/MSM8976PRO (Snapdragon 652/653): 4x Cortex-A72 + 4x Cortex-A53 */
		.cores = 8,
		.series = cpuinfo_arm_chipset_series_qualcomm_msm,
		.model = UINT16_C(8976),
		.clusters = 2,
		.cluster_cores = {
			[0] = 4,
			[1] = 4,
		},
		.cluster_midr = {
			[0] = UINT32_C(0x410FD034),
			[1] = UINT32_C(0x410FD080),
		},
	},
	{
		/* MSM8992 (Snapdragon 808): 2x Cortex-A57 + 4x Cortex-A53 */
		.cores = 6,
		.series = cpuinfo_arm_chipset_series_qualcomm_msm,
		.model = UINT16_C(8992),
		.clusters = 2,
		.cluster_cores = {
			[0] = 4,
			[1] = 2,
		},
		.cluster_midr = {
			[0] = UINT32_C(0x410FD033),
			[1] = UINT32_C(0x411FD072),
		},
	},
	{
		/* MSM8994/MSM8994V (Snapdragon 810): 4x Cortex-A57 + 4x Cortex-A53 */
		.cores = 8,
		.series = cpuinfo_arm_chipset_series_qualcomm_msm,
		.model = UINT16_C(8994),
		.clusters = 2,
		.cluster_cores = {
			[0] = 4,
			[1] = 4,
		},
		.cluster_midr = {
			[0] = UINT32_C(0x410FD032),
			[1] = UINT32_C(0x411FD071),
		},
	},
#if CPUINFO_ARCH_ARM
	{
		/* Exynos 5422: 4x Cortex-A15 + 4x Cortex-A7 */
		.cores = 8,
		.series = cpuinfo_arm_chipset_series_samsung_exynos,
		.model = UINT16_C(5422),
		.clusters = 2,
		.cluster_cores = {
			[0] = 4,
			[1] = 4,
		},
		.cluster_midr = {
			[0] = UINT32_C(0x410FC073),
			[1] = UINT32_C(0x412FC0F3),
		},
	},
	{
		/* Exynos 5430: 4x Cortex-A15 + 4x Cortex-A7 */
		.cores = 8,
		.series = cpuinfo_arm_chipset_series_samsung_exynos,
		.model = UINT16_C(5430),
		.clusters = 2,
		.cluster_cores = {
			[0] = 4,
			[1] = 4,
		},
		.cluster_midr = {
			[0] = UINT32_C(0x410FC074),
			[1] = UINT32_C(0x413FC0F3),
		},
	},
#endif /* CPUINFO_ARCH_ARM */
	{
		/* Exynos 5433: 4x Cortex-A57 + 4x Cortex-A53 */
		.cores = 8,
		.series = cpuinfo_arm_chipset_series_samsung_exynos,
		.model = UINT16_C(5433),
		.clusters = 2,
		.cluster_cores = {
			[0] = 4,
			[1] = 4,
		},
		.cluster_midr = {
			[0] = UINT32_C(0x410FD031),
			[1] = UINT32_C(0x411FD070),
		},
	},
	{
		/* Exynos 7420: 4x Cortex-A57 + 4x Cortex-A53 */
		.cores = 8,
		.series = cpuinfo_arm_chipset_series_samsung_exynos,
		.model = UINT16_C(7420),
		.clusters = 2,
		.cluster_cores = {
			[0] = 4,
			[1] = 4,
		},
		.cluster_midr = {
			[0] = UINT32_C(0x410FD032),
			[1] = UINT32_C(0x411FD070),
		},
	},
	{
		/* Exynos 8890: 4x Exynos M1 + 4x Cortex-A53 */
		.cores = 8,
		.series = cpuinfo_arm_chipset_series_samsung_exynos,
		.model = UINT16_C(8890),
		.clusters = 2,
		.cluster_cores = {
			[0] = 4,
			[1] = 4,
		},
		.cluster_midr = {
			[0] = UINT32_C(0x410FD034),
			[1] = UINT32_C(0x531F0011),
		},
	},
#if CPUINFO_ARCH_ARM
	{
		/* Kirin 920: 4x Cortex-A15 + 4x Cortex-A7 */
		.cores = 8,
		.series = cpuinfo_arm_chipset_series_hisilicon_kirin,
		.model = UINT16_C(920),
		.clusters = 2,
		.cluster_cores = {
			[0] = 4,
			[1] = 4,
		},
		.cluster_midr = {
			[0] = UINT32_C(0x410FC075),
			[1] = UINT32_C(0x413FC0F3),
		},
	},
	{
		/* Kirin 925: 4x Cortex-A15 + 4x Cortex-A7 */
		.cores = 8,
		.series = cpuinfo_arm_chipset_series_hisilicon_kirin,
		.model = UINT16_C(925),
		.clusters = 2,
		.cluster_cores = {
			[0] = 4,
			[1] = 4,
		},
		.cluster_midr = {
			[0] = UINT32_C(0x410FC075),
			[1] = UINT32_C(0x413FC0F3),
		},
	},
	{
		/* Kirin 928: 4x Cortex-A15 + 4x Cortex-A7 */
		.cores = 8,
		.series = cpuinfo_arm_chipset_series_hisilicon_kirin,
		.model = UINT16_C(928),
		.clusters = 2,
		.cluster_cores = {
			[0] = 4,
			[1] = 4,
		},
		.cluster_midr = {
			[0] = UINT32_C(0x410FC075),
			[1] = UINT32_C(0x413FC0F3),
		},
	},
#endif /* CPUINFO_ARCH_ARM */
	{
		/* Kirin 950: 4x Cortex-A72 + 4x Cortex-A53 */
		.cores = 8,
		.series = cpuinfo_arm_chipset_series_hisilicon_kirin,
		.model = UINT16_C(950),
		.clusters = 2,
		.cluster_cores = {
			[0] = 4,
			[1] = 4,
		},
		.cluster_midr = {
			[0] = UINT32_C(0x410FD034),
			[1] = UINT32_C(0x410FD080),
		},
	},
	{
		/* Kirin 955: 4x Cortex-A72 + 4x Cortex-A53 */
		.cores = 8,
		.series = cpuinfo_arm_chipset_series_hisilicon_kirin,
		.model = UINT16_C(955),
		.clusters = 2,
		.cluster_cores = {
			[0] = 4,
			[1] = 4,
		},
		.cluster_midr = {
			[0] = UINT32_C(0x410FD034),
			[1] = UINT32_C(0x410FD080),
		},
	},
#if CPUINFO_ARCH_ARM
	{
		/* MediaTek MT8135: 2x Cortex-A7 + 2x Cortex-A15 */
		.cores = 4,
		.series = cpuinfo_arm_chipset_series_mediatek_mt,
		.model = UINT16_C(8135),
		.clusters = 2,
		.cluster_cores = {
			[0] = 2,
			[1] = 2,
		},
		.cluster_midr = {
			[0] = UINT32_C(0x410FC073),
			[1] = UINT32_C(0x413FC0F2),
		},
	},
#endif
	{
		/* MediaTek MT8173: 2x Cortex-A72 + 2x Cortex-A53 */
		.cores = 4,
		.series = cpuinfo_arm_chipset_series_mediatek_mt,
		.model = UINT16_C(8173),
		.clusters = 2,
		.cluster_cores = {
			[0] = 2,
			[1] = 2,
		},
		.cluster_midr = {
			[0] = UINT32_C(0x410FD032),
			[1] = UINT32_C(0x410FD080),
		},
	},
	{
		/* MediaTek MT8176: 2x Cortex-A72 + 4x Cortex-A53 */
		.cores = 6,
		.series = cpuinfo_arm_chipset_series_mediatek_mt,
		.model = UINT16_C(8176),
		.clusters = 2,
		.cluster_cores = {
			[0] = 4,
			[1] = 2,
		},
		.cluster_midr = {
			[0] = UINT32_C(0x410FD032),
			[1] = UINT32_C(0x410FD080),
		},
	},
#if CPUINFO_ARCH_ARM64
	{
		/*
		 * MediaTek MT8735: 4x Cortex-A53
		 * Some AArch64 phones use non-standard /proc/cpuinfo format.
		 */
		.cores = 4,
		.series = cpuinfo_arm_chipset_series_mediatek_mt,
		.model = UINT16_C(8735),
		.clusters = 1,
		.cluster_cores = {
			[0] = 4,
		},
		.cluster_midr = {
			[0] = UINT32_C(0x410FD034),
		},
	},
#endif
#if CPUINFO_ARCH_ARM
	{
		/*
		 * MediaTek MT6592: 4x Cortex-A7 + 4x Cortex-A7
		 * Some phones use non-standard /proc/cpuinfo format.
		 */
		.cores = 4,
		.series = cpuinfo_arm_chipset_series_mediatek_mt,
		.model = UINT16_C(6592),
		.clusters = 2,
		.cluster_cores = {
			[0] = 4,
			[1] = 4,
		},
		.cluster_midr = {
			[0] = UINT32_C(0x410FC074),
			[1] = UINT32_C(0x410FC074),
		},
	},
	{
		/* MediaTek MT6595: 4x Cortex-A17 + 4x Cortex-A7 */
		.cores = 8,
		.series = cpuinfo_arm_chipset_series_mediatek_mt,
		.model = UINT16_C(6595),
		.clusters = 2,
		.cluster_cores = {
			[0] = 4,
			[1] = 4,
		},
		.cluster_midr = {
			[0] = UINT32_C(0x410FC075),
			[1] = UINT32_C(0x410FC0E0),
		},
	},
#endif
	{
		/* MediaTek MT6797: 2x Cortex-A72 + 4x Cortex-A53 + 4x Cortex-A53 */
		.cores = 10,
		.series = cpuinfo_arm_chipset_series_mediatek_mt,
		.model = UINT16_C(6797),
		.clusters = 3,
		.cluster_cores = {
			[0] = 4,
			[1] = 4,
			[2] = 2,
		},
		.cluster_midr = {
			[0] = UINT32_C(0x410FD034),
			[1] = UINT32_C(0x410FD034),
			[2] = UINT32_C(0x410FD081),
		},
	},
	{
		/* MediaTek MT6799: 2x Cortex-A73 + 4x Cortex-A53 + 4x Cortex-A35 */
		.cores = 10,
		.series = cpuinfo_arm_chipset_series_mediatek_mt,
		.model = UINT16_C(6799),
		.clusters = 3,
		.cluster_cores = {
			[0] = 4,
			[1] = 4,
			[2] = 2,
		},
		.cluster_midr = {
			[0] = UINT32_C(0x410FD041),
			[1] = UINT32_C(0x410FD034),
			[2] = UINT32_C(0x410FD092),
		},
	},
	{
		/* Rockchip RK3399: 2x Cortex-A72 + 4x Cortex-A53 */
		.cores = 6,
		.series = cpuinfo_arm_chipset_series_rockchip_rk,
		.model = UINT16_C(3399),
		.clusters = 2,
		.cluster_cores = {
			[0] = 4,
			[1] = 2,
		},
		.cluster_midr = {
			[0] = UINT32_C(0x410FD034),
			[1] = UINT32_C(0x410FD082),
		},
	},
#if CPUINFO_ARCH_ARM
	{
		/* Actions ATM8029: 4x Cortex-A5
		 * Most devices use non-standard /proc/cpuinfo format.
		 * NOTE(review): comment says ATM8029, but .model is 7029 and the file-level
		 * comment above refers to ATM7029 — confirm the intended chipset name.
		 */
		.cores = 4,
		.series = cpuinfo_arm_chipset_series_actions_atm,
		.model = UINT16_C(7029),
		.clusters = 1,
		.cluster_cores = {
			[0] = 4,
		},
		.cluster_midr = {
			[0] = UINT32_C(0x410FC051),
		},
	},
#endif
};
490
491 /*
492 * Searches chipset name in mapping of chipset name to cores' MIDR values. If match is successful, initializes MIDR
493 * for all clusters' leaders with tabulated values.
494 *
495 * @param[in] chipset - chipset (SoC) name information.
496 * @param clusters_count - number of CPU core clusters detected in the SoC.
497 * @param cluster_leaders - indices of core clusters' leaders in the @p processors array.
498 * @param processors_count - number of usable logical processors in the system.
499 * @param[in,out] processors - array of logical processor descriptions with pre-parsed MIDR, maximum frequency,
500 * and decoded core cluster (package_leader_id) information.
501 * Upon successful return, processors[i].midr for all clusters' leaders contains the
502 * tabulated MIDR values.
 * @param verify_midr - indicates whether the function should check that the MIDR values to be assigned to leaders of
 *                      core clusters are consistent with known parts of their parsed values.
 *                      Set it to false if the only MIDR value parsed from /proc/cpuinfo is for the last processor
 *                      reported in /proc/cpuinfo and thus can't be unambiguously attributed to that processor.
507 *
508 * @retval true if the chipset was found in the mapping and core clusters' leaders initialized with MIDR values.
509 * @retval false if the chipset was not found in the mapping, or any consistency check failed.
510 */
cpuinfo_arm_linux_detect_cluster_midr_by_chipset(const struct cpuinfo_arm_chipset chipset[restrict static1],uint32_t clusters_count,const uint32_t cluster_leaders[restrict static CLUSTERS_MAX],uint32_t processors_count,struct cpuinfo_arm_linux_processor processors[restrict static processors_count],bool verify_midr)511 static bool cpuinfo_arm_linux_detect_cluster_midr_by_chipset(
512 const struct cpuinfo_arm_chipset chipset[restrict static 1],
513 uint32_t clusters_count,
514 const uint32_t cluster_leaders[restrict static CLUSTERS_MAX],
515 uint32_t processors_count,
516 struct cpuinfo_arm_linux_processor processors[restrict static processors_count],
517 bool verify_midr)
518 {
519 if (clusters_count <= CLUSTERS_MAX) {
520 for (uint32_t c = 0; c < CPUINFO_COUNT_OF(cluster_configs); c++) {
521 if (cluster_configs[c].model == chipset->model && cluster_configs[c].series == chipset->series) {
522 /* Verify that the total number of cores and clusters of cores matches expectation */
523 if (cluster_configs[c].cores != processors_count || cluster_configs[c].clusters != clusters_count) {
524 return false;
525 }
526
527 /* Verify that core cluster configuration matches expectation */
528 for (uint32_t cluster = 0; cluster < clusters_count; cluster++) {
529 const uint32_t cluster_leader = cluster_leaders[cluster];
530 if (cluster_configs[c].cluster_cores[cluster] != processors[cluster_leader].package_processor_count) {
531 return false;
532 }
533 }
534
535 if (verify_midr) {
536 /* Verify known parts of MIDR */
537 for (uint32_t cluster = 0; cluster < clusters_count; cluster++) {
538 const uint32_t cluster_leader = cluster_leaders[cluster];
539
540 /* Create a mask of known midr bits */
541 uint32_t midr_mask = 0;
542 if (processors[cluster_leader].flags & CPUINFO_ARM_LINUX_VALID_IMPLEMENTER) {
543 midr_mask |= CPUINFO_ARM_MIDR_IMPLEMENTER_MASK;
544 }
545 if (processors[cluster_leader].flags & CPUINFO_ARM_LINUX_VALID_VARIANT) {
546 midr_mask |= CPUINFO_ARM_MIDR_VARIANT_MASK;
547 }
548 if (processors[cluster_leader].flags & CPUINFO_ARM_LINUX_VALID_PART) {
549 midr_mask |= CPUINFO_ARM_MIDR_PART_MASK;
550 }
551 if (processors[cluster_leader].flags & CPUINFO_ARM_LINUX_VALID_REVISION) {
552 midr_mask |= CPUINFO_ARM_MIDR_REVISION_MASK;
553 }
554
555 /* Verify the bits under the mask */
556 if ((processors[cluster_leader].midr ^ cluster_configs[c].cluster_midr[cluster]) & midr_mask) {
557 cpuinfo_log_debug("parsed MIDR of cluster %08"PRIu32" does not match tabulated value %08"PRIu32,
558 processors[cluster_leader].midr, cluster_configs[c].cluster_midr[cluster]);
559 return false;
560 }
561 }
562 }
563
564 /* Assign MIDRs according to tabulated configurations */
565 for (uint32_t cluster = 0; cluster < clusters_count; cluster++) {
566 const uint32_t cluster_leader = cluster_leaders[cluster];
567 processors[cluster_leader].midr = cluster_configs[c].cluster_midr[cluster];
568 processors[cluster_leader].flags |= CPUINFO_ARM_LINUX_VALID_MIDR;
569 cpuinfo_log_debug("cluster %"PRIu32" MIDR = 0x%08"PRIx32, cluster, cluster_configs[c].cluster_midr[cluster]);
570 }
571 return true;
572 }
573 }
574 }
575 return false;
576 }
577
578 /*
579 * Initializes MIDR for leaders of core clusters using a heuristic for big.LITTLE systems:
580 * - If the only known MIDR is for the big core cluster, guess the matching MIDR for the LITTLE cluster.
581 * - Estimate which of the clusters is big using maximum frequency, if known, otherwise using system processor ID.
582 * - Initialize the MIDR for big and LITTLE core clusters using the guesstimates values.
583 *
584 * @param clusters_count - number of CPU core clusters detected in the SoC.
585 * @param cluster_with_midr_count - number of CPU core clusters in the SoC with known MIDR values.
586 * @param last_processor_with_midr - index of the last logical processor with known MIDR in the @p processors array.
587 * @param cluster_leaders - indices of core clusters' leaders in the @p processors array.
588 * @param[in,out] processors - array of logical processor descriptions with pre-parsed MIDR, maximum frequency,
589 * and decoded core cluster (package_leader_id) information.
590 * Upon successful return, processors[i].midr for all core clusters' leaders contains
591 * the heuristically detected MIDR value.
 * @param verify_midr - indicates whether the function should check that the MIDR values to be assigned to leaders of
 *                      core clusters are consistent with known parts of their parsed values.
 *                      Set it to false if the only MIDR value parsed from /proc/cpuinfo is for the last processor
 *                      reported in /proc/cpuinfo and thus can't be unambiguously attributed to that processor.
596 *
597 * @retval true if this is a big.LITTLE system with only one known MIDR and the CPU core clusters' leaders were
598 * initialized with MIDR values.
599 * @retval false if this is not a big.LITTLE system.
600 */
cpuinfo_arm_linux_detect_cluster_midr_by_big_little_heuristic(uint32_t clusters_count,uint32_t cluster_with_midr_count,uint32_t last_processor_with_midr,const uint32_t cluster_leaders[restrict static CLUSTERS_MAX],struct cpuinfo_arm_linux_processor processors[restrict static last_processor_with_midr],bool verify_midr)601 static bool cpuinfo_arm_linux_detect_cluster_midr_by_big_little_heuristic(
602 uint32_t clusters_count,
603 uint32_t cluster_with_midr_count,
604 uint32_t last_processor_with_midr,
605 const uint32_t cluster_leaders[restrict static CLUSTERS_MAX],
606 struct cpuinfo_arm_linux_processor processors[restrict static last_processor_with_midr],
607 bool verify_midr)
608 {
609 if (clusters_count != 2 || cluster_with_midr_count != 1) {
610 /* Not a big.LITTLE system, or MIDR is known for both/neither clusters */
611 return false;
612 }
613
614 const uint32_t midr_flags =
615 (processors[processors[last_processor_with_midr].package_leader_id].flags & CPUINFO_ARM_LINUX_VALID_MIDR);
616 const uint32_t big_midr = processors[processors[last_processor_with_midr].package_leader_id].midr;
617 const uint32_t little_midr = midr_little_core_for_big(big_midr);
618
619 /* Default assumption: the first reported cluster is LITTLE cluster (this holds on most Linux kernels) */
620 uint32_t little_cluster_leader = cluster_leaders[0];
621 const uint32_t other_cluster_leader = cluster_leaders[1];
622 /* If maximum frequency is known for both clusters, assume LITTLE cluster is the one with lower frequency */
623 if (processors[little_cluster_leader].flags & processors[other_cluster_leader].flags & CPUINFO_LINUX_FLAG_MAX_FREQUENCY) {
624 if (processors[little_cluster_leader].max_frequency > processors[other_cluster_leader].max_frequency) {
625 little_cluster_leader = other_cluster_leader;
626 }
627 }
628
629 if (verify_midr) {
630 /* Verify known parts of MIDR */
631 for (uint32_t cluster = 0; cluster < clusters_count; cluster++) {
632 const uint32_t cluster_leader = cluster_leaders[cluster];
633
634 /* Create a mask of known midr bits */
635 uint32_t midr_mask = 0;
636 if (processors[cluster_leader].flags & CPUINFO_ARM_LINUX_VALID_IMPLEMENTER) {
637 midr_mask |= CPUINFO_ARM_MIDR_IMPLEMENTER_MASK;
638 }
639 if (processors[cluster_leader].flags & CPUINFO_ARM_LINUX_VALID_VARIANT) {
640 midr_mask |= CPUINFO_ARM_MIDR_VARIANT_MASK;
641 }
642 if (processors[cluster_leader].flags & CPUINFO_ARM_LINUX_VALID_PART) {
643 midr_mask |= CPUINFO_ARM_MIDR_PART_MASK;
644 }
645 if (processors[cluster_leader].flags & CPUINFO_ARM_LINUX_VALID_REVISION) {
646 midr_mask |= CPUINFO_ARM_MIDR_REVISION_MASK;
647 }
648
649 /* Verify the bits under the mask */
650 const uint32_t midr = (cluster_leader == little_cluster_leader) ? little_midr : big_midr;
651 if ((processors[cluster_leader].midr ^ midr) & midr_mask) {
652 cpuinfo_log_debug(
653 "parsed MIDR %08"PRIu32" of cluster leader %"PRIu32" is inconsistent with expected value %08"PRIu32,
654 processors[cluster_leader].midr, cluster_leader, midr);
655 return false;
656 }
657 }
658 }
659
660 for (uint32_t c = 0; c < clusters_count; c++) {
661 /* Skip cluster with already assigned MIDR */
662 const uint32_t cluster_leader = cluster_leaders[c];
663 if (bitmask_all(processors[cluster_leader].flags, CPUINFO_ARM_LINUX_VALID_MIDR)) {
664 continue;
665 }
666
667 const uint32_t midr = (cluster_leader == little_cluster_leader) ? little_midr : big_midr;
668 cpuinfo_log_info("assume processor %"PRIu32" to have MIDR %08"PRIx32, cluster_leader, midr);
669 /* To be consistent, we copy the MIDR entirely, rather than by parts */
670 processors[cluster_leader].midr = midr;
671 processors[cluster_leader].flags |= midr_flags;
672 }
673 return true;
674 }
675
676 /*
677 * Initializes MIDR for leaders of core clusters in a single sequential scan:
 * - Clusters preceding the first reported MIDR value are assumed to have default MIDR value.
 * - Clusters following any reported MIDR value to have that MIDR value.
 *
 * @param default_midr - MIDR value that will be assigned to cluster leaders preceding any reported MIDR value.
682 * @param processors_count - number of logical processor descriptions in the @p processors array.
683 * @param[in,out] processors - array of logical processor descriptions with pre-parsed MIDR, maximum frequency,
684 * and decoded core cluster (package_leader_id) information.
685 * Upon successful return, processors[i].midr for all core clusters' leaders contains
686 * the assigned MIDR value.
687 */
cpuinfo_arm_linux_detect_cluster_midr_by_sequential_scan(uint32_t default_midr,uint32_t processors_count,struct cpuinfo_arm_linux_processor processors[restrict static processors_count])688 static void cpuinfo_arm_linux_detect_cluster_midr_by_sequential_scan(
689 uint32_t default_midr,
690 uint32_t processors_count,
691 struct cpuinfo_arm_linux_processor processors[restrict static processors_count])
692 {
693 uint32_t midr = default_midr;
694 for (uint32_t i = 0; i < processors_count; i++) {
695 if (bitmask_all(processors[i].flags, CPUINFO_LINUX_FLAG_VALID)) {
696 if (processors[i].package_leader_id == i) {
697 if (bitmask_all(processors[i].flags, CPUINFO_ARM_LINUX_VALID_MIDR)) {
698 midr = processors[i].midr;
699 } else {
700 cpuinfo_log_info("assume processor %"PRIu32" to have MIDR %08"PRIx32, i, midr);
701 /* To be consistent, we copy the MIDR entirely, rather than by parts */
702 processors[i].midr = midr;
703 processors[i].flags |= CPUINFO_ARM_LINUX_VALID_MIDR;
704 }
705 }
706 }
707 }
708 }
709
710 /*
711 * Detects MIDR of each CPU core clusters' leader.
712 *
713 * @param[in] chipset - chipset (SoC) name information.
714 * @param max_processors - number of processor descriptions in the @p processors array.
715 * @param usable_processors - number of processor descriptions in the @p processors array with both POSSIBLE and
716 * PRESENT flags.
717 * @param[in,out] processors - array of logical processor descriptions with pre-parsed MIDR, maximum frequency,
718 * and decoded core cluster (package_leader_id) information.
719 * Upon return, processors[i].midr for all clusters' leaders contains the MIDR value.
720 *
721 * @returns The number of core clusters
722 */
uint32_t cpuinfo_arm_linux_detect_cluster_midr(
	const struct cpuinfo_arm_chipset chipset[restrict static 1],
	uint32_t max_processors,
	uint32_t usable_processors,
	struct cpuinfo_arm_linux_processor processors[restrict static max_processors])
{
	uint32_t clusters_count = 0;
	/* Indices of the first (leader) processor of each cluster; only the first CLUSTERS_MAX are recorded */
	uint32_t cluster_leaders[CLUSTERS_MAX];
	/* Both "last" indices start at max_processors, i.e. an out-of-range sentinel meaning "none found" */
	uint32_t last_processor_in_cpuinfo = max_processors;
	uint32_t last_processor_with_midr = max_processors;
	uint32_t processors_with_midr_count = 0;
	/* First pass: count clusters, record their leaders, and fold per-core info into each cluster leader */
	for (uint32_t i = 0; i < max_processors; i++) {
		if (bitmask_all(processors[i].flags, CPUINFO_LINUX_FLAG_VALID)) {
			if (processors[i].flags & CPUINFO_ARM_LINUX_VALID_PROCESSOR) {
				last_processor_in_cpuinfo = i;
			}
			/* Implementer + Part are the minimum MIDR information needed to identify a core */
			if (bitmask_all(processors[i].flags, CPUINFO_ARM_LINUX_VALID_IMPLEMENTER | CPUINFO_ARM_LINUX_VALID_PART)) {
				last_processor_with_midr = i;
				processors_with_midr_count += 1;
			}
			const uint32_t group_leader = processors[i].package_leader_id;
			if (group_leader == i) {
				/* This processor leads its own cluster */
				if (clusters_count < CLUSTERS_MAX) {
					cluster_leaders[clusters_count] = i;
				}
				clusters_count += 1;
			} else {
				/* Copy known bits of information to cluster leader */

				/* Max frequency known for this core but not for its leader: propagate it */
				if ((processors[i].flags & ~processors[group_leader].flags) & CPUINFO_LINUX_FLAG_MAX_FREQUENCY) {
					processors[group_leader].max_frequency = processors[i].max_frequency;
					processors[group_leader].flags |= CPUINFO_LINUX_FLAG_MAX_FREQUENCY;
				}
				/* Full MIDR known for this core but not for its leader: propagate it */
				if (!bitmask_all(processors[group_leader].flags, CPUINFO_ARM_LINUX_VALID_MIDR) &&
					bitmask_all(processors[i].flags, CPUINFO_ARM_LINUX_VALID_MIDR))
				{
					processors[group_leader].midr = processors[i].midr;
					processors[group_leader].flags |= CPUINFO_ARM_LINUX_VALID_MIDR;
				}
			}
		}
	}
	cpuinfo_log_debug("detected %"PRIu32" core clusters", clusters_count);

	/*
	 * Two relations between reported /proc/cpuinfo information, and cores is possible:
	 * - /proc/cpuinfo reports information for all or some of the cores below the corresponding
	 *   "processor : <number>" lines. Information on offline cores may be missing.
	 * - /proc/cpuinfo reports information only once, after all "processor : <number>" lines.
	 *   The reported information may relate to processor #0 or to the processor which
	 *   executed the system calls to read /proc/cpuinfo. It is also indistinguishable
	 *   from /proc/cpuinfo reporting information only for the last core (e.g. if all other
	 *   cores are offline).
	 *
	 * We detect the second case by checking if /proc/cpuinfo contains valid MIDR only for one,
	 * last reported, processor. Note, that the last reported core may be not the last
	 * present & possible processor, as /proc/cpuinfo may non-report high-index offline cores.
	 */
	if (processors_with_midr_count == 1 && last_processor_in_cpuinfo == last_processor_with_midr && clusters_count > 1) {
		/*
		 * There are multiple core clusters, but /proc/cpuinfo reported MIDR only for one
		 * processor, and we don't even know which logical processor this information refers to.
		 *
		 * We make three attempts to detect MIDR for all clusters:
		 * 1. Search tabulated MIDR values for chipsets which have heterogeneous clusters and ship with Linux
		 *    kernels which do not always report all cores in /proc/cpuinfo. If found, use the tabulated values.
		 * 2. For systems with 2 clusters and MIDR known for one cluster, assume big.LITTLE configuration,
		 *    and estimate MIDR for the other cluster under assumption that MIDR for the big cluster is known.
		 * 3. Initialize MIDRs for all core clusters to the only parsed MIDR value.
		 */
		cpuinfo_log_debug("the only reported MIDR can not be attributed to a particular processor");

		/* verify_midr = false: the single parsed MIDR can't be attributed to a specific processor */
		if (cpuinfo_arm_linux_detect_cluster_midr_by_chipset(
				chipset, clusters_count, cluster_leaders, usable_processors, processors, false))
		{
			return clusters_count;
		}

		/* Try big.LITTLE heuristic */
		if (cpuinfo_arm_linux_detect_cluster_midr_by_big_little_heuristic(
				clusters_count, 1, last_processor_with_midr,
				cluster_leaders, processors, false))
		{
			return clusters_count;
		}

		/* Fall back to sequential initialization of MIDR values for core clusters */
		cpuinfo_arm_linux_detect_cluster_midr_by_sequential_scan(
			processors[processors[last_processor_with_midr].package_leader_id].midr,
			max_processors, processors);
	} else if (processors_with_midr_count < usable_processors) {
		/*
		 * /proc/cpuinfo reported MIDR only for some processors, and probably some core clusters do not have MIDR
		 * for any of the cores. Check if this is the case.
		 */
		uint32_t clusters_with_midr_count = 0;
		for (uint32_t i = 0; i < max_processors; i++) {
			if (bitmask_all(processors[i].flags, CPUINFO_LINUX_FLAG_VALID | CPUINFO_ARM_LINUX_VALID_MIDR)) {
				if (processors[i].package_leader_id == i) {
					clusters_with_midr_count += 1;
				}
			}
		}

		if (clusters_with_midr_count < clusters_count) {
			/*
			 * /proc/cpuinfo reported MIDR only for some clusters, need to reconstruct others.
			 * We make three attempts to detect MIDR for clusters without it:
			 * 1. Search tabulated MIDR values for chipsets which have heterogeneous clusters and ship with Linux
			 *    kernels which do not always report all cores in /proc/cpuinfo. If found, use the tabulated values.
			 * 2. For systems with 2 clusters and MIDR known for one cluster, assume big.LITTLE configuration,
			 *    and estimate MIDR for the other cluster under assumption that MIDR for the big cluster is known.
			 * 3. Initialize MIDRs for core clusters in a single sequential scan:
			 *    - Clusters preceding the first reported MIDR value are assumed to have the last reported MIDR value.
			 *    - Clusters following any reported MIDR value to have that MIDR value.
			 */

			/* verify_midr = true: parsed MIDRs are attributable, so check them against tabulated values */
			if (cpuinfo_arm_linux_detect_cluster_midr_by_chipset(
					chipset, clusters_count, cluster_leaders, usable_processors, processors, true))
			{
				return clusters_count;
			}

			if (last_processor_with_midr != max_processors) {
				/* Try big.LITTLE heuristic */
				if (cpuinfo_arm_linux_detect_cluster_midr_by_big_little_heuristic(
						clusters_count, processors_with_midr_count, last_processor_with_midr,
						cluster_leaders, processors, true))
				{
					return clusters_count;
				}

				/* Fall back to sequential initialization of MIDR values for core clusters */
				cpuinfo_arm_linux_detect_cluster_midr_by_sequential_scan(
					processors[processors[last_processor_with_midr].package_leader_id].midr,
					max_processors, processors);
			}
		}
	}
	return clusters_count;
}
864