#include <inttypes.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include <arm/linux/api.h>
#include <cpuinfo.h>
#if defined(__ANDROID__)
#include <arm/android/api.h>
#endif
#include <arm/api.h>
#include <arm/midr.h>
#include <cpuinfo/common.h>
#include <cpuinfo/internal-api.h>
#include <cpuinfo/log.h>
#include <linux/api.h>
17
18 #define CLUSTERS_MAX 3
19
/* Returns true iff every bit set in mask is also set in bitfield. */
static inline bool bitmask_all(uint32_t bitfield, uint32_t mask) {
	return (mask & ~bitfield) == 0;
}
23
24 /* Description of core clusters configuration in a chipset (identified by series
25 * and model number) */
/* Description of core clusters configuration in a chipset (identified by series
 * and model number) */
struct cluster_config {
	/* Total number of cores (logical processors) in the package */
	uint8_t cores;
	/* ARM chipset series (see cpuinfo_arm_chipset_series enum) */
	uint8_t series;
	/* Chipset model number (see cpuinfo_arm_chipset struct) */
	uint16_t model;
	/* Number of heterogeneous clusters in the CPU package */
	uint8_t clusters;
	/*
	 * Number of cores in each cluster:
	 * - Symmetric configurations: [0] = # cores
	 * - big.LITTLE configurations: [0] = # LITTLE cores, [1] = # big cores
	 * - Max.Med.Min configurations: [0] = # Min cores, [1] = # Med cores,
	 *   [2] = # Max cores
	 */
	uint8_t cluster_cores[CLUSTERS_MAX];
	/*
	 * MIDR of cores in each cluster:
	 * - Symmetric configurations: [0] = core MIDR
	 * - big.LITTLE configurations: [0] = LITTLE core MIDR, [1] = big core
	 *   MIDR
	 * - Max.Med.Min configurations: [0] = Min core MIDR, [1] = Med core
	 *   MIDR, [2] = Max core MIDR
	 */
	uint32_t cluster_midr[CLUSTERS_MAX];
};
53
54 /*
 * The list of chipsets where MIDR may not be unambiguously decoded at least on
 * some devices. The typical reasons why it is impossible to decode MIDRs are
57 * buggy kernels, which either do not report all MIDR information (e.g. on
58 * ATM7029 kernel doesn't report CPU Part), or chipsets have more than one type
59 * of cores (i.e. 4x Cortex-A53 + 4x Cortex-A53 is out) and buggy kernels report
60 * MIDR information only about some cores in /proc/cpuinfo (either only online
61 * cores, or only the core that reads /proc/cpuinfo). On these kernels/chipsets,
62 * it is not possible to detect all core types by just parsing /proc/cpuinfo, so
63 * we use chipset name and this table to find their MIDR (and thus
64 * microarchitecture, cache, etc).
65 *
66 * Note: not all chipsets with heterogeneous multiprocessing need an entry in
67 * this table. The following HMP chipsets always list information about all
68 * cores in /proc/cpuinfo:
69 *
70 * - Snapdragon 660
71 * - Snapdragon 820 (MSM8996)
72 * - Snapdragon 821 (MSM8996PRO)
73 * - Snapdragon 835 (MSM8998)
74 * - Exynos 8895
75 * - Kirin 960
76 *
77 * As these are all new processors, there is hope that this table won't
78 * uncontrollably grow over time.
79 */
80 static const struct cluster_config
81 cluster_configs[] =
82 {
83 #if CPUINFO_ARCH_ARM
84 {
85 /*
86 * MSM8916 (Snapdragon 410): 4x Cortex-A53
87 * Some AArch32 phones use non-standard /proc/cpuinfo format.
88 */
89 .cores = 4,
90 .series = cpuinfo_arm_chipset_series_qualcomm_msm,
91 .model = UINT16_C(8916),
92 .clusters = 1,
93 .cluster_cores =
94 {
95 [0] = 4,
96 },
97 .cluster_midr =
98 {
99 [0] = UINT32_C(0x410FD030),
100 },
101 },
102 {
103 /*
104 * MSM8939 (Snapdragon 615): 4x Cortex-A53 + 4x Cortex-A53
105 * Some AArch32 phones use non-standard /proc/cpuinfo format.
106 */
107 .cores = 8,
108 .series = cpuinfo_arm_chipset_series_qualcomm_msm,
109 .model = UINT16_C(8939),
110 .clusters = 2,
111 .cluster_cores =
112 {
113 [0] = 4,
114 [1] = 4,
115 },
116 .cluster_midr =
117 {
118 [0] = UINT32_C(0x410FD034),
119 [1] = UINT32_C(0x410FD034),
120 },
121 },
122 #endif
123 {
124 /* MSM8956 (Snapdragon 650): 2x Cortex-A72 + 4x Cortex-A53 */
125 .cores = 6,
126 .series = cpuinfo_arm_chipset_series_qualcomm_msm,
127 .model = UINT16_C(8956),
128 .clusters = 2,
129 .cluster_cores =
130 {
131 [0] = 4,
132 [1] = 2,
133 },
134 .cluster_midr =
135 {
136 [0] = UINT32_C(0x410FD034),
137 [1] = UINT32_C(0x410FD080),
138 },
139 },
140 {
141 /* MSM8976/MSM8976PRO (Snapdragon 652/653): 4x Cortex-A72 + 4x
142 Cortex-A53 */
143 .cores = 8,
144 .series = cpuinfo_arm_chipset_series_qualcomm_msm,
145 .model = UINT16_C(8976),
146 .clusters = 2,
147 .cluster_cores =
148 {
149 [0] = 4,
150 [1] = 4,
151 },
152 .cluster_midr =
153 {
154 [0] = UINT32_C(0x410FD034),
155 [1] = UINT32_C(0x410FD080),
156 },
157 },
158 {
159 /* MSM8992 (Snapdragon 808): 2x Cortex-A57 + 4x Cortex-A53 */
160 .cores = 6,
161 .series = cpuinfo_arm_chipset_series_qualcomm_msm,
162 .model = UINT16_C(8992),
163 .clusters = 2,
164 .cluster_cores =
165 {
166 [0] = 4,
167 [1] = 2,
168 },
169 .cluster_midr =
170 {
171 [0] = UINT32_C(0x410FD033),
172 [1] = UINT32_C(0x411FD072),
173 },
174 },
175 {
176 /* MSM8994/MSM8994V (Snapdragon 810): 4x Cortex-A57 + 4x
177 Cortex-A53 */
178 .cores = 8,
179 .series = cpuinfo_arm_chipset_series_qualcomm_msm,
180 .model = UINT16_C(8994),
181 .clusters = 2,
182 .cluster_cores =
183 {
184 [0] = 4,
185 [1] = 4,
186 },
187 .cluster_midr =
188 {
189 [0] = UINT32_C(0x410FD032),
190 [1] = UINT32_C(0x411FD071),
191 },
192 },
193 #if CPUINFO_ARCH_ARM
194 {
195 /* Exynos 5422: 4x Cortex-A15 + 4x Cortex-A7 */
196 .cores = 8,
197 .series = cpuinfo_arm_chipset_series_samsung_exynos,
198 .model = UINT16_C(5422),
199 .clusters = 2,
200 .cluster_cores =
201 {
202 [0] = 4,
203 [1] = 4,
204 },
205 .cluster_midr =
206 {
207 [0] = UINT32_C(0x410FC073),
208 [1] = UINT32_C(0x412FC0F3),
209 },
210 },
211 {
212 /* Exynos 5430: 4x Cortex-A15 + 4x Cortex-A7 */
213 .cores = 8,
214 .series = cpuinfo_arm_chipset_series_samsung_exynos,
215 .model = UINT16_C(5430),
216 .clusters = 2,
217 .cluster_cores =
218 {
219 [0] = 4,
220 [1] = 4,
221 },
222 .cluster_midr =
223 {
224 [0] = UINT32_C(0x410FC074),
225 [1] = UINT32_C(0x413FC0F3),
226 },
227 },
228 #endif /* CPUINFO_ARCH_ARM */
229 {
230 /* Exynos 5433: 4x Cortex-A57 + 4x Cortex-A53 */
231 .cores = 8,
232 .series = cpuinfo_arm_chipset_series_samsung_exynos,
233 .model = UINT16_C(5433),
234 .clusters = 2,
235 .cluster_cores =
236 {
237 [0] = 4,
238 [1] = 4,
239 },
240 .cluster_midr =
241 {
242 [0] = UINT32_C(0x410FD031),
243 [1] = UINT32_C(0x411FD070),
244 },
245 },
246 {
247 /* Exynos 7420: 4x Cortex-A57 + 4x Cortex-A53 */
248 .cores = 8,
249 .series = cpuinfo_arm_chipset_series_samsung_exynos,
250 .model = UINT16_C(7420),
251 .clusters = 2,
252 .cluster_cores =
253 {
254 [0] = 4,
255 [1] = 4,
256 },
257 .cluster_midr =
258 {
259 [0] = UINT32_C(0x410FD032),
260 [1] = UINT32_C(0x411FD070),
261 },
262 },
263 {
264 /* Exynos 8890: 4x Exynos M1 + 4x Cortex-A53 */
265 .cores = 8,
266 .series = cpuinfo_arm_chipset_series_samsung_exynos,
267 .model = UINT16_C(8890),
268 .clusters = 2,
269 .cluster_cores =
270 {
271 [0] = 4,
272 [1] = 4,
273 },
274 .cluster_midr =
275 {
276 [0] = UINT32_C(0x410FD034),
277 [1] = UINT32_C(0x531F0011),
278 },
279 },
280 #if CPUINFO_ARCH_ARM
281 {
282 /* Kirin 920: 4x Cortex-A15 + 4x Cortex-A7 */
283 .cores = 8,
284 .series = cpuinfo_arm_chipset_series_hisilicon_kirin,
285 .model = UINT16_C(920),
286 .clusters = 2,
287 .cluster_cores =
288 {
289 [0] = 4,
290 [1] = 4,
291 },
292 .cluster_midr =
293 {
294 [0] = UINT32_C(0x410FC075),
295 [1] = UINT32_C(0x413FC0F3),
296 },
297 },
298 {
299 /* Kirin 925: 4x Cortex-A15 + 4x Cortex-A7 */
300 .cores = 8,
301 .series = cpuinfo_arm_chipset_series_hisilicon_kirin,
302 .model = UINT16_C(925),
303 .clusters = 2,
304 .cluster_cores =
305 {
306 [0] = 4,
307 [1] = 4,
308 },
309 .cluster_midr =
310 {
311 [0] = UINT32_C(0x410FC075),
312 [1] = UINT32_C(0x413FC0F3),
313 },
314 },
315 {
316 /* Kirin 928: 4x Cortex-A15 + 4x Cortex-A7 */
317 .cores = 8,
318 .series = cpuinfo_arm_chipset_series_hisilicon_kirin,
319 .model = UINT16_C(928),
320 .clusters = 2,
321 .cluster_cores =
322 {
323 [0] = 4,
324 [1] = 4,
325 },
326 .cluster_midr =
327 {
328 [0] = UINT32_C(0x410FC075),
329 [1] = UINT32_C(0x413FC0F3),
330 },
331 },
332 #endif /* CPUINFO_ARCH_ARM */
333 {
334 /* Kirin 950: 4x Cortex-A72 + 4x Cortex-A53 */
335 .cores = 8,
336 .series = cpuinfo_arm_chipset_series_hisilicon_kirin,
337 .model = UINT16_C(950),
338 .clusters = 2,
339 .cluster_cores =
340 {
341 [0] = 4,
342 [1] = 4,
343 },
344 .cluster_midr =
345 {
346 [0] = UINT32_C(0x410FD034),
347 [1] = UINT32_C(0x410FD080),
348 },
349 },
350 {
351 /* Kirin 955: 4x Cortex-A72 + 4x Cortex-A53 */
352 .cores = 8,
353 .series = cpuinfo_arm_chipset_series_hisilicon_kirin,
354 .model = UINT16_C(955),
355 .clusters = 2,
356 .cluster_cores =
357 {
358 [0] = 4,
359 [1] = 4,
360 },
361 .cluster_midr =
362 {
363 [0] = UINT32_C(0x410FD034),
364 [1] = UINT32_C(0x410FD080),
365 },
366 },
367 #if CPUINFO_ARCH_ARM
368 {
369 /* MediaTek MT8135: 2x Cortex-A7 + 2x Cortex-A15 */
370 .cores = 4,
371 .series = cpuinfo_arm_chipset_series_mediatek_mt,
372 .model = UINT16_C(8135),
373 .clusters = 2,
374 .cluster_cores =
375 {
376 [0] = 2,
377 [1] = 2,
378 },
379 .cluster_midr =
380 {
381 [0] = UINT32_C(0x410FC073),
382 [1] = UINT32_C(0x413FC0F2),
383 },
384 },
385 #endif
386 {
387 /* MediaTek MT8173: 2x Cortex-A72 + 2x Cortex-A53 */
388 .cores = 4,
389 .series = cpuinfo_arm_chipset_series_mediatek_mt,
390 .model = UINT16_C(8173),
391 .clusters = 2,
392 .cluster_cores =
393 {
394 [0] = 2,
395 [1] = 2,
396 },
397 .cluster_midr =
398 {
399 [0] = UINT32_C(0x410FD032),
400 [1] = UINT32_C(0x410FD080),
401 },
402 },
403 {
404 /* MediaTek MT8176: 2x Cortex-A72 + 4x Cortex-A53 */
405 .cores = 6,
406 .series = cpuinfo_arm_chipset_series_mediatek_mt,
407 .model = UINT16_C(8176),
408 .clusters = 2,
409 .cluster_cores =
410 {
411 [0] = 4,
412 [1] = 2,
413 },
414 .cluster_midr =
415 {
416 [0] = UINT32_C(0x410FD032),
417 [1] = UINT32_C(0x410FD080),
418 },
419 },
420 #if CPUINFO_ARCH_ARM64
421 {
422 /*
423 * MediaTek MT8735: 4x Cortex-A53
424 * Some AArch64 phones use non-standard /proc/cpuinfo format.
425 */
426 .cores = 4,
427 .series = cpuinfo_arm_chipset_series_mediatek_mt,
428 .model = UINT16_C(8735),
429 .clusters = 1,
430 .cluster_cores =
431 {
432 [0] = 4,
433 },
434 .cluster_midr =
435 {
436 [0] = UINT32_C(0x410FD034),
437 },
438 },
439 #endif
440 #if CPUINFO_ARCH_ARM
441 {
442 /*
443 * MediaTek MT6592: 4x Cortex-A7 + 4x Cortex-A7
444 * Some phones use non-standard /proc/cpuinfo format.
445 */
446 .cores = 4,
447 .series = cpuinfo_arm_chipset_series_mediatek_mt,
448 .model = UINT16_C(6592),
449 .clusters = 2,
450 .cluster_cores =
451 {
452 [0] = 4,
453 [1] = 4,
454 },
455 .cluster_midr =
456 {
457 [0] = UINT32_C(0x410FC074),
458 [1] = UINT32_C(0x410FC074),
459 },
460 },
461 {
462 /* MediaTek MT6595: 4x Cortex-A17 + 4x Cortex-A7 */
463 .cores = 8,
464 .series = cpuinfo_arm_chipset_series_mediatek_mt,
465 .model = UINT16_C(6595),
466 .clusters = 2,
467 .cluster_cores =
468 {
469 [0] = 4,
470 [1] = 4,
471 },
472 .cluster_midr =
473 {
474 [0] = UINT32_C(0x410FC075),
475 [1] = UINT32_C(0x410FC0E0),
476 },
477 },
478 #endif
479 {
480 /* MediaTek MT6797: 2x Cortex-A72 + 4x Cortex-A53 + 4x
481 Cortex-A53 */
482 .cores = 10,
483 .series = cpuinfo_arm_chipset_series_mediatek_mt,
484 .model = UINT16_C(6797),
485 .clusters = 3,
486 .cluster_cores =
487 {
488 [0] = 4,
489 [1] = 4,
490 [2] = 2,
491 },
492 .cluster_midr =
493 {
494 [0] = UINT32_C(0x410FD034),
495 [1] = UINT32_C(0x410FD034),
496 [2] = UINT32_C(0x410FD081),
497 },
498 },
499 {
500 /* MediaTek MT6799: 2x Cortex-A73 + 4x Cortex-A53 + 4x
501 Cortex-A35 */
502 .cores = 10,
503 .series = cpuinfo_arm_chipset_series_mediatek_mt,
504 .model = UINT16_C(6799),
505 .clusters = 3,
506 .cluster_cores =
507 {
508 [0] = 4,
509 [1] = 4,
510 [2] = 2,
511 },
512 .cluster_midr =
513 {
514 [0] = UINT32_C(0x410FD041),
515 [1] = UINT32_C(0x410FD034),
516 [2] = UINT32_C(0x410FD092),
517 },
518 },
519 {
520 /* Rockchip RK3399: 2x Cortex-A72 + 4x Cortex-A53 */
521 .cores = 6,
522 .series = cpuinfo_arm_chipset_series_rockchip_rk,
523 .model = UINT16_C(3399),
524 .clusters = 2,
525 .cluster_cores =
526 {
527 [0] = 4,
528 [1] = 2,
529 },
530 .cluster_midr =
531 {
532 [0] = UINT32_C(0x410FD034),
533 [1] = UINT32_C(0x410FD082),
534 },
535 },
536 #if CPUINFO_ARCH_ARM
537 {
538 /* Actions ATM8029: 4x Cortex-A5
539 * Most devices use non-standard /proc/cpuinfo format.
540 */
541 .cores = 4,
542 .series = cpuinfo_arm_chipset_series_actions_atm,
543 .model = UINT16_C(7029),
544 .clusters = 1,
545 .cluster_cores =
546 {
547 [0] = 4,
548 },
549 .cluster_midr =
550 {
551 [0] = UINT32_C(0x410FC051),
552 },
553 },
554 #endif
555 };
556
557 /*
558 * Searches chipset name in mapping of chipset name to cores' MIDR values. If
559 * match is successful, initializes MIDR for all clusters' leaders with
560 * tabulated values.
561 *
562 * @param[in] chipset - chipset (SoC) name information.
563 * @param clusters_count - number of CPU core clusters detected in the SoC.
564 * @param cluster_leaders - indices of core clusters' leaders in the @p
565 * processors array.
566 * @param processors_count - number of usable logical processors in the system.
567 * @param[in,out] processors - array of logical processor descriptions with
568 * pre-parsed MIDR, maximum frequency, and decoded core cluster
569 * (package_leader_id) information. Upon successful return, processors[i].midr
570 * for all clusters' leaders contains the tabulated MIDR values.
571 * @param verify_midr - indicated whether the function should check that the
572 * MIDR values to be assigned to leaders of core clusters are consistent with
573 * known parts of their parsed values. Set if to false if the only MIDR value
574 * parsed from /proc/cpuinfo is for the last processor reported in /proc/cpuinfo
575 * and thus can't be unambiguously attributed to that processor.
576 *
577 * @retval true if the chipset was found in the mapping and core clusters'
578 * leaders initialized with MIDR values.
579 * @retval false if the chipset was not found in the mapping, or any consistency
580 * check failed.
581 */
cpuinfo_arm_linux_detect_cluster_midr_by_chipset(const struct cpuinfo_arm_chipset chipset[restrict static1],uint32_t clusters_count,const uint32_t cluster_leaders[restrict static CLUSTERS_MAX],uint32_t processors_count,struct cpuinfo_arm_linux_processor processors[restrict static processors_count],bool verify_midr)582 static bool cpuinfo_arm_linux_detect_cluster_midr_by_chipset(
583 const struct cpuinfo_arm_chipset chipset[restrict static 1],
584 uint32_t clusters_count,
585 const uint32_t cluster_leaders[restrict static CLUSTERS_MAX],
586 uint32_t processors_count,
587 struct cpuinfo_arm_linux_processor processors[restrict static processors_count],
588 bool verify_midr) {
589 if (clusters_count <= CLUSTERS_MAX) {
590 for (uint32_t c = 0; c < CPUINFO_COUNT_OF(cluster_configs); c++) {
591 if (cluster_configs[c].model == chipset->model &&
592 cluster_configs[c].series == chipset->series) {
593 /* Verify that the total number of cores and
594 * clusters of cores matches expectation */
595 if (cluster_configs[c].cores != processors_count ||
596 cluster_configs[c].clusters != clusters_count) {
597 return false;
598 }
599
600 /* Verify that core cluster configuration
601 * matches expectation */
602 for (uint32_t cluster = 0; cluster < clusters_count; cluster++) {
603 const uint32_t cluster_leader = cluster_leaders[cluster];
604 if (cluster_configs[c].cluster_cores[cluster] !=
605 processors[cluster_leader].package_processor_count) {
606 return false;
607 }
608 }
609
610 if (verify_midr) {
611 /* Verify known parts of MIDR */
612 for (uint32_t cluster = 0; cluster < clusters_count; cluster++) {
613 const uint32_t cluster_leader = cluster_leaders[cluster];
614
615 /* Create a mask of known midr
616 * bits */
617 uint32_t midr_mask = 0;
618 if (processors[cluster_leader].flags &
619 CPUINFO_ARM_LINUX_VALID_IMPLEMENTER) {
620 midr_mask |= CPUINFO_ARM_MIDR_IMPLEMENTER_MASK;
621 }
622 if (processors[cluster_leader].flags &
623 CPUINFO_ARM_LINUX_VALID_VARIANT) {
624 midr_mask |= CPUINFO_ARM_MIDR_VARIANT_MASK;
625 }
626 if (processors[cluster_leader].flags & CPUINFO_ARM_LINUX_VALID_PART) {
627 midr_mask |= CPUINFO_ARM_MIDR_PART_MASK;
628 }
629 if (processors[cluster_leader].flags &
630 CPUINFO_ARM_LINUX_VALID_REVISION) {
631 midr_mask |= CPUINFO_ARM_MIDR_REVISION_MASK;
632 }
633
634 /* Verify the bits under the
635 * mask */
636 if ((processors[cluster_leader].midr ^
637 cluster_configs[c].cluster_midr[cluster]) &
638 midr_mask) {
639 cpuinfo_log_debug(
640 "parsed MIDR of cluster %08" PRIu32
641 " does not match tabulated value %08" PRIu32,
642 processors[cluster_leader].midr,
643 cluster_configs[c].cluster_midr[cluster]);
644 return false;
645 }
646 }
647 }
648
649 /* Assign MIDRs according to tabulated
650 * configurations */
651 for (uint32_t cluster = 0; cluster < clusters_count; cluster++) {
652 const uint32_t cluster_leader = cluster_leaders[cluster];
653 processors[cluster_leader].midr = cluster_configs[c].cluster_midr[cluster];
654 processors[cluster_leader].flags |= CPUINFO_ARM_LINUX_VALID_MIDR;
655 cpuinfo_log_debug(
656 "cluster %" PRIu32 " MIDR = 0x%08" PRIx32,
657 cluster,
658 cluster_configs[c].cluster_midr[cluster]);
659 }
660 return true;
661 }
662 }
663 }
664 return false;
665 }
666
667 /*
668 * Initializes MIDR for leaders of core clusters using a heuristic for
669 * big.LITTLE systems:
670 * - If the only known MIDR is for the big core cluster, guess the matching MIDR
671 * for the LITTLE cluster.
672 * - Estimate which of the clusters is big using maximum frequency, if known,
673 * otherwise using system processor ID.
674 * - Initialize the MIDR for big and LITTLE core clusters using the guesstimates
675 * values.
676 *
677 * @param clusters_count - number of CPU core clusters detected in the SoC.
678 * @param cluster_with_midr_count - number of CPU core clusters in the SoC with
679 * known MIDR values.
680 * @param last_processor_with_midr - index of the last logical processor with
681 * known MIDR in the @p processors array.
682 * @param cluster_leaders - indices of core clusters' leaders in the @p
683 * processors array.
684 * @param[in,out] processors - array of logical processor descriptions with
685 * pre-parsed MIDR, maximum frequency, and decoded core cluster
686 * (package_leader_id) information. Upon successful return, processors[i].midr
687 * for all core clusters' leaders contains the heuristically detected MIDR
688 * value.
689 * @param verify_midr - indicated whether the function should check that the
690 * MIDR values to be assigned to leaders of core clusters are consistent with
691 * known parts of their parsed values. Set if to false if the only MIDR value
692 * parsed from /proc/cpuinfo is for the last processor reported in /proc/cpuinfo
693 * and thus can't be unambiguously attributed to that processor.
694 *
695 * @retval true if this is a big.LITTLE system with only one known MIDR and the
696 * CPU core clusters' leaders were initialized with MIDR values.
697 * @retval false if this is not a big.LITTLE system.
698 */
cpuinfo_arm_linux_detect_cluster_midr_by_big_little_heuristic(uint32_t clusters_count,uint32_t cluster_with_midr_count,uint32_t last_processor_with_midr,const uint32_t cluster_leaders[restrict static CLUSTERS_MAX],struct cpuinfo_arm_linux_processor processors[restrict static last_processor_with_midr],bool verify_midr)699 static bool cpuinfo_arm_linux_detect_cluster_midr_by_big_little_heuristic(
700 uint32_t clusters_count,
701 uint32_t cluster_with_midr_count,
702 uint32_t last_processor_with_midr,
703 const uint32_t cluster_leaders[restrict static CLUSTERS_MAX],
704 struct cpuinfo_arm_linux_processor processors[restrict static last_processor_with_midr],
705 bool verify_midr) {
706 if (clusters_count != 2 || cluster_with_midr_count != 1) {
707 /* Not a big.LITTLE system, or MIDR is known for both/neither
708 * clusters */
709 return false;
710 }
711
712 const uint32_t midr_flags =
713 (processors[processors[last_processor_with_midr].package_leader_id].flags &
714 CPUINFO_ARM_LINUX_VALID_MIDR);
715 const uint32_t big_midr = processors[processors[last_processor_with_midr].package_leader_id].midr;
716 const uint32_t little_midr = midr_little_core_for_big(big_midr);
717
718 /* Default assumption: the first reported cluster is LITTLE cluster
719 * (this holds on most Linux kernels) */
720 uint32_t little_cluster_leader = cluster_leaders[0];
721 const uint32_t other_cluster_leader = cluster_leaders[1];
722 /* If maximum frequency is known for both clusters, assume LITTLE
723 * cluster is the one with lower frequency */
724 if (processors[little_cluster_leader].flags & processors[other_cluster_leader].flags &
725 CPUINFO_LINUX_FLAG_MAX_FREQUENCY) {
726 if (processors[little_cluster_leader].max_frequency > processors[other_cluster_leader].max_frequency) {
727 little_cluster_leader = other_cluster_leader;
728 }
729 }
730
731 if (verify_midr) {
732 /* Verify known parts of MIDR */
733 for (uint32_t cluster = 0; cluster < clusters_count; cluster++) {
734 const uint32_t cluster_leader = cluster_leaders[cluster];
735
736 /* Create a mask of known midr bits */
737 uint32_t midr_mask = 0;
738 if (processors[cluster_leader].flags & CPUINFO_ARM_LINUX_VALID_IMPLEMENTER) {
739 midr_mask |= CPUINFO_ARM_MIDR_IMPLEMENTER_MASK;
740 }
741 if (processors[cluster_leader].flags & CPUINFO_ARM_LINUX_VALID_VARIANT) {
742 midr_mask |= CPUINFO_ARM_MIDR_VARIANT_MASK;
743 }
744 if (processors[cluster_leader].flags & CPUINFO_ARM_LINUX_VALID_PART) {
745 midr_mask |= CPUINFO_ARM_MIDR_PART_MASK;
746 }
747 if (processors[cluster_leader].flags & CPUINFO_ARM_LINUX_VALID_REVISION) {
748 midr_mask |= CPUINFO_ARM_MIDR_REVISION_MASK;
749 }
750
751 /* Verify the bits under the mask */
752 const uint32_t midr = (cluster_leader == little_cluster_leader) ? little_midr : big_midr;
753 if ((processors[cluster_leader].midr ^ midr) & midr_mask) {
754 cpuinfo_log_debug(
755 "parsed MIDR %08" PRIu32 " of cluster leader %" PRIu32
756 " is inconsistent with expected value %08" PRIu32,
757 processors[cluster_leader].midr,
758 cluster_leader,
759 midr);
760 return false;
761 }
762 }
763 }
764
765 for (uint32_t c = 0; c < clusters_count; c++) {
766 /* Skip cluster with already assigned MIDR */
767 const uint32_t cluster_leader = cluster_leaders[c];
768 if (bitmask_all(processors[cluster_leader].flags, CPUINFO_ARM_LINUX_VALID_MIDR)) {
769 continue;
770 }
771
772 const uint32_t midr = (cluster_leader == little_cluster_leader) ? little_midr : big_midr;
773 cpuinfo_log_info("assume processor %" PRIu32 " to have MIDR %08" PRIx32, cluster_leader, midr);
774 /* To be consistent, we copy the MIDR entirely, rather than by
775 * parts */
776 processors[cluster_leader].midr = midr;
777 processors[cluster_leader].flags |= midr_flags;
778 }
779 return true;
780 }
781
782 /*
783 * Initializes MIDR for leaders of core clusters in a single sequential scan:
784 * - Clusters preceding the first reported MIDR value are assumed to have
785 * default MIDR value.
786 * - Clusters following any reported MIDR value to have that MIDR value.
787 *
788 * @param default_midr - MIDR value that will be assigned to cluster leaders
789 * preceding any reported MIDR value.
790 * @param processors_count - number of logical processor descriptions in the @p
791 * processors array.
792 * @param[in,out] processors - array of logical processor descriptions with
793 * pre-parsed MIDR, maximum frequency, and decoded core cluster
794 * (package_leader_id) information. Upon successful return, processors[i].midr
795 * for all core clusters' leaders contains the assigned MIDR value.
796 */
cpuinfo_arm_linux_detect_cluster_midr_by_sequential_scan(uint32_t default_midr,uint32_t processors_count,struct cpuinfo_arm_linux_processor processors[restrict static processors_count])797 static void cpuinfo_arm_linux_detect_cluster_midr_by_sequential_scan(
798 uint32_t default_midr,
799 uint32_t processors_count,
800 struct cpuinfo_arm_linux_processor processors[restrict static processors_count]) {
801 uint32_t midr = default_midr;
802 for (uint32_t i = 0; i < processors_count; i++) {
803 if (bitmask_all(processors[i].flags, CPUINFO_LINUX_FLAG_VALID)) {
804 if (processors[i].package_leader_id == i) {
805 if (bitmask_all(processors[i].flags, CPUINFO_ARM_LINUX_VALID_MIDR)) {
806 midr = processors[i].midr;
807 } else {
808 cpuinfo_log_info(
809 "assume processor %" PRIu32 " to have MIDR %08" PRIx32, i, midr);
810 /* To be consistent, we copy the MIDR
811 * entirely, rather than by parts
812 */
813 processors[i].midr = midr;
814 processors[i].flags |= CPUINFO_ARM_LINUX_VALID_MIDR;
815 }
816 }
817 }
818 }
819 }
820
/*
 * Detects MIDR of each CPU core clusters' leader.
 *
 * @param[in] chipset - chipset (SoC) name information.
 * @param max_processors - number of processor descriptions in the @p processors
 * array.
 * @param usable_processors - number of processor descriptions in the @p
 * processors array with both POSSIBLE and PRESENT flags.
 * @param[in,out] processors - array of logical processor descriptions with
 * pre-parsed MIDR, maximum frequency, and decoded core cluster
 * (package_leader_id) information. Upon return, processors[i].midr for all
 * clusters' leaders contains the MIDR value.
 *
 * @returns The number of core clusters
 */
uint32_t cpuinfo_arm_linux_detect_cluster_midr(
	const struct cpuinfo_arm_chipset chipset[restrict static 1],
	uint32_t max_processors,
	uint32_t usable_processors,
	struct cpuinfo_arm_linux_processor processors[restrict static max_processors]) {
	uint32_t clusters_count = 0;
	uint32_t cluster_leaders[CLUSTERS_MAX];
	/* max_processors serves as a sentinel for "no such processor seen" */
	uint32_t last_processor_in_cpuinfo = max_processors;
	uint32_t last_processor_with_midr = max_processors;
	uint32_t processors_with_midr_count = 0;
	/* Single pass: count clusters, record cluster leaders, and propagate
	 * known per-processor information (max frequency, MIDR) to each
	 * cluster's leader. */
	for (uint32_t i = 0; i < max_processors; i++) {
		if (bitmask_all(processors[i].flags, CPUINFO_LINUX_FLAG_VALID)) {
			if (processors[i].flags & CPUINFO_ARM_LINUX_VALID_PROCESSOR) {
				last_processor_in_cpuinfo = i;
			}
			/* IMPLEMENTER + PART is the minimum needed to consider
			 * a parsed MIDR usable */
			if (bitmask_all(
				    processors[i].flags,
				    CPUINFO_ARM_LINUX_VALID_IMPLEMENTER | CPUINFO_ARM_LINUX_VALID_PART)) {
				last_processor_with_midr = i;
				processors_with_midr_count += 1;
			}
			const uint32_t group_leader = processors[i].package_leader_id;
			if (group_leader == i) {
				/* Processor is its own cluster leader; record
				 * it (only the first CLUSTERS_MAX leaders are
				 * stored, but all clusters are counted) */
				if (clusters_count < CLUSTERS_MAX) {
					cluster_leaders[clusters_count] = i;
				}
				clusters_count += 1;
			} else {
				/* Copy known bits of information to cluster
				 * leader */

				if ((processors[i].flags & ~processors[group_leader].flags) &
				    CPUINFO_LINUX_FLAG_MAX_FREQUENCY) {
					processors[group_leader].max_frequency = processors[i].max_frequency;
					processors[group_leader].flags |= CPUINFO_LINUX_FLAG_MAX_FREQUENCY;
				}
				if (!bitmask_all(processors[group_leader].flags, CPUINFO_ARM_LINUX_VALID_MIDR) &&
				    bitmask_all(processors[i].flags, CPUINFO_ARM_LINUX_VALID_MIDR)) {
					processors[group_leader].midr = processors[i].midr;
					processors[group_leader].flags |= CPUINFO_ARM_LINUX_VALID_MIDR;
				}
			}
		}
	}
	cpuinfo_log_debug("detected %" PRIu32 " core clusters", clusters_count);

	/*
	 * Two relations between reported /proc/cpuinfo information, and cores
	 * is possible:
	 * - /proc/cpuinfo reports information for all or some of the cores
	 * below the corresponding "processor : <number>" lines. Information on
	 * offline cores may be missing.
	 * - /proc/cpuinfo reports information only once, after all "processor :
	 * <number>" lines. The reported information may relate to processor #0
	 * or to the processor which executed the system calls to read
	 * /proc/cpuinfo. It is also indistinguishable from /proc/cpuinfo
	 * reporting information only for the last core (e.g. if all other cores
	 * are offline).
	 *
	 * We detect the second case by checking if /proc/cpuinfo contains valid
	 * MIDR only for one, last reported, processor. Note, that the last
	 * reported core may be not the last present & possible processor, as
	 * /proc/cpuinfo may non-report high-index offline cores.
	 */
	if (processors_with_midr_count == 1 && last_processor_in_cpuinfo == last_processor_with_midr &&
	    clusters_count > 1) {
		/*
		 * There are multiple core clusters, but /proc/cpuinfo reported
		 * MIDR only for one processor, and we don't even know which
		 * logical processor this information refers to.
		 *
		 * We make three attempts to detect MIDR for all clusters:
		 * 1. Search tabulated MIDR values for chipsets which have
		 * heterogeneous clusters and ship with Linux kernels which do
		 * not always report all cores in /proc/cpuinfo. If found, use
		 * the tabulated values.
		 * 2. For systems with 2 clusters and MIDR known for one
		 * cluster, assume big.LITTLE configuration, and estimate MIDR
		 * for the other cluster under assumption that MIDR for the big
		 * cluster is known.
		 * 3. Initialize MIDRs for all core clusters to the only parsed
		 * MIDR value.
		 */
		cpuinfo_log_debug("the only reported MIDR can not be attributed to a particular processor");

		/* verify_midr = false: the lone MIDR can't be attributed to a
		 * specific processor, so consistency checks would be bogus */
		if (cpuinfo_arm_linux_detect_cluster_midr_by_chipset(
			    chipset, clusters_count, cluster_leaders, usable_processors, processors, false)) {
			return clusters_count;
		}

		/* Try big.LITTLE heuristic */
		if (cpuinfo_arm_linux_detect_cluster_midr_by_big_little_heuristic(
			    clusters_count, 1, last_processor_with_midr, cluster_leaders, processors, false)) {
			return clusters_count;
		}

		/* Fall back to sequential initialization of MIDR values for
		 * core clusters
		 */
		cpuinfo_arm_linux_detect_cluster_midr_by_sequential_scan(
			processors[processors[last_processor_with_midr].package_leader_id].midr,
			max_processors,
			processors);
	} else if (processors_with_midr_count < usable_processors) {
		/*
		 * /proc/cpuinfo reported MIDR only for some processors, and
		 * probably some core clusters do not have MIDR for any of the
		 * cores. Check if this is the case.
		 */
		uint32_t clusters_with_midr_count = 0;
		for (uint32_t i = 0; i < max_processors; i++) {
			if (bitmask_all(processors[i].flags, CPUINFO_LINUX_FLAG_VALID | CPUINFO_ARM_LINUX_VALID_MIDR)) {
				if (processors[i].package_leader_id == i) {
					clusters_with_midr_count += 1;
				}
			}
		}

		if (clusters_with_midr_count < clusters_count) {
			/*
			 * /proc/cpuinfo reported MIDR only for some clusters,
			 * need to reconstruct others. We make three attempts to
			 * detect MIDR for clusters without it:
			 * 1. Search tabulated MIDR values for chipsets which
			 * have heterogeneous clusters and ship with Linux
			 * kernels which do not always report all cores in
			 * /proc/cpuinfo. If found, use the tabulated values.
			 * 2. For systems with 2 clusters and MIDR known for one
			 * cluster, assume big.LITTLE configuration, and
			 * estimate MIDR for the other cluster under assumption
			 * that MIDR for the big cluster is known.
			 * 3. Initialize MIDRs for core clusters in a single
			 * sequential scan:
			 * - Clusters preceding the first reported MIDR value
			 * are assumed to have the last reported MIDR value.
			 * - Clusters following any reported MIDR value to
			 * have that MIDR value.
			 */

			/* verify_midr = true: parsed MIDRs are attributable
			 * here, so tabulated values must agree with them */
			if (cpuinfo_arm_linux_detect_cluster_midr_by_chipset(
				    chipset, clusters_count, cluster_leaders, usable_processors, processors, true)) {
				return clusters_count;
			}

			if (last_processor_with_midr != max_processors) {
				/* Try big.LITTLE heuristic */
				if (cpuinfo_arm_linux_detect_cluster_midr_by_big_little_heuristic(
					    clusters_count,
					    processors_with_midr_count,
					    last_processor_with_midr,
					    cluster_leaders,
					    processors,
					    true)) {
					return clusters_count;
				}

				/* Fall back to sequential initialization of
				 * MIDR values for core clusters */
				cpuinfo_arm_linux_detect_cluster_midr_by_sequential_scan(
					processors[processors[last_processor_with_midr].package_leader_id].midr,
					max_processors,
					processors);
			}
		}
	}
	return clusters_count;
}
1003