/**************************************************************************
 *
 * Copyright 2008 Dennis Smit
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * AUTHORS, COPYRIGHT HOLDERS, AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * @file
 * CPU feature detection.
 *
 * @author Dennis Smit
 * @author Based on the work of Eric Anholt <anholt@FreeBSD.org>
 */

#include "util/detect.h"
#include "util/compiler.h"

#include "util/u_debug.h"
#include "u_cpu_detect.h"
#include "u_math.h"
#include "os_file.h"
#include "c11/threads.h"

#include <stdio.h>
#include <inttypes.h>
#include <errno.h>   /* errno, checked after strtoull() below */
#include <limits.h>  /* PATH_MAX */
#include <stdlib.h>  /* getenv(), malloc(), strtoull() */
#include <string.h>  /* memset(), strcmp() */

#if DETECT_ARCH_PPC
#if DETECT_OS_APPLE
#include <sys/sysctl.h>
#else
#include <signal.h>
#include <setjmp.h>
#endif
#endif

#if DETECT_OS_BSD
#include <sys/param.h>
#include <sys/sysctl.h>
#include <machine/cpu.h>
#endif

#if DETECT_OS_FREEBSD
#if __has_include(<sys/auxv.h>)
#include <sys/auxv.h>
#define HAVE_ELF_AUX_INFO
#endif
#endif

#if DETECT_OS_LINUX
#include <signal.h>
#include <fcntl.h>
#include <elf.h>
#endif

#if DETECT_OS_UNIX
#include <unistd.h>
#endif

#if defined(HAS_ANDROID_CPUFEATURES)
#include <cpu-features.h>
#endif

#if DETECT_OS_WINDOWS
#include <windows.h>
#if DETECT_CC_MSVC
#include <intrin.h>
#endif
#endif

#if defined(HAS_SCHED_H)
#include <sched.h>
#endif

// prevent inadvertent infinite recursion
#define util_get_cpu_caps() util_get_cpu_caps_DO_NOT_USE()

DEBUG_GET_ONCE_BOOL_OPTION(dump_cpu, "GALLIUM_DUMP_CPU", false)

static struct util_cpu_caps_t util_cpu_caps;

/* Do not try to access _util_cpu_caps_state directly; call
 * util_get_cpu_caps() instead.
 */
struct _util_cpu_caps_state_t _util_cpu_caps_state = {
   .once_flag = ONCE_FLAG_INIT,
   .detect_done = 0,
};

#if DETECT_ARCH_X86 || DETECT_ARCH_X86_64
static int has_cpuid(void);
#endif


#if DETECT_ARCH_PPC && !DETECT_OS_APPLE && !DETECT_OS_BSD && !DETECT_OS_LINUX
static jmp_buf __lv_powerpc_jmpbuf;
static volatile sig_atomic_t __lv_powerpc_canjump = 0;

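/* Probe for AltiVec/VSX on bare-metal PPC: install a SIGILL handler, try
 * the instruction, and longjmp back out if the CPU traps on it.
 */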
static void
sigill_handler(int sig)
{
   if (!__lv_powerpc_canjump) {
      signal(sig, SIG_DFL);
      raise(sig);
   }

   __lv_powerpc_canjump = 0;
   longjmp(__lv_powerpc_jmpbuf, 1);
}
#endif

#if DETECT_ARCH_PPC
static void
check_os_altivec_support(void)
{
#if defined(__ALTIVEC__)
   util_cpu_caps.has_altivec = 1;
#endif
#if defined(__VSX__)
   util_cpu_caps.has_vsx = 1;
#endif
#if defined(__ALTIVEC__) && defined(__VSX__)
   /* Do nothing */
#elif DETECT_OS_APPLE || DETECT_OS_NETBSD || DETECT_OS_OPENBSD
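   /* On Darwin, NetBSD, and OpenBSD the kernel reports the vector unit via
    * sysctl: hw.vectorunit where HW_VECTORUNIT is defined, otherwise
    * machdep.altivec.
    */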
#ifdef HW_VECTORUNIT
   int sels[2] = {CTL_HW, HW_VECTORUNIT};
#else
   int sels[2] = {CTL_MACHDEP, CPU_ALTIVEC};
#endif
   int has_vu = 0;
   size_t len = sizeof(has_vu);
   int err;

   err = sysctl(sels, 2, &has_vu, &len, NULL, 0);

   if (err == 0) {
      if (has_vu != 0) {
         util_cpu_caps.has_altivec = 1;
      }
   }
#elif DETECT_OS_FREEBSD /* !DETECT_OS_APPLE && !DETECT_OS_NETBSD && !DETECT_OS_OPENBSD */
   unsigned long hwcap = 0;
#ifdef HAVE_ELF_AUX_INFO
   elf_aux_info(AT_HWCAP, &hwcap, sizeof(hwcap));
#else
   size_t len = sizeof(hwcap);
   sysctlbyname("hw.cpu_features", &hwcap, &len, NULL, 0);
#endif
   if (hwcap & PPC_FEATURE_HAS_ALTIVEC)
      util_cpu_caps.has_altivec = 1;
   if (hwcap & PPC_FEATURE_HAS_VSX)
      util_cpu_caps.has_vsx = 1;
#elif DETECT_OS_LINUX /* !DETECT_OS_FREEBSD */
#if DETECT_ARCH_PPC_64
   Elf64_auxv_t aux;
#else
   Elf32_auxv_t aux;
#endif
   int fd = open("/proc/self/auxv", O_RDONLY | O_CLOEXEC);
   if (fd >= 0) {
      while (read(fd, &aux, sizeof(aux)) == sizeof(aux)) {
         if (aux.a_type == AT_HWCAP) {
            char *env_vsx = getenv("GALLIVM_VSX");
            uint64_t hwcap = aux.a_un.a_val;
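            /* In the PowerPC AT_HWCAP word, bit 28 is
             * PPC_FEATURE_HAS_ALTIVEC and bit 7 is PPC_FEATURE_HAS_VSX.
             */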
            util_cpu_caps.has_altivec = (hwcap >> 28) & 1;
            if (!env_vsx || env_vsx[0] != '0') {
               util_cpu_caps.has_vsx = (hwcap >> 7) & 1;
            }
            break;
         }
      }
      close(fd);
   }
#else /* !DETECT_OS_APPLE && !DETECT_OS_BSD && !DETECT_OS_LINUX */
   /* not on Apple/Darwin, BSD, or Linux, so do it the brute-force way */
   /* this is borrowed from the libmpeg2 library */
   signal(SIGILL, sigill_handler);
   if (setjmp(__lv_powerpc_jmpbuf)) {
      signal(SIGILL, SIG_DFL);
   } else {
      bool enable_altivec = true; /* Default: enable if available, and if not overridden */
      bool enable_vsx = true;
#ifdef DEBUG
      /* Disabling AltiVec code generation is not the same as disabling VSX
       * code generation, which can be done simply by passing -mattr=-vsx to
       * the LLVM compiler; cf. lp_build_create_jit_compiler_for_module().
       * If you want to disable AltiVec code generation, the best place to
       * do it is here.
       */
      char *env_control = getenv("GALLIVM_ALTIVEC"); /* 1=enable (default); 0=disable */
      if (env_control && env_control[0] == '0') {
         enable_altivec = false;
      }
#endif
      /* VSX instructions can be explicitly enabled/disabled via GALLIVM_VSX=1 or 0 */
      char *env_vsx = getenv("GALLIVM_VSX");
      if (env_vsx && env_vsx[0] == '0') {
         enable_vsx = false;
      }
      if (enable_altivec) {
         __lv_powerpc_canjump = 1;

         __asm __volatile
            ("mtspr 256, %0\n\t"
             "vand %%v0, %%v0, %%v0"
             :
             : "r" (-1));

         util_cpu_caps.has_altivec = 1;

         if (enable_vsx) {
            __asm __volatile("xxland %vs0, %vs0, %vs0");
            util_cpu_caps.has_vsx = 1;
         }
         signal(SIGILL, SIG_DFL);
      } else {
         util_cpu_caps.has_altivec = 0;
      }
   }
#endif /* !DETECT_OS_APPLE && !DETECT_OS_BSD && !DETECT_OS_LINUX */
}
#endif /* DETECT_ARCH_PPC */


#if DETECT_ARCH_X86 || DETECT_ARCH_X86_64
static int has_cpuid(void)
{
#if DETECT_ARCH_X86
#if DETECT_CC_GCC
   int a, c;

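   /* Toggle the ID flag (bit 21, 0x200000) in EFLAGS; if the change
    * sticks, the CPU supports the CPUID instruction.
    */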
   __asm __volatile
      ("pushf\n"
       "popl %0\n"
       "movl %0, %1\n"
       "xorl $0x200000, %0\n"
       "push %0\n"
       "popf\n"
       "pushf\n"
       "popl %0\n"
       : "=a" (a), "=c" (c)
       :
       : "cc");

   return a != c;
#else
   /* FIXME */
   return 1;
#endif
#elif DETECT_ARCH_X86_64
   return 1;
#else
   return 0;
#endif
}


/**
 * @sa cpuid.h included in gcc-4.3 onwards.
 * @sa http://msdn.microsoft.com/en-us/library/hskdteyh.aspx
 */
static inline void
cpuid(uint32_t ax, uint32_t *p)
{
#if DETECT_CC_GCC && DETECT_ARCH_X86
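   /* On 32-bit x86, EBX can be the PIC base register, so it must be
    * preserved: CPUID's EBX output is swapped into ESI around the call.
    */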
   __asm __volatile (
      "xchgl %%ebx, %1\n\t"
      "cpuid\n\t"
      "xchgl %%ebx, %1"
      : "=a" (p[0]),
        "=S" (p[1]),
        "=c" (p[2]),
        "=d" (p[3])
      : "0" (ax)
   );
#elif DETECT_CC_GCC && DETECT_ARCH_X86_64
   __asm __volatile (
      "cpuid\n\t"
      : "=a" (p[0]),
        "=b" (p[1]),
        "=c" (p[2]),
        "=d" (p[3])
      : "0" (ax)
   );
#elif DETECT_CC_MSVC
   __cpuid(p, ax);
#else
   p[0] = 0;
   p[1] = 0;
   p[2] = 0;
   p[3] = 0;
#endif
}

/**
 * @sa cpuid.h included in gcc-4.4 onwards.
 * @sa http://msdn.microsoft.com/en-us/library/hskdteyh%28v=vs.90%29.aspx
 */
static inline void
cpuid_count(uint32_t ax, uint32_t cx, uint32_t *p)
{
#if DETECT_CC_GCC && DETECT_ARCH_X86
   __asm __volatile (
      "xchgl %%ebx, %1\n\t"
      "cpuid\n\t"
      "xchgl %%ebx, %1"
      : "=a" (p[0]),
        "=S" (p[1]),
        "=c" (p[2]),
        "=d" (p[3])
      : "0" (ax), "2" (cx)
   );
#elif DETECT_CC_GCC && DETECT_ARCH_X86_64
   __asm __volatile (
      "cpuid\n\t"
      : "=a" (p[0]),
        "=b" (p[1]),
        "=c" (p[2]),
        "=d" (p[3])
      : "0" (ax), "2" (cx)
   );
#elif DETECT_CC_MSVC
   __cpuidex(p, ax, cx);
#else
   p[0] = 0;
   p[1] = 0;
   p[2] = 0;
   p[3] = 0;
#endif
}


static inline uint64_t xgetbv(void)
{
#if DETECT_CC_GCC
   uint32_t eax, edx;

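   /* ECX = 0 selects XCR0, the XFEATURE_ENABLED_MASK register. */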
   __asm __volatile (
      ".byte 0x0f, 0x01, 0xd0" // xgetbv isn't supported on gcc < 4.4
      : "=a"(eax),
        "=d"(edx)
      : "c"(0)
   );

   return ((uint64_t)edx << 32) | eax;
#elif DETECT_CC_MSVC && defined(_MSC_FULL_VER) && defined(_XCR_XFEATURE_ENABLED_MASK)
   return _xgetbv(_XCR_XFEATURE_ENABLED_MASK);
#else
   return 0;
#endif
}


#if DETECT_ARCH_X86
UTIL_ALIGN_STACK
static inline bool
sse2_has_daz(void)
{
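   /* FXSAVE stores a 512-byte area; MXCSR_MASK is at byte offset 28, and
    * bit 6 of the reported mask indicates denormals-are-zero support.
    */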
   alignas(16) struct {
      uint32_t pad1[7];
      uint32_t mxcsr_mask;
      uint32_t pad2[128-8];
   } fxarea;

   fxarea.mxcsr_mask = 0;
#if DETECT_CC_GCC
   __asm __volatile ("fxsave %0" : "+m" (fxarea));
#elif DETECT_CC_MSVC || DETECT_CC_ICL
   _fxsave(&fxarea);
#else
   fxarea.mxcsr_mask = 0;
#endif
   return !!(fxarea.mxcsr_mask & (1 << 6));
}
#endif

#endif /* X86 or X86_64 */

#if DETECT_ARCH_ARM
static void
check_os_arm_support(void)
{
   /*
    * On Android, the cpufeatures library is the preferred way of checking
    * CPU capabilities. However, it is not available for standalone Mesa
    * builds, i.e. when the Android build system (Android.mk-based) is not
    * used. Because of this we cannot use DETECT_OS_ANDROID here, but rather
    * have a separate macro that only gets enabled from the respective
    * Android.mk.
    */
#if defined(__ARM_NEON) || defined(__ARM_NEON__)
   util_cpu_caps.has_neon = 1;
#elif DETECT_OS_FREEBSD && defined(HAVE_ELF_AUX_INFO)
   unsigned long hwcap = 0;
   elf_aux_info(AT_HWCAP, &hwcap, sizeof(hwcap));
   if (hwcap & HWCAP_NEON)
      util_cpu_caps.has_neon = 1;
#elif defined(HAS_ANDROID_CPUFEATURES)
   AndroidCpuFamily cpu_family = android_getCpuFamily();
   uint64_t cpu_features = android_getCpuFeatures();

   if (cpu_family == ANDROID_CPU_FAMILY_ARM) {
      if (cpu_features & ANDROID_CPU_ARM_FEATURE_NEON)
         util_cpu_caps.has_neon = 1;
   }
#elif DETECT_OS_LINUX
   Elf32_auxv_t aux;
   int fd;

   fd = open("/proc/self/auxv", O_RDONLY | O_CLOEXEC);
   if (fd >= 0) {
      while (read(fd, &aux, sizeof(Elf32_auxv_t)) == sizeof(Elf32_auxv_t)) {
         if (aux.a_type == AT_HWCAP) {
            uint32_t hwcap = aux.a_un.a_val;

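            /* Bit 12 of the 32-bit ARM AT_HWCAP word is HWCAP_NEON. */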
            util_cpu_caps.has_neon = (hwcap >> 12) & 1;
            break;
         }
      }
      close(fd);
   }
#endif /* DETECT_OS_LINUX */
}

#elif DETECT_ARCH_AARCH64
static void
check_os_arm_support(void)
{
   util_cpu_caps.has_neon = true;
}
#endif /* DETECT_ARCH_ARM || DETECT_ARCH_AARCH64 */

#if DETECT_ARCH_MIPS64
static void
check_os_mips64_support(void)
{
#if DETECT_OS_LINUX
   Elf64_auxv_t aux;
   int fd;

   fd = open("/proc/self/auxv", O_RDONLY | O_CLOEXEC);
   if (fd >= 0) {
      while (read(fd, &aux, sizeof(Elf64_auxv_t)) == sizeof(Elf64_auxv_t)) {
         if (aux.a_type == AT_HWCAP) {
            uint64_t hwcap = aux.a_un.a_val;

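            /* Bit 1 of the MIPS AT_HWCAP word is HWCAP_MIPS_MSA. */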
            util_cpu_caps.has_msa = (hwcap >> 1) & 1;
            break;
         }
      }
      close(fd);
   }
#endif /* DETECT_OS_LINUX */
}
#endif /* DETECT_ARCH_MIPS64 */


static void
get_cpu_topology(void)
{
   /* Default. This is OK if L3 is not present or there is only one. */
   util_cpu_caps.num_L3_caches = 1;

   memset(util_cpu_caps.cpu_to_L3, 0xff, sizeof(util_cpu_caps.cpu_to_L3));

#if DETECT_OS_LINUX
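   /* Heuristic for heterogeneous (big.LITTLE-style) systems: read each
    * CPU's relative capacity from sysfs and count as "big" any core with
    * at least half of the largest capacity observed.
    */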
   uint64_t big_cap = 0;
   unsigned num_big_cpus = 0;
   uint64_t *caps = malloc(sizeof(uint64_t) * util_cpu_caps.max_cpus);
   bool fail = false;
   for (unsigned i = 0; caps && i < util_cpu_caps.max_cpus; i++) {
      char name[PATH_MAX];
      snprintf(name, sizeof(name), "/sys/devices/system/cpu/cpu%u/cpu_capacity", i);
      size_t size = 0;
      char *cap = os_read_file(name, &size);
      if (!cap) {
         num_big_cpus = 0;
         fail = true;
         break;
      }
      errno = 0;
      caps[i] = strtoull(cap, NULL, 10);
      free(cap);
      if (errno) {
         fail = true;
         break;
      }
      big_cap = MAX2(caps[i], big_cap);
   }
   if (!fail) {
      for (unsigned i = 0; caps && i < util_cpu_caps.max_cpus; i++) {
         if (caps[i] >= big_cap / 2)
            num_big_cpus++;
      }
   }
   free(caps);
   util_cpu_caps.nr_big_cpus = num_big_cpus;
#endif

#if DETECT_ARCH_X86 || DETECT_ARCH_X86_64
   /* AMD Zen */
   if (util_cpu_caps.family >= CPU_AMD_ZEN1_ZEN2 &&
       util_cpu_caps.family < CPU_AMD_LAST) {
      uint32_t regs[4];

      uint32_t saved_mask[UTIL_MAX_CPUS / 32] = {0};
      uint32_t mask[UTIL_MAX_CPUS / 32] = {0};
      bool saved = false;

      uint32_t L3_found[UTIL_MAX_CPUS] = {0};
      uint32_t num_L3_caches = 0;
      util_affinity_mask *L3_affinity_masks = NULL;

      /* Query APIC IDs from each CPU core.
       *
       * An APIC ID is a logical ID of the CPU with respect to the cache
       * hierarchy, meaning that consecutive APIC IDs are neighbours in
       * the hierarchy, e.g. sharing the same cache.
       *
       * For example, CPU 0 can have APIC ID 0 and CPU 12 can have APIC ID 1,
       * which means that both CPU 0 and 12 are next to each other.
       * (e.g. they are 2 threads belonging to 1 SMT2 core)
       *
       * We need to find out which CPUs share the same L3 cache and they can
       * be all over the place.
       *
       * Querying the APIC ID can only be done by pinning the current thread
       * to each core. The original affinity mask is saved.
       *
       * Loop over all possible CPUs even though some may be offline.
       */
      for (int16_t i = 0; i < util_cpu_caps.max_cpus && i < UTIL_MAX_CPUS; i++) {
         uint32_t cpu_bit = 1u << (i % 32);

         mask[i / 32] = cpu_bit;

         /* The assumption is that trying to bind the thread to a CPU that is
          * offline will fail.
          */
         if (util_set_current_thread_affinity(mask,
                                              !saved ? saved_mask : NULL,
                                              util_cpu_caps.num_cpu_mask_bits)) {
            saved = true;

            /* Query the APIC ID of the current core. */
            cpuid(0x00000001, regs);
            unsigned apic_id = regs[1] >> 24;

            /* Query the total core count for the CPU */
            uint32_t core_count = 1;
            if (regs[3] & (1 << 28))
               core_count = (regs[1] >> 16) & 0xff;

            core_count = util_next_power_of_two(core_count);

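            /* CPUID leaf 0x8000001D with ECX = 3 is AMD's cache-topology
             * leaf: EAX[7:5] is the cache level and EAX[25:14] is the
             * number of logical CPUs sharing the cache, minus one.
             */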
            /* Query the L3 cache count. */
            cpuid_count(0x8000001D, 3, regs);
            unsigned cache_level = (regs[0] >> 5) & 0x7;
            unsigned cores_per_L3 = ((regs[0] >> 14) & 0xfff) + 1;

            if (cache_level != 3)
               continue;

            unsigned local_core_id = apic_id & (core_count - 1);
            unsigned phys_id = (apic_id & ~(core_count - 1)) >> util_logbase2(core_count);
            unsigned local_l3_cache_index = local_core_id / util_next_power_of_two(cores_per_L3);
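            /* Pack an L3 identifier: physical package ID in bits 31:16, the
             * per-package L3 index in bits 15:1, and bit 0 always set so a
             * valid ID is never zero.
             */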
#define L3_ID(p, i) (p << 16 | i << 1 | 1)

            unsigned l3_id = L3_ID(phys_id, local_l3_cache_index);
            int idx = -1;
            for (unsigned c = 0; c < num_L3_caches; c++) {
               if (L3_found[c] == l3_id) {
                  idx = c;
                  break;
               }
            }
            if (idx == -1) {
               idx = num_L3_caches;
               L3_found[num_L3_caches++] = l3_id;
               L3_affinity_masks = realloc(L3_affinity_masks, sizeof(util_affinity_mask) * num_L3_caches);
               if (!L3_affinity_masks)
                  return;
               memset(&L3_affinity_masks[num_L3_caches - 1], 0, sizeof(util_affinity_mask));
            }
            util_cpu_caps.cpu_to_L3[i] = idx;
            L3_affinity_masks[idx][i / 32] |= cpu_bit;
         }
         mask[i / 32] = 0;
      }

      util_cpu_caps.num_L3_caches = num_L3_caches;
      util_cpu_caps.L3_affinity_mask = L3_affinity_masks;

      if (saved) {
         if (debug_get_option_dump_cpu()) {
            fprintf(stderr, "CPU <-> L3 cache mapping:\n");
            for (unsigned i = 0; i < util_cpu_caps.num_L3_caches; i++) {
               fprintf(stderr, " - L3 %u mask = ", i);
               for (int j = util_cpu_caps.max_cpus - 1; j >= 0; j -= 32)
                  fprintf(stderr, "%08x ", util_cpu_caps.L3_affinity_mask[i][j / 32]);
               fprintf(stderr, "\n");
            }
         }

         /* Restore the original affinity mask. */
         util_set_current_thread_affinity(saved_mask, NULL,
                                          util_cpu_caps.num_cpu_mask_bits);
      } else {
         if (debug_get_option_dump_cpu())
            fprintf(stderr, "Cannot set thread affinity for any thread.\n");
      }
   }
#endif
}

static void
check_cpu_caps_override(void)
{
   const char *override_cpu_caps = debug_get_option("GALLIUM_OVERRIDE_CPU_CAPS", NULL);
#if DETECT_ARCH_X86 || DETECT_ARCH_X86_64
   if (debug_get_bool_option("GALLIUM_NOSSE", false)) {
      util_cpu_caps.has_sse = 0;
   }
#ifdef DEBUG
   /* For simulating less capable machines */
   if (debug_get_bool_option("LP_FORCE_SSE2", false)) {
      util_cpu_caps.has_sse3 = 0;
   }
#endif
#endif /* DETECT_ARCH_X86 || DETECT_ARCH_X86_64 */

   if (override_cpu_caps != NULL) {
#if DETECT_ARCH_X86 || DETECT_ARCH_X86_64
      if (!strcmp(override_cpu_caps, "nosse")) {
         util_cpu_caps.has_sse = 0;
      } else if (!strcmp(override_cpu_caps, "sse")) {
         util_cpu_caps.has_sse2 = 0;
      } else if (!strcmp(override_cpu_caps, "sse2")) {
         util_cpu_caps.has_sse3 = 0;
      } else if (!strcmp(override_cpu_caps, "sse3")) {
         util_cpu_caps.has_ssse3 = 0;
      } else if (!strcmp(override_cpu_caps, "ssse3")) {
         util_cpu_caps.has_sse4_1 = 0;
      } else if (!strcmp(override_cpu_caps, "sse4.1")) {
         util_cpu_caps.has_avx = 0;
      } else if (!strcmp(override_cpu_caps, "avx")) {
         util_cpu_caps.has_avx512f = 0;
      }
#endif /* DETECT_ARCH_X86 || DETECT_ARCH_X86_64 */
   }

#if DETECT_ARCH_X86 || DETECT_ARCH_X86_64
   if (!util_cpu_caps.has_sse) {
      util_cpu_caps.has_sse2 = 0;
   }
   if (!util_cpu_caps.has_sse2) {
      util_cpu_caps.has_sse3 = 0;
   }
   if (!util_cpu_caps.has_sse3) {
      util_cpu_caps.has_ssse3 = 0;
   }
   if (!util_cpu_caps.has_ssse3) {
      util_cpu_caps.has_sse4_1 = 0;
   }
   if (!util_cpu_caps.has_sse4_1) {
      util_cpu_caps.has_sse4_2 = 0;
      util_cpu_caps.has_avx = 0;
   }
   if (!util_cpu_caps.has_avx) {
      util_cpu_caps.has_avx2 = 0;
      util_cpu_caps.has_f16c = 0;
      util_cpu_caps.has_fma = 0;
      util_cpu_caps.has_avx512f = 0;
   }
   if (!util_cpu_caps.has_avx512f) {
      /* all AVX-512 feature flags are cleared */
      util_cpu_caps.has_avx512dq = 0;
      util_cpu_caps.has_avx512ifma = 0;
      util_cpu_caps.has_avx512pf = 0;
      util_cpu_caps.has_avx512er = 0;
      util_cpu_caps.has_avx512cd = 0;
      util_cpu_caps.has_avx512bw = 0;
      util_cpu_caps.has_avx512vl = 0;
      util_cpu_caps.has_avx512vbmi = 0;
   }
#endif /* DETECT_ARCH_X86 || DETECT_ARCH_X86_64 */
}

static void
check_max_vector_bits(void)
{
   /* Leave it at 128, even when no SIMD extensions are available.
    * Really needs to be a multiple of 128 so it can fit 4 floats.
    */
   util_cpu_caps.max_vector_bits = 128;
#if DETECT_ARCH_X86 || DETECT_ARCH_X86_64
   if (util_cpu_caps.has_avx512f) {
      util_cpu_caps.max_vector_bits = 512;
   } else if (util_cpu_caps.has_avx) {
      util_cpu_caps.max_vector_bits = 256;
   }
#endif
}

void _util_cpu_detect_once(void);

void
_util_cpu_detect_once(void)
{
   int available_cpus = 0;
   int total_cpus = 0;

   memset(&util_cpu_caps, 0, sizeof util_cpu_caps);

   /* Count the number of CPUs in the system */
#if DETECT_OS_WINDOWS
   {
      SYSTEM_INFO system_info;
      GetSystemInfo(&system_info);
      available_cpus = MAX2(1, system_info.dwNumberOfProcessors);
   }
#elif DETECT_OS_UNIX
# if defined(HAS_SCHED_GETAFFINITY)
   {
      /* sched_setaffinity() can be used to further restrict the number of
       * CPUs on which the process can run. Use sched_getaffinity() to
       * determine the true number of available CPUs.
       *
       * FIXME: The Linux manual page for sched_getaffinity describes how this
       * simple implementation will fail with > 1024 CPUs, and we'll fall back
       * to the _SC_NPROCESSORS_ONLN path. Support for > 1024 CPUs can be
       * added to this path once someone has such a system for testing.
       */
      cpu_set_t affin;
      if (sched_getaffinity(getpid(), sizeof(affin), &affin) == 0)
         available_cpus = CPU_COUNT(&affin);
   }
# endif

   /* Linux, FreeBSD, DragonFly, and Mac OS X should have
    * _SC_NPROCESSORS_ONLN. NetBSD and OpenBSD should have HW_NCPUONLINE.
    * This is what FFmpeg uses on those platforms.
    */
# if DETECT_OS_BSD && defined(HW_NCPUONLINE)
   if (available_cpus == 0) {
      const int mib[] = { CTL_HW, HW_NCPUONLINE };
      int ncpu;
      size_t len = sizeof(ncpu);

      sysctl(mib, 2, &ncpu, &len, NULL, 0);
      available_cpus = ncpu;
   }
# elif defined(_SC_NPROCESSORS_ONLN)
   if (available_cpus == 0) {
      available_cpus = sysconf(_SC_NPROCESSORS_ONLN);
      if (available_cpus == ~0)
         available_cpus = 1;
   }
# elif DETECT_OS_BSD
   if (available_cpus == 0) {
      const int mib[] = { CTL_HW, HW_NCPU };
      int ncpu;
      size_t len = sizeof(ncpu);   /* sysctl() takes a size_t length */

      sysctl(mib, 2, &ncpu, &len, NULL, 0);
      available_cpus = ncpu;
   }
# endif /* DETECT_OS_BSD */

   /* Determine the maximum number of CPUs configured in the system. This is
    * used to properly set num_cpu_mask_bits below. On BSDs that don't have
    * HW_NCPUONLINE, it was not clear whether HW_NCPU is the number of
    * configured or the number of online CPUs. For that reason, prefer the
    * _SC_NPROCESSORS_CONF path on all BSDs.
    */
# if defined(_SC_NPROCESSORS_CONF)
   total_cpus = sysconf(_SC_NPROCESSORS_CONF);
   if (total_cpus == ~0)
      total_cpus = 1;
# elif DETECT_OS_BSD
   {
      const int mib[] = { CTL_HW, HW_NCPU };
      int ncpu;
      size_t len = sizeof(ncpu);

      sysctl(mib, 2, &ncpu, &len, NULL, 0);
      total_cpus = ncpu;
   }
# endif /* DETECT_OS_BSD */
#endif /* DETECT_OS_UNIX */

   util_cpu_caps.nr_cpus = MAX2(1, available_cpus);
   total_cpus = MAX2(total_cpus, util_cpu_caps.nr_cpus);

   util_cpu_caps.max_cpus = total_cpus;
   util_cpu_caps.num_cpu_mask_bits = align(total_cpus, 32);

   /* Make the fallback cacheline size nonzero so that it can be
    * safely passed to align().
    */
   util_cpu_caps.cacheline = sizeof(void *);

#if DETECT_ARCH_X86 || DETECT_ARCH_X86_64
   if (has_cpuid()) {
      uint32_t regs[4];
      uint32_t regs2[4];

      util_cpu_caps.cacheline = 32;

      /* Get max cpuid level */
      cpuid(0x00000000, regs);

      if (regs[0] >= 0x00000001) {
         unsigned int cacheline;

         cpuid(0x00000001, regs2);

         util_cpu_caps.x86_cpu_type = (regs2[0] >> 8) & 0xf;
         /* Add "extended family". */
         if (util_cpu_caps.x86_cpu_type == 0xf)
            util_cpu_caps.x86_cpu_type += ((regs2[0] >> 20) & 0xff);

         switch (util_cpu_caps.x86_cpu_type) {
         case 0x17:
            util_cpu_caps.family = CPU_AMD_ZEN1_ZEN2;
            break;
         case 0x18:
            util_cpu_caps.family = CPU_AMD_ZEN_HYGON;
            break;
         case 0x19:
            util_cpu_caps.family = CPU_AMD_ZEN3;
            break;
         default:
            if (util_cpu_caps.x86_cpu_type > 0x19)
               util_cpu_caps.family = CPU_AMD_ZEN_NEXT;
         }

         /* general feature flags */
         util_cpu_caps.has_mmx    = (regs2[3] >> 23) & 1; /* 0x0800000 */
         util_cpu_caps.has_sse    = (regs2[3] >> 25) & 1; /* 0x2000000 */
         util_cpu_caps.has_sse2   = (regs2[3] >> 26) & 1; /* 0x4000000 */
         util_cpu_caps.has_sse3   = (regs2[2] >>  0) & 1; /* 0x0000001 */
         util_cpu_caps.has_ssse3  = (regs2[2] >>  9) & 1; /* 0x0000200 */
         util_cpu_caps.has_sse4_1 = (regs2[2] >> 19) & 1;
         util_cpu_caps.has_sse4_2 = (regs2[2] >> 20) & 1;
         util_cpu_caps.has_popcnt = (regs2[2] >> 23) & 1;
         util_cpu_caps.has_avx    = ((regs2[2] >> 28) & 1) && // AVX
                                    ((regs2[2] >> 27) & 1) && // OSXSAVE
                                    ((xgetbv() & 6) == 6);    // XMM & YMM
         util_cpu_caps.has_f16c   = ((regs2[2] >> 29) & 1) && util_cpu_caps.has_avx;
         util_cpu_caps.has_fma    = ((regs2[2] >> 12) & 1) && util_cpu_caps.has_avx;
         util_cpu_caps.has_mmx2   = util_cpu_caps.has_sse; /* SSE CPUs support mmxext too */
#if DETECT_ARCH_X86_64
         util_cpu_caps.has_daz = 1;
#else
         util_cpu_caps.has_daz = util_cpu_caps.has_sse3 ||
                                 (util_cpu_caps.has_sse2 && sse2_has_daz());
#endif

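         /* CPUID.1:EBX[15:8] is the CLFLUSH line size in 8-byte units. */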
         cacheline = ((regs2[1] >> 8) & 0xFF) * 8;
         if (cacheline > 0)
            util_cpu_caps.cacheline = cacheline;
      }
      if (regs[0] >= 0x00000007) {
         uint32_t regs7[4];
         cpuid_count(0x00000007, 0x00000000, regs7);
         util_cpu_caps.has_clflushopt = (regs7[1] >> 23) & 1;
         if (util_cpu_caps.has_avx) {
            util_cpu_caps.has_avx2 = (regs7[1] >> 5) & 1;

            // check for avx512
            if (xgetbv() & (0x7 << 5)) { // XCR0 bits 5..7: opmask and upper ZMM state enabled by the OS
               util_cpu_caps.has_avx512f    = (regs7[1] >> 16) & 1;
               util_cpu_caps.has_avx512dq   = (regs7[1] >> 17) & 1;
               util_cpu_caps.has_avx512ifma = (regs7[1] >> 21) & 1;
               util_cpu_caps.has_avx512pf   = (regs7[1] >> 26) & 1;
               util_cpu_caps.has_avx512er   = (regs7[1] >> 27) & 1;
               util_cpu_caps.has_avx512cd   = (regs7[1] >> 28) & 1;
               util_cpu_caps.has_avx512bw   = (regs7[1] >> 30) & 1;
               util_cpu_caps.has_avx512vl   = (regs7[1] >> 31) & 1;
               util_cpu_caps.has_avx512vbmi = (regs7[2] >>  1) & 1;
            }
         }
      }

      if (regs[1] == 0x756e6547 && regs[2] == 0x6c65746e && regs[3] == 0x49656e69) {
         /* GenuineIntel */
         util_cpu_caps.has_intel = 1;
      }

      cpuid(0x80000000, regs);

      if (regs[0] >= 0x80000001) {

         cpuid(0x80000001, regs2);

         util_cpu_caps.has_mmx  |= (regs2[3] >> 23) & 1;
         util_cpu_caps.has_mmx2 |= (regs2[3] >> 22) & 1;
         util_cpu_caps.has_3dnow = (regs2[3] >> 31) & 1;
         util_cpu_caps.has_3dnow_ext = (regs2[3] >> 30) & 1;

         util_cpu_caps.has_xop = util_cpu_caps.has_avx &&
                                 ((regs2[2] >> 11) & 1);
      }

      if (regs[0] >= 0x80000006) {
         /* should we really do this if the clflush size above worked? */
         unsigned int cacheline;
         cpuid(0x80000006, regs2);
         cacheline = regs2[2] & 0xFF;
         if (cacheline > 0)
            util_cpu_caps.cacheline = cacheline;
      }
   }
#endif /* DETECT_ARCH_X86 || DETECT_ARCH_X86_64 */

#if DETECT_ARCH_ARM || DETECT_ARCH_AARCH64
   check_os_arm_support();
#endif

#if DETECT_ARCH_PPC
   check_os_altivec_support();
#endif /* DETECT_ARCH_PPC */

#if DETECT_ARCH_MIPS64
   check_os_mips64_support();
#endif /* DETECT_ARCH_MIPS64 */

#if DETECT_ARCH_S390
   util_cpu_caps.family = CPU_S390X;
#endif

   check_cpu_caps_override();

   /* max_vector_bits should be checked after cpu caps override */
   check_max_vector_bits();

   get_cpu_topology();

   if (debug_get_option_dump_cpu()) {
      printf("util_cpu_caps.nr_cpus = %u\n", util_cpu_caps.nr_cpus);

      printf("util_cpu_caps.x86_cpu_type = %u\n", util_cpu_caps.x86_cpu_type);
      printf("util_cpu_caps.cacheline = %u\n", util_cpu_caps.cacheline);

      printf("util_cpu_caps.has_mmx = %u\n", util_cpu_caps.has_mmx);
      printf("util_cpu_caps.has_mmx2 = %u\n", util_cpu_caps.has_mmx2);
      printf("util_cpu_caps.has_sse = %u\n", util_cpu_caps.has_sse);
      printf("util_cpu_caps.has_sse2 = %u\n", util_cpu_caps.has_sse2);
      printf("util_cpu_caps.has_sse3 = %u\n", util_cpu_caps.has_sse3);
      printf("util_cpu_caps.has_ssse3 = %u\n", util_cpu_caps.has_ssse3);
      printf("util_cpu_caps.has_sse4_1 = %u\n", util_cpu_caps.has_sse4_1);
      printf("util_cpu_caps.has_sse4_2 = %u\n", util_cpu_caps.has_sse4_2);
      printf("util_cpu_caps.has_avx = %u\n", util_cpu_caps.has_avx);
      printf("util_cpu_caps.has_avx2 = %u\n", util_cpu_caps.has_avx2);
      printf("util_cpu_caps.has_f16c = %u\n", util_cpu_caps.has_f16c);
      printf("util_cpu_caps.has_popcnt = %u\n", util_cpu_caps.has_popcnt);
      printf("util_cpu_caps.has_3dnow = %u\n", util_cpu_caps.has_3dnow);
      printf("util_cpu_caps.has_3dnow_ext = %u\n", util_cpu_caps.has_3dnow_ext);
      printf("util_cpu_caps.has_xop = %u\n", util_cpu_caps.has_xop);
      printf("util_cpu_caps.has_altivec = %u\n", util_cpu_caps.has_altivec);
      printf("util_cpu_caps.has_vsx = %u\n", util_cpu_caps.has_vsx);
      printf("util_cpu_caps.has_neon = %u\n", util_cpu_caps.has_neon);
      printf("util_cpu_caps.has_msa = %u\n", util_cpu_caps.has_msa);
      printf("util_cpu_caps.has_daz = %u\n", util_cpu_caps.has_daz);
      printf("util_cpu_caps.has_avx512f = %u\n", util_cpu_caps.has_avx512f);
      printf("util_cpu_caps.has_avx512dq = %u\n", util_cpu_caps.has_avx512dq);
      printf("util_cpu_caps.has_avx512ifma = %u\n", util_cpu_caps.has_avx512ifma);
      printf("util_cpu_caps.has_avx512pf = %u\n", util_cpu_caps.has_avx512pf);
      printf("util_cpu_caps.has_avx512er = %u\n", util_cpu_caps.has_avx512er);
      printf("util_cpu_caps.has_avx512cd = %u\n", util_cpu_caps.has_avx512cd);
      printf("util_cpu_caps.has_avx512bw = %u\n", util_cpu_caps.has_avx512bw);
      printf("util_cpu_caps.has_avx512vl = %u\n", util_cpu_caps.has_avx512vl);
      printf("util_cpu_caps.has_avx512vbmi = %u\n", util_cpu_caps.has_avx512vbmi);
      printf("util_cpu_caps.has_clflushopt = %u\n", util_cpu_caps.has_clflushopt);
      printf("util_cpu_caps.num_L3_caches = %u\n", util_cpu_caps.num_L3_caches);
      printf("util_cpu_caps.num_cpu_mask_bits = %u\n", util_cpu_caps.num_cpu_mask_bits);
   }
   _util_cpu_caps_state.caps = util_cpu_caps;

   /* This must happen at the end as it's used to guard everything else */
   p_atomic_set(&_util_cpu_caps_state.detect_done, 1);
}