/*
 *  Copyright 2011 The LibYuv Project Authors. All rights reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "libyuv/cpu_id.h"

#if defined(_MSC_VER)
#include <intrin.h>  // For __cpuidex()
#endif
#if !defined(__pnacl__) && !defined(__CLR_VER) &&                           \
    !defined(__native_client__) && (defined(_M_IX86) || defined(_M_X64)) && \
    defined(_MSC_FULL_VER) && (_MSC_FULL_VER >= 160040219)
#include <immintrin.h>  // For _xgetbv()
#endif

// For ArmCpuCaps(), but unit tested on all platforms.
#include <stdio.h>  // For fopen()
#include <string.h>

#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif

// For functions that use the stack and have runtime checks for overflow,
// use SAFEBUFFERS to avoid additional check.
#if defined(_MSC_FULL_VER) && (_MSC_FULL_VER >= 160040219) && \
    !defined(__clang__)
#define SAFEBUFFERS __declspec(safebuffers)
#else
#define SAFEBUFFERS
#endif

// cpu_info_ variable for SIMD instruction sets detected.
LIBYUV_API int cpu_info_ = 0;

// Low level cpuid for X86.
#if (defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || \
     defined(__x86_64__)) &&                                     \
    !defined(__pnacl__) && !defined(__CLR_VER)
LIBYUV_API
void CpuId(int info_eax, int info_ecx, int* cpu_info) {
#if defined(_MSC_VER)
// Visual C version uses intrinsic or inline x86 assembly.
#if defined(_MSC_FULL_VER) && (_MSC_FULL_VER >= 160040219)
  __cpuidex(cpu_info, info_eax, info_ecx);
#elif defined(_M_IX86)
  __asm {
    mov        eax, info_eax
    mov        ecx, info_ecx
    mov        edi, cpu_info
    cpuid
    mov        [edi], eax
    mov        [edi + 4], ebx
    mov        [edi + 8], ecx
    mov        [edi + 12], edx
  }
#else  // Visual C but not x86
  if (info_ecx == 0) {
    __cpuid(cpu_info, info_eax);
  } else {
    cpu_info[3] = cpu_info[2] = cpu_info[1] = cpu_info[0] = 0u;
  }
#endif
// GCC version uses inline x86 assembly.
#else  // defined(_MSC_VER)
  int info_ebx, info_edx;
  asm volatile(
#if defined(__i386__) && defined(__PIC__)
      // Preserve ebx for fpic 32 bit.
      "mov         %%ebx, %%edi                  \n"
      "cpuid                                     \n"
      "xchg        %%edi, %%ebx                  \n"
      : "=D"(info_ebx),
#else
      "cpuid                                     \n"
      : "=b"(info_ebx),
#endif  //  defined( __i386__) && defined(__PIC__)
        "+a"(info_eax), "+c"(info_ecx), "=d"(info_edx));
  cpu_info[0] = info_eax;
  cpu_info[1] = info_ebx;
  cpu_info[2] = info_ecx;
  cpu_info[3] = info_edx;
#endif  // defined(_MSC_VER)
}
#else  // (defined(_M_IX86) || defined(_M_X64) ...
LIBYUV_API
void CpuId(int eax, int ecx, int* cpu_info) {
  (void)eax;
  (void)ecx;
  cpu_info[0] = cpu_info[1] = cpu_info[2] = cpu_info[3] = 0;
}
#endif
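
// Illustrative sketch of caller usage (not upstream code; local names "regs"
// and "vendor" are hypothetical): CpuId() leaf 0 returns the 12-byte vendor
// id in EBX, EDX, ECX, which map to cpu_info[1], cpu_info[3] and cpu_info[2]
// with the register ordering used above.
//   int regs[4];
//   char vendor[13];
//   CpuId(0, 0, regs);
//   memcpy(vendor + 0, &regs[1], 4);  // EBX
//   memcpy(vendor + 4, &regs[3], 4);  // EDX
//   memcpy(vendor + 8, &regs[2], 4);  // ECX
//   vendor[12] = '\0';  // e.g. "GenuineIntel" or "AuthenticAMD"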

// For VS2010 and earlier emit can be used:
//   _asm _emit 0x0f _asm _emit 0x01 _asm _emit 0xd0  // For VS2010 and earlier.
//  __asm {
//    xor        ecx, ecx    // xcr 0
//    xgetbv
//    mov        xcr0, eax
//  }
// For VS2013 and earlier 32 bit, the _xgetbv(0) optimizer produces bad code.
// https://code.google.com/p/libyuv/issues/detail?id=529
#if defined(_M_IX86) && defined(_MSC_VER) && (_MSC_VER < 1900)
#pragma optimize("g", off)
#endif
#if (defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || \
     defined(__x86_64__)) &&                                     \
    !defined(__pnacl__) && !defined(__CLR_VER) && !defined(__native_client__)
// X86 CPUs have xgetbv to detect whether the OS saves the high parts of the
// ymm registers.
static int GetXCR0() {
  int xcr0 = 0;
#if defined(_MSC_FULL_VER) && (_MSC_FULL_VER >= 160040219)
  xcr0 = (int)_xgetbv(0);  // VS2010 SP1 required.  NOLINT
#elif defined(__i386__) || defined(__x86_64__)
  asm(".byte 0x0f, 0x01, 0xd0" : "=a"(xcr0) : "c"(0) : "%edx");
#endif  // defined(__i386__) || defined(__x86_64__)
  return xcr0;
}
#else
// xgetbv unavailable to query for OSSave support.  Return 0.
#define GetXCR0() 0
#endif  // defined(_M_IX86) || defined(_M_X64) ..
// Return optimization to previous setting.
#if defined(_M_IX86) && defined(_MSC_VER) && (_MSC_VER < 1900)
#pragma optimize("g", on)
#endif
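
// Note on the XCR0 masks used by GetCpuFlags() below: bit 1 covers SSE/XMM
// state and bit 2 covers AVX/YMM state, so (GetXCR0() & 6) == 6 confirms the
// OS saves both before AVX is reported. Bits 5-7 cover the AVX-512 opmask and
// upper ZMM state, which the 0xe0 mask checks before reporting AVX-512.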

// Based on libvpx arm_cpudetect.c
// For Arm, but public to allow testing on any CPU
LIBYUV_API SAFEBUFFERS int ArmCpuCaps(const char* cpuinfo_name) {
  char cpuinfo_line[512];
  FILE* f = fopen(cpuinfo_name, "re");
  if (!f) {
    // Assume Neon if /proc/cpuinfo is unavailable.
    // This will occur for Chrome sandbox for Pepper or Render process.
    return kCpuHasNEON;
  }
  memset(cpuinfo_line, 0, sizeof(cpuinfo_line));
  while (fgets(cpuinfo_line, sizeof(cpuinfo_line), f)) {
    if (memcmp(cpuinfo_line, "Features", 8) == 0) {
      char* p = strstr(cpuinfo_line, " neon");
      if (p && (p[5] == ' ' || p[5] == '\n')) {
        fclose(f);
        return kCpuHasNEON;
      }
      // aarch64 uses asimd for Neon.
      p = strstr(cpuinfo_line, " asimd");
      if (p) {
        fclose(f);
        return kCpuHasNEON;
      }
    }
  }
  fclose(f);
  return 0;
}
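
// Illustrative, hypothetical /proc/cpuinfo content that ArmCpuCaps() matches:
// on 32-bit Arm the "neon" feature, e.g.
//   Features : half thumb fastmult vfp edsp neon vfpv3 tls
// and on aarch64 the equivalent "asimd" feature, e.g.
//   Features : fp asimd evtstrm aes pmull sha1 sha2 crc32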

LIBYUV_API SAFEBUFFERS int RiscvCpuCaps(const char* cpuinfo_name) {
  char cpuinfo_line[512];
  int flag = 0;
  FILE* f = fopen(cpuinfo_name, "re");
  if (!f) {
#if defined(__riscv_vector)
    // Assume RVV if /proc/cpuinfo is unavailable.
    // This will occur for Chrome sandbox for Pepper or Render process.
    return kCpuHasRVV;
#else
    return 0;
#endif
  }
  memset(cpuinfo_line, 0, sizeof(cpuinfo_line));
  while (fgets(cpuinfo_line, sizeof(cpuinfo_line), f)) {
    if (memcmp(cpuinfo_line, "isa", 3) == 0) {
      // ISA string must begin with rv64{i,e,g} for a 64-bit processor.
      char* isa = strstr(cpuinfo_line, "rv64");
      if (isa) {
        size_t isa_len = strlen(isa);
        char* extensions;
        size_t extensions_len = 0;
        size_t std_isa_len;
        // Remove the new-line character at the end of the string.
        if (isa[isa_len - 1] == '\n') {
          isa[--isa_len] = '\0';
        }
        // 5 ISA characters
        if (isa_len < 5) {
          fclose(f);
          return 0;
        }
        // Skip {i,e,g} canonical checking.
        // Skip rvxxx
        isa += 5;
        // Find the very first occurrence of 's', 'x' or 'z'.
        // To detect multi-letter standard, non-standard, and
        // supervisor-level extensions.
        extensions = strpbrk(isa, "zxs");
        if (extensions) {
          // Multi-letter extensions are separated by a single underscore
          // as described in RISC-V User-Level ISA V2.2.
          char* ext = strtok(extensions, "_");
          extensions_len = strlen(extensions);
          while (ext) {
            // Search for the ZVFH (Vector FP16) extension.
            if (!strcmp(ext, "zvfh")) {
              flag |= kCpuHasRVVZVFH;
            }
            ext = strtok(NULL, "_");
          }
        }
        std_isa_len = isa_len - extensions_len - 5;
        // Detect the v in the standard single-letter extensions.
        if (memchr(isa, 'v', std_isa_len)) {
          // RVV implies the F extension.
          flag |= kCpuHasRVV;
        }
      }
    }
#if defined(__riscv_vector)
    // Assume RVV if /proc/cpuinfo is from x86 host running QEMU.
    else if ((memcmp(cpuinfo_line, "vendor_id\t: GenuineIntel", 24) == 0) ||
             (memcmp(cpuinfo_line, "vendor_id\t: AuthenticAMD", 24) == 0)) {
      fclose(f);
      return kCpuHasRVV;
    }
#endif
  }
  fclose(f);
  return flag;
}
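
// Illustrative, hypothetical /proc/cpuinfo content: for an "isa" line such as
//   isa  : rv64imafdcv_zicsr_zifencei_zvfh
// the single-letter 'v' in the standard part sets kCpuHasRVV, and the "zvfh"
// multi-letter extension sets kCpuHasRVVZVFH.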

LIBYUV_API SAFEBUFFERS int MipsCpuCaps(const char* cpuinfo_name) {
  char cpuinfo_line[512];
  int flag = 0;
  FILE* f = fopen(cpuinfo_name, "re");
  if (!f) {
    // Assume nothing if /proc/cpuinfo is unavailable.
    // This will occur for Chrome sandbox for Pepper or Render process.
    return 0;
  }
  memset(cpuinfo_line, 0, sizeof(cpuinfo_line));
  while (fgets(cpuinfo_line, sizeof(cpuinfo_line), f)) {
    if (memcmp(cpuinfo_line, "cpu model", 9) == 0) {
      // Workaround early kernel without MSA in ASEs line.
      if (strstr(cpuinfo_line, "Loongson-2K")) {
        flag |= kCpuHasMSA;
      }
    }
    if (memcmp(cpuinfo_line, "ASEs implemented", 16) == 0) {
      if (strstr(cpuinfo_line, "msa")) {
        flag |= kCpuHasMSA;
      }
      // ASEs is the last line, so we can break here.
      break;
    }
  }
  fclose(f);
  return flag;
}
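
// Illustrative, hypothetical /proc/cpuinfo content: an "ASEs implemented"
// line such as
//   ASEs implemented : vz msa
// sets kCpuHasMSA; a "cpu model" line containing "Loongson-2K" also sets it,
// covering early kernels that omit msa from the ASEs line.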

#define LOONGARCH_CFG2 0x2
#define LOONGARCH_CFG2_LSX (1 << 6)
#define LOONGARCH_CFG2_LASX (1 << 7)

#if defined(__loongarch__)
LIBYUV_API SAFEBUFFERS int LoongarchCpuCaps(void) {
  int flag = 0;
  uint32_t cfg2 = 0;

  __asm__ volatile("cpucfg %0, %1 \n\t" : "+&r"(cfg2) : "r"(LOONGARCH_CFG2));

  if (cfg2 & LOONGARCH_CFG2_LSX)
    flag |= kCpuHasLSX;

  if (cfg2 & LOONGARCH_CFG2_LASX)
    flag |= kCpuHasLASX;
  return flag;
}
#endif

static SAFEBUFFERS int GetCpuFlags(void) {
  int cpu_info = 0;
#if !defined(__pnacl__) && !defined(__CLR_VER) &&                   \
    (defined(__x86_64__) || defined(_M_X64) || defined(__i386__) || \
     defined(_M_IX86))
  int cpu_info0[4] = {0, 0, 0, 0};
  int cpu_info1[4] = {0, 0, 0, 0};
  int cpu_info7[4] = {0, 0, 0, 0};
  int cpu_einfo7[4] = {0, 0, 0, 0};
  CpuId(0, 0, cpu_info0);
  CpuId(1, 0, cpu_info1);
  if (cpu_info0[0] >= 7) {
    CpuId(7, 0, cpu_info7);
    CpuId(7, 1, cpu_einfo7);
  }
  cpu_info = kCpuHasX86 | ((cpu_info1[3] & 0x04000000) ? kCpuHasSSE2 : 0) |
             ((cpu_info1[2] & 0x00000200) ? kCpuHasSSSE3 : 0) |
             ((cpu_info1[2] & 0x00080000) ? kCpuHasSSE41 : 0) |
             ((cpu_info1[2] & 0x00100000) ? kCpuHasSSE42 : 0) |
             ((cpu_info7[1] & 0x00000200) ? kCpuHasERMS : 0);

  // AVX requires the OS to save YMM registers.
  if (((cpu_info1[2] & 0x1c000000) == 0x1c000000) &&  // AVX and OSXSave
      ((GetXCR0() & 6) == 6)) {  // Test OS saves YMM registers
    cpu_info |= kCpuHasAVX | ((cpu_info7[1] & 0x00000020) ? kCpuHasAVX2 : 0) |
                ((cpu_info1[2] & 0x00001000) ? kCpuHasFMA3 : 0) |
                ((cpu_info1[2] & 0x20000000) ? kCpuHasF16C : 0) |
                ((cpu_einfo7[0] & 0x00000010) ? kCpuHasAVXVNNI : 0) |
                ((cpu_einfo7[3] & 0x00000010) ? kCpuHasAVXVNNIINT8 : 0);

    // Detect AVX512bw
    if ((GetXCR0() & 0xe0) == 0xe0) {
      cpu_info |= (cpu_info7[1] & 0x40000000) ? kCpuHasAVX512BW : 0;
      cpu_info |= (cpu_info7[1] & 0x80000000) ? kCpuHasAVX512VL : 0;
      cpu_info |= (cpu_info7[2] & 0x00000002) ? kCpuHasAVX512VBMI : 0;
      cpu_info |= (cpu_info7[2] & 0x00000040) ? kCpuHasAVX512VBMI2 : 0;
      cpu_info |= (cpu_info7[2] & 0x00000800) ? kCpuHasAVX512VNNI : 0;
      cpu_info |= (cpu_info7[2] & 0x00001000) ? kCpuHasAVX512VBITALG : 0;
      cpu_info |= (cpu_einfo7[3] & 0x00080000) ? kCpuHasAVX10 : 0;
    }
  }
#endif
#if defined(__mips__) && defined(__linux__)
  cpu_info = MipsCpuCaps("/proc/cpuinfo");
  cpu_info |= kCpuHasMIPS;
#endif
#if defined(__loongarch__) && defined(__linux__)
  cpu_info = LoongarchCpuCaps();
  cpu_info |= kCpuHasLOONGARCH;
#endif
#if defined(__arm__) || defined(__aarch64__)
// gcc -mfpu=neon defines __ARM_NEON__.
// __ARM_NEON__ generates code that requires Neon.  NaCL also requires Neon.
// On Linux, /proc/cpuinfo can be tested; without it, assume Neon.
#if defined(__ARM_NEON__) || defined(__native_client__) || !defined(__linux__)
  cpu_info = kCpuHasNEON;
// For aarch64 (arm64), the /proc/cpuinfo feature list is incomplete, e.g. it
// has no neon flag, so Neon is hard coded on for aarch64.
#endif
#if defined(__aarch64__)
  cpu_info = kCpuHasNEON;
#else
  // On Linux arm, parse the /proc/cpuinfo text file to detect Neon.
  cpu_info = ArmCpuCaps("/proc/cpuinfo");
#endif
  cpu_info |= kCpuHasARM;
#endif  // __arm__
#if defined(__riscv) && defined(__linux__)
  cpu_info = RiscvCpuCaps("/proc/cpuinfo");
  cpu_info |= kCpuHasRISCV;
#endif  // __riscv
  cpu_info |= kCpuInitialized;
  return cpu_info;
}

// Note that use of this function is not thread safe.
LIBYUV_API
int MaskCpuFlags(int enable_flags) {
  int cpu_info = GetCpuFlags() & enable_flags;
  SetCpuFlags(cpu_info);
  return cpu_info;
}

LIBYUV_API
int InitCpuFlags(void) {
  return MaskCpuFlags(-1);
}
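
// Illustrative sketch of typical caller usage (assumes TestCpuFlag() and the
// kCpuHas* constants declared in libyuv/cpu_id.h):
//   if (TestCpuFlag(kCpuHasNEON)) {
//     // Take the Neon-optimized path.
//   }
//   MaskCpuFlags(~kCpuHasSSSE3);  // e.g. disable just SSSE3 for testing.
//   MaskCpuFlags(-1);             // Re-enable all detected optimizations.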

#ifdef __cplusplus
}  // extern "C"
}  // namespace libyuv
#endif