// Copyright 2012 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif

#include "base/cpu.h"

#include "base/containers/contains.h"
#include "base/logging.h"
#include "base/memory/protected_memory_buildflags.h"
#include "base/strings/string_util.h"
#include "base/test/gtest_util.h"
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"

// Tests whether we can run extended instructions represented by the CPU
// information. This test actually executes some extended instructions (such
// as MMX, SSE, etc.) supported by the CPU and verifies that they run without
// raising "undefined instruction" exceptions. That is, the test succeeds if
// it finishes without crashing.
TEST(CPU, RunExtendedInstructions) {
  // Retrieve the CPU information.
  base::CPU cpu;
#if defined(ARCH_CPU_X86_FAMILY)
  ASSERT_TRUE(cpu.has_mmx());
  ASSERT_TRUE(cpu.has_sse());
  ASSERT_TRUE(cpu.has_sse2());
  ASSERT_TRUE(cpu.has_sse3());

  // Execute an MMX instruction.
  __asm__ __volatile__("emms\n" : : : "mm0");

  // Execute an SSE instruction.
  __asm__ __volatile__("xorps %%xmm0, %%xmm0\n" : : : "xmm0");

  // Execute an SSE 2 instruction.
  __asm__ __volatile__("psrldq $0, %%xmm0\n" : : : "xmm0");

  // Execute an SSE 3 instruction.
  __asm__ __volatile__("addsubpd %%xmm0, %%xmm0\n" : : : "xmm0");

  if (cpu.has_ssse3()) {
    // Execute a Supplemental SSE 3 instruction.
    __asm__ __volatile__("psignb %%xmm0, %%xmm0\n" : : : "xmm0");
  }

  if (cpu.has_sse41()) {
    // Execute an SSE 4.1 instruction.
    __asm__ __volatile__("pmuldq %%xmm0, %%xmm0\n" : : : "xmm0");
  }

  if (cpu.has_sse42()) {
    // Execute an SSE 4.2 instruction.
    __asm__ __volatile__("crc32 %%eax, %%eax\n" : : : "eax");
  }

  if (cpu.has_popcnt()) {
    // Execute a POPCNT instruction.
    __asm__ __volatile__("popcnt %%eax, %%eax\n" : : : "eax");
  }

  if (cpu.has_avx()) {
    // Execute an AVX instruction.
    __asm__ __volatile__("vzeroupper\n" : : : "xmm0");
  }

  if (cpu.has_fma3()) {
    // Execute an FMA3 instruction.
    __asm__ __volatile__("vfmadd132ps %%xmm0, %%xmm0, %%xmm0\n" : : : "xmm0");
  }

  if (cpu.has_avx2()) {
    // Execute an AVX 2 instruction.
    __asm__ __volatile__("vpunpcklbw %%ymm0, %%ymm0, %%ymm0\n" : : : "xmm0");
  }


  if (cpu.has_avx_vnni()) {
    // Execute an AVX VNNI instruction. {vex} prevents EVEX encoding, which
    // would shift it to AVX512 VNNI.
    __asm__ __volatile__("%{vex%} vpdpbusd %%ymm0, %%ymm0, %%ymm0\n"
                         :
                         :
                         : "ymm0");
  }

  if (cpu.has_avx512_f()) {
    // Execute an AVX-512 Foundation (F) instruction.
    __asm__ __volatile__("vpxorq %%zmm0, %%zmm0, %%zmm0\n" : : : "zmm0");
  }

  if (cpu.has_avx512_bw()) {
    // Execute an AVX-512 Byte & Word (BW) instruction.
    __asm__ __volatile__("vpabsw %%zmm0, %%zmm0\n" : : : "zmm0");
  }

  if (cpu.has_avx512_vnni()) {
    // Execute an AVX-512 VNNI instruction.
    __asm__ __volatile__("vpdpbusd %%zmm0, %%zmm0, %%zmm0\n" : : : "zmm0");
  }

  if (cpu.has_pku()) {
    // Execute an RDPKRU instruction, emitted as raw bytes (0f 01 ee) in case
    // the assembler does not know the mnemonic. RDPKRU requires ECX = 0 and
    // returns the PKRU register in EAX.
    uint32_t pkru;
    __asm__ __volatile__(".byte 0x0f,0x01,0xee\n"
                         : "=a"(pkru)
                         : "c"(0), "d"(0));
  }
#endif  // defined(ARCH_CPU_X86_FAMILY)

#if defined(ARCH_CPU_ARM64)
  // Check that the CPU is correctly reporting support for the Armv8.5-A
  // memory tagging extension. Unlike BTI and Pointer Authentication, the MTE
  // instructions are not encoded in NOP space, so using them incorrectly
  // raises SIGILL on older cores. This test demonstrates how MTE should be
  // used and that the approach works.
  if (cpu.has_mte()) {
#if !defined(__ARM_FEATURE_MEMORY_TAGGING)
    // In this section, we're running on an MTE-compatible core, but we're
    // building this file without MTE support. Fail this test to indicate that
    // there's a problem with the base/ build configuration.
    GTEST_FAIL()
        << "MTE support detected (but base/ built without MTE support)";
#else
    char ptr[32];
    uint64_t val;
    // Execute a trivial MTE instruction. Normally, MTE should be used via the
    // intrinsics documented at
    // https://developer.arm.com/documentation/101028/0012/10--Memory-tagging-intrinsics.
    // This test instead uses the irg (Insert Random Tag) instruction directly
    // to make sure that it's not optimized out by the compiler.
    __asm__ __volatile__("irg %0, %1" : "=r"(val) : "r"(ptr));
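    // For reference, a rough non-test equivalent using the ACLE intrinsics
    // (assuming <arm_acle.h> and an MTE-enabled toolchain) might look like:
    //   void* tagged = __arm_mte_create_random_tag(ptr, /*excluded=*/0);
    // but the inline assembly above avoids relying on compiler support for
    // those intrinsics.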
#endif  // __ARM_FEATURE_MEMORY_TAGGING
  }
#endif  // ARCH_CPU_ARM64
}

// For https://crbug.com/249713
TEST(CPU, BrandAndVendorContainsNoNUL) {
  base::CPU cpu;
  EXPECT_FALSE(base::Contains(cpu.cpu_brand(), '\0'));
  EXPECT_FALSE(base::Contains(cpu.vendor_name(), '\0'));
}

#if defined(ARCH_CPU_X86_FAMILY)
// Tests that we compute the correct CPU family and model based on the vendor
// and CPUID signature.
TEST(CPU, X86FamilyAndModel) {
  base::internal::X86ModelInfo info;

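  // For reference: a CPUID signature (leaf 1, EAX) decodes as stepping =
  // bits [3:0], model = bits [7:4], family = bits [11:8], ext_model =
  // bits [19:16] and ext_family = bits [27:20]. When family is 6 or 15, the
  // effective model is (ext_model << 4) | model; when family is 15, the
  // effective family is family + ext_family. E.g. for 0x000406e3 below:
  // family = 6, model = 0xe, ext_model = 4, so the effective model is
  // 0x4e = 78.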
  // Check with an Intel Skylake signature.
  info = base::internal::ComputeX86FamilyAndModel("GenuineIntel", 0x000406e3);
  EXPECT_EQ(info.family, 6);
  EXPECT_EQ(info.model, 78);
  EXPECT_EQ(info.ext_family, 0);
  EXPECT_EQ(info.ext_model, 4);

  // Check with an Intel Airmont signature.
  info = base::internal::ComputeX86FamilyAndModel("GenuineIntel", 0x000406c2);
  EXPECT_EQ(info.family, 6);
  EXPECT_EQ(info.model, 76);
  EXPECT_EQ(info.ext_family, 0);
  EXPECT_EQ(info.ext_model, 4);

  // Check with an Intel Prescott signature.
  info = base::internal::ComputeX86FamilyAndModel("GenuineIntel", 0x00000f31);
  EXPECT_EQ(info.family, 15);
  EXPECT_EQ(info.model, 3);
  EXPECT_EQ(info.ext_family, 0);
  EXPECT_EQ(info.ext_model, 0);

  // Check with an AMD Excavator signature.
  info = base::internal::ComputeX86FamilyAndModel("AuthenticAMD", 0x00670f00);
  EXPECT_EQ(info.family, 21);
  EXPECT_EQ(info.model, 112);
  EXPECT_EQ(info.ext_family, 6);
  EXPECT_EQ(info.ext_model, 7);
}
#endif  // defined(ARCH_CPU_X86_FAMILY)

#if BUILDFLAG(PROTECTED_MEMORY_ENABLED)
TEST(CPUDeathTest, VerifyModifyingCPUInstanceNoAllocationCrashes) {
  const base::CPU& cpu = base::CPU::GetInstanceNoAllocation();
  uint8_t* const bytes =
      const_cast<uint8_t*>(reinterpret_cast<const uint8_t*>(&cpu));

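  // With PROTECTED_MEMORY_ENABLED, the instance returned by
  // GetInstanceNoAllocation() is expected to live in read-only protected
  // memory, so any direct write to it should terminate the process.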
  // We flip single bits and expect the process to die immediately. Checks are
  // limited to every 15th byte; otherwise the test runs into timeouts.
  for (size_t byte_index = 0; byte_index < sizeof(cpu); byte_index += 15) {
    const size_t local_bit_index = byte_index % 8;
    EXPECT_CHECK_DEATH_WITH(bytes[byte_index] ^= (0x01 << local_bit_index), "");
  }
}

#endif  // BUILDFLAG(PROTECTED_MEMORY_ENABLED)