/external/zlib/ |
D | crc32_simd.c |
      60  x5 = _mm_clmulepi64_si128(x1, x0, 0x00);  in crc32_sse42_simd_()
      61  x6 = _mm_clmulepi64_si128(x2, x0, 0x00);  in crc32_sse42_simd_()
      62  x7 = _mm_clmulepi64_si128(x3, x0, 0x00);  in crc32_sse42_simd_()
      63  x8 = _mm_clmulepi64_si128(x4, x0, 0x00);  in crc32_sse42_simd_()
      65  x1 = _mm_clmulepi64_si128(x1, x0, 0x11);  in crc32_sse42_simd_()
      66  x2 = _mm_clmulepi64_si128(x2, x0, 0x11);  in crc32_sse42_simd_()
      67  x3 = _mm_clmulepi64_si128(x3, x0, 0x11);  in crc32_sse42_simd_()
      68  x4 = _mm_clmulepi64_si128(x4, x0, 0x11);  in crc32_sse42_simd_()
      94  x5 = _mm_clmulepi64_si128(x1, x0, 0x00);  in crc32_sse42_simd_()
      95  x1 = _mm_clmulepi64_si128(x1, x0, 0x11);  in crc32_sse42_simd_()
      [all …]
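
The hits above are zlib's 4-way CRC32 folding loop. The immediate byte of _mm_clmulepi64_si128 selects which 64-bit half of each operand is carry-less multiplied: bit 0 picks the half of the first operand, bit 4 the half of the second, so 0x00 is low*low and 0x11 is high*high. A minimal sketch of one such fold step, using a hypothetical constant pair k rather than zlib's actual polynomial constants:

    #include <emmintrin.h>   /* SSE2: _mm_xor_si128 */
    #include <wmmintrin.h>   /* PCLMUL: _mm_clmulepi64_si128 */

    /* Fold the 128-bit CRC state forward over the next 128 bits of input.
     * k holds a folding-constant pair (k1 in the low qword, k2 in the high
     * qword); the actual values depend on the CRC polynomial and the fold
     * distance and are omitted here. */
    static __m128i fold_128(__m128i state, __m128i k, __m128i next_data)
    {
        __m128i lo = _mm_clmulepi64_si128(state, k, 0x00); /* state.lo * k1 */
        __m128i hi = _mm_clmulepi64_si128(state, k, 0x11); /* state.hi * k2 */
        return _mm_xor_si128(_mm_xor_si128(lo, hi), next_data);
    }

Build with -mpclmul (plus -msse2 on 32-bit targets). As the x1..x4 registers in the hits show, crc32_simd.c runs four of these chains in parallel to hide the multiply latency, then reduces them at the end of the buffer.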
|
D | crc_folding.c |
      70  *xmm_crc0 = _mm_clmulepi64_si128(*xmm_crc0, xmm_fold4, 0x01);  in fold_1()
      71  *xmm_crc3 = _mm_clmulepi64_si128(*xmm_crc3, xmm_fold4, 0x10);  in fold_1()
      97  *xmm_crc1 = _mm_clmulepi64_si128(*xmm_crc1, xmm_fold4, 0x01);  in fold_2()
      98  *xmm_crc3 = _mm_clmulepi64_si128(*xmm_crc3, xmm_fold4, 0x10);  in fold_2()
      104 *xmm_crc0 = _mm_clmulepi64_si128(*xmm_crc0, xmm_fold4, 0x01);  in fold_2()
      105 *xmm_crc2 = _mm_clmulepi64_si128(*xmm_crc2, xmm_fold4, 0x10);  in fold_2()
      130 *xmm_crc2 = _mm_clmulepi64_si128(*xmm_crc2, xmm_fold4, 0x01);  in fold_3()
      131 *xmm_crc3 = _mm_clmulepi64_si128(*xmm_crc3, xmm_fold4, 0x10);  in fold_3()
      137 *xmm_crc1 = _mm_clmulepi64_si128(*xmm_crc1, xmm_fold4, 0x01);  in fold_3()
      138 *xmm_crc2 = _mm_clmulepi64_si128(*xmm_crc2, xmm_fold4, 0x10);  in fold_3()
      [all …]
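
crc_folding.c keeps a four-register CRC state and pairs the immediates 0x01 and 0x10 against a single constant register xmm_fold4: crc.hi * k.lo XORed with crc.lo * k.hi. A sketch of the per-register fold that fold_1/fold_2/fold_3 apply to each displaced register before rotating the pipeline (again with the constant left unspecified):

    #include <emmintrin.h>
    #include <wmmintrin.h>

    /* One register's worth of the fold-by-4 step: the displaced register is
     * multiplied against both halves of xmm_fold4 and the partial products
     * are XORed together. fold_N then rotates the four xmm_crc registers
     * by N positions and stores this result in the vacated slot. */
    static __m128i fold_reg(__m128i crc, __m128i xmm_fold4)
    {
        __m128i hi = _mm_clmulepi64_si128(crc, xmm_fold4, 0x01); /* crc.hi * k.lo */
        __m128i lo = _mm_clmulepi64_si128(crc, xmm_fold4, 0x10); /* crc.lo * k.hi */
        return _mm_xor_si128(hi, lo);
    }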
|
/external/v8/third_party/zlib/ |
D | crc32_simd.c |
      60  x5 = _mm_clmulepi64_si128(x1, x0, 0x00);  in crc32_sse42_simd_()
      61  x6 = _mm_clmulepi64_si128(x2, x0, 0x00);  in crc32_sse42_simd_()
      62  x7 = _mm_clmulepi64_si128(x3, x0, 0x00);  in crc32_sse42_simd_()
      63  x8 = _mm_clmulepi64_si128(x4, x0, 0x00);  in crc32_sse42_simd_()
      65  x1 = _mm_clmulepi64_si128(x1, x0, 0x11);  in crc32_sse42_simd_()
      66  x2 = _mm_clmulepi64_si128(x2, x0, 0x11);  in crc32_sse42_simd_()
      67  x3 = _mm_clmulepi64_si128(x3, x0, 0x11);  in crc32_sse42_simd_()
      68  x4 = _mm_clmulepi64_si128(x4, x0, 0x11);  in crc32_sse42_simd_()
      94  x5 = _mm_clmulepi64_si128(x1, x0, 0x00);  in crc32_sse42_simd_()
      95  x1 = _mm_clmulepi64_si128(x1, x0, 0x11);  in crc32_sse42_simd_()
      [all …]
|
D | crc_folding.c |
      72  *xmm_crc0 = _mm_clmulepi64_si128(*xmm_crc0, xmm_fold4, 0x01);  in fold_1()
      73  *xmm_crc3 = _mm_clmulepi64_si128(*xmm_crc3, xmm_fold4, 0x10);  in fold_1()
      99  *xmm_crc1 = _mm_clmulepi64_si128(*xmm_crc1, xmm_fold4, 0x01);  in fold_2()
      100 *xmm_crc3 = _mm_clmulepi64_si128(*xmm_crc3, xmm_fold4, 0x10);  in fold_2()
      106 *xmm_crc0 = _mm_clmulepi64_si128(*xmm_crc0, xmm_fold4, 0x01);  in fold_2()
      107 *xmm_crc2 = _mm_clmulepi64_si128(*xmm_crc2, xmm_fold4, 0x10);  in fold_2()
      132 *xmm_crc2 = _mm_clmulepi64_si128(*xmm_crc2, xmm_fold4, 0x01);  in fold_3()
      133 *xmm_crc3 = _mm_clmulepi64_si128(*xmm_crc3, xmm_fold4, 0x10);  in fold_3()
      139 *xmm_crc1 = _mm_clmulepi64_si128(*xmm_crc1, xmm_fold4, 0x01);  in fold_3()
      140 *xmm_crc2 = _mm_clmulepi64_si128(*xmm_crc2, xmm_fold4, 0x10);  in fold_3()
      [all …]
|
/external/zlib/patches/ |
D | 0001-simd.patch |
      118 + *xmm_crc0 = _mm_clmulepi64_si128(*xmm_crc0, xmm_fold4, 0x01);
      119 + *xmm_crc3 = _mm_clmulepi64_si128(*xmm_crc3, xmm_fold4, 0x10);
      145 + *xmm_crc1 = _mm_clmulepi64_si128(*xmm_crc1, xmm_fold4, 0x01);
      146 + *xmm_crc3 = _mm_clmulepi64_si128(*xmm_crc3, xmm_fold4, 0x10);
      152 + *xmm_crc0 = _mm_clmulepi64_si128(*xmm_crc0, xmm_fold4, 0x01);
      153 + *xmm_crc2 = _mm_clmulepi64_si128(*xmm_crc2, xmm_fold4, 0x10);
      178 + *xmm_crc2 = _mm_clmulepi64_si128(*xmm_crc2, xmm_fold4, 0x01);
      179 + *xmm_crc3 = _mm_clmulepi64_si128(*xmm_crc3, xmm_fold4, 0x10);
      185 + *xmm_crc1 = _mm_clmulepi64_si128(*xmm_crc1, xmm_fold4, 0x01);
      186 + *xmm_crc2 = _mm_clmulepi64_si128(*xmm_crc2, xmm_fold4, 0x10);
      [all …]
|
/external/v8/third_party/zlib/patches/ |
D | 0001-simd.patch |
      118 + *xmm_crc0 = _mm_clmulepi64_si128(*xmm_crc0, xmm_fold4, 0x01);
      119 + *xmm_crc3 = _mm_clmulepi64_si128(*xmm_crc3, xmm_fold4, 0x10);
      145 + *xmm_crc1 = _mm_clmulepi64_si128(*xmm_crc1, xmm_fold4, 0x01);
      146 + *xmm_crc3 = _mm_clmulepi64_si128(*xmm_crc3, xmm_fold4, 0x10);
      152 + *xmm_crc0 = _mm_clmulepi64_si128(*xmm_crc0, xmm_fold4, 0x01);
      153 + *xmm_crc2 = _mm_clmulepi64_si128(*xmm_crc2, xmm_fold4, 0x10);
      178 + *xmm_crc2 = _mm_clmulepi64_si128(*xmm_crc2, xmm_fold4, 0x01);
      179 + *xmm_crc3 = _mm_clmulepi64_si128(*xmm_crc3, xmm_fold4, 0x10);
      185 + *xmm_crc1 = _mm_clmulepi64_si128(*xmm_crc1, xmm_fold4, 0x01);
      186 + *xmm_crc2 = _mm_clmulepi64_si128(*xmm_crc2, xmm_fold4, 0x10);
      [all …]
|
/external/clang/test/CodeGen/ |
D | pclmul-builtins.c |
      10  return _mm_clmulepi64_si128(a, b, 0);  in test_mm_clmulepi64_si128()
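
This clang CodeGen test just checks that the intrinsic lowers to the pclmulqdq builtin. As a usage sketch, the intrinsic returns the full 128-bit carry-less product of the two selected 64-bit halves (the input values below are arbitrary):

    #include <stdint.h>
    #include <stdio.h>
    #include <smmintrin.h>   /* SSE4.1: _mm_extract_epi64 */
    #include <wmmintrin.h>   /* PCLMUL: _mm_clmulepi64_si128 */

    int main(void)
    {
        __m128i a = _mm_set_epi64x(0, 0x8badf00dULL);
        __m128i b = _mm_set_epi64x(0, 0xdeadbeefULL);
        __m128i p = _mm_clmulepi64_si128(a, b, 0x00); /* a.lo clmul b.lo */

        uint64_t lo = (uint64_t)_mm_cvtsi128_si64(p);   /* low 64 bits */
        uint64_t hi = (uint64_t)_mm_extract_epi64(p, 1); /* high 64 bits */
        printf("clmul = 0x%016llx%016llx\n",
               (unsigned long long)hi, (unsigned long long)lo);
        return 0;
    }

Build with e.g. cc -msse4.1 -mpclmul on an x86-64 host.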
|
/external/clang/lib/Headers/ |
D | __wmmintrin_pclmul.h |
      54  #define _mm_clmulepi64_si128(__X, __Y, __I) \  (macro)
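
The header defines the intrinsic as a macro because the selector byte becomes the immediate operand of the pclmulqdq instruction and therefore must be a compile-time constant. If a runtime selector is ever needed, it has to be dispatched explicitly; the wrapper below is a hypothetical illustration, not part of any header:

    #include <wmmintrin.h>

    /* Hypothetical dispatch wrapper: only four selector values are
     * meaningful, since only bits 0 and 4 of the immediate are used. */
    static __m128i clmul_select(__m128i a, __m128i b, int sel)
    {
        switch (sel & 0x11) {
        case 0x00: return _mm_clmulepi64_si128(a, b, 0x00);
        case 0x01: return _mm_clmulepi64_si128(a, b, 0x01);
        case 0x10: return _mm_clmulepi64_si128(a, b, 0x10);
        default:   return _mm_clmulepi64_si128(a, b, 0x11);
        }
    }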
|