
Searched refs:__C (Results 1 – 25 of 32) sorted by relevance


/external/llvm-project/clang/lib/Headers/ppc_wrappers/
tmmintrin.h 140 __v2du __C = { __B, __A }; in _mm_alignr_pi8() local
143 __C = (__v2du) vec_sro ((__v16qu) __C, (__v16qu) __shift); in _mm_alignr_pi8()
146 __C = (__v2du) vec_slo ((__v16qu) __C, (__v16qu) __shift); in _mm_alignr_pi8()
148 return (__m64) __C[0]; in _mm_alignr_pi8()
165 __v8hi __C = vec_perm ((__v8hi) __A, (__v8hi) __B, __P); in _mm_hadd_epi16() local
167 return (__m128i) vec_add (__C, __D); in _mm_hadd_epi16()
178 __v4si __C = vec_perm ((__v4si) __A, (__v4si) __B, __P); in _mm_hadd_epi32() local
180 return (__m128i) vec_add (__C, __D); in _mm_hadd_epi32()
187 __v8hi __C = (__v8hi) (__v2du) { __A, __B }; in _mm_hadd_pi16()
192 __v8hi __D = vec_perm (__C, __C, __Q); in _mm_hadd_pi16()
[all …]
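
The ppc_wrappers hits above come from the PowerPC emulation of the SSSE3 header: _mm_alignr_pi8 and the horizontal adds are rebuilt from AltiVec vec_sro/vec_slo/vec_perm/vec_add. As a point of reference, here is a scalar sketch of _mm_hadd_epi16's usual semantics (pairwise sums of __A, then of __B); the helper below is hypothetical and not taken from the file.

/* Scalar sketch of _mm_hadd_epi16(__A, __B), assuming the usual SSSE3
 * semantics: pairwise sums of __A in the low four lanes, pairwise sums of
 * __B in the high four lanes. Hypothetical reference helper. */
#include <stdint.h>
#include <stdio.h>

static void hadd_epi16_ref(const int16_t a[8], const int16_t b[8], int16_t out[8]) {
    for (int i = 0; i < 4; i++) {
        out[i]     = (int16_t)(a[2 * i] + a[2 * i + 1]); /* pairs from __A */
        out[i + 4] = (int16_t)(b[2 * i] + b[2 * i + 1]); /* pairs from __B */
    }
}

int main(void) {
    int16_t a[8] = {1, 2, 3, 4, 5, 6, 7, 8};
    int16_t b[8] = {10, 20, 30, 40, 50, 60, 70, 80};
    int16_t r[8];
    hadd_epi16_ref(a, b, r);
    for (int i = 0; i < 8; i++) printf("%d ", r[i]); /* 3 7 11 15 30 70 110 150 */
    printf("\n");
    return 0;
}
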
/external/clang/lib/Headers/
fmaintrin.h 35 _mm_fmadd_ps(__m128 __A, __m128 __B, __m128 __C) in _mm_fmadd_ps() argument
37 return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); in _mm_fmadd_ps()
41 _mm_fmadd_pd(__m128d __A, __m128d __B, __m128d __C) in _mm_fmadd_pd() argument
43 return (__m128d)__builtin_ia32_vfmaddpd((__v2df)__A, (__v2df)__B, (__v2df)__C); in _mm_fmadd_pd()
47 _mm_fmadd_ss(__m128 __A, __m128 __B, __m128 __C) in _mm_fmadd_ss() argument
49 return (__m128)__builtin_ia32_vfmaddss((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); in _mm_fmadd_ss()
53 _mm_fmadd_sd(__m128d __A, __m128d __B, __m128d __C) in _mm_fmadd_sd() argument
55 return (__m128d)__builtin_ia32_vfmaddsd((__v2df)__A, (__v2df)__B, (__v2df)__C); in _mm_fmadd_sd()
59 _mm_fmsub_ps(__m128 __A, __m128 __B, __m128 __C) in _mm_fmsub_ps() argument
61 return (__m128)__builtin_ia32_vfmsubps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); in _mm_fmsub_ps()
[all …]
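
For context, the _mm_fmadd_*/_mm_fmsub_* wrappers above lower straight to the FMA builtins and compute __A * __B ± __C per lane (the ss/sd forms only touch the low element). A minimal usage sketch, assuming an x86 toolchain invoked with -mfma:

/* Minimal usage sketch for the FMA intrinsics listed above: each lane of the
 * result is __A * __B + __C. Assumes an x86 target compiled with -mfma. */
#include <immintrin.h>
#include <stdio.h>

int main(void) {
    __m128 a = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f);
    __m128 b = _mm_set_ps(8.0f, 7.0f, 6.0f, 5.0f);
    __m128 c = _mm_set1_ps(0.5f);
    __m128 r = _mm_fmadd_ps(a, b, c);          /* r[i] = a[i] * b[i] + c[i] */

    float out[4];
    _mm_storeu_ps(out, r);
    printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]); /* 5.5 12.5 21.5 32.5 */
    return 0;
}
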
fma4intrin.h 37 _mm_macc_ps(__m128 __A, __m128 __B, __m128 __C) in _mm_macc_ps() argument
39 return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); in _mm_macc_ps()
43 _mm_macc_pd(__m128d __A, __m128d __B, __m128d __C) in _mm_macc_pd() argument
45 return (__m128d)__builtin_ia32_vfmaddpd((__v2df)__A, (__v2df)__B, (__v2df)__C); in _mm_macc_pd()
49 _mm_macc_ss(__m128 __A, __m128 __B, __m128 __C) in _mm_macc_ss() argument
51 return (__m128)__builtin_ia32_vfmaddss((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); in _mm_macc_ss()
55 _mm_macc_sd(__m128d __A, __m128d __B, __m128d __C) in _mm_macc_sd() argument
57 return (__m128d)__builtin_ia32_vfmaddsd((__v2df)__A, (__v2df)__B, (__v2df)__C); in _mm_macc_sd()
61 _mm_msub_ps(__m128 __A, __m128 __B, __m128 __C) in _mm_msub_ps() argument
63 return (__m128)__builtin_ia32_vfmsubps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); in _mm_msub_ps()
[all …]
xopintrin.h 37 _mm_maccs_epi16(__m128i __A, __m128i __B, __m128i __C) in _mm_maccs_epi16() argument
39 return (__m128i)__builtin_ia32_vpmacssww((__v8hi)__A, (__v8hi)__B, (__v8hi)__C); in _mm_maccs_epi16()
43 _mm_macc_epi16(__m128i __A, __m128i __B, __m128i __C) in _mm_macc_epi16() argument
45 return (__m128i)__builtin_ia32_vpmacsww((__v8hi)__A, (__v8hi)__B, (__v8hi)__C); in _mm_macc_epi16()
49 _mm_maccsd_epi16(__m128i __A, __m128i __B, __m128i __C) in _mm_maccsd_epi16() argument
51 return (__m128i)__builtin_ia32_vpmacsswd((__v8hi)__A, (__v8hi)__B, (__v4si)__C); in _mm_maccsd_epi16()
55 _mm_maccd_epi16(__m128i __A, __m128i __B, __m128i __C) in _mm_maccd_epi16() argument
57 return (__m128i)__builtin_ia32_vpmacswd((__v8hi)__A, (__v8hi)__B, (__v4si)__C); in _mm_maccd_epi16()
61 _mm_maccs_epi32(__m128i __A, __m128i __B, __m128i __C) in _mm_maccs_epi32() argument
63 return (__m128i)__builtin_ia32_vpmacssdd((__v4si)__A, (__v4si)__B, (__v4si)__C); in _mm_maccs_epi32()
[all …]
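
The XOP hits follow the same multiply-accumulate shape on packed integers; as I read the vpmacs* semantics, _mm_macc_epi16 wraps on overflow while _mm_maccs_epi16 saturates. A scalar sketch of one 16-bit lane under that assumption (both helpers are hypothetical, not from the header):

/* One 16-bit lane of _mm_macc_epi16 (wraps) versus _mm_maccs_epi16
 * (saturates), per my reading of the XOP semantics. */
#include <stdint.h>
#include <stdio.h>

static int16_t lane_macc(int16_t a, int16_t b, int16_t c) {
    return (int16_t)(a * b + c);        /* result wraps to 16 bits */
}

static int16_t lane_maccs(int16_t a, int16_t b, int16_t c) {
    int32_t v = (int32_t)a * b + c;     /* widen, then clamp */
    if (v > INT16_MAX) return INT16_MAX;
    if (v < INT16_MIN) return INT16_MIN;
    return (int16_t)v;
}

int main(void) {
    /* 300*200+10 = 60010 overflows int16: wraps to -5526, saturates to 32767 */
    printf("%d %d\n", lane_macc(300, 200, 10), lane_maccs(300, 200, 10));
    return 0;
}
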
avx512vlintrin.h 1241 _mm_mask_fmadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C) in _mm_mask_fmadd_pd() argument
1245 (__v2df) __C, in _mm_mask_fmadd_pd()
1250 _mm_mask3_fmadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U) in _mm_mask3_fmadd_pd() argument
1254 (__v2df) __C, in _mm_mask3_fmadd_pd()
1259 _mm_maskz_fmadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C) in _mm_maskz_fmadd_pd() argument
1263 (__v2df) __C, in _mm_maskz_fmadd_pd()
1268 _mm_mask_fmsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C) in _mm_mask_fmsub_pd() argument
1272 -(__v2df) __C, in _mm_mask_fmsub_pd()
1277 _mm_maskz_fmsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C) in _mm_maskz_fmsub_pd() argument
1281 -(__v2df) __C, in _mm_maskz_fmsub_pd()
[all …]
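
These are the write-masked FMA forms. Under the usual AVX-512 masking convention, _mm_mask_fmadd_pd keeps __A in lanes whose mask bit is clear, _mm_mask3_fmadd_pd keeps __C, and _mm_maskz_fmadd_pd zeroes them. A scalar sketch of that per-lane rule (not taken from the header):

/* Scalar sketch of one lane of the masked FMA forms above, assuming the
 * standard AVX-512 convention for mask/mask3/maskz. */
#include <stdio.h>

static double fmadd_mask_lane(double a, double b, double c, int maskbit) {
    return maskbit ? a * b + c : a;   /* _mm_mask_fmadd_pd: falls back to __A */
}

static double fmadd_mask3_lane(double a, double b, double c, int maskbit) {
    return maskbit ? a * b + c : c;   /* _mm_mask3_fmadd_pd: falls back to __C */
}

static double fmadd_maskz_lane(double a, double b, double c, int maskbit) {
    return maskbit ? a * b + c : 0.0; /* _mm_maskz_fmadd_pd: zeroes the lane */
}

int main(void) {
    printf("%g %g %g\n",
           fmadd_mask_lane(2.0, 3.0, 1.0, 0),
           fmadd_mask3_lane(2.0, 3.0, 1.0, 0),
           fmadd_maskz_lane(2.0, 3.0, 1.0, 1)); /* 2 1 7 */
    return 0;
}
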
avx512fintrin.h 345 _mm512_set4_epi32 (int __A, int __B, int __C, int __D) in _mm512_set4_epi32() argument
348 { __D, __C, __B, __A, __D, __C, __B, __A, in _mm512_set4_epi32()
349 __D, __C, __B, __A, __D, __C, __B, __A }; in _mm512_set4_epi32()
353 _mm512_set4_epi64 (long long __A, long long __B, long long __C, in _mm512_set4_epi64() argument
357 { __D, __C, __B, __A, __D, __C, __B, __A }; in _mm512_set4_epi64()
361 _mm512_set4_pd (double __A, double __B, double __C, double __D) in _mm512_set4_pd() argument
364 { __D, __C, __B, __A, __D, __C, __B, __A }; in _mm512_set4_pd()
368 _mm512_set4_ps (float __A, float __B, float __C, float __D) in _mm512_set4_ps() argument
371 { __D, __C, __B, __A, __D, __C, __B, __A, in _mm512_set4_ps()
372 __D, __C, __B, __A, __D, __C, __B, __A }; in _mm512_set4_ps()
[all …]
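
As the initializer lists above show, _mm512_set4_epi32(__A, __B, __C, __D) repeats its four arguments across the register with __D in the lowest element. A usage sketch, assuming AVX-512F hardware and a compiler invoked with -mavx512f:

/* Usage sketch for _mm512_set4_epi32: the four arguments repeat across the
 * register with __D in element 0. Assumes AVX-512F (-mavx512f). */
#include <immintrin.h>
#include <stdio.h>

int main(void) {
    __m512i v = _mm512_set4_epi32(1, 2, 3, 4);   /* __A=1, __B=2, __C=3, __D=4 */
    int out[16];
    _mm512_storeu_si512(out, v);
    for (int i = 0; i < 16; i++)
        printf("%d ", out[i]);                   /* 4 3 2 1 repeated four times */
    printf("\n");
    return 0;
}
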
smmintrin.h 476 _mm_crc32_u8(unsigned int __C, unsigned char __D) in _mm_crc32_u8() argument
478 return __builtin_ia32_crc32qi(__C, __D); in _mm_crc32_u8()
482 _mm_crc32_u16(unsigned int __C, unsigned short __D) in _mm_crc32_u16() argument
484 return __builtin_ia32_crc32hi(__C, __D); in _mm_crc32_u16()
488 _mm_crc32_u32(unsigned int __C, unsigned int __D) in _mm_crc32_u32() argument
490 return __builtin_ia32_crc32si(__C, __D); in _mm_crc32_u32()
495 _mm_crc32_u64(unsigned long long __C, unsigned long long __D) in _mm_crc32_u64() argument
497 return __builtin_ia32_crc32di(__C, __D); in _mm_crc32_u64()
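
The _mm_crc32_* intrinsics accumulate a CRC-32C (Castagnoli) value: __C is the running CRC and __D the new data. A minimal byte-wise sketch, assuming an SSE4.2 target (-msse4.2); the initial and final inversions below are the common CRC-32C convention rather than anything mandated by the intrinsic itself:

/* Byte-wise CRC-32C accumulation with _mm_crc32_u8. Assumes SSE4.2
 * (-msse4.2); the pre/post inversion follows the usual CRC-32C convention. */
#include <nmmintrin.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t crc32c(const void *buf, size_t len) {
    const uint8_t *p = buf;
    uint32_t crc = 0xFFFFFFFFu;              /* conventional initial value */
    for (size_t i = 0; i < len; i++)
        crc = _mm_crc32_u8(crc, p[i]);       /* fold in one byte */
    return crc ^ 0xFFFFFFFFu;                /* conventional final inversion */
}

int main(void) {
    const char *msg = "123456789";
    printf("%08x\n", crc32c(msg, strlen(msg)));  /* CRC-32C check value e3069283 */
    return 0;
}
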
/external/llvm-project/clang/lib/Headers/
fmaintrin.h 22 _mm_fmadd_ps(__m128 __A, __m128 __B, __m128 __C) in _mm_fmadd_ps() argument
24 return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); in _mm_fmadd_ps()
28 _mm_fmadd_pd(__m128d __A, __m128d __B, __m128d __C) in _mm_fmadd_pd() argument
30 return (__m128d)__builtin_ia32_vfmaddpd((__v2df)__A, (__v2df)__B, (__v2df)__C); in _mm_fmadd_pd()
34 _mm_fmadd_ss(__m128 __A, __m128 __B, __m128 __C) in _mm_fmadd_ss() argument
36 return (__m128)__builtin_ia32_vfmaddss3((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); in _mm_fmadd_ss()
40 _mm_fmadd_sd(__m128d __A, __m128d __B, __m128d __C) in _mm_fmadd_sd() argument
42 return (__m128d)__builtin_ia32_vfmaddsd3((__v2df)__A, (__v2df)__B, (__v2df)__C); in _mm_fmadd_sd()
46 _mm_fmsub_ps(__m128 __A, __m128 __B, __m128 __C) in _mm_fmsub_ps() argument
48 return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C); in _mm_fmsub_ps()
[all …]
fma4intrin.h 24 _mm_macc_ps(__m128 __A, __m128 __B, __m128 __C) in _mm_macc_ps() argument
26 return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); in _mm_macc_ps()
30 _mm_macc_pd(__m128d __A, __m128d __B, __m128d __C) in _mm_macc_pd() argument
32 return (__m128d)__builtin_ia32_vfmaddpd((__v2df)__A, (__v2df)__B, (__v2df)__C); in _mm_macc_pd()
36 _mm_macc_ss(__m128 __A, __m128 __B, __m128 __C) in _mm_macc_ss() argument
38 return (__m128)__builtin_ia32_vfmaddss((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); in _mm_macc_ss()
42 _mm_macc_sd(__m128d __A, __m128d __B, __m128d __C) in _mm_macc_sd() argument
44 return (__m128d)__builtin_ia32_vfmaddsd((__v2df)__A, (__v2df)__B, (__v2df)__C); in _mm_macc_sd()
48 _mm_msub_ps(__m128 __A, __m128 __B, __m128 __C) in _mm_msub_ps() argument
50 return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C); in _mm_msub_ps()
[all …]
ia32intrin.h 286 __crc32b(unsigned int __C, unsigned char __D) in __crc32b() argument
288 return __builtin_ia32_crc32qi(__C, __D); in __crc32b()
307 __crc32w(unsigned int __C, unsigned short __D) in __crc32w() argument
309 return __builtin_ia32_crc32hi(__C, __D); in __crc32w()
328 __crc32d(unsigned int __C, unsigned int __D) in __crc32d() argument
330 return __builtin_ia32_crc32si(__C, __D); in __crc32d()
350 __crc32q(unsigned long long __C, unsigned long long __D) in __crc32q() argument
352 return __builtin_ia32_crc32di(__C, __D); in __crc32q()
377 __rolb(unsigned char __X, int __C) { in __rolb() argument
378 return __builtin_rotateleft8(__X, __C); in __rolb()
[all …]
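
ia32intrin.h exposes the same CRC32 builtins under the __crc32* names and adds rotate helpers such as __rolb, which wraps __builtin_rotateleft8. A small rotate sketch, assuming <x86intrin.h> on an x86 target:

/* Rotate sketch: __rolb(__X, __C) rotates the 8-bit value __X left by __C.
 * Assumes an x86 target where <x86intrin.h> pulls in ia32intrin.h. */
#include <x86intrin.h>
#include <stdio.h>

int main(void) {
    unsigned char x = 0x81;               /* 1000 0001 */
    printf("0x%02x\n", __rolb(x, 1));     /* 0x03: the top bit wraps around */
    return 0;
}
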
avx512vlvbmi2intrin.h 410 _mm256_shldv_epi64(__m256i __A, __m256i __B, __m256i __C) in _mm256_shldv_epi64() argument
413 (__v4di)__C); in _mm256_shldv_epi64()
417 _mm256_mask_shldv_epi64(__m256i __A, __mmask8 __U, __m256i __B, __m256i __C) in _mm256_mask_shldv_epi64() argument
420 (__v4di)_mm256_shldv_epi64(__A, __B, __C), in _mm256_mask_shldv_epi64()
425 _mm256_maskz_shldv_epi64(__mmask8 __U, __m256i __A, __m256i __B, __m256i __C) in _mm256_maskz_shldv_epi64() argument
428 (__v4di)_mm256_shldv_epi64(__A, __B, __C), in _mm256_maskz_shldv_epi64()
433 _mm_shldv_epi64(__m128i __A, __m128i __B, __m128i __C) in _mm_shldv_epi64() argument
436 (__v2di)__C); in _mm_shldv_epi64()
440 _mm_mask_shldv_epi64(__m128i __A, __mmask8 __U, __m128i __B, __m128i __C) in _mm_mask_shldv_epi64() argument
443 (__v2di)_mm_shldv_epi64(__A, __B, __C), in _mm_mask_shldv_epi64()
[all …]
avx512vbmi2intrin.h 216 _mm512_shldv_epi64(__m512i __A, __m512i __B, __m512i __C) in _mm512_shldv_epi64() argument
219 (__v8di)__C); in _mm512_shldv_epi64()
223 _mm512_mask_shldv_epi64(__m512i __A, __mmask8 __U, __m512i __B, __m512i __C) in _mm512_mask_shldv_epi64() argument
226 (__v8di)_mm512_shldv_epi64(__A, __B, __C), in _mm512_mask_shldv_epi64()
231 _mm512_maskz_shldv_epi64(__mmask8 __U, __m512i __A, __m512i __B, __m512i __C) in _mm512_maskz_shldv_epi64() argument
234 (__v8di)_mm512_shldv_epi64(__A, __B, __C), in _mm512_maskz_shldv_epi64()
239 _mm512_shldv_epi32(__m512i __A, __m512i __B, __m512i __C) in _mm512_shldv_epi32() argument
242 (__v16si)__C); in _mm512_shldv_epi32()
246 _mm512_mask_shldv_epi32(__m512i __A, __mmask16 __U, __m512i __B, __m512i __C) in _mm512_mask_shldv_epi32() argument
249 (__v16si)_mm512_shldv_epi32(__A, __B, __C), in _mm512_mask_shldv_epi32()
[all …]
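
The shldv hits are the AVX-512 VBMI2 variable funnel shifts: per lane, __A is concatenated above __B, shifted left by the amount in __C, and the upper half is kept. A scalar sketch of one 64-bit lane under that reading (the helper is hypothetical, not from the header):

/* Scalar sketch of one 64-bit lane of _mm_shldv_epi64: concatenate a:b,
 * shift left by c (mod 64), keep the upper 64 bits. */
#include <stdint.h>
#include <stdio.h>

static uint64_t shldv64_lane(uint64_t a, uint64_t b, uint64_t c) {
    c &= 63;
    if (c == 0)
        return a;                       /* avoid the undefined b >> 64 */
    return (a << c) | (b >> (64 - c));  /* bits of b slide in from the right */
}

int main(void) {
    printf("%016llx\n",
           (unsigned long long)shldv64_lane(0x0123456789abcdefULL,
                                            0xf000000000000000ULL, 8));
    /* 23456789abcdeff0: a shifted left by 8 with the top byte of b filling in */
    return 0;
}
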
xopintrin.h 24 _mm_maccs_epi16(__m128i __A, __m128i __B, __m128i __C) in _mm_maccs_epi16() argument
26 return (__m128i)__builtin_ia32_vpmacssww((__v8hi)__A, (__v8hi)__B, (__v8hi)__C); in _mm_maccs_epi16()
30 _mm_macc_epi16(__m128i __A, __m128i __B, __m128i __C) in _mm_macc_epi16() argument
32 return (__m128i)__builtin_ia32_vpmacsww((__v8hi)__A, (__v8hi)__B, (__v8hi)__C); in _mm_macc_epi16()
36 _mm_maccsd_epi16(__m128i __A, __m128i __B, __m128i __C) in _mm_maccsd_epi16() argument
38 return (__m128i)__builtin_ia32_vpmacsswd((__v8hi)__A, (__v8hi)__B, (__v4si)__C); in _mm_maccsd_epi16()
42 _mm_maccd_epi16(__m128i __A, __m128i __B, __m128i __C) in _mm_maccd_epi16() argument
44 return (__m128i)__builtin_ia32_vpmacswd((__v8hi)__A, (__v8hi)__B, (__v4si)__C); in _mm_maccd_epi16()
48 _mm_maccs_epi32(__m128i __A, __m128i __B, __m128i __C) in _mm_maccs_epi32() argument
50 return (__m128i)__builtin_ia32_vpmacssdd((__v4si)__A, (__v4si)__B, (__v4si)__C); in _mm_maccs_epi32()
[all …]
avx512vlintrin.h 894 _mm_mask_fmadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C) in _mm_mask_fmadd_pd() argument
899 (__v2df) __C), in _mm_mask_fmadd_pd()
904 _mm_mask3_fmadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U) in _mm_mask3_fmadd_pd() argument
909 (__v2df) __C), in _mm_mask3_fmadd_pd()
910 (__v2df) __C); in _mm_mask3_fmadd_pd()
914 _mm_maskz_fmadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C) in _mm_maskz_fmadd_pd() argument
919 (__v2df) __C), in _mm_maskz_fmadd_pd()
924 _mm_mask_fmsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C) in _mm_mask_fmsub_pd() argument
929 -(__v2df) __C), in _mm_mask_fmsub_pd()
934 _mm_maskz_fmsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C) in _mm_maskz_fmsub_pd() argument
[all …]
avx512fintrin.h 342 _mm512_set4_epi32 (int __A, int __B, int __C, int __D) in _mm512_set4_epi32() argument
345 { __D, __C, __B, __A, __D, __C, __B, __A, in _mm512_set4_epi32()
346 __D, __C, __B, __A, __D, __C, __B, __A }; in _mm512_set4_epi32()
350 _mm512_set4_epi64 (long long __A, long long __B, long long __C, in _mm512_set4_epi64() argument
354 { __D, __C, __B, __A, __D, __C, __B, __A }; in _mm512_set4_epi64()
358 _mm512_set4_pd (double __A, double __B, double __C, double __D) in _mm512_set4_pd() argument
361 { __D, __C, __B, __A, __D, __C, __B, __A }; in _mm512_set4_pd()
365 _mm512_set4_ps (float __A, float __B, float __C, float __D) in _mm512_set4_ps() argument
368 { __D, __C, __B, __A, __D, __C, __B, __A, in _mm512_set4_ps()
369 __D, __C, __B, __A, __D, __C, __B, __A }; in _mm512_set4_ps()
[all …]
smmintrin.h 2359 _mm_crc32_u8(unsigned int __C, unsigned char __D) in _mm_crc32_u8() argument
2361 return __builtin_ia32_crc32qi(__C, __D); in _mm_crc32_u8()
2379 _mm_crc32_u16(unsigned int __C, unsigned short __D) in _mm_crc32_u16() argument
2381 return __builtin_ia32_crc32hi(__C, __D); in _mm_crc32_u16()
2399 _mm_crc32_u32(unsigned int __C, unsigned int __D) in _mm_crc32_u32() argument
2401 return __builtin_ia32_crc32si(__C, __D); in _mm_crc32_u32()
2420 _mm_crc32_u64(unsigned long long __C, unsigned long long __D) in _mm_crc32_u64() argument
2422 return __builtin_ia32_crc32di(__C, __D); in _mm_crc32_u64()
/external/clang/test/CodeGen/
avx512f-builtins.c 437 __m512d test_mm512_fmadd_round_pd(__m512d __A, __m512d __B, __m512d __C) { in test_mm512_fmadd_round_pd() argument
440 return _mm512_fmadd_round_pd(__A, __B, __C, _MM_FROUND_TO_NEAREST_INT); in test_mm512_fmadd_round_pd()
443 __m512d test_mm512_mask_fmadd_round_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C) { in test_mm512_mask_fmadd_round_pd() argument
446 return _mm512_mask_fmadd_round_pd(__A, __U, __B, __C, _MM_FROUND_TO_NEAREST_INT); in test_mm512_mask_fmadd_round_pd()
448 __m512d test_mm512_mask3_fmadd_round_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U) { in test_mm512_mask3_fmadd_round_pd() argument
451 return _mm512_mask3_fmadd_round_pd(__A, __B, __C, __U, _MM_FROUND_TO_NEAREST_INT); in test_mm512_mask3_fmadd_round_pd()
453 __m512d test_mm512_maskz_fmadd_round_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C) { in test_mm512_maskz_fmadd_round_pd() argument
456 return _mm512_maskz_fmadd_round_pd(__U, __A, __B, __C, _MM_FROUND_TO_NEAREST_INT); in test_mm512_maskz_fmadd_round_pd()
458 __m512d test_mm512_fmsub_round_pd(__m512d __A, __m512d __B, __m512d __C) { in test_mm512_fmsub_round_pd() argument
461 return _mm512_fmsub_round_pd(__A, __B, __C, _MM_FROUND_TO_NEAREST_INT); in test_mm512_fmsub_round_pd()
[all …]
avx512vl-builtins.c 1072 __m128d test_mm_mask_fmadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C) { in test_mm_mask_fmadd_pd() argument
1075 return _mm_mask_fmadd_pd(__A, __U, __B, __C); in test_mm_mask_fmadd_pd()
1078 __m128d test_mm_mask_fmsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C) { in test_mm_mask_fmsub_pd() argument
1081 return _mm_mask_fmsub_pd(__A, __U, __B, __C); in test_mm_mask_fmsub_pd()
1084 __m128d test_mm_mask3_fmadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U) { in test_mm_mask3_fmadd_pd() argument
1087 return _mm_mask3_fmadd_pd(__A, __B, __C, __U); in test_mm_mask3_fmadd_pd()
1090 __m128d test_mm_mask3_fnmadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U) { in test_mm_mask3_fnmadd_pd() argument
1093 return _mm_mask3_fnmadd_pd(__A, __B, __C, __U); in test_mm_mask3_fnmadd_pd()
1096 __m128d test_mm_maskz_fmadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C) { in test_mm_maskz_fmadd_pd() argument
1099 return _mm_maskz_fmadd_pd(__U, __A, __B, __C); in test_mm_maskz_fmadd_pd()
[all …]
/external/llvm-project/clang/test/CodeGen/X86/
avx512f-builtins.c 497 __m512d test_mm512_fmadd_round_pd(__m512d __A, __m512d __B, __m512d __C) { in test_mm512_fmadd_round_pd() argument
500 return _mm512_fmadd_round_pd(__A, __B, __C, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC); in test_mm512_fmadd_round_pd()
503 __m512d test_mm512_mask_fmadd_round_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C) { in test_mm512_mask_fmadd_round_pd() argument
508 return _mm512_mask_fmadd_round_pd(__A, __U, __B, __C, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC); in test_mm512_mask_fmadd_round_pd()
510 __m512d test_mm512_mask3_fmadd_round_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U) { in test_mm512_mask3_fmadd_round_pd() argument
515 return _mm512_mask3_fmadd_round_pd(__A, __B, __C, __U, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC); in test_mm512_mask3_fmadd_round_pd()
517 __m512d test_mm512_maskz_fmadd_round_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C) { in test_mm512_maskz_fmadd_round_pd() argument
522 return _mm512_maskz_fmadd_round_pd(__U, __A, __B, __C, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC); in test_mm512_maskz_fmadd_round_pd()
524 __m512d test_mm512_fmsub_round_pd(__m512d __A, __m512d __B, __m512d __C) { in test_mm512_fmsub_round_pd() argument
528 return _mm512_fmsub_round_pd(__A, __B, __C, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC); in test_mm512_fmsub_round_pd()
[all …]
avx512bw-builtins.c 23 __mmask32 test_kand_mask32(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i… in test_kand_mask32() argument
30 _mm512_cmpneq_epu16_mask(__C, __D)), in test_kand_mask32()
34 __mmask64 test_kand_mask64(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i… in test_kand_mask64() argument
41 _mm512_cmpneq_epu8_mask(__C, __D)), in test_kand_mask64()
45 __mmask32 test_kandn_mask32(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512… in test_kandn_mask32() argument
53 _mm512_cmpneq_epu16_mask(__C, __D)), in test_kandn_mask32()
57 __mmask64 test_kandn_mask64(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512… in test_kandn_mask64() argument
65 _mm512_cmpneq_epu8_mask(__C, __D)), in test_kandn_mask64()
69 __mmask32 test_kor_mask32(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i … in test_kor_mask32() argument
76 _mm512_cmpneq_epu16_mask(__C, __D)), in test_kor_mask32()
[all …]
avx512dq-builtins.c 14 __mmask8 test_kand_mask8(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i _… in test_kand_mask8() argument
21 _mm512_cmpneq_epu64_mask(__C, __D)), in test_kand_mask8()
25 __mmask8 test_kandn_mask8(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i … in test_kandn_mask8() argument
33 _mm512_cmpneq_epu64_mask(__C, __D)), in test_kandn_mask8()
37 __mmask8 test_kor_mask8(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i __… in test_kor_mask8() argument
44 _mm512_cmpneq_epu64_mask(__C, __D)), in test_kor_mask8()
48 __mmask8 test_kxnor_mask8(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i … in test_kxnor_mask8() argument
56 _mm512_cmpneq_epu64_mask(__C, __D)), in test_kxnor_mask8()
60 __mmask8 test_kxor_mask8(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i _… in test_kxor_mask8() argument
67 _mm512_cmpneq_epu64_mask(__C, __D)), in test_kxor_mask8()
[all …]
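
The avx512bw/avx512dq tests above pair compare-to-mask intrinsics with the mask-register logic ops (_kand_*, _kandn_*, _kor_*, and friends). Mask values are plain bitmasks, one bit per lane, so these reduce to integer bit operations; a small sketch assuming the usual kandn definition of NOT(first) AND second:

/* Mask-register logic sketch: __mmask8/__mmask32 values are ordinary
 * bitmasks, so _kand/_kandn/_kor reduce to bitwise ops on integers. */
#include <stdint.h>
#include <stdio.h>

int main(void) {
    uint8_t m1 = 0xF0;                            /* e.g. one cmpneq mask    */
    uint8_t m2 = 0xCC;                            /* e.g. another compare    */
    printf("kand  %02x\n", (uint8_t)(m1 & m2));   /* 0xC0 */
    printf("kandn %02x\n", (uint8_t)(~m1 & m2));  /* 0x0C: first operand inverted */
    printf("kor   %02x\n", (uint8_t)(m1 | m2));   /* 0xFC */
    return 0;
}
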
avx512vl-builtins.c 2837 __m128d test_mm_mask_fmadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C) { in test_mm_mask_fmadd_pd() argument
2843 return _mm_mask_fmadd_pd(__A, __U, __B, __C); in test_mm_mask_fmadd_pd()
2846 __m128d test_mm_mask_fmsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C) { in test_mm_mask_fmsub_pd() argument
2853 return _mm_mask_fmsub_pd(__A, __U, __B, __C); in test_mm_mask_fmsub_pd()
2856 __m128d test_mm_mask3_fmadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U) { in test_mm_mask3_fmadd_pd() argument
2862 return _mm_mask3_fmadd_pd(__A, __B, __C, __U); in test_mm_mask3_fmadd_pd()
2865 __m128d test_mm_mask3_fnmadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U) { in test_mm_mask3_fnmadd_pd() argument
2872 return _mm_mask3_fnmadd_pd(__A, __B, __C, __U); in test_mm_mask3_fnmadd_pd()
2875 __m128d test_mm_maskz_fmadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C) { in test_mm_maskz_fmadd_pd() argument
2881 return _mm_maskz_fmadd_pd(__U, __A, __B, __C); in test_mm_maskz_fmadd_pd()
[all …]
/external/llvm-project/llvm/test/CodeGen/X86/
avx512vl-intrinsics-canonical.ll 7 … @test_mm_mask_fmadd_pd(<2 x double> %__A, i8 zeroext %__U, <2 x double> %__B, <2 x double> %__C) {
23 … tail call <2 x double> @llvm.fma.v2f64(<2 x double> %__A, <2 x double> %__B, <2 x double> %__C) #9
30 … @test_mm_mask_fmsub_pd(<2 x double> %__A, i8 zeroext %__U, <2 x double> %__B, <2 x double> %__C) {
46 %sub.i = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %__C
54 define <2 x double> @test_mm_mask3_fmadd_pd(<2 x double> %__A, <2 x double> %__B, <2 x double> %__C
72 … tail call <2 x double> @llvm.fma.v2f64(<2 x double> %__A, <2 x double> %__B, <2 x double> %__C) #9
75 %2 = select <2 x i1> %extract.i, <2 x double> %0, <2 x double> %__C
79 …test_mm_mask3_fnmadd_pd(<2 x double> %__A, <2 x double> %__B, <2 x double> %__C, i8 zeroext %__U) {
98 …ail call <2 x double> @llvm.fma.v2f64(<2 x double> %sub.i, <2 x double> %__B, <2 x double> %__C) #9
101 %2 = select <2 x i1> %extract.i, <2 x double> %0, <2 x double> %__C
[all …]
avx512-intrinsics-canonical.ll 7 … <8 x double> @test_mm512_fmadd_round_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) {
13 …ble> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i32 8)
19 …512_mask_fmadd_round_pd(<8 x double> %__A, i8 zeroext %__U, <8 x double> %__B, <8 x double> %__C) {
33 …ble> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i32 8)
39 …12_mask3_fmadd_round_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i8 zeroext %__U) {
55 …ble> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i32 8)
57 %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> %__C
61 …12_maskz_fmadd_round_pd(i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B, <8 x double> %__C) {
75 …ble> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i32 8)
81 … <8 x double> @test_mm512_fmsub_round_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) {
[all …]
avx512vl-intrinsics-fast-isel.ll 3457 define <2 x i64> @test_mm_ternarylogic_epi32(<2 x i64> %__A, <2 x i64> %__B, <2 x i64> %__C) {
3465 %2 = bitcast <2 x i64> %__C to <4 x i32>
3473 …@test_mm_mask_ternarylogic_epi32(<2 x i64> %__A, i8 zeroext %__U, <2 x i64> %__B, <2 x i64> %__C) {
3489 %2 = bitcast <2 x i64> %__C to <4 x i32>
3498 …test_mm_maskz_ternarylogic_epi32(i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B, <2 x i64> %__C) {
3514 %2 = bitcast <2 x i64> %__C to <4 x i32>
3523 define <4 x i64> @test_mm256_ternarylogic_epi32(<4 x i64> %__A, <4 x i64> %__B, <4 x i64> %__C) {
3531 %2 = bitcast <4 x i64> %__C to <8 x i32>
3539 …st_mm256_mask_ternarylogic_epi32(<4 x i64> %__A, i8 zeroext %__U, <4 x i64> %__B, <4 x i64> %__C) {
3555 %2 = bitcast <4 x i64> %__C to <8 x i32>
[all …]
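
The ternarylogic tests at the end exercise VPTERNLOG, where the 8-bit immediate is a truth table for an arbitrary three-input boolean function: each result bit is the immediate indexed by the corresponding bits of __A, __B and __C, with __A supplying the high index bit. A scalar sketch of that rule (not taken from the test):

/* Scalar sketch of the per-bit rule behind _mm_ternarylogic_epi32: the
 * immediate acts as a 3-input truth table indexed by bits of a, b and c. */
#include <stdint.h>
#include <stdio.h>

static uint32_t ternlog32(uint32_t a, uint32_t b, uint32_t c, uint8_t imm) {
    uint32_t out = 0;
    for (int i = 0; i < 32; i++) {
        unsigned idx = (((a >> i) & 1) << 2) | (((b >> i) & 1) << 1) | ((c >> i) & 1);
        out |= (uint32_t)((imm >> idx) & 1) << i;
    }
    return out;
}

int main(void) {
    /* 0x96 is the truth table for a ^ b ^ c, a common VPTERNLOG example. */
    uint32_t a = 0x0ff0f00f, b = 0x12345678, c = 0xdeadbeef;
    printf("%08x %08x\n", ternlog32(a, b, c, 0x96), a ^ b ^ c); /* identical */
    return 0;
}
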
