
Searched refs: __v4sf (Results 1 – 17 of 17) sorted by relevance

/external/clang/lib/Headers/
xmmintrin.h
30 typedef float __v4sf __attribute__((__vector_size__(16)));
83 return (__m128)((__v4sf)__a + (__v4sf)__b); in _mm_add_ps()
126 return (__m128)((__v4sf)__a - (__v4sf)__b); in _mm_sub_ps()
168 return (__m128)((__v4sf)__a * (__v4sf)__b); in _mm_mul_ps()
209 return (__m128)((__v4sf)__a / (__v4sf)__b); in _mm_div_ps()
227 __m128 __c = __builtin_ia32_sqrtss((__v4sf)__a); in _mm_sqrt_ss()
245 return __builtin_ia32_sqrtps((__v4sf)__a); in _mm_sqrt_ps()
263 __m128 __c = __builtin_ia32_rcpss((__v4sf)__a); in _mm_rcp_ss()
281 return __builtin_ia32_rcpps((__v4sf)__a); in _mm_rcp_ps()
300 __m128 __c = __builtin_ia32_rsqrtss((__v4sf)__a); in _mm_rsqrt_ss()
[all …]
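
The xmmintrin.h results above show the core pattern: __v4sf is a Clang/GCC vector-extension type, so the basic arithmetic intrinsics are just casts plus ordinary operators. A minimal sketch of the same pattern (the local names here are illustrative, not taken from the header):

#include <xmmintrin.h>

/* local alias with the same definition as __v4sf at xmmintrin.h line 30 */
typedef float my_v4sf __attribute__((__vector_size__(16)));

/* same cast-and-add shape as _mm_add_ps above; lowers to a packed addps */
static inline __m128 add_ps_like(__m128 a, __m128 b) {
  return (__m128)((my_v4sf)a + (my_v4sf)b);
}
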
fma4intrin.h
39 return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); in _mm_macc_ps()
51 return (__m128)__builtin_ia32_vfmaddss((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); in _mm_macc_ss()
63 return (__m128)__builtin_ia32_vfmsubps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); in _mm_msub_ps()
75 return (__m128)__builtin_ia32_vfmsubss((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); in _mm_msub_ss()
87 return (__m128)__builtin_ia32_vfnmaddps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); in _mm_nmacc_ps()
99 return (__m128)__builtin_ia32_vfnmaddss((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); in _mm_nmacc_ss()
111 return (__m128)__builtin_ia32_vfnmsubps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); in _mm_nmsub_ps()
123 return (__m128)__builtin_ia32_vfnmsubss((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); in _mm_nmsub_ss()
135 return (__m128)__builtin_ia32_vfmaddsubps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); in _mm_maddsub_ps()
147 return (__m128)__builtin_ia32_vfmsubaddps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); in _mm_msubadd_ps()
fmaintrin.h
37 return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); in _mm_fmadd_ps()
49 return (__m128)__builtin_ia32_vfmaddss((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); in _mm_fmadd_ss()
61 return (__m128)__builtin_ia32_vfmsubps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); in _mm_fmsub_ps()
73 return (__m128)__builtin_ia32_vfmsubss((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); in _mm_fmsub_ss()
85 return (__m128)__builtin_ia32_vfnmaddps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); in _mm_fnmadd_ps()
97 return (__m128)__builtin_ia32_vfnmaddss((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); in _mm_fnmadd_ss()
109 return (__m128)__builtin_ia32_vfnmsubps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); in _mm_fnmsub_ps()
121 return (__m128)__builtin_ia32_vfnmsubss((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); in _mm_fnmsub_ss()
133 return (__m128)__builtin_ia32_vfmaddsubps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); in _mm_fmaddsub_ps()
145 return (__m128)__builtin_ia32_vfmsubaddps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); in _mm_fmsubadd_ps()
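
Usage sketch for the FMA3 wrappers listed above (assumes compiling with -mfma): _mm_fmadd_ps computes a*b + c per lane with a single rounding step, built on the __builtin_ia32_vfmaddps call shown in this header version.

#include <immintrin.h>

/* fused multiply-add: a*b + c in one rounding (FMA3) */
static inline __m128 fma_example(__m128 a, __m128 b, __m128 c) {
  return _mm_fmadd_ps(a, b, c);
}
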
avx512vldqintrin.h
131 return (__m128) __builtin_ia32_andnps128_mask ((__v4sf) __A, in _mm_mask_andnot_ps()
132 (__v4sf) __B, in _mm_mask_andnot_ps()
133 (__v4sf) __W, in _mm_mask_andnot_ps()
139 return (__m128) __builtin_ia32_andnps128_mask ((__v4sf) __A, in _mm_maskz_andnot_ps()
140 (__v4sf) __B, in _mm_maskz_andnot_ps()
141 (__v4sf) in _mm_maskz_andnot_ps()
199 return (__m128) __builtin_ia32_andps128_mask ((__v4sf) __A, in _mm_mask_and_ps()
200 (__v4sf) __B, in _mm_mask_and_ps()
201 (__v4sf) __W, in _mm_mask_and_ps()
207 return (__m128) __builtin_ia32_andps128_mask ((__v4sf) __A, in _mm_maskz_and_ps()
[all …]
pmmintrin.h
67 return __builtin_ia32_addsubps((__v4sf)__a, (__v4sf)__b); in _mm_addsub_ps()
90 return __builtin_ia32_haddps((__v4sf)__a, (__v4sf)__b); in _mm_hadd_ps()
113 return __builtin_ia32_hsubps((__v4sf)__a, (__v4sf)__b); in _mm_hsub_ps()
135 return __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 1, 1, 3, 3); in _mm_movehdup_ps()
157 return __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 0, 0, 2, 2); in _mm_moveldup_ps()
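
The _mm_movehdup_ps / _mm_moveldup_ps entries show __builtin_shufflevector with constant lane indices. A hedged sketch of the same duplication done directly on a vector-extension type (local typedef assumed, not from the header):

typedef float v4sf_local __attribute__((__vector_size__(16)));

/* duplicate the odd lanes: {a1, a1, a3, a3}, as in _mm_movehdup_ps */
static inline v4sf_local dup_odd(v4sf_local a) {
  return __builtin_shufflevector(a, a, 1, 1, 3, 3);
}
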
avx512erintrin.h
129 (__m128)__builtin_ia32_rsqrt28ss_round_mask((__v4sf)(__m128)(A), \
130 (__v4sf)(__m128)(B), \
131 (__v4sf)_mm_setzero_ps(), \
135 (__m128)__builtin_ia32_rsqrt28ss_round_mask((__v4sf)(__m128)(A), \
136 (__v4sf)(__m128)(B), \
137 (__v4sf)(__m128)(S), \
141 (__m128)__builtin_ia32_rsqrt28ss_round_mask((__v4sf)(__m128)(A), \
142 (__v4sf)(__m128)(B), \
143 (__v4sf)_mm_setzero_ps(), \
232 (__m128)__builtin_ia32_rcp28ss_round_mask((__v4sf)(__m128)(A), \
[all …]
avx512fintrin.h
339 return (__m512)__builtin_shufflevector((__v4sf) __A, in _mm512_broadcastss_ps()
340 (__v4sf)_mm_undefined_ps(), in _mm512_broadcastss_ps()
935 return (__m128) __builtin_ia32_maxss_round_mask ((__v4sf) __A, in _mm_mask_max_ss()
936 (__v4sf) __B, in _mm_mask_max_ss()
937 (__v4sf) __W, in _mm_mask_max_ss()
944 return (__m128) __builtin_ia32_maxss_round_mask ((__v4sf) __A, in _mm_maskz_max_ss()
945 (__v4sf) __B, in _mm_maskz_max_ss()
946 (__v4sf) _mm_setzero_ps (), in _mm_maskz_max_ss()
952 (__m128)__builtin_ia32_maxss_round_mask((__v4sf)(__m128)(A), \
953 (__v4sf)(__m128)(B), \
[all …]
avx512vlintrin.h
1221 (__mmask8)__builtin_ia32_cmpps128_mask((__v4sf)(__m128)(a), \
1222 (__v4sf)(__m128)(b), (int)(p), \
1226 (__mmask8)__builtin_ia32_cmpps128_mask((__v4sf)(__m128)(a), \
1227 (__v4sf)(__m128)(b), (int)(p), \
1387 return (__m128) __builtin_ia32_vfmaddps128_mask ((__v4sf) __A, in _mm_mask_fmadd_ps()
1388 (__v4sf) __B, in _mm_mask_fmadd_ps()
1389 (__v4sf) __C, in _mm_mask_fmadd_ps()
1396 return (__m128) __builtin_ia32_vfmaddps128_mask3 ((__v4sf) __A, in _mm_mask3_fmadd_ps()
1397 (__v4sf) __B, in _mm_mask3_fmadd_ps()
1398 (__v4sf) __C, in _mm_mask3_fmadd_ps()
[all …]
smmintrin.h
60 (__m128)__builtin_ia32_roundps((__v4sf)(__m128)(X), (M)); })
63 (__m128)__builtin_ia32_roundss((__v4sf)(__m128)(X), \
64 (__v4sf)(__m128)(Y), (M)); })
81 (__m128)__builtin_shufflevector((__v4sf)(__m128)(V1), (__v4sf)(__m128)(V2), \
97 return (__m128) __builtin_ia32_blendvps ((__v4sf)__V1, (__v4sf)__V2, in _mm_blendv_ps()
98 (__v4sf)__M); in _mm_blendv_ps()
135 (__m128) __builtin_ia32_dpps((__v4sf)(__m128)(X), \
136 (__v4sf)(__m128)(Y), (M)); })
202 __v4sf __a = (__v4sf)(__m128)(X); \
208 #define _MM_EXTRACT_FLOAT(D, X, N) (__extension__ ({ __v4sf __a = (__v4sf)(X); \
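
Usage sketch for the blend entry above (SSE4.1, -msse4.1): _mm_blendv_ps picks each lane from the second operand where the mask's sign bit is set, otherwise from the first, matching the __builtin_ia32_blendvps call shown.

#include <smmintrin.h>

/* per-lane select on the sign bit of mask */
static inline __m128 select_ps(__m128 if_clear, __m128 if_set, __m128 mask) {
  return _mm_blendv_ps(if_clear, if_set, mask);
}
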
avx512dqintrin.h
789 (__m128)__builtin_ia32_rangess128_round_mask((__v4sf)(__m128)(A), \
790 (__v4sf)(__m128)(B), \
791 (__v4sf)_mm_setzero_ps(), \
798 (__m128)__builtin_ia32_rangess128_round_mask((__v4sf)(__m128)(A), \
799 (__v4sf)(__m128)(B), \
800 (__v4sf)(__m128)(W),\
807 (__m128)__builtin_ia32_rangess128_round_mask((__v4sf)(__m128)(A), \
808 (__v4sf)(__m128)(B), \
809 (__v4sf)_mm_setzero_ps(), \
909 (__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \
[all …]
f16cintrin.h
49 __v4sf r = __builtin_ia32_vcvtph2ps(v); in _cvtsh_ss()
76 ((unsigned short)(((__v8hi)__builtin_ia32_vcvtps2ph((__v4sf){a, 0, 0, 0}, \
103 ((__m128i)__builtin_ia32_vcvtps2ph((__v4sf)(__m128)(a), (imm)))
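
Sketch of the half-precision conversions wrapped above (F16C, assumes -mf16c); the immediate 0 selects round-to-nearest-even when converting down.

#include <immintrin.h>

/* float -> half -> float round trip via _cvtss_sh / _cvtsh_ss */
static inline float roundtrip_half(float x) {
  unsigned short h = _cvtss_sh(x, 0);
  return _cvtsh_ss(h);
}
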
avxintrin.h
880 return (__m128)__builtin_ia32_vpermilvarps((__v4sf)__a, (__v4si)__c); in _mm_permutevar_ps()
1102 (__m128)__builtin_shufflevector((__v4sf)(__m128)(A), \
1103 (__v4sf)_mm_undefined_ps(), \
1707 (__m128)__builtin_ia32_cmpps((__v4sf)(__m128)(a), \
1708 (__v4sf)(__m128)(b), (c)); })
1849 (__m128)__builtin_ia32_cmpss((__v4sf)(__m128)(a), \
1850 (__v4sf)(__m128)(b), (c)); })
2114 return (__m256d)__builtin_convertvector((__v4sf)__a, __v4df); in _mm256_cvtps_pd()
2220 return __builtin_ia32_vtestzps((__v4sf)__a, (__v4sf)__b); in _mm_testz_ps()
2226 return __builtin_ia32_vtestcps((__v4sf)__a, (__v4sf)__b); in _mm_testc_ps()
[all …]
avx2intrin.h
841 return (__m128)__builtin_shufflevector((__v4sf)__X, (__v4sf)__X, 0, 0, 0, 0); in _mm_broadcastss_ps()
853 return (__m256)__builtin_shufflevector((__v4sf)__X, (__v4sf)__X, 0, 0, 0, 0, 0, 0, 0, 0); in _mm256_broadcastss_ps()
1115 (__m128)__builtin_ia32_gatherd_ps((__v4sf)(__m128)(a), \
1118 (__v4sf)(__m128)(mask), (s)); })
1127 (__m128)__builtin_ia32_gatherq_ps((__v4sf)(__m128)(a), \
1130 (__v4sf)(__m128)(mask), (s)); })
1133 (__m128)__builtin_ia32_gatherq_ps256((__v4sf)(__m128)(a), \
1136 (__v4sf)(__m128)(mask), (s)); })
1221 (__m128)__builtin_ia32_gatherd_ps((__v4sf)_mm_undefined_ps(), \
1224 (__v4sf)_mm_cmpeq_ps(_mm_setzero_ps(), \
[all …]
ammintrin.h
189 __builtin_ia32_movntss(__p, (__v4sf)__a); in _mm_stream_ss()
xopintrin.h
736 (__m128)__builtin_ia32_vpermil2ps((__v4sf)(__m128)(X), (__v4sf)(__m128)(Y), \
747 return (__m128)__builtin_ia32_vfrczss((__v4sf)__A); in _mm_frcz_ss()
759 return (__m128)__builtin_ia32_vfrczps((__v4sf)__A); in _mm_frcz_ps()
emmintrin.h
395 __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 0, 1), __v2df); in _mm_cvtps_pd()
1742 return (__m128i)__builtin_ia32_cvtps2dq((__v4sf)__a); in _mm_cvtps_epi32()
1758 return (__m128i)__builtin_convertvector((__v4sf)__a, __v4si); in _mm_cvttps_epi32()
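
The last two emmintrin.h entries are the packed float-to-int conversions: _mm_cvtps_epi32 rounds under the current rounding mode, while _mm_cvttps_epi32 truncates (the __builtin_convertvector form above). Usage sketch:

#include <emmintrin.h>

/* truncate toward zero, per lane */
static inline __m128i floats_to_ints(__m128 a) {
  return _mm_cvttps_epi32(a);
}
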
/external/clang/test/CodeGen/
target-builtin-error-2.c
10 …return __builtin_ia32_vpermilvarps((__v4sf) {0.0f, 1.0f, 2.0f, 3.0f}, (__v4si)a); // expected-erro… in wombat()
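
That test exercises the diagnostic for calling an AVX builtin without the feature enabled. A hedged sketch of the usual fix is to enable the feature on the function (or build with -mavx) and call the public intrinsic rather than the builtin directly:

#include <immintrin.h>

/* enable AVX for just this function so the permute intrinsic is legal */
__attribute__((target("avx")))
static __m128 permute_ok(__m128 x, __m128i ctrl) {
  return _mm_permutevar_ps(x, ctrl);   /* wraps __builtin_ia32_vpermilvarps */
}
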