Searched refs:__m256d (results 1–25 of 27), sorted by relevance

/external/clang/test/CodeGen/
avx-builtins.c
11 __m256d test_mm256_add_pd(__m256d A, __m256d B) { in test_mm256_add_pd()
23 __m256d test_mm256_addsub_pd(__m256d A, __m256d B) { in test_mm256_addsub_pd()
35 __m256d test_mm256_and_pd(__m256d A, __m256d B) { in test_mm256_and_pd()
47 __m256d test_mm256_andnot_pd(__m256d A, __m256d B) { in test_mm256_andnot_pd()
61 __m256d test_mm256_blend_pd(__m256d A, __m256d B) { in test_mm256_blend_pd()
73 __m256d test_mm256_blendv_pd(__m256d V1, __m256d V2, __m256d V3) { in test_mm256_blendv_pd()
85 __m256d test_mm256_broadcast_pd(__m128d* A) { in test_mm256_broadcast_pd()
97 __m256d test_mm256_broadcast_sd(double* A) { in test_mm256_broadcast_sd()
117 __m256d test_mm256_broadcast_ss(float* A) { in test_mm256_broadcast_ss()
131 __m256 test_mm256_castpd_ps(__m256d A) { in test_mm256_castpd_ps()
[all …]
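
The avx-builtins.c matches above exercise the basic AVX double-precision intrinsics. As orientation, a minimal standalone sketch (not taken from the test file) that builds __m256d values and applies two of the matched operations; compile with -mavx:

#include <immintrin.h>
#include <stdio.h>

int main(void) {
    __m256d a = _mm256_set_pd(4.0, 3.0, 2.0, 1.0);  /* lanes low-to-high: 1,2,3,4 */
    __m256d b = _mm256_set1_pd(10.0);               /* broadcast 10.0 to all 4 lanes */

    __m256d sum    = _mm256_add_pd(a, b);           /* per-lane add */
    __m256d addsub = _mm256_addsub_pd(a, b);        /* subtract in even lanes, add in odd */

    double out[4];
    _mm256_storeu_pd(out, sum);
    printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);
    _mm256_storeu_pd(out, addsub);
    printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);
    return 0;
}
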
avx512vldq-builtins.c
44 __m256d test_mm256_mask_andnot_pd (__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) { in test_mm256_mask_andnot_pd()
47 return (__m256d) _mm256_mask_andnot_pd ( __W, __U, __A, __B); in test_mm256_mask_andnot_pd()
50 __m256d test_mm256_maskz_andnot_pd (__mmask8 __U, __m256d __A, __m256d __B) { in test_mm256_maskz_andnot_pd()
53 return (__m256d) _mm256_maskz_andnot_pd (__U, __A, __B); in test_mm256_maskz_andnot_pd()
92 __m256d test_mm256_mask_and_pd (__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) { in test_mm256_mask_and_pd()
95 return (__m256d) _mm256_mask_and_pd ( __W, __U, __A, __B); in test_mm256_mask_and_pd()
98 __m256d test_mm256_maskz_and_pd (__mmask8 __U, __m256d __A, __m256d __B) { in test_mm256_maskz_and_pd()
101 return (__m256d) _mm256_maskz_and_pd (__U, __A, __B); in test_mm256_maskz_and_pd()
140 __m256d test_mm256_mask_xor_pd (__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) { in test_mm256_mask_xor_pd()
143 return (__m256d) _mm256_mask_xor_pd ( __W, __U, __A, __B); in test_mm256_mask_xor_pd()
[all …]
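
The avx512vldq-builtins.c matches are the merge-masked (_mask_) and zero-masked (_maskz_) forms of the 256-bit logical operations. A hedged sketch of what the two mask flavors mean, assuming AVX-512VL and AVX-512DQ support (-mavx512vl -mavx512dq); the mask constant and function name are illustrative:

#include <immintrin.h>

/* Merge-masking: lanes whose mask bit is 0 keep the value from src.
   Zero-masking: lanes whose mask bit is 0 become 0.0. */
__m256d mask_and_demo(__m256d src, __m256d a, __m256d b) {
    __mmask8 k = 0x5;                                  /* select lanes 0 and 2 */
    __m256d merged = _mm256_mask_and_pd(src, k, a, b); /* lanes 1,3 taken from src */
    __m256d zeroed = _mm256_maskz_and_pd(k, a, b);     /* lanes 1,3 forced to 0.0 */
    return _mm256_add_pd(merged, zeroed);
}
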
fma4-builtins.c
134 __m256d test_mm256_macc_pd(__m256d a, __m256d b, __m256d c) { in test_mm256_macc_pd()
146 __m256d test_mm256_msub_pd(__m256d a, __m256d b, __m256d c) { in test_mm256_msub_pd()
158 __m256d test_mm256_nmacc_pd(__m256d a, __m256d b, __m256d c) { in test_mm256_nmacc_pd()
170 __m256d test_mm256_nmsub_pd(__m256d a, __m256d b, __m256d c) { in test_mm256_nmsub_pd()
182 __m256d test_mm256_maddsub_pd(__m256d a, __m256d b, __m256d c) { in test_mm256_maddsub_pd()
194 __m256d test_mm256_msubadd_pd(__m256d a, __m256d b, __m256d c) { in test_mm256_msubadd_pd()
fma-builtins.c
113 __m256d test_mm256_fmadd_pd(__m256d a, __m256d b, __m256d c) { in test_mm256_fmadd_pd()
123 __m256d test_mm256_fmsub_pd(__m256d a, __m256d b, __m256d c) { in test_mm256_fmsub_pd()
133 __m256d test_mm256_fnmadd_pd(__m256d a, __m256d b, __m256d c) { in test_mm256_fnmadd_pd()
143 __m256d test_mm256_fnmsub_pd(__m256d a, __m256d b, __m256d c) { in test_mm256_fnmsub_pd()
153 __m256d test_mm256_fmaddsub_pd(__m256d a, __m256d b, __m256d c) { in test_mm256_fmaddsub_pd()
163 __m256d test_mm256_fmsubadd_pd(__m256d a, __m256d b, __m256d c) { in test_mm256_fmsubadd_pd()
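
fma4-builtins.c and fma-builtins.c cover the same arithmetic through two instruction sets: AMD's FMA4 (_mm256_macc_pd and friends) and the now-standard FMA3 (_mm256_fmadd_pd and friends), both computing a fused a*b+c per lane. A small sketch of the FMA3 spellings, compiled with -mfma; the combining return is only there to keep every value live:

#include <immintrin.h>

__m256d fma_variants(__m256d a, __m256d b, __m256d c) {
    __m256d r0 = _mm256_fmadd_pd(a, b, c);    /*  a*b + c                     */
    __m256d r1 = _mm256_fmsub_pd(a, b, c);    /*  a*b - c                     */
    __m256d r2 = _mm256_fnmadd_pd(a, b, c);   /* -a*b + c                     */
    __m256d r3 = _mm256_fmaddsub_pd(a, b, c); /* subtract even lanes, add odd */
    return _mm256_add_pd(_mm256_add_pd(r0, r1), _mm256_add_pd(r2, r3));
}
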
avx-shuffle-builtins.c
25 __m256d test_mm256_permute_pd(__m256d a) { in test_mm256_permute_pd()
50 __m256d test_mm256_permute2f128_pd(__m256d a, __m256d b) { in test_mm256_permute2f128_pd()
78 __m256d
110 __m256d test_mm256_insertf128_pd_0(__m256d a, __m128d b) { in test_mm256_insertf128_pd_0()
128 __m256d test_mm256_insertf128_pd_1(__m256d a, __m128d b) { in test_mm256_insertf128_pd_1()
148 __m128d test_mm256_extractf128_pd_0(__m256d a) { in test_mm256_extractf128_pd_0()
166 __m128d test_mm256_extractf128_pd_1(__m256d a) { in test_mm256_extractf128_pd_1()
184 __m256d test_mm256_set_m128d(__m128d hi, __m128d lo) { in test_mm256_set_m128d()
202 __m256d test_mm256_setr_m128d(__m128d hi, __m128d lo) { in test_mm256_setr_m128d()
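
The avx-shuffle-builtins.c matches manipulate the two 128-bit halves of a __m256d. A standalone sketch of two patterns from that file, with illustrative names; compile with -mavx:

#include <immintrin.h>

/* Selector 0x01: the low half of the result takes a's high lane and
   the high half takes a's low lane, i.e. the halves are swapped. */
__m256d swap_halves(__m256d a) {
    return _mm256_permute2f128_pd(a, a, 0x01);
}

/* Build a 256-bit vector from two 128-bit halves, then pull one back out. */
__m128d roundtrip_low(__m128d lo, __m128d hi) {
    __m256d v = _mm256_set_m128d(hi, lo);  /* hi lands in the upper lane */
    return _mm256_extractf128_pd(v, 0);    /* recovers lo */
}
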
avx512vl-builtins.c
1048 __mmask8 test_mm256_cmp_pd_mask(__m256d __A, __m256d __B) { in test_mm256_cmp_pd_mask()
1054 __mmask8 test_mm256_mask_cmp_pd_mask(__mmask8 m, __m256d __A, __m256d __B) { in test_mm256_mask_cmp_pd_mask()
1120 __m256d test_mm256_mask_fmadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C) { in test_mm256_mask_fmadd_pd()
1126 __m256d test_mm256_mask_fmsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C) { in test_mm256_mask_fmsub_pd()
1132 __m256d test_mm256_mask3_fmadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U) { in test_mm256_mask3_fmadd_pd()
1138 __m256d test_mm256_mask3_fnmadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U) { in test_mm256_mask3_fnmadd_pd()
1144 __m256d test_mm256_maskz_fmadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C) { in test_mm256_maskz_fmadd_pd()
1150 __m256d test_mm256_maskz_fmsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C) { in test_mm256_maskz_fmsub_pd()
1156 __m256d test_mm256_maskz_fnmadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C) { in test_mm256_maskz_fnmadd_pd()
1162 __m256d test_mm256_maskz_fnmsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C) { in test_mm256_maskz_fnmsub_pd()
[all …]
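
The avx512vl-builtins.c matches add the mask-register variants: compares produce an __mmask8 bit mask rather than a vector, and that mask then drives the merge- or zero-masked FMAs matched above. A minimal sketch of the compare half (requires -mavx512vl; the predicate choice is illustrative):

#include <immintrin.h>

/* _CMP_LT_OQ: less-than, ordered, non-signaling.
   Bit i of the result is set iff a[i] < b[i]. */
__mmask8 lanes_below(__m256d a, __m256d b) {
    return _mm256_cmp_pd_mask(a, b, _CMP_LT_OQ);
}
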
avx-cmp-builtins.c
25 __m256d test_cmp_pd256(__m256d a, __m256d b) { in test_cmp_pd256()
31 __m256d test_cmp_ps256(__m256 a, __m256 b) { in test_cmp_ps256()
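
avx-cmp-builtins.c shows the pre-AVX-512 compare style, where the result is a per-lane all-ones/all-zeros __m256d that feeds a blend instead of a mask register. A short sketch of that compare-then-blendv idiom (function name is illustrative); compile with -mavx:

#include <immintrin.h>

__m256d clamp_min(__m256d v, __m256d lo) {
    __m256d lt = _mm256_cmp_pd(v, lo, _CMP_LT_OQ); /* all-ones where v < lo */
    return _mm256_blendv_pd(v, lo, lt);            /* take lo in those lanes */
}
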
xop-builtins.c
341 __m256d test_mm256_permute2_pd(__m256d a, __m256d b, __m256i c) { in test_mm256_permute2_pd()
389 __m256d test_mm256_frcz_pd(__m256d a) { in test_mm256_frcz_pd()
avx2-builtins.c
192 __m256d test_mm256_broadcastsd_pd(__m128d a) { in test_mm256_broadcastsd_pd()
489 __m256d test_mm256_i32gather_pd(double const *b, __m128i c) { in test_mm256_i32gather_pd()
498 __m256d test_mm256_mask_i32gather_pd(__m256d a, double const *b, __m128i c, __m256d d) { in test_mm256_mask_i32gather_pd()
597 __m256d test_mm256_i64gather_pd(double const *b, __m256i c) { in test_mm256_i64gather_pd()
606 __m256d test_mm256_mask_i64gather_pd(__m256d a, double const *b, __m256i c, __m256d d) { in test_mm256_mask_i64gather_pd()
904 __m256d test_mm256_permute4x64_pd(__m256d a) { in test_mm256_permute4x64_pd()
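
Among the avx2-builtins.c matches are the gather intrinsics, which load four doubles from base + index*scale addresses in a single call. A minimal sketch (requires -mavx2); the scale must be a compile-time constant of 1, 2, 4, or 8:

#include <immintrin.h>

/* Loads base[idx[0]], base[idx[1]], base[idx[2]], base[idx[3]]
   into one __m256d; the scale of 8 is sizeof(double). */
__m256d gather4(const double *base, __m128i idx) {
    return _mm256_i32gather_pd(base, idx, 8);
}
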
avx512f-builtins.c
1047 __m256d test_mm512_extractf64x4_pd(__m512d a) in test_mm512_extractf64x4_pd()
1054 __m256d test_mm512_mask_extractf64x4_pd(__m256d __W,__mmask8 __U,__m512d __A){ in test_mm512_mask_extractf64x4_pd()
1060 __m256d test_mm512_maskz_extractf64x4_pd(__mmask8 __U,__m512d __A){ in test_mm512_maskz_extractf64x4_pd()
4351 __m512d test_mm512_broadcast_f64x4(__m256d __A) { in test_mm512_broadcast_f64x4()
4357 __m512d test_mm512_mask_broadcast_f64x4(__m512d __O, __mmask8 __M, __m256d __A) { in test_mm512_mask_broadcast_f64x4()
4363 __m512d test_mm512_maskz_broadcast_f64x4(__mmask8 __M, __m256d __A) { in test_mm512_maskz_broadcast_f64x4()
4881 __m512d test_mm512_insertf64x4(__m512d __A, __m256d __B) { in test_mm512_insertf64x4()
4887 __m512d test_mm512_mask_insertf64x4(__m512d __W, __mmask8 __U, __m512d __A, __m256d __B) { in test_mm512_mask_insertf64x4()
4893 __m512d test_mm512_maskz_insertf64x4(__mmask8 __U, __m512d __A, __m256d __B) { in test_mm512_maskz_insertf64x4()
6409 __m512d test_mm512_castpd256_pd512(__m256d a) in test_mm512_castpd256_pd512()
[all …]
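
In avx512f-builtins.c a __m256d appears as one half of a __m512d via the f64x4 insert/extract intrinsics. A small sketch (requires -mavx512f); imm 0 addresses the low 256 bits and imm 1 the high:

#include <immintrin.h>

__m256d high_half(__m512d v) {
    return _mm512_extractf64x4_pd(v, 1);
}

__m512d replace_high(__m512d v, __m256d h) {
    return _mm512_insertf64x4(v, h, 1);
}
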
/external/clang/lib/Headers/
avxintrin.h
49 typedef double __m256d __attribute__((__vector_size__(32))); typedef
68 static __inline __m256d __DEFAULT_FN_ATTRS
69 _mm256_add_pd(__m256d __a, __m256d __b) in _mm256_add_pd()
71 return (__m256d)((__v4df)__a+(__v4df)__b); in _mm256_add_pd()
104 static __inline __m256d __DEFAULT_FN_ATTRS
105 _mm256_sub_pd(__m256d __a, __m256d __b) in _mm256_sub_pd()
107 return (__m256d)((__v4df)__a-(__v4df)__b); in _mm256_sub_pd()
141 static __inline __m256d __DEFAULT_FN_ATTRS
142 _mm256_addsub_pd(__m256d __a, __m256d __b) in _mm256_addsub_pd()
144 return (__m256d)__builtin_ia32_addsubpd256((__v4df)__a, (__v4df)__b); in _mm256_addsub_pd()
[all …]
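
The avxintrin.h snippet shows that __m256d is defined as an ordinary Clang/GCC vector-extension type, which is why _mm256_add_pd can be implemented as a plain + on __v4df. A sketch of the same mechanism with a user-defined type (the name v4d is illustrative; no intrinsics header needed):

/* The compiler lowers elementwise operators on vector types to SIMD
   instructions when the target supports them; a scalar operand is
   broadcast across the lanes. */
typedef double v4d __attribute__((__vector_size__(32)));

v4d scale_add(v4d a, v4d b, double s) {
    return a + b * s;  /* elementwise a[i] + b[i]*s */
}
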
fmaintrin.h
160 static __inline__ __m256d __DEFAULT_FN_ATTRS
161 _mm256_fmadd_pd(__m256d __A, __m256d __B, __m256d __C) in _mm256_fmadd_pd()
163 return (__m256d)__builtin_ia32_vfmaddpd256((__v4df)__A, (__v4df)__B, (__v4df)__C); in _mm256_fmadd_pd()
172 static __inline__ __m256d __DEFAULT_FN_ATTRS
173 _mm256_fmsub_pd(__m256d __A, __m256d __B, __m256d __C) in _mm256_fmsub_pd()
175 return (__m256d)__builtin_ia32_vfmsubpd256((__v4df)__A, (__v4df)__B, (__v4df)__C); in _mm256_fmsub_pd()
184 static __inline__ __m256d __DEFAULT_FN_ATTRS
185 _mm256_fnmadd_pd(__m256d __A, __m256d __B, __m256d __C) in _mm256_fnmadd_pd()
187 return (__m256d)__builtin_ia32_vfnmaddpd256((__v4df)__A, (__v4df)__B, (__v4df)__C); in _mm256_fnmadd_pd()
196 static __inline__ __m256d __DEFAULT_FN_ATTRS
[all …]
fma4intrin.h
162 static __inline__ __m256d __DEFAULT_FN_ATTRS
163 _mm256_macc_pd(__m256d __A, __m256d __B, __m256d __C) in _mm256_macc_pd()
165 return (__m256d)__builtin_ia32_vfmaddpd256((__v4df)__A, (__v4df)__B, (__v4df)__C); in _mm256_macc_pd()
174 static __inline__ __m256d __DEFAULT_FN_ATTRS
175 _mm256_msub_pd(__m256d __A, __m256d __B, __m256d __C) in _mm256_msub_pd()
177 return (__m256d)__builtin_ia32_vfmsubpd256((__v4df)__A, (__v4df)__B, (__v4df)__C); in _mm256_msub_pd()
186 static __inline__ __m256d __DEFAULT_FN_ATTRS
187 _mm256_nmacc_pd(__m256d __A, __m256d __B, __m256d __C) in _mm256_nmacc_pd()
189 return (__m256d)__builtin_ia32_vfnmaddpd256((__v4df)__A, (__v4df)__B, (__v4df)__C); in _mm256_nmacc_pd()
198 static __inline__ __m256d __DEFAULT_FN_ATTRS
[all …]
avx512vldqintrin.h
78 static __inline__ __m256d __DEFAULT_FN_ATTRS
79 _mm256_mask_andnot_pd (__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) { in _mm256_mask_andnot_pd()
80 return (__m256d) __builtin_ia32_andnpd256_mask ((__v4df) __A, in _mm256_mask_andnot_pd()
86 static __inline__ __m256d __DEFAULT_FN_ATTRS
87 _mm256_maskz_andnot_pd (__mmask8 __U, __m256d __A, __m256d __B) { in _mm256_maskz_andnot_pd()
88 return (__m256d) __builtin_ia32_andnpd256_mask ((__v4df) __A, in _mm256_maskz_andnot_pd()
146 static __inline__ __m256d __DEFAULT_FN_ATTRS
147 _mm256_mask_and_pd (__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) { in _mm256_mask_and_pd()
148 return (__m256d) __builtin_ia32_andpd256_mask ((__v4df) __A, in _mm256_mask_and_pd()
154 static __inline__ __m256d __DEFAULT_FN_ATTRS
[all …]
avx512vlintrin.h
1211 (__mmask8)__builtin_ia32_cmppd256_mask((__v4df)(__m256d)(a), \
1212 (__v4df)(__m256d)(b), (int)(p), \
1216 (__mmask8)__builtin_ia32_cmppd256_mask((__v4df)(__m256d)(a), \
1217 (__v4df)(__m256d)(b), (int)(p), \
1312 static __inline__ __m256d __DEFAULT_FN_ATTRS
1313 _mm256_mask_fmadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C) in _mm256_mask_fmadd_pd()
1315 return (__m256d) __builtin_ia32_vfmaddpd256_mask ((__v4df) __A, in _mm256_mask_fmadd_pd()
1321 static __inline__ __m256d __DEFAULT_FN_ATTRS
1322 _mm256_mask3_fmadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U) in _mm256_mask3_fmadd_pd()
1324 return (__m256d) __builtin_ia32_vfmaddpd256_mask3 ((__v4df) __A, in _mm256_mask3_fmadd_pd()
[all …]
avx2intrin.h
856 static __inline__ __m256d __DEFAULT_FN_ATTRS
859 return (__m256d)__builtin_shufflevector((__v2df)__X, (__v2df)__X, 0, 0, 0, 0); in _mm256_broadcastsd_pd()
944 (__m256d)__builtin_shufflevector((__v4df)(__m256d)(V), \
1097 (__m256d)__builtin_ia32_gatherd_pd256((__v4df)(__m256d)(a), \
1100 (__v4df)(__m256d)(mask), (s)); })
1109 (__m256d)__builtin_ia32_gatherq_pd256((__v4df)(__m256d)(a), \
1112 (__v4df)(__m256d)(mask), (s)); })
1195 (__m256d)__builtin_ia32_gatherd_pd256((__v4df)_mm256_undefined_pd(), \
1212 (__m256d)__builtin_ia32_gatherq_pd256((__v4df)_mm256_undefined_pd(), \
xopintrin.h
731 (__m256d)__builtin_ia32_vpermil2pd256((__v4df)(__m256d)(X), \
732 (__v4df)(__m256d)(Y), \
774 static __inline__ __m256d __DEFAULT_FN_ATTRS
775 _mm256_frcz_pd(__m256d __A) in _mm256_frcz_pd()
777 return (__m256d)__builtin_ia32_vfrczpd256((__v4df)__A); in _mm256_frcz_pd()
/external/clang/test/CodeGenCXX/
x86_64-arguments-avx.cpp
4 typedef double __m256d __attribute__((__vector_size__(32))); typedef
8 __m256d data;
20 typedef double __m256d __attribute__((__vector_size__(32))); typedef
40 __m256d v2;
45 __m256d v3;
mangle-ms-vector-types.cpp
22 void foo256d(__m256d) {} in foo256d() argument
/external/mesa3d/src/gallium/drivers/swr/rasterizer/core/
rasterizer.cpp
48 const __m256d gMaskToVecpd[] =
81 __m256d vQuadOffsets; // offsets for 4 samples of a quad
82 __m256d vRasterTileOffsets; // offsets for the 4 corners of a raster tile
96 __m256d vEdges[NumEdges]; in rasterizePartialTile()
97 __m256d vStepX[NumEdges]; in rasterizePartialTile()
98 __m256d vStepY[NumEdges]; in rasterizePartialTile()
218 __m256d vStartOfRowEdge[NumEdges]; in rasterizePartialTile()
263 INLINE void adjustTopLeftRuleIntFix16(const __m128i vA, const __m128i vB, __m256d &vEdge) in adjustTopLeftRuleIntFix16()
268 __m256d vEdgeOut = vEdge; in adjustTopLeftRuleIntFix16()
269 __m256d vEdgeAdjust = _mm256_sub_pd(vEdge, _mm256_set1_pd(1.0)); in adjustTopLeftRuleIntFix16()
[all …]
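
In swr's rasterizer.cpp, each __m256d holds four double-precision edge-equation values, one per sample of a quad, which are stepped across a tile. A rough, hypothetical sketch of that pattern (vEdge and vStepX echo the names in the snippet; the loop structure is illustrative, not the actual rasterizer):

#include <immintrin.h>

/* A sample is inside an edge when its edge value is >= 0, so the four
   sign bits extracted by movemask give a 4-bit coverage mask per quad. */
static void coverage_row(__m256d vEdge, __m256d vStepX, int numQuads, int *out)
{
    for (int q = 0; q < numQuads; ++q) {
        /* movemask sets bit i when lane i is negative, i.e. outside */
        out[q] = ~_mm256_movemask_pd(vEdge) & 0xF;
        vEdge = _mm256_add_pd(vEdge, vStepX);  /* step the edge equation in X */
    }
}
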
/external/clang/test/Headers/
x86intrin-2.c
43 __m256d __attribute__((__target__("avx"))) mm256_add_pd_wrap(__m256d a, __m256d b) { in mm256_add_pd_wrap()
63 __m256d __attribute__((__target__("fma"))) mm256_fmsubadd_pd_wrap(__m256d a, __m256d b, __m256d c) { in mm256_fmsubadd_pd_wrap()
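
x86intrin-2.c checks that __m256d intrinsics can be enabled per function with the target attribute, without compiling the whole file with -mavx. A minimal sketch; the run-time CPU check remains the caller's responsibility:

#include <immintrin.h>

/* AVX code generation is enabled for this one function only; guard
   calls with a check such as __builtin_cpu_supports("avx"). */
__attribute__((__target__("avx")))
__m256d add_pd_avx(__m256d a, __m256d b) {
    return _mm256_add_pd(a, b);
}
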
/external/eigen/Eigen/src/Core/arch/AVX/
Complex.h
243 EIGEN_STRONG_INLINE explicit Packet2cd(const __m256d& a) : v(a) {}
244 __m256d v;
277 …const __m256d mask = _mm256_castsi256_pd(_mm256_set_epi32(0x80000000,0x0,0x0,0x0,0x80000000,0x0,0x…
283 __m256d tmp1 = _mm256_shuffle_pd(a.v,a.v,0x0);
284 __m256d even = _mm256_mul_pd(tmp1, b.v);
285 __m256d tmp2 = _mm256_shuffle_pd(a.v,a.v,0xF);
286 __m256d tmp3 = _mm256_shuffle_pd(b.v,b.v,0x5);
287 __m256d odd = _mm256_mul_pd(tmp2, tmp3);
336 __m256d result = _mm256_permute2f128_pd(a.v, a.v, 1);
424 __m256d tmp = _mm256_mul_pd(b.v, b.v);
[all …]
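
Eigen's Packet2cd packs two double-precision complex numbers into one __m256d, laid out {re0, im0, re1, im1}; the shuffle/mul/addsub sequence in the snippet is the classic SIMD complex product. A standalone sketch of the same trick (function name is illustrative); compile with -mavx:

#include <immintrin.h>

/* (ar + i*ai)(br + i*bi) = (ar*br - ai*bi) + i(ar*bi + ai*br) */
__m256d cmul2(__m256d a, __m256d b) {
    __m256d ar   = _mm256_shuffle_pd(a, a, 0x0); /* duplicate real parts    */
    __m256d ai   = _mm256_shuffle_pd(a, a, 0xF); /* duplicate imag parts    */
    __m256d bsw  = _mm256_shuffle_pd(b, b, 0x5); /* swap re/im within b     */
    __m256d even = _mm256_mul_pd(ar, b);         /* {ar*br, ar*bi, ...}     */
    __m256d odd  = _mm256_mul_pd(ai, bsw);       /* {ai*bi, ai*br, ...}     */
    return _mm256_addsub_pd(even, odd);          /* sub even lanes, add odd */
}
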
PacketMath.h
33 typedef __m256d Packet4d;
37 template<> struct is_arithmetic<__m256d> { enum { value = true }; };
334 __m256d tmp = _mm256_shuffle_pd(a,a,5);
337 __m256d swap_halves = _mm256_permute2f128_pd(a,a,1);
518 __m256d tmp = _mm256_permute_pd(first, 5);
530 __m256d tmp = _mm256_permute_pd(first, 5);
585 __m256d T0 = _mm256_shuffle_pd(kernel.packet[0], kernel.packet[1], 15);
586 __m256d T1 = _mm256_shuffle_pd(kernel.packet[0], kernel.packet[1], 0);
587 __m256d T2 = _mm256_shuffle_pd(kernel.packet[2], kernel.packet[3], 15);
588 __m256d T3 = _mm256_shuffle_pd(kernel.packet[2], kernel.packet[3], 0);
[all …]
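
The shuffle_pd/permute2f128_pd pairs in Eigen's AVX PacketMath.h implement a 4x4 transpose of double rows. A self-contained sketch of that pattern under the same lane semantics (helper name is illustrative); compile with -mavx:

#include <immintrin.h>

/* Rows r[0..3] = {a0..a3}, {b0..b3}, {c0..c3}, {d0..d3} become columns. */
void transpose4x4(__m256d r[4]) {
    __m256d t0 = _mm256_shuffle_pd(r[0], r[1], 0xF); /* {a1,b1,a3,b3} */
    __m256d t1 = _mm256_shuffle_pd(r[0], r[1], 0x0); /* {a0,b0,a2,b2} */
    __m256d t2 = _mm256_shuffle_pd(r[2], r[3], 0xF); /* {c1,d1,c3,d3} */
    __m256d t3 = _mm256_shuffle_pd(r[2], r[3], 0x0); /* {c0,d0,c2,d2} */
    r[0] = _mm256_permute2f128_pd(t1, t3, 0x20);     /* {a0,b0,c0,d0} */
    r[1] = _mm256_permute2f128_pd(t0, t2, 0x20);     /* {a1,b1,c1,d1} */
    r[2] = _mm256_permute2f128_pd(t1, t3, 0x31);     /* {a2,b2,c2,d2} */
    r[3] = _mm256_permute2f128_pd(t0, t2, 0x31);     /* {a3,b3,c3,d3} */
}
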
/external/mesa3d/src/gallium/drivers/swr/rasterizer/common/
simd16intrin.h
39 __m256d lo;
40 __m256d hi;
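
simd16intrin.h emulates a 512-bit register as a {lo, hi} pair of 256-bit halves, with each wide operation applied half by half. A hypothetical sketch of that pattern for doubles (the simd8double name is invented for illustration):

#include <immintrin.h>

typedef struct { __m256d lo; __m256d hi; } simd8double;

/* An 8-wide add is just two 4-wide adds, one per half. */
static simd8double simd8_add_pd(simd8double a, simd8double b) {
    simd8double r;
    r.lo = _mm256_add_pd(a.lo, b.lo);
    r.hi = _mm256_add_pd(a.hi, b.hi);
    return r;
}
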
/external/eigen/Eigen/src/Core/arch/AVX512/
PacketMath.h
846 __m256d final_0 = _mm256_blend_pd(tmp0, tmp1, 0xC);
862 __m256d final_1 = _mm256_blend_pd(tmp0, tmp1, 0xC);
