Lines Matching refs: __v4df
31 typedef double __v4df __attribute__ ((__vector_size__ (32)));
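
Note: the typedef above uses the GCC/Clang vector_size extension, so __v4df is four doubles packed into 32 bytes that the compiler treats as a first-class value. A minimal standalone sketch (not from the header) of how such a type behaves:

    /* Sketch: a 256-bit vector of four doubles via the vector_size extension. */
    typedef double v4df __attribute__((__vector_size__(32)));

    double sum_first_two(void) {
        v4df a = {1.0, 2.0, 3.0, 4.0};
        v4df b = {10.0, 20.0, 30.0, 40.0};
        v4df c = a + b;      /* element-wise add, no intrinsic needed */
        return c[0] + c[1];  /* elements are indexable like an array */
    }
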
71 return (__m256d)((__v4df)__a+(__v4df)__b); in _mm256_add_pd()
107 return (__m256d)((__v4df)__a-(__v4df)__b); in _mm256_sub_pd()
144 return (__m256d)__builtin_ia32_addsubpd256((__v4df)__a, (__v4df)__b); in _mm256_addsub_pd()
181 return (__m256d)((__v4df)__a/(__v4df)__b); in _mm256_div_pd()
218 return (__m256d)__builtin_ia32_maxpd256((__v4df)__a, (__v4df)__b); in _mm256_max_pd()
256 return (__m256d)__builtin_ia32_minpd256((__v4df)__a, (__v4df)__b); in _mm256_min_pd()
293 return (__m256d)((__v4df)__a * (__v4df)__b); in _mm256_mul_pd()
328 return (__m256d)__builtin_ia32_sqrtpd256((__v4df)__a); in _mm256_sqrt_pd()
412 (__m256d)__builtin_ia32_roundpd256((__v4df)(__m256d)(V), (M)); })
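
Note: the arithmetic intrinsics above mostly just cast __m256d to __v4df so a plain C operator can express the operation; only operations with no C operator (addsub, max, min, sqrt, round) go through __builtin_ia32_* builtins. A sketch of the cast-and-operate pattern (the name add_pd_sketch is hypothetical):

    #include <immintrin.h>

    typedef double v4df __attribute__((__vector_size__(32)));

    /* Sketch of what _mm256_add_pd does: reinterpret both arguments as
       plain vector types, add with the native '+', cast back. This
       typically compiles to a single vaddpd. */
    static __m256d add_pd_sketch(__m256d a, __m256d b) {
        return (__m256d)((v4df)a + (v4df)b);
    }
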
687 return (__m256d)__builtin_ia32_haddpd256((__v4df)__a, (__v4df)__b); in _mm256_hadd_pd()
733 return (__m256d)__builtin_ia32_hsubpd256((__v4df)__a, (__v4df)__b); in _mm256_hsub_pd()
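
Note: the horizontal builtins sum adjacent pairs within each 128-bit lane and interleave the results from the two sources. A scalar reference model for _mm256_hadd_pd (semantics per Intel's documentation; the function name is hypothetical):

    /* out = {a0+a1, b0+b1, a2+a3, b2+b3} */
    void hadd_pd_ref(const double a[4], const double b[4], double out[4]) {
        out[0] = a[0] + a[1];
        out[1] = b[0] + b[1];
        out[2] = a[2] + a[3];
        out[3] = b[2] + b[3];
    }
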
825 return (__m256d)__builtin_ia32_vpermilvarpd256((__v4df)__a, (__v4di)__c); in _mm256_permutevar_pd()
1042 (__m256d)__builtin_shufflevector((__v4df)(__m256d)(A), \
1043 (__v4df)_mm256_undefined_pd(), \
1245 (__m256d)__builtin_ia32_vperm2f128_pd256((__v4df)(__m256d)(V1), \
1246 (__v4df)(__m256d)(V2), (M)); })
1355 (__m256d)__builtin_shufflevector((__v4df)(__m256d)(V1), \
1356 (__v4df)(__m256d)(V2), \
1423 (__v4df)__a, (__v4df)__b, (__v4df)__c); in _mm256_blendv_pd()
1597 (__m256d)__builtin_shufflevector((__v4df)(__m256d)(a), \
1598 (__v4df)(__m256d)(b), \
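
Note: several of the permute/shuffle macros above lower to __builtin_shufflevector, whose constant indices select from the concatenation of the two sources (0-3 from the first __v4df, 4-7 from the second). A minimal sketch:

    typedef double v4df __attribute__((__vector_size__(32)));

    /* Sketch: indices {0, 4, 2, 6} interleave the even elements of a
       and b, i.e. the result is {a0, b0, a2, b2}. */
    v4df interleave_even(v4df a, v4df b) {
        return __builtin_shufflevector(a, b, 0, 4, 2, 6);
    }
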
1743 (__m256d)__builtin_ia32_cmppd256((__v4df)(__m256d)(a), \
1744 (__v4df)(__m256d)(b), (c)); })
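
Note: user code reaches cmppd256 through the _mm256_cmp_pd macro, whose immediate picks the predicate; a true lane comes back as all-ones. A usage sketch (the helper name lanes_below is hypothetical):

    #include <immintrin.h>

    /* Sketch: count lanes of v strictly below a threshold.
       _CMP_LT_OQ = ordered, quiet (non-signaling) less-than. */
    int lanes_below(__m256d v, double threshold) {
        __m256d m = _mm256_cmp_pd(v, _mm256_set1_pd(threshold), _CMP_LT_OQ);
        return __builtin_popcount(_mm256_movemask_pd(m));
    }
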
2062 return (__m256d)__builtin_convertvector((__v4si)__a, __v4df); in _mm256_cvtepi32_pd()
2093 return (__m128)__builtin_ia32_cvtpd2ps256((__v4df) __a); in _mm256_cvtpd_ps()
2114 return (__m256d)__builtin_convertvector((__v4sf)__a, __v4df); in _mm256_cvtps_pd()
2120 return (__m128i)__builtin_convertvector((__v4df) __a, __v4si); in _mm256_cvttpd_epi32()
2126 return (__m128i)__builtin_ia32_cvtpd2dq256((__v4df) __a); in _mm256_cvtpd_epi32()
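
Note: __builtin_convertvector converts element-wise between equal-length vector types with C cast semantics, which covers cvtepi32_pd, cvtps_pd and the truncating cvttpd_epi32; conversions that round (cvtpd2ps, cvtpd2dq) still need target builtins. A sketch of the generic builtin:

    typedef int    v4si __attribute__((__vector_size__(16)));
    typedef double v4df __attribute__((__vector_size__(32)));

    /* Sketch: widen four 32-bit ints to four doubles, one per element. */
    v4df widen_to_double(v4si x) {
        return __builtin_convertvector(x, v4df);
    }
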
2170 return __builtin_shufflevector((__v4df)__a, (__v4df)__a, 0, 0, 2, 2); in _mm256_movedup_pd()
2177 return __builtin_shufflevector((__v4df)__a, (__v4df)__b, 1, 5, 1+2, 5+2); in _mm256_unpackhi_pd()
2183 return __builtin_shufflevector((__v4df)__a, (__v4df)__b, 0, 4, 0+2, 4+2); in _mm256_unpacklo_pd()
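
Note: these three are pure shuffles: movedup duplicates the even elements {a0,a0,a2,a2}, unpacklo interleaves the low element of each 128-bit lane {a0,b0,a2,b2}, and unpackhi the high ones {a1,b1,a3,b3}. A scalar reference sketch for unpacklo (hypothetical helper):

    /* out = {a0, b0, a2, b2}: low element of each lane, interleaved. */
    void unpacklo_pd_ref(const double a[4], const double b[4], double out[4]) {
        out[0] = a[0];
        out[1] = b[0];
        out[2] = a[2];
        out[3] = b[2];
    }
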
2238 return __builtin_ia32_vtestzpd256((__v4df)__a, (__v4df)__b); in _mm256_testz_pd()
2244 return __builtin_ia32_vtestcpd256((__v4df)__a, (__v4df)__b); in _mm256_testc_pd()
2250 return __builtin_ia32_vtestnzcpd256((__v4df)__a, (__v4df)__b); in _mm256_testnzc_pd()
2293 return __builtin_ia32_movmskpd256((__v4df)__a); in _mm256_movemask_pd()
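
Note: vtestpd and movmskpd inspect only the sign bit of each double lane, which is why all-ones comparison masks feed them directly. A usage sketch (helper name hypothetical):

    #include <immintrin.h>

    /* Sketch: true when no lane has its sign bit set
       (note that -0.0 counts as negative here). */
    int all_nonnegative(__m256d v) {
        return _mm256_movemask_pd(v) == 0;
    }
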
2327 return (__m256d)(__v4df){ __d, __d, __d, __d }; in _mm256_broadcast_sd()
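
Note: the broadcast body is just a vector literal repeating the scalar, so at the user level it matches _mm256_set1_pd. A sketch (helper name hypothetical):

    #include <immintrin.h>

    /* Sketch: returns {*x, *x, *x, *x}; equivalent to _mm256_set1_pd(*x),
       but takes the scalar through a pointer (vbroadcastsd from memory). */
    __m256d splat(const double *x) {
        return _mm256_broadcast_sd(x);
    }
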
2457 return (__m256d)__builtin_ia32_maskloadpd256((const __v4df *)__p, in _mm256_maskload_pd()
2489 __builtin_ia32_maskstorepd256((__v4df *)__p, (__v4di)__m, (__v4df)__a); in _mm256_maskstore_pd()
2508 __builtin_nontemporal_store((__v4df)__b, (__v4df*)__a); in _mm256_stream_pd()
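
Note: maskstore writes only the lanes whose 64-bit mask element has its top bit set and leaves the rest of memory untouched; the non-temporal stream store additionally requires 32-byte alignment. A tail-handling sketch (the scenario and helper name are hypothetical):

    #include <immintrin.h>

    /* Sketch: store only the first n lanes of v (0 <= n <= 4) by setting
       the top bit of each active mask element. */
    void store_first_n(double *p, __m256d v, int n) {
        __m256i mask = _mm256_setr_epi64x(
            n > 0 ? -1LL : 0, n > 1 ? -1LL : 0,
            n > 2 ? -1LL : 0, n > 3 ? -1LL : 0);
        _mm256_maskstore_pd(p, mask, v);
    }
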
2743 return __builtin_shufflevector((__v4df)__a, (__v4df)__a, 0, 1); in _mm256_castpd256_pd128()
2796 (__v4df)(__m256d)(V1), \
2797 (__v4df)_mm256_castpd128_pd256((__m128d)(V2)), \
2828 (__v4df)(__m256d)(V), \
2829 (__v4df)(_mm256_undefined_pd()), \
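
Note: the 256-to-128 cast keeps shuffle indices 0 and 1 (the low half), while the insert/extract macros shuffle a 128-bit vector against an undefined or widened partner. A classic usage sketch that halves the width twice to reduce a vector to a scalar (helper name hypothetical):

    #include <immintrin.h>

    /* Sketch: horizontal sum of the four doubles in v. */
    double hsum_pd(__m256d v) {
        __m128d lo  = _mm256_castpd256_pd128(v);   /* lanes {v0, v1} */
        __m128d hi  = _mm256_extractf128_pd(v, 1); /* lanes {v2, v3} */
        __m128d s   = _mm_add_pd(lo, hi);          /* {v0+v2, v1+v3} */
        __m128d swp = _mm_unpackhi_pd(s, s);       /* broadcast high lane */
        return _mm_cvtsd_f64(_mm_add_pd(s, swp));
    }
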