Lines Matching refs:__v8sf
32 typedef float __v8sf __attribute__ ((__vector_size__ (32)));
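This typedef uses the GCC/Clang vector_size extension, which is why several of the wrappers below reduce to plain C operators on casted values. A minimal sketch of that pattern (the helper name is illustrative, not from the header):

#include <immintrin.h>

/* Element-wise '+' is defined on __v8sf by the vector extension;
   _mm256_add_ps at line 89 is exactly this cast-and-add. */
static __m256 add_like_the_header(__m256 a, __m256 b) {
    return (__m256)((__v8sf)a + (__v8sf)b);
}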
89 return (__m256)((__v8sf)__a+(__v8sf)__b); in _mm256_add_ps()
125 return (__m256)((__v8sf)__a-(__v8sf)__b); in _mm256_sub_ps()
163 return (__m256)__builtin_ia32_addsubps256((__v8sf)__a, (__v8sf)__b); in _mm256_addsub_ps()
199 return (__m256)((__v8sf)__a/(__v8sf)__b); in _mm256_div_ps()
237 return (__m256)__builtin_ia32_maxps256((__v8sf)__a, (__v8sf)__b); in _mm256_max_ps()
275 return (__m256)__builtin_ia32_minps256((__v8sf)__a, (__v8sf)__b); in _mm256_min_ps()
311 return (__m256)((__v8sf)__a * (__v8sf)__b); in _mm256_mul_ps()
345 return (__m256)__builtin_ia32_sqrtps256((__v8sf)__a); in _mm256_sqrt_ps()
362 return (__m256)__builtin_ia32_rsqrtps256((__v8sf)__a); in _mm256_rsqrt_ps()
379 return (__m256)__builtin_ia32_rcpps256((__v8sf)__a); in _mm256_rcp_ps()
444 (__m256)__builtin_ia32_roundps256((__v8sf)(__m256)(V), (M)); })
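The block above covers the lane-wise arithmetic wrappers (add, sub, addsub, div, max, min, mul, sqrt, rsqrt, rcp, round), all over eight packed floats. A hedged usage sketch, assuming a compiler invoked with -mavx (nothing here is from the header itself):

#include <immintrin.h>
#include <stdio.h>

int main(void) {
    __m256 a = _mm256_set1_ps(2.0f);   /* {2,2,2,2,2,2,2,2} */
    __m256 b = _mm256_set1_ps(3.0f);   /* {3,3,3,3,3,3,3,3} */
    __m256 s = _mm256_add_ps(a, b);    /* lane-wise 2 + 3 */
    __m256 r = _mm256_sqrt_ps(s);      /* lane-wise sqrt(5) */
    float out[8];
    _mm256_storeu_ps(out, r);
    printf("%f\n", out[0]);            /* ~2.236068 */
    return 0;
}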
710 return (__m256)__builtin_ia32_haddps256((__v8sf)__a, (__v8sf)__b); in _mm256_hadd_ps()
756 return (__m256)__builtin_ia32_hsubps256((__v8sf)__a, (__v8sf)__b); in _mm256_hsub_ps()
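haddps256/hsubps256 are horizontal only within each 128-bit half, not across the full register, which is a common source of bugs. A sketch of the resulting lane order (helper name illustrative):

#include <immintrin.h>

/* With a = {a0..a7} and b = {b0..b7}, the result is
   {a0+a1, a2+a3, b0+b1, b2+b3, a4+a5, a6+a7, b4+b5, b6+b7}. */
static __m256 pairwise_sums(__m256 a, __m256 b) {
    return _mm256_hadd_ps(a, b);
}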
971 return (__m256)__builtin_ia32_vpermilvarps256((__v8sf)__a, (__v8si)__c); in _mm256_permutevar_ps()
1196 (__m256)__builtin_shufflevector((__v8sf)(__m256)(A), \
1197 (__v8sf)_mm256_undefined_ps(), \
1286 (__m256)__builtin_ia32_vperm2f128_ps256((__v8sf)(__m256)(V1), \
1287 (__v8sf)(__m256)(V2), (M)); })
1387 (__m256)__builtin_shufflevector((__v8sf)(__m256)(V1), \
1388 (__v8sf)(__m256)(V2), \
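The matches at 971-1388 form the permute/blend group: in-lane vpermilvar, shufflevector-based immediate permutes and blends, and the cross-lane vperm2f128. A hedged example of the cross-lane case; the 0x20 control follows the standard Intel encoding (low-half selector in bits 1:0, high-half selector in bits 5:4):

#include <immintrin.h>

/* 0x20: the result's low 128 bits come from a's low half, its high
   128 bits from b's low half -- the two low halves concatenated. */
static __m256 concat_low_halves(__m256 a, __m256 b) {
    return _mm256_permute2f128_ps(a, b, 0x20);
}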
1451 (__v8sf)__a, (__v8sf)__b, (__v8sf)__c); in _mm256_blendv_ps()
1492 (__m256)__builtin_ia32_dpps256((__v8sf)(__m256)(V1), \
1493 (__v8sf)(__m256)(V2), (M)); })
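blendvps256 selects per lane on the sign bit of its third operand, and dpps256 computes a dot product independently in each 128-bit half. A hedged sketch (helper names and the 0xF1 control are illustrative):

#include <immintrin.h>

/* Lanes where mask's sign bit is set come from b, the rest from a. */
static __m256 select_by_sign(__m256 a, __m256 b, __m256 mask) {
    return _mm256_blendv_ps(a, b, mask);
}

/* 0xF1: multiply all four lanes of each 128-bit half, sum them, and
   write the sum to lane 0 of that half (other lanes are zeroed). */
static __m256 dot4_per_half(__m256 a, __m256 b) {
    return _mm256_dp_ps(a, b, 0xF1);
}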
1544 (__m256)__builtin_shufflevector((__v8sf)(__m256)(a), \
1545 (__v8sf)(__m256)(b), \
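The pair at 1544-1545 appears to be the body of the _mm256_shuffle_ps macro, which again acts within each 128-bit half: two lanes from a, then two from b. A hedged sketch:

#include <immintrin.h>

/* _MM_SHUFFLE(3,2,1,0) = 0xE4 keeps positional order: per half the
   result is {a0, a1, b2, b3}. */
static __m256 pick_a_low_b_high(__m256 a, __m256 b) {
    return _mm256_shuffle_ps(a, b, _MM_SHUFFLE(3, 2, 1, 0));
}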
1779 (__m256)__builtin_ia32_cmpps256((__v8sf)(__m256)(a), \
1780 (__v8sf)(__m256)(b), (c)); })
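Unlike the fixed-predicate SSE wrappers, cmpps256 takes the comparison predicate as an explicit third operand. A hedged sketch using the _CMP_* constants defined alongside the intrinsic:

#include <immintrin.h>

/* All-ones lanes where a < b (ordered, quiet), all-zeros elsewhere;
   typically fed into movemask or blendv. */
static __m256 less_than_mask(__m256 a, __m256 b) {
    return _mm256_cmp_ps(a, b, _CMP_LT_OQ);
}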
2108 return (__m256i)__builtin_ia32_cvtps2dq256((__v8sf) __a); in _mm256_cvtps_epi32()
2132 return (__m256i)__builtin_convertvector((__v8sf) __a, __v8si); in _mm256_cvttps_epi32()
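The two float-to-int conversions differ only in rounding: cvtps2dq256 honors the current rounding mode (round-to-nearest-even by default), while the __builtin_convertvector form truncates toward zero. A hedged sketch:

#include <immintrin.h>

static void both_conversions(__m256 v, __m256i *rounded, __m256i *truncated) {
    *rounded   = _mm256_cvtps_epi32(v);    /* e.g. 2.7f -> 3 */
    *truncated = _mm256_cvttps_epi32(v);   /* e.g. 2.7f -> 2 */
}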
2158 return __builtin_shufflevector((__v8sf)__a, (__v8sf)__a, 1, 1, 3, 3, 5, 5, 7, 7); in _mm256_movehdup_ps()
2164 return __builtin_shufflevector((__v8sf)__a, (__v8sf)__a, 0, 0, 2, 2, 4, 4, 6, 6); in _mm256_moveldup_ps()
2189 return __builtin_shufflevector((__v8sf)__a, (__v8sf)__b, 2, 10, 2+1, 10+1, 6, 14, 6+1, 14+1); in _mm256_unpackhi_ps()
2195 return __builtin_shufflevector((__v8sf)__a, (__v8sf)__b, 0, 8, 0+1, 8+1, 4, 12, 4+1, 12+1); in _mm256_unpacklo_ps()
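movehdup/moveldup duplicate the odd or even lanes, and the unpack pair interleaves within each 128-bit half, which is what the 2,10,... and 0,8,... index patterns encode. A hedged sketch:

#include <immintrin.h>

/* With a = {a0..a7} and b = {b0..b7}, the result is
   {a0, b0, a1, b1, a4, b4, a5, b5} -- interleaved per half. */
static __m256 interleave_low(__m256 a, __m256 b) {
    return _mm256_unpacklo_ps(a, b);
}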
2256 return __builtin_ia32_vtestzps256((__v8sf)__a, (__v8sf)__b); in _mm256_testz_ps()
2262 return __builtin_ia32_vtestcps256((__v8sf)__a, (__v8sf)__b); in _mm256_testc_ps()
2268 return __builtin_ia32_vtestnzcps256((__v8sf)__a, (__v8sf)__b); in _mm256_testnzc_ps()
2299 return __builtin_ia32_movmskps256((__v8sf)__a); in _mm256_movemask_ps()
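The vtest* matches return scalar flags computed from sign bits, and movmskps256 packs the eight sign bits into an int, so compare masks can drive ordinary branches. A hedged sketch (helper names illustrative):

#include <immintrin.h>

static int any_lane_negative(__m256 v) {
    /* One bit per lane, taken from each float's sign bit. */
    return _mm256_movemask_ps(v) != 0;
}

static int all_lanes_nonnegative(__m256 v) {
    /* Returns 1 when no sign bit of (v AND v) is set. */
    return _mm256_testz_ps(v, v);
}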
2334 return (__m256)(__v8sf){ __f, __f, __f, __f, __f, __f, __f, __f }; in _mm256_broadcast_ss()
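Note that broadcast_ss is implemented as a plain vector literal rather than a builtin. A one-line usage sketch:

#include <immintrin.h>

static __m256 splat(const float *x) {
    return _mm256_broadcast_ss(x);   /* eight copies of *x */
}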
2470 return (__m256)__builtin_ia32_maskloadps256((const __v8sf *)__p, (__v8si)__m); in _mm256_maskload_ps()
2477 __builtin_ia32_maskstoreps256((__v8sf *)__p, (__v8si)__m, (__v8sf)__a); in _mm256_maskstore_ps()
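maskload/maskstore touch only the lanes whose mask sign bit is set, which makes them safe on ragged array tails. A hedged sketch (the four-lane mask is illustrative):

#include <immintrin.h>

static void copy_first_four(const float *src, float *dst) {
    /* -1 has its sign bit set, so lanes 0-3 are selected, 4-7 skipped. */
    __m256i mask = _mm256_setr_epi32(-1, -1, -1, -1, 0, 0, 0, 0);
    __m256 v = _mm256_maskload_ps(src, mask);   /* lanes 4-7 read as 0 */
    _mm256_maskstore_ps(dst, mask, v);          /* lanes 4-7 untouched */
}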
2514 __builtin_nontemporal_store((__v8sf)__a, (__v8sf*)__p); in _mm256_stream_ps()
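The nontemporal store requires 32-byte alignment and bypasses the cache. A hedged sketch; the trailing _mm_sfence pairing is conventional rather than mandated by the header:

#include <stddef.h>
#include <immintrin.h>

static void stream_fill(float *dst, float value, size_t n8) {
    /* dst must be 32-byte aligned; n8 counts 8-float blocks. */
    __m256 v = _mm256_set1_ps(value);
    for (size_t i = 0; i < n8; ++i)
        _mm256_stream_ps(dst + 8 * i, v);
    _mm_sfence();   /* make the streamed stores globally visible */
}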
2749 return __builtin_shufflevector((__v8sf)__a, (__v8sf)__a, 0, 1, 2, 3); in _mm256_castps256_ps128()
2783 (__v8sf)(__m256)(V1), \
2784 (__v8sf)_mm256_castps128_ps256((__m128)(V2)), \
2819 (__v8sf)(__m256)(V), \
2820 (__v8sf)(_mm256_undefined_ps()), \
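The cast at 2749 is effectively free (it only reinterprets the low half), while the insertf128/extractf128 macros at 2783-2820 move real data across the 128-bit boundary. A hedged sketch combining the two:

#include <immintrin.h>

static __m128 sum_halves(__m256 v) {
    __m128 lo = _mm256_castps256_ps128(v);     /* low half, no-op cast */
    __m128 hi = _mm256_extractf128_ps(v, 1);   /* high half */
    return _mm_add_ps(lo, hi);
}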