/external/llvm-project/clang/test/CodeGen/X86/

avx512vl-builtins-constrained.c
    54: return _mm_mask_cvtps_ph(__W, __U, __A, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);  in test_mm_mask_cvtps_ph()
    60: return _mm_maskz_cvtps_ph(__U, __A, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);  in test_mm_maskz_cvtps_ph()
    66: return _mm256_mask_cvtps_ph(__W, __U, __A, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);  in test_mm256_mask_cvtps_ph()
    72: return _mm256_maskz_cvtps_ph(__U, __A, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);  in test_mm256_maskz_cvtps_ph()
    78: return _mm_mask_cvt_roundps_ph(__W, __U, __A, _MM_FROUND_TO_ZERO);  in test_mm_mask_cvt_roundps_ph()
    84: return _mm_maskz_cvt_roundps_ph(__U, __A, _MM_FROUND_TO_ZERO);  in test_mm_maskz_cvt_roundps_ph()
    90: return _mm256_mask_cvt_roundps_ph(__W, __U, __A, _MM_FROUND_TO_ZERO);  in test_mm256_mask_cvt_roundps_ph()
    96: return _mm256_maskz_cvt_roundps_ph(__U, __A, _MM_FROUND_TO_ZERO);  in test_mm256_maskz_cvt_roundps_ph()

avx512f-builtins.c
    37: return _mm512_mask_sqrt_round_pd(__W,__U,__A,_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);  in test_mm512_mask_sqrt_round_pd()
    46: return _mm512_maskz_sqrt_round_pd(__U,__A,_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);  in test_mm512_maskz_sqrt_round_pd()
    53: return _mm512_sqrt_round_pd(__A,_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);  in test_mm512_sqrt_round_pd()
    87: return _mm512_mask_sqrt_round_ps(__W,__U,__A,_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);  in test_mm512_mask_sqrt_round_ps()
    96: return _mm512_maskz_sqrt_round_ps(__U,__A,_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);  in test_mm512_maskz_sqrt_round_ps()
    103: return _mm512_sqrt_round_ps(__A,_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);  in test_mm512_sqrt_round_ps()
    500: return _mm512_fmadd_round_pd(__A, __B, __C, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);  in test_mm512_fmadd_round_pd()
    508: return _mm512_mask_fmadd_round_pd(__A, __U, __B, __C, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);  in test_mm512_mask_fmadd_round_pd()
    515: return _mm512_mask3_fmadd_round_pd(__A, __B, __C, __U, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);  in test_mm512_mask3_fmadd_round_pd()
    522: return _mm512_maskz_fmadd_round_pd(__U, __A, __B, __C, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);  in test_mm512_maskz_fmadd_round_pd()
    [all …]

avx512f-builtins-constrained.c
    77: return _mm512_cvt_roundps_ph(__A, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);  in test_mm512_cvt_roundps_ph()
    84: return _mm512_mask_cvt_roundps_ph(__W, __U, __A, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);  in test_mm512_mask_cvt_roundps_ph()
    91: return _mm512_maskz_cvt_roundps_ph(__U, __A, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);  in test_mm512_maskz_cvt_roundps_ph()

avx512vl-builtins.c
    9672: return _mm_mask_cvtps_ph(__W, __U, __A, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);  in test_mm_mask_cvtps_ph()
    9678: return _mm_maskz_cvtps_ph(__U, __A, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);  in test_mm_maskz_cvtps_ph()
    9684: return _mm256_mask_cvtps_ph(__W, __U, __A, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);  in test_mm256_mask_cvtps_ph()
    9690: return _mm256_maskz_cvtps_ph(__U, __A, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);  in test_mm256_maskz_cvtps_ph()
    9696: return _mm_mask_cvt_roundps_ph(__W, __U, __A, _MM_FROUND_TO_ZERO);  in test_mm_mask_cvt_roundps_ph()
    9702: return _mm_maskz_cvt_roundps_ph(__U, __A, _MM_FROUND_TO_ZERO);  in test_mm_maskz_cvt_roundps_ph()
    9708: return _mm256_mask_cvt_roundps_ph(__W, __U, __A, _MM_FROUND_TO_ZERO);  in test_mm256_mask_cvt_roundps_ph()
    9714: return _mm256_maskz_cvt_roundps_ph(__U, __A, _MM_FROUND_TO_ZERO);  in test_mm256_maskz_cvt_roundps_ph()
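The clang CodeGen tests above exercise the AVX-512 intrinsics that take an explicit rounding-mode immediate. As a minimal, hedged sketch (the same call pattern as the cvt_roundps_ph tests, not code copied from them), truncating sixteen float32 lanes to float16 looks like this; it assumes an AVX-512F target (e.g. -mavx512f):

    #include <immintrin.h>

    /* Sketch only: _MM_FROUND_TO_ZERO (0x03) | _MM_FROUND_NO_EXC (0x08) == 0x0B
       requests round-toward-zero with floating-point exceptions suppressed. */
    __m256i f32x16_to_f16x16_trunc(__m512 v)
    {
        return _mm512_cvt_roundps_ph(v, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
    }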
/external/XNNPACK/src/f32-vrnd/gen/

vrndz-avx512f-x32.c
    34: const __m512 vy0123456789ABCDEF = _mm512_roundscale_ps(vx0123456789ABCDEF, _MM_FROUND_TO_ZERO);  in xnn_f32_vrndz_ukernel__avx512f_x32()
    35: const __m512 vyGHIJKLMNOPQRSTUV = _mm512_roundscale_ps(vxGHIJKLMNOPQRSTUV, _MM_FROUND_TO_ZERO);  in xnn_f32_vrndz_ukernel__avx512f_x32()
    45: const __m512 vy = _mm512_roundscale_ps(vx, _MM_FROUND_TO_ZERO);  in xnn_f32_vrndz_ukernel__avx512f_x32()
    58: const __m512 vy = _mm512_maskz_roundscale_ps(vmask, vx, _MM_FROUND_TO_ZERO);  in xnn_f32_vrndz_ukernel__avx512f_x32()

vrndz-sse41-x8.c
    33: const __m128 vy0123 = _mm_round_ps(vx0123, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);  in xnn_f32_vrndz_ukernel__sse41_x8()
    34: const __m128 vy4567 = _mm_round_ps(vx4567, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);  in xnn_f32_vrndz_ukernel__sse41_x8()
    44: const __m128 vy = _mm_round_ps(vx, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);  in xnn_f32_vrndz_ukernel__sse41_x8()
    51: __m128 vy = _mm_round_ps(vx, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);  in xnn_f32_vrndz_ukernel__sse41_x8()

vrndz-avx-x16.c
    35: const __m256 vy01234567 = _mm256_round_ps(vx01234567, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);  in xnn_f32_vrndz_ukernel__avx_x16()
    36: const __m256 vy89ABCDEF = _mm256_round_ps(vx89ABCDEF, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);  in xnn_f32_vrndz_ukernel__avx_x16()
    46: const __m256 vy = _mm256_round_ps(vx, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);  in xnn_f32_vrndz_ukernel__avx_x16()
    57: const __m256 vy = _mm256_round_ps(vx, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);  in xnn_f32_vrndz_ukernel__avx_x16()

vrndz-sse41-x4.c
    32: const __m128 vy0123 = _mm_round_ps(vx0123, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);  in xnn_f32_vrndz_ukernel__sse41_x4()
    39: __m128 vy = _mm_round_ps(vx, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);  in xnn_f32_vrndz_ukernel__sse41_x4()

vrndz-avx512f-x16.c
    33: const __m512 vy0123456789ABCDEF = _mm512_roundscale_ps(vx0123456789ABCDEF, _MM_FROUND_TO_ZERO);  in xnn_f32_vrndz_ukernel__avx512f_x16()
    46: const __m512 vy = _mm512_maskz_roundscale_ps(vmask, vx, _MM_FROUND_TO_ZERO);  in xnn_f32_vrndz_ukernel__avx512f_x16()

vrndz-avx-x8.c
    34: const __m256 vy01234567 = _mm256_round_ps(vx01234567, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);  in xnn_f32_vrndz_ukernel__avx_x8()
    45: const __m256 vy = _mm256_round_ps(vx, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);  in xnn_f32_vrndz_ukernel__avx_x8()
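These generated XNNPACK microkernels all implement f32 round-toward-zero (vrndz), i.e. elementwise truncf(): the SSE4.1 and AVX variants call _mm_round_ps / _mm256_round_ps with _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC, while the AVX-512 variants use _mm512_roundscale_ps (and _mm512_maskz_roundscale_ps for the masked remainder). A standalone, hedged sketch of the SSE4.1 step, not XNNPACK code:

    #include <smmintrin.h>

    /* Truncate four floats toward zero, the per-vector operation the vrndz
       kernels above perform. Compile for an SSE4.1 target, e.g. -msse4.1. */
    void trunc_f32x4(const float *x, float *y)
    {
        const __m128 vx = _mm_loadu_ps(x);
        /* _MM_FROUND_NO_EXC suppresses the precision (inexact) exception
           that the rounding could otherwise raise. */
        const __m128 vy = _mm_round_ps(vx, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
        _mm_storeu_ps(y, vy);
    }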
/external/XNNPACK/src/math/

roundz-sse41.c
    25: const __m128 vy = _mm_round_ps(vx, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);  in xnn_math_f32_roundz__sse41()
/external/XNNPACK/src/f32-vrnd/

sse41.c.in
    21: $  "RNDZ": "_MM_FROUND_TO_ZERO",

avx512f.c.in
    22: $  "RNDZ": "_MM_FROUND_TO_ZERO",

avx.c.in
    23: $  "RNDZ": "_MM_FROUND_TO_ZERO",
/external/llvm-project/clang/lib/Headers/

smmintrin.h
    22: #define _MM_FROUND_TO_ZERO 0x03    (macro)
    31: #define _MM_FROUND_TRUNC (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_ZERO)

avx512fintrin.h
    44: #define _MM_FROUND_TO_ZERO 0x03    (macro)
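Both headers define _MM_FROUND_TO_ZERO as 0x03, the two-bit rounding-control value for truncation, and smmintrin.h combines it into the _MM_FROUND_TRUNC convenience name. For reference, the flag family as it is commonly laid out in smmintrin.h (values reproduced here for context; consult your own header for the authoritative definitions):

    #define _MM_FROUND_TO_NEAREST_INT  0x00  /* round to nearest, ties to even   */
    #define _MM_FROUND_TO_NEG_INF      0x01  /* round toward -infinity (floor)   */
    #define _MM_FROUND_TO_POS_INF      0x02  /* round toward +infinity (ceil)    */
    #define _MM_FROUND_TO_ZERO         0x03  /* round toward zero (truncate)     */
    #define _MM_FROUND_CUR_DIRECTION   0x04  /* use the current MXCSR mode       */
    #define _MM_FROUND_RAISE_EXC       0x00  /* allow the precision exception    */
    #define _MM_FROUND_NO_EXC          0x08  /* suppress the precision exception */

    /* Derived name, as in the _MM_FROUND_TRUNC hit at line 31 above: */
    #define _MM_FROUND_TRUNC  (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_ZERO)  /* == 0x03 */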
/external/clang/lib/Headers/

smmintrin.h
    36: #define _MM_FROUND_TO_ZERO 0x03    (macro)
    45: #define _MM_FROUND_TRUNC (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_ZERO)

avx512fintrin.h
    54: #define _MM_FROUND_TO_ZERO 0x03    (macro)
/external/mesa3d/src/gallium/drivers/swr/rasterizer/core/

depthstencil.h
    107: result = _simd_round_ps(result, _MM_FROUND_TO_ZERO);  in QuantizeDepth()
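QuantizeDepth() in the swr rasterizer snaps a floating-point depth value onto the grid representable by the depth buffer, using a round toward zero. A hypothetical scalar sketch of that general pattern (the scaling shown here is illustrative, not taken from depthstencil.h):

    #include <stdint.h>

    /* Illustrative only: quantize z in [0, 1] onto an N-bit fixed-point grid
       by scaling, truncating toward zero, and rescaling. */
    static inline float quantize_depth_trunc(float z, unsigned bits)
    {
        const float scale = (float)((1u << bits) - 1u);   /* e.g. bits == 24 for a D24 buffer */
        const float fixed = (float)(int64_t)(z * scale);  /* truncation, like _MM_FROUND_TO_ZERO */
        return fixed / scale;
    }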
/external/mesa3d/src/gallium/drivers/swr/rasterizer/jitter/

blend_jit.cpp
    238: src[swizComp] = VROUND(src[swizComp], C(_MM_FROUND_TO_ZERO));  in Quantize()
/external/pffft/

sse2neon.h
    135: #define _MM_FROUND_TO_ZERO 0x03    (macro)
    4846: case (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC):  in _mm_round_ps()
    4864: case (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC):  in _mm_round_ps()
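sse2neon emulates _mm_round_ps on ARM by switching on the rounding immediate; the (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC) cases above select the truncating path. A hedged sketch of the natural AArch64 mapping for just that case (sse2neon's actual implementation also covers the other rounding modes and carries an ARMv7 fallback):

    #include <arm_neon.h>

    /* AArch64: vrndq_f32 lowers to FRINTZ, which rounds each lane toward zero,
       matching _mm_round_ps(v, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC). */
    float32x4_t round_toward_zero_f32x4(float32x4_t v)
    {
        return vrndq_f32(v);
    }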