Home
last modified time | relevance | path

Searched refs:vd3 (Results 1 – 25 of 77) sorted by relevance

1 2 3 4

/external/eigen/unsupported/test/
Dopenglsupport.cpp 188 Vector3d vd3; vd3.setRandom(); in test_openglsupport() local
189 VERIFY_MATRIX(glTranslate(vd3), Projective3d(Translation3d(vd3)).matrix()); in test_openglsupport()
207 Vector3d vd3; vd3.setRandom(); in test_openglsupport() local
208 VERIFY_MATRIX(glScale(vd3), Projective3d(Scaling(vd3)).matrix()); in test_openglsupport()
/external/XNNPACK/src/f32-ibilinear/gen/
Dscalar-c4.c 83 const float vd3 = vb3 - vt3; in xnn_f32_ibilinear_ukernel__scalar_c4() local
88 const float vo3 = vt3 + vd3 * valphav; in xnn_f32_ibilinear_ukernel__scalar_c4()
/external/deqp-deps/glslang/Test/baseResults/
Dhlsl.intrinsics.double.frag.out 5 0:5 Function Definition: @PixelShaderFunction(d1;d1;d1;vd2;vd3;vd4;u1;u1; ( temp float)
62 0:5 Function Call: @PixelShaderFunction(d1;d1;d1;vd2;vd3;vd4;u1;u1; ( temp float)
89 0:5 Function Definition: @PixelShaderFunction(d1;d1;d1;vd2;vd3;vd4;u1;u1; ( temp float)
146 0:5 Function Call: @PixelShaderFunction(d1;d1;d1;vd2;vd3;vd4;u1;u1; ( temp float)
178 Name 26 "@PixelShaderFunction(d1;d1;d1;vd2;vd3;vd4;u1;u1;"
312 …89: 16(float) FunctionCall 26(@PixelShaderFunction(d1;d1;d1;vd2;vd3;vd4;u1;u1;) 73(param) 75(par…
316 26(@PixelShaderFunction(d1;d1;d1;vd2;vd3;vd4;u1;u1;): 16(float) Function None 17
/external/angle/third_party/vulkan-deps/glslang/src/Test/baseResults/
Dhlsl.intrinsics.double.frag.out 5 0:5 Function Definition: @PixelShaderFunction(d1;d1;d1;vd2;vd3;vd4;u1;u1; ( temp float)
62 0:5 Function Call: @PixelShaderFunction(d1;d1;d1;vd2;vd3;vd4;u1;u1; ( temp float)
89 0:5 Function Definition: @PixelShaderFunction(d1;d1;d1;vd2;vd3;vd4;u1;u1; ( temp float)
146 0:5 Function Call: @PixelShaderFunction(d1;d1;d1;vd2;vd3;vd4;u1;u1; ( temp float)
178 Name 26 "@PixelShaderFunction(d1;d1;d1;vd2;vd3;vd4;u1;u1;"
312 …89: 16(float) FunctionCall 26(@PixelShaderFunction(d1;d1;d1;vd2;vd3;vd4;u1;u1;) 73(param) 75(par…
316 26(@PixelShaderFunction(d1;d1;d1;vd2;vd3;vd4;u1;u1;): 16(float) Function None 17
/external/XNNPACK/src/f32-ibilinear-chw/gen/
Dscalar-p4.c 93 const float vd3 = vb3 - vt3; in xnn_f32_ibilinear_chw_ukernel__scalar_p4() local
98 const float vo3 = vt3 + vd3 * valphav3; in xnn_f32_ibilinear_chw_ukernel__scalar_p4()
/external/XNNPACK/src/f32-sigmoid/gen/
Davx2-rr1-p5-nr2fma-x32.c 105 const __m256 vd3 = _mm256_add_ps(ve3, vone); in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x32() local
110 __m256 vr3 = _mm256_rcp_ps(vd3); in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x32()
115 vr3 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr3, vd3, vone), vr3, vr3); in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x32()
120 vr3 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr3, vd3, vone), vr3, vr3); in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x32()
Davx512f-rr1-p5-scalef-nr1fma-x64.c 97 const __m512 vd3 = _mm512_add_ps(ve3, vone); in xnn_f32_sigmoid_ukernel__avx512f_rr1_p5_scalef_nr1fma_x64() local
102 __m512 vr3 = _mm512_rcp14_ps(vd3); in xnn_f32_sigmoid_ukernel__avx512f_rr1_p5_scalef_nr1fma_x64()
107 vr3 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr3, vd3, vone), vr3, vr3); in xnn_f32_sigmoid_ukernel__avx512f_rr1_p5_scalef_nr1fma_x64()
Davx2-rr1-p5-nr2fma-x40.c 117 const __m256 vd3 = _mm256_add_ps(ve3, vone); in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x40() local
123 __m256 vr3 = _mm256_rcp_ps(vd3); in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x40()
129 vr3 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr3, vd3, vone), vr3, vr3); in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x40()
135 vr3 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr3, vd3, vone), vr3, vr3); in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x40()
Dscalar-lut2048-p1-div-x4.c 98 const float vd3 = vy3 + vone; in xnn_f32_sigmoid_ukernel__scalar_lut2048_p1_div_x4() local
103 float vf3 = vy3 / vd3; in xnn_f32_sigmoid_ukernel__scalar_lut2048_p1_div_x4()
Dscalar-lut64-p2-div-x4.c 103 const float vd3 = vy3 + vone; in xnn_f32_sigmoid_ukernel__scalar_lut64_p2_div_x4() local
108 float vf3 = vy3 / vd3; in xnn_f32_sigmoid_ukernel__scalar_lut64_p2_div_x4()
Dscalar-p5-div-x4.c 109 const float vd3 = ve3 + vone; in xnn_f32_sigmoid_ukernel__scalar_p5_div_x4() local
114 float vf3 = ve3 / vd3; in xnn_f32_sigmoid_ukernel__scalar_p5_div_x4()
Davx-rr2-p5-nr2-x32.c 120 const __m256 vd3 = _mm256_add_ps(ve3, vone); in xnn_f32_sigmoid_ukernel__avx_rr2_p5_nr2_x32() local
125 __m256 vr3 = _mm256_rcp_ps(vd3); in xnn_f32_sigmoid_ukernel__avx_rr2_p5_nr2_x32()
133 vr3 = _mm256_mul_ps(vr3, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr3, vd3))); in xnn_f32_sigmoid_ukernel__avx_rr2_p5_nr2_x32()
134 vr3 = _mm256_mul_ps(vr3, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr3, vd3))); in xnn_f32_sigmoid_ukernel__avx_rr2_p5_nr2_x32()
Davx512f-rr2-lut32-p2-perm2-scalef-nr1fma-x64.c 106 const __m512 vd3 = _mm512_add_ps(ve3, vone); in xnn_f32_sigmoid_ukernel__avx512f_rr2_lut32_p2_perm2_scalef_nr1fma_x64() local
111 __m512 vr3 = _mm512_rcp14_ps(vd3); in xnn_f32_sigmoid_ukernel__avx512f_rr2_lut32_p2_perm2_scalef_nr1fma_x64()
116 vr3 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr3, vd3, vone), vr3, vr3); in xnn_f32_sigmoid_ukernel__avx512f_rr2_lut32_p2_perm2_scalef_nr1fma_x64()
Davx512f-rr1-lut16-p3-perm-scalef-nr1fma-x64.c 100 const __m512 vd3 = _mm512_add_ps(ve3, vone); in xnn_f32_sigmoid_ukernel__avx512f_rr1_lut16_p3_perm_scalef_nr1fma_x64() local
105 __m512 vr3 = _mm512_rcp14_ps(vd3); in xnn_f32_sigmoid_ukernel__avx512f_rr1_lut16_p3_perm_scalef_nr1fma_x64()
110 vr3 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr3, vd3, vone), vr3, vr3); in xnn_f32_sigmoid_ukernel__avx512f_rr1_lut16_p3_perm_scalef_nr1fma_x64()
Davx2-rr1-p5-nr2fma-x48.c 129 const __m256 vd3 = _mm256_add_ps(ve3, vone); in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x48() local
136 __m256 vr3 = _mm256_rcp_ps(vd3); in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x48()
143 vr3 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr3, vd3, vone), vr3, vr3); in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x48()
150 vr3 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr3, vd3, vone), vr3, vr3); in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x48()
Davx512f-rr1-lut16-p3-perm-scalef-nr1fma-x80.c 111 const __m512 vd3 = _mm512_add_ps(ve3, vone); in xnn_f32_sigmoid_ukernel__avx512f_rr1_lut16_p3_perm_scalef_nr1fma_x80() local
117 __m512 vr3 = _mm512_rcp14_ps(vd3); in xnn_f32_sigmoid_ukernel__avx512f_rr1_lut16_p3_perm_scalef_nr1fma_x80()
123 vr3 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr3, vd3, vone), vr3, vr3); in xnn_f32_sigmoid_ukernel__avx512f_rr1_lut16_p3_perm_scalef_nr1fma_x80()
Davx512f-rr1-p5-scalef-nr1fma-x80.c 108 const __m512 vd3 = _mm512_add_ps(ve3, vone); in xnn_f32_sigmoid_ukernel__avx512f_rr1_p5_scalef_nr1fma_x80() local
114 __m512 vr3 = _mm512_rcp14_ps(vd3); in xnn_f32_sigmoid_ukernel__avx512f_rr1_p5_scalef_nr1fma_x80()
120 vr3 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr3, vd3, vone), vr3, vr3); in xnn_f32_sigmoid_ukernel__avx512f_rr1_p5_scalef_nr1fma_x80()
Davx2-rr1-p5-nr1fma-x32.c 105 const __m256 vd3 = _mm256_add_ps(ve3, vone); in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr1fma_x32() local
110 __m256 vr3 = _mm256_rcp_ps(vd3); in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr1fma_x32()
115 vr3 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr3, vd3, vone), vr3, vr3); in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr1fma_x32()
Davx512f-rr2-lut32-p2-perm2-scalef-nr1fma-x80.c 117 const __m512 vd3 = _mm512_add_ps(ve3, vone); in xnn_f32_sigmoid_ukernel__avx512f_rr2_lut32_p2_perm2_scalef_nr1fma_x80() local
123 __m512 vr3 = _mm512_rcp14_ps(vd3); in xnn_f32_sigmoid_ukernel__avx512f_rr2_lut32_p2_perm2_scalef_nr1fma_x80()
129 vr3 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr3, vd3, vone), vr3, vr3); in xnn_f32_sigmoid_ukernel__avx512f_rr2_lut32_p2_perm2_scalef_nr1fma_x80()
Davx-rr2-p5-nr2-x40.c 135 const __m256 vd3 = _mm256_add_ps(ve3, vone); in xnn_f32_sigmoid_ukernel__avx_rr2_p5_nr2_x40() local
141 __m256 vr3 = _mm256_rcp_ps(vd3); in xnn_f32_sigmoid_ukernel__avx_rr2_p5_nr2_x40()
150 vr3 = _mm256_mul_ps(vr3, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr3, vd3))); in xnn_f32_sigmoid_ukernel__avx_rr2_p5_nr2_x40()
151 vr3 = _mm256_mul_ps(vr3, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr3, vd3))); in xnn_f32_sigmoid_ukernel__avx_rr2_p5_nr2_x40()
Davx512f-rr1-p5-scalef-nr1fma-x96.c 119 const __m512 vd3 = _mm512_add_ps(ve3, vone); in xnn_f32_sigmoid_ukernel__avx512f_rr1_p5_scalef_nr1fma_x96() local
126 __m512 vr3 = _mm512_rcp14_ps(vd3); in xnn_f32_sigmoid_ukernel__avx512f_rr1_p5_scalef_nr1fma_x96()
133 vr3 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr3, vd3, vone), vr3, vr3); in xnn_f32_sigmoid_ukernel__avx512f_rr1_p5_scalef_nr1fma_x96()
Davx2-rr1-p5-nr2fma-x56.c 141 const __m256 vd3 = _mm256_add_ps(ve3, vone); in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x56() local
149 __m256 vr3 = _mm256_rcp_ps(vd3); in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x56()
157 vr3 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr3, vd3, vone), vr3, vr3); in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x56()
165 vr3 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr3, vd3, vone), vr3, vr3); in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x56()
Davx2-rr1-p5-nr1fma-x40.c 117 const __m256 vd3 = _mm256_add_ps(ve3, vone); in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr1fma_x40() local
123 __m256 vr3 = _mm256_rcp_ps(vd3); in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr1fma_x40()
129 vr3 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr3, vd3, vone), vr3, vr3); in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr1fma_x40()
Davx512f-rr2-lut32-p2-perm2-scalef-nr1fma-x96.c 128 const __m512 vd3 = _mm512_add_ps(ve3, vone); in xnn_f32_sigmoid_ukernel__avx512f_rr2_lut32_p2_perm2_scalef_nr1fma_x96() local
135 __m512 vr3 = _mm512_rcp14_ps(vd3); in xnn_f32_sigmoid_ukernel__avx512f_rr2_lut32_p2_perm2_scalef_nr1fma_x96()
142 vr3 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr3, vd3, vone), vr3, vr3); in xnn_f32_sigmoid_ukernel__avx512f_rr2_lut32_p2_perm2_scalef_nr1fma_x96()
Davx512f-rr1-lut16-p3-perm-scalef-nr1fma-x96.c 122 const __m512 vd3 = _mm512_add_ps(ve3, vone); in xnn_f32_sigmoid_ukernel__avx512f_rr1_lut16_p3_perm_scalef_nr1fma_x96() local
129 __m512 vr3 = _mm512_rcp14_ps(vd3); in xnn_f32_sigmoid_ukernel__avx512f_rr1_lut16_p3_perm_scalef_nr1fma_x96()
136 vr3 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr3, vd3, vone), vr3, vr3); in xnn_f32_sigmoid_ukernel__avx512f_rr1_lut16_p3_perm_scalef_nr1fma_x96()

1 2 3 4