/external/clang/test/CodeGenCXX/

| File | Line | Definition | Context |
|------|------|------------|---------|
| member-function-pointer-calls.cpp | 4 | virtual int vf1() { return 1; } | in vf1() (function) |
/external/clang/test/CodeGen/

| File | Line | Definition | Context |
|------|------|------------|---------|
| Nontemporal.cpp | 14 | float __attribute__((vector_size(16))) vf1, vf2; | (variable) |
| builtins-ppc-altivec.c | 8973 | vector float vf1 = (vector float)(1.0); | in test7() (local) |
/external/XNNPACK/src/f32-raddstoreexpminusmax/gen/

| File | Line | Definition | Context |
|------|------|------------|---------|
| scalar-p5-x2.c | 98 | float vf1 = vt1 * vp1 + vs1; | in xnn_f32_raddstoreexpminusmax_ukernel__scalar_p5_x2() (local) |
| scalar-p5-x2-acc2.c | 99 | float vf1 = vt1 * vp1 + vs1; | in xnn_f32_raddstoreexpminusmax_ukernel__scalar_p5_x2_acc2() (local) |
| scalar-lut64-p2-x2-acc2.c | 109 | float vf1 = vp1 * vs1 + vs1; | in xnn_f32_raddstoreexpminusmax_ukernel__scalar_lut64_p2_x2_acc2() (local) |
| scalar-lut64-p2-x2.c | 108 | float vf1 = vp1 * vs1 + vs1; | in xnn_f32_raddstoreexpminusmax_ukernel__scalar_lut64_p2_x2() (local) |
| scalar-p5-x4.c | 122 | float vf1 = vt1 * vp1 + vs1; | in xnn_f32_raddstoreexpminusmax_ukernel__scalar_p5_x4() (local) |
| scalar-p5-x4-acc2.c | 123 | float vf1 = vt1 * vp1 + vs1; | in xnn_f32_raddstoreexpminusmax_ukernel__scalar_p5_x4_acc2() (local) |
| scalar-p5-x4-acc4.c | 125 | float vf1 = vt1 * vp1 + vs1; | in xnn_f32_raddstoreexpminusmax_ukernel__scalar_p5_x4_acc4() (local) |
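The `float vf1 = vt1 * vp1 + vs1` line repeated across these generated kernels is the final reconstruction step of an exp(x - max) evaluation: `vp1` holds the polynomial part, `vs1` the power-of-two factor 2^n from the range reduction, and the multiply-add fuses them back into the exponential (the lut64 variants fold a table value into `vs1` instead). The sketch below is a minimal scalar illustration of that pattern, not the generated code: the Taylor coefficients, the helper name `exp_p5_sketch`, and the use of `rintf`/`ldexpf` are assumptions for readability.

```c
#include <math.h>
#include <stdio.h>

/* Illustrative sketch (not the generated XNNPACK code): exp(x) via
 * range reduction x = n*ln2 + t, a degree-5 polynomial in t, and the
 * same final fused step "vf = vt * vp + vs" seen in the hits above,
 * where vs = 2^n and vp approximates (exp(t) - 1) / t. */
static float exp_p5_sketch(float x) {
  const float log2e = 0x1.715476p+0f;     /* 1/ln(2) */
  const float ln2   = 0x1.62E430p-1f;     /* ln(2)   */

  const float n = rintf(x * log2e);       /* nearest integer to x / ln(2) */
  const float t = x - n * ln2;            /* reduced argument, |t| <= ln(2)/2 */
  const float s = ldexpf(1.0f, (int) n);  /* vs = 2^n */

  /* Degree-5 Taylor coefficients of (exp(t) - 1) / t; the generated
   * kernels use tuned minimax coefficients instead. */
  float p = 1.0f / 120.0f;
  p = p * t + 1.0f / 24.0f;
  p = p * t + 1.0f / 6.0f;
  p = p * t + 0.5f;
  p = p * t + 1.0f;

  /* exp(x) = 2^n * exp(t) = s + s*t*p(t); with t pre-scaled by s this
   * is exactly the "vf = vt * vp + vs" shape in the listing. */
  const float ts = t * s;
  return ts * p + s;
}

int main(void) {
  for (float x = -3.0f; x <= 3.0f; x += 1.5f) {
    printf("x=%+.2f  sketch=%.6f  expf=%.6f\n", x, exp_p5_sketch(x), expf(x));
  }
  return 0;
}
```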
/external/XNNPACK/src/f32-sigmoid/gen/

| File | Line | Definition | Context |
|------|------|------------|---------|
| scalar-p5-div-x2.c | 110 | float vf1 = ve1 / (ve1 + vone); | in xnn_f32_sigmoid_ukernel__scalar_p5_div_x2() (local) |
| scalar-lut2048-p1-div-x2.c | 115 | float vf1 = vy1 / (vy1 + vone); | in xnn_f32_sigmoid_ukernel__scalar_lut2048_p1_div_x2() (local) |
| scalar-lut64-p2-div-x2.c | 119 | float vf1 = vy1 / (vy1 + vone); | in xnn_f32_sigmoid_ukernel__scalar_lut64_p2_div_x2() (local) |
| avx2-rr1-p5-div-x16.c | 109 | __m256 vf1 = _mm256_div_ps(ve1, vd1); | in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_div_x16() (local) |
| scalar-p5-div-x4.c | 136 | float vf1 = ve1 / (ve1 + vone); | in xnn_f32_sigmoid_ukernel__scalar_p5_div_x4() (local) |
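Here `vf1` is the closing division of a sigmoid evaluation: with `ve1`/`vy1` approximating an exponential of a non-positive argument, the sigmoid falls out as e / (e + 1). The scalar sketch below shows that shape only; it leans on `expf` instead of the kernels' polynomial or lookup-table approximations, and the reflection step for positive inputs is one common way to keep the exp argument non-positive, stated here as an assumption rather than a transcription of the generated code.

```c
#include <math.h>
#include <stdio.h>

/* Illustrative sketch (not the generated code): the "vf1 = ve1 / (ve1 + vone)"
 * step above is the division at the end of a sigmoid evaluated as
 * sigmoid(x) = e / (e + 1) with e = exp(-|x|), followed by reflection
 * for positive inputs so the exp argument is never positive. */
static float sigmoid_div_sketch(float x) {
  const float vone = 1.0f;
  const float vz = fabsf(x);
  const float ve = expf(-vz);   /* the kernels approximate this with a
                                   polynomial or lookup table instead */
  float vf = ve / (ve + vone);  /* matches the listed division step */
  if (x > 0.0f) {
    vf = vone - vf;             /* reflect for the positive half */
  }
  return vf;
}

int main(void) {
  const float inputs[] = { -4.0f, -1.0f, 0.0f, 1.0f, 4.0f };
  for (int i = 0; i < 5; i++) {
    printf("sigmoid(%+.1f) ~ %.6f\n", inputs[i], sigmoid_div_sketch(inputs[i]));
  }
  return 0;
}
```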
/external/XNNPACK/src/f32-vscaleexpminusmax/gen/

| File | Line | Definition | Context |
|------|------|------------|---------|
| avx512f-p5-scalef-x32.c | 83 | __m512 vf1 = _mm512_scalef_ps(vp1, vn1); | in xnn_f32_vscaleexpminusmax_ukernel__avx512f_p5_scalef_x32() (local) |
| avx512f-p5-scalef-x48.c | 93 | __m512 vf1 = _mm512_scalef_ps(vp1, vn1); | in xnn_f32_vscaleexpminusmax_ukernel__avx512f_p5_scalef_x48() (local) |
| avx2-p5-x16.c | 97 | __m256 vf1 = _mm256_fmadd_ps(vt1, vp1, vs1); | in xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x16() (local) |
| avx2-p5-x24.c | 109 | __m256 vf1 = _mm256_fmadd_ps(vt1, vp1, vs1); | in xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x24() (local) |
| avx512f-p5-scalef-x64.c | 103 | __m512 vf1 = _mm512_scalef_ps(vp1, vn1); | in xnn_f32_vscaleexpminusmax_ukernel__avx512f_p5_scalef_x64() (local) |
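In the avx512f variants above, `_mm512_scalef_ps(vp1, vn1)` computes vp1 * 2^floor(vn1), so the 2^n factor from the range reduction is applied in a single instruction instead of being rebuilt through integer exponent manipulation, while the avx2 variants use the fused multiply-add form seen earlier. Below is a tiny sketch of what scalef does, guarded so it still compiles without AVX-512; the stand-in values 1.5 and 3.0 are arbitrary assumptions.

```c
#include <stdio.h>
#if defined(__AVX512F__)
#include <immintrin.h>
#endif

/* Illustrative sketch, not the generated kernel: the listed
 * "_mm512_scalef_ps(vp1, vn1)" step multiplies the polynomial result vp
 * by 2^n in one instruction, so the exponent from the range reduction
 * never has to be reassembled with integer shifts. */
int main(void) {
#if defined(__AVX512F__)
  const __m512 vp = _mm512_set1_ps(1.5f);      /* stand-in polynomial value */
  const __m512 vn = _mm512_set1_ps(3.0f);      /* stand-in exponent, n = 3  */
  const __m512 vf = _mm512_scalef_ps(vp, vn);  /* vf = vp * 2^n = 12.0      */

  float out[16];
  _mm512_storeu_ps(out, vf);
  printf("vf[0] = %f (expected 12.0)\n", out[0]);
#else
  /* Scalar equivalent of scalef: vf = vp * 2^n. */
  const float vp = 1.5f, vn = 3.0f;
  printf("vf = %f (expected 12.0)\n", vp * (float) (1 << (int) vn));
#endif
  return 0;
}
```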
/external/XNNPACK/src/f32-vscaleextexp/gen/

| File | Line | Definition | Context |
|------|------|------------|---------|
| avx512f-p5-scalef-x32.c | 83 | __m512 vf1 = _mm512_mul_ps(vp1, vscalev); | in xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x32() (local) |
| avx512f-p5-scalef-x48.c | 92 | __m512 vf1 = _mm512_mul_ps(vp1, vscalev); | in xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x48() (local) |
| avx2-p5-x16.c | 89 | __m256 vf1 = _mm256_mul_ps(vp1, vscalev); | in xnn_f32_vscaleextexp_ukernel__avx2_p5_x16() (local) |
| avx512f-p5-scalef-x64.c | 101 | __m512 vf1 = _mm512_mul_ps(vp1, vscalev); | in xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x64() (local) |
| avx2-p5-x24.c | 98 | __m256 vf1 = _mm256_mul_ps(vp1, vscalev); | in xnn_f32_vscaleextexp_ukernel__avx2_p5_x24() (local) |
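The vscaleextexp name suggests these kernels carry values in an extended-exponent form, i.e. as m * 2^e with the exponent kept in a separate float so very large or very small magnitudes do not overflow a single float; on that reading, `_mm256_mul_ps(vp1, vscalev)` is the mantissa half of such a multiply. The sketch below is only a scalar analogue under that assumption: the `extexp` struct and `extexp_mul` helper are hypothetical, not types from XNNPACK.

```c
#include <math.h>
#include <stdio.h>

/* Illustrative sketch, not the generated kernel: a value is carried as
 * m * 2^e with the exponent e in a separate float, so multiplying two
 * such values multiplies the mantissas (the _mm256_mul_ps step in the
 * listing) and adds the exponents. */
typedef struct { float m; float e; } extexp;   /* hypothetical: value = m * 2^e */

static extexp extexp_mul(extexp a, extexp b) {
  extexp r;
  r.m = a.m * b.m;   /* mantissa product, as in the listed multiply */
  r.e = a.e + b.e;   /* exponents simply add                        */
  return r;
}

int main(void) {
  const extexp a = { 1.5f, 100.0f };  /* 1.5 * 2^100 */
  const extexp b = { 2.0f, 100.0f };  /* 2.0 * 2^100 */
  const extexp c = extexp_mul(a, b);  /* 3.0 * 2^200: would overflow a plain float */
  printf("c = %g * 2^%g\n", c.m, c.e);

  const extexp d = extexp_mul(a, (extexp){ 0.5f, -90.0f });  /* back in float range */
  printf("d = %g (via ldexpf)\n", ldexpf(d.m, (int) d.e));
  return 0;
}
```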