/external/clang/test/CodeGen/

  enable_if.c
    23: void *vp2 = (void*)foo;   (in test1(), local)
    46: void *vp2 = (void*)bar;   (in test2(), local)

  overloadable.c
    53: void *vp2 = (void (*)(void *)) & addrof_many;   (in foo(), local)

/external/clang/test/SemaCXX/

  address-space-conversion.cpp
     50: void test_static_cast(void_ptr vp, void_ptr_1 vp1, void_ptr_2 vp2,   (in test_static_cast())
    131: void test_reinterpret_cast(void_ptr vp, void_ptr_1 vp1, void_ptr_2 vp2,   (in test_reinterpret_cast())
    157: void test_cstyle_cast(void_ptr vp, void_ptr_1 vp1, void_ptr_2 vp2,   (in test_cstyle_cast())
    181: void test_implicit_conversion(void_ptr vp, void_ptr_1 vp1, void_ptr_2 vp2,   (in test_implicit_conversion())
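Of the Clang-test hits, overloadable.c shows the pattern most compactly: with several overloads of one C function visible, a bare &name is ambiguous, so the test casts to a concrete function-pointer type to pick an overload and then parks the result in a void pointer named vp2. The SemaCXX hits, by contrast, receive address-space-qualified void pointers as parameters and exercise the different cast styles on them. A minimal sketch of the overloadable pattern, assuming Clang's __attribute__((overloadable)) extension; the helper name addrof_demo and its overloads are hypothetical, not taken from the test:

    #include <stdio.h>

    /* Two overloads of one C function name (a Clang extension). */
    static void __attribute__((overloadable)) addrof_demo(void *p) {
      printf("pointer overload: %p\n", p);
    }
    static void __attribute__((overloadable)) addrof_demo(int i) {
      printf("int overload: %d\n", i);
    }

    int main(void) {
      /* The cast to void (*)(void *) resolves the ambiguity, mirroring
       * "void *vp2 = (void (*)(void *)) & addrof_many;" above; the extra
       * (void *) cast makes the function-to-object pointer conversion
       * explicit. */
      void *vp2 = (void *)(void (*)(void *)) & addrof_demo;
      ((void (*)(void *))vp2)(vp2);
      return 0;
    }

This sketch compiles with Clang only; GCC has no overloadable attribute.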
/external/XNNPACK/src/f32-vscaleexpminusmax/gen/

  avx512f-p5-scalef-x48.c
    71: __m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);   (in xnn_f32_vscaleexpminusmax_ukernel__avx512f_p5_scalef_x48(), local)

  avx2-p5-x24.c
    86: __m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);   (in xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x24(), local)

  avx512f-p5-scalef-x64.c
    76: __m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);   (in xnn_f32_vscaleexpminusmax_ukernel__avx512f_p5_scalef_x64(), local)

  avx2-p5-x32.c
    93: __m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);   (in xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x32(), local)
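Every hit in this group is the same statement: the opening Horner step of the degree-5 polynomial (the "p5" in the file names) that approximates exp on a range-reduced argument, fused into a single FMA. vp2 is nothing special; it is batch 2 of the kernel's unroll. A one-lane sketch of the whole chain, using exp's Taylor coefficients purely for illustration (the generated kernels use minimax-fitted constants from their params structs):

    /* One lane of the p5 evaluation; coefficient values are Taylor
     * terms for illustration, not XNNPACK's fitted constants. */
    static float exp_p5(float vt) {
      const float vc5 = 1.0f / 120.0f;
      const float vc4 = 1.0f / 24.0f;
      const float vc3 = 1.0f / 6.0f;
      const float vc2 = 0.5f;
      const float vc1 = 1.0f;
      float vp = vc5 * vt + vc4;  /* the "vp2 = fmadd(vc5, vt2, vc4)" step */
      vp = vp * vt + vc3;
      vp = vp * vt + vc2;
      vp = vp * vt + vc1;
      return vt * vp + 1.0f;      /* exp(vt) ~ 1 + vt*p(vt) for small vt */
    }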
/external/XNNPACK/src/f32-vscaleextexp/gen/

  avx512f-p5-scalef-x48.c
    67: __m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);   (in xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x48(), local)

  avx512f-p5-scalef-x64.c
    71: __m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);   (in xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x64(), local)

  avx2-p5-x24.c
    73: __m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);   (in xnn_f32_vscaleextexp_ukernel__avx2_p5_x24(), local)
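The x24/x48/x64 suffixes are the unroll width in floats. With eight floats per __m256, an x24 kernel carries three batches per iteration, and vp0..vp2 are their per-batch polynomial accumulators. A sketch of that naming scheme, assuming an FMA-capable x86 target (compile with -mfma); the function name p5_first_step_x24 and the coefficient values are illustrative, not from XNNPACK:

    #include <immintrin.h>

    /* First Horner step for a 24-float batch: one FMA per 8-float lane
     * group. Illustrative sketch, not a real XNNPACK kernel. */
    void p5_first_step_x24(const float *t, float *p) {
      const __m256 vc5 = _mm256_set1_ps(1.0f / 120.0f);
      const __m256 vc4 = _mm256_set1_ps(1.0f / 24.0f);

      const __m256 vt0 = _mm256_loadu_ps(t + 0);
      const __m256 vt1 = _mm256_loadu_ps(t + 8);
      const __m256 vt2 = _mm256_loadu_ps(t + 16);

      const __m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
      const __m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
      const __m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);  /* the indexed line */

      _mm256_storeu_ps(p + 0, vp0);
      _mm256_storeu_ps(p + 8, vp1);
      _mm256_storeu_ps(p + 16, vp2);
    }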
/external/XNNPACK/src/f32-velu/gen/

  velu-scalar-rr2-p6-x3.c
    86: float vp2 = vc6 * vt2 + vc5;   (in xnn_f32_velu_ukernel__scalar_rr2_p6_x3(), local)

  velu-wasm-rr2-p6-x3.c
    74: float vp2 = vc6 * vt2 + vc5;   (in xnn_f32_velu_ukernel__wasm_rr2_p6_x3(), local)

  velu-wasm-rr2-p6-x4.c
    81: float vp2 = vc6 * vt2 + vc5;   (in xnn_f32_velu_ukernel__wasm_rr2_p6_x4(), local)

  velu-avx512f-rr1-p6-x48.c
    68: __m512 vp2 = _mm512_fmadd_ps(vc6, vt2, vc5);   (in xnn_f32_velu_ukernel__avx512f_rr1_p6_x48(), local)

  velu-scalar-rr2-lut16-p3-x3.c
    91: float vp2 = vc3 * vt2 + vc2;   (in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x3(), local)

  velu-wasm-rr2-lut16-p3-x3.c
    79: float vp2 = vc3 * vt2 + vc2;   (in xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x3(), local)

  velu-avx2-rr1-p6-x24.c
    66: __m256 vp2 = _mm256_fmadd_ps(vc6, vt2, vc5);   (in xnn_f32_velu_ukernel__avx2_rr1_p6_x24(), local)

  velu-scalar-rr2-p6-x4.c
    97: float vp2 = vc6 * vt2 + vc5;   (in xnn_f32_velu_ukernel__scalar_rr2_p6_x4(), local)
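Two variants appear in the ELU group. The p6 kernels run a degree-6 polynomial, so their first step is vp2 = vc6*vt2 + vc5; the lut16-p3 kernels fetch 2^(n/16) from a 16-entry table and need only a degree-3 polynomial for the remainder, so their first step is vp2 = vc3*vt2 + vc2. A one-lane sketch of the lut16-p3 exponential tail (the ELU kernels then feed this exp value into alpha*(exp(x) - 1) on the negative side); vs stands for the table factor, and the coefficients are Taylor terms for illustration:

    /* Degree-3 remainder after the 16-entry exp2 table has supplied vs. */
    static float lut16_p3_tail(float vt, float vs) {
      const float vc3 = 1.0f / 6.0f;  /* illustrative */
      const float vc2 = 0.5f;         /* illustrative */
      float vp = vc3 * vt + vc2;      /* the "vp2 = vc3*vt2 + vc2" step */
      vp = vp * vt + 1.0f;
      return vs * (vt * vp + 1.0f);   /* ~ vs * exp(vt) for small vt */
    }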
/external/XNNPACK/src/f32-vsigmoid/gen/

  vsigmoid-avx512f-rr1-p5-scalef-div-x48.c
    61: __m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);   (in xnn_f32_vsigmoid_ukernel__avx512f_rr1_p5_scalef_div_x48(), local)

  vsigmoid-avx512f-rr1-lut16-p3-perm-scalef-div-x48.c
    64: __m512 vp2 = _mm512_fmadd_ps(vt2, vc3, vc2);   (in xnn_f32_vsigmoid_ukernel__avx512f_rr1_lut16_p3_perm_scalef_div_x48(), local)

  vsigmoid-avx512f-rr1-p5-scalef-div-x64.c
    66: __m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);   (in xnn_f32_vsigmoid_ukernel__avx512f_rr1_p5_scalef_div_x64(), local)

  vsigmoid-avx2-rr1-p5-div-x24.c
    66: __m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);   (in xnn_f32_vsigmoid_ukernel__avx2_rr1_p5_div_x24(), local)
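The "div" in these kernel names is the reconstruction step: once the p5 or lut16-p3 machinery above has produced ve, an approximation of exp(-|x|), the sigmoid falls out of one division plus a sign-based mirror. A scalar sketch of that reconstruction, with libm's expf standing in for the polynomial:

    #include <math.h>

    /* sigmoid(x) = 1/(1 + exp(-x)), computed from exp(-|x|) so the
     * exponential argument is always non-positive and cannot overflow. */
    static float sigmoid_ref(float x) {
      const float ve = expf(-fabsf(x)); /* stand-in for the p5 chain */
      float vf = ve / (ve + 1.0f);      /* the "div" in the kernel names */
      if (x > 0.0f) vf = 1.0f - vf;     /* mirror for positive inputs */
      return vf;
    }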
/external/XNNPACK/src/f32-raddstoreexpminusmax/gen/

  scalar-rr2-p5-x4.c
    93: float vp2 = vc5 * vt2 + vc4;   (in xnn_f32_raddstoreexpminusmax_ukernel__scalar_rr2_p5_x4(), local)

  scalar-rr2-p5-x4-acc2.c
    94: float vp2 = vc5 * vt2 + vc4;   (in xnn_f32_raddstoreexpminusmax_ukernel__scalar_rr2_p5_x4_acc2(), local)

  scalar-rr2-p5-x4-acc4.c
    96: float vp2 = vc5 * vt2 + vc4;   (in xnn_f32_raddstoreexpminusmax_ukernel__scalar_rr2_p5_x4_acc4(), local)