Cross-reference: definition sites of the local variable vp4 (D = definition; columns are file:line, matched code, enclosing function).

/external/XNNPACK/src/f32-vscaleextexp/gen/
D | avx512f-p5-scalef-x80.c:77   | __m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4); | local in xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x80()
D | avx512f-p5-scalef-x96.c:81   | __m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4); | local in xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x96()
D | avx2-p5-x40.c:83             | __m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4); | local in xnn_f32_vscaleextexp_ukernel__avx2_p5_x40()
D | avx512f-p5-scalef-x112.c:85  | __m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4); | local in xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x112()
D | avx512f-p5-scalef-x128.c:89  | __m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4); | local in xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x128()
D | avx2-p5-x48.c:87             | __m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4); | local in xnn_f32_vscaleextexp_ukernel__avx2_p5_x48()
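
The six entries are one Horner step replicated across SIMD widths: the xNN suffix in each file name is the unroll width in floats, so an AVX-512 x80 kernel handles 5 vectors of 16 lanes per iteration and carries one polynomial accumulator per vector; the vp4 flagged above is simply the fifth of those accumulators. A minimal sketch of that first step (the helper name and array parameters are hypothetical; vc5/vc4 stand for the broadcast minimax coefficients hard-coded in the generated files):

    #include <immintrin.h>

    // First Horner step, p = c5*t + c4, issued once per unrolled vector;
    // five independent chains keep the FMA pipeline busy.
    static void p5_first_step(const __m512 vt[5], __m512 vp[5],
                              __m512 vc5, __m512 vc4) {
      vp[0] = _mm512_fmadd_ps(vc5, vt[0], vc4);
      vp[1] = _mm512_fmadd_ps(vc5, vt[1], vc4);
      vp[2] = _mm512_fmadd_ps(vc5, vt[2], vc4);
      vp[3] = _mm512_fmadd_ps(vc5, vt[3], vc4);
      vp[4] = _mm512_fmadd_ps(vc5, vt[4], vc4);  // the vp4 definitions above
    }
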
/external/XNNPACK/src/f32-vscaleexpminusmax/gen/
D | avx512f-p5-scalef-x80.c:83   | __m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4); | local in xnn_f32_vscaleexpminusmax_ukernel__avx512f_p5_scalef_x80()
D | avx512f-p5-scalef-x96.c:88   | __m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4); | local in xnn_f32_vscaleexpminusmax_ukernel__avx512f_p5_scalef_x96()
D | avx2-p5-x40.c:102            | __m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4); | local in xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x40()
D | avx512f-p5-scalef-x112.c:93  | __m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4); | local in xnn_f32_vscaleexpminusmax_ukernel__avx512f_p5_scalef_x112()
D | avx2-p5-x48.c:109            | __m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4); | local in xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x48()
D | avx512f-p5-scalef-x128.c:98  | __m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4); | local in xnn_f32_vscaleexpminusmax_ukernel__avx512f_p5_scalef_x128()
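
These kernels compute y[i] = scale * exp(x[i] - max) for softmax, with the same degree-5 polynomial opening step. A minimal sketch of the full chain for one vector, assuming the chain runs down to c0 and that the 2^n factor from range reduction is applied with scalef, as the -scalef file names indicate (the helper name and coefficient array are illustrative, not the generated code):

    #include <immintrin.h>

    // p(t) = ((((c5*t + c4)*t + c3)*t + c2)*t + c1)*t + c0, then scale by 2^n.
    static __m512 exp_p5_scalef(__m512 vt, __m512 vn, const float c[6]) {
      __m512 vp = _mm512_fmadd_ps(_mm512_set1_ps(c[5]), vt, _mm512_set1_ps(c[4]));
      vp = _mm512_fmadd_ps(vp, vt, _mm512_set1_ps(c[3]));
      vp = _mm512_fmadd_ps(vp, vt, _mm512_set1_ps(c[2]));
      vp = _mm512_fmadd_ps(vp, vt, _mm512_set1_ps(c[1]));
      vp = _mm512_fmadd_ps(vp, vt, _mm512_set1_ps(c[0]));
      return _mm512_scalef_ps(vp, vn);  // exp(x) ~ 2^n * p(t)
    }
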
/external/XNNPACK/src/f32-velu/gen/
D | velu-wasm-rr2-p6-x5.c:90        | float vp4 = vc6 * vt4 + vc5;                 | local in xnn_f32_velu_ukernel__wasm_rr2_p6_x5()
D | velu-scalar-rr2-p6-x5.c:110     | float vp4 = vc6 * vt4 + vc5;                 | local in xnn_f32_velu_ukernel__scalar_rr2_p6_x5()
D | velu-wasm-rr2-p6-x6.c:97        | float vp4 = vc6 * vt4 + vc5;                 | local in xnn_f32_velu_ukernel__wasm_rr2_p6_x6()
D | velu-scalar-rr2-p6-x6.c:121     | float vp4 = vc6 * vt4 + vc5;                 | local in xnn_f32_velu_ukernel__scalar_rr2_p6_x6()
D | velu-avx2-rr1-p6-x40.c:80       | __m256 vp4 = _mm256_fmadd_ps(vc6, vt4, vc5); | local in xnn_f32_velu_ukernel__avx2_rr1_p6_x40()
D | velu-avx512f-rr1-p6-x80.c:82    | __m512 vp4 = _mm512_fmadd_ps(vc6, vt4, vc5); | local in xnn_f32_velu_ukernel__avx512f_rr1_p6_x80()
D | velu-wasm-rr2-lut16-p3-x5.c:99  | float vp4 = vc3 * vt4 + vc2;                 | local in xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x5()
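
The ELU kernels approximate expm1 rather than exp and use a degree-6 fit, so the chain opens one coefficient higher (vc6, vc5); the scalar and wasm variants spell the FMA out as a plain multiply-add. The last entry, lut16-p3, instead pairs a 16-entry table with a degree-3 remainder, which is why its chain starts at vc3. A minimal scalar sketch of the p6 chain down to c2; the generated kernels then fold the remaining factors of t and the 2^n scale into the ELU output (the helper name and coefficient layout are assumptions):

    // Degree-6 Horner tail of the expm1 fit, coefficients c2..c6.
    static float expm1_p6_tail(float t, const float c[7]) {
      float p = c[6] * t + c[5];  // the `vp4 = vc6 * vt4 + vc5` line above
      p = p * t + c[4];
      p = p * t + c[3];
      p = p * t + c[2];
      return p;
    }
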
/external/XNNPACK/src/f32-vsigmoid/gen/
D | vsigmoid-avx512f-rr1-p5-scalef-div-x80.c:73             | __m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4); | local in xnn_f32_vsigmoid_ukernel__avx512f_rr1_p5_scalef_div_x80()
D | vsigmoid-avx512f-rr1-p5-scalef-div-x96.c:78             | __m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4); | local in xnn_f32_vsigmoid_ukernel__avx512f_rr1_p5_scalef_div_x96()
D | vsigmoid-avx512f-rr2-lut32-p2-perm2-scalef-div-x80.c:86 | __m512 vp4 = _mm512_fmadd_ps(vt4, vc2, vc1); | local in xnn_f32_vsigmoid_ukernel__avx512f_rr2_lut32_p2_perm2_scalef_div_x80()
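
Two sigmoid strategies show up here: rr1-p5 reuses the degree-5 exp polynomial (same vc5/vc4 opening step), while rr2-lut32-p2-perm2 trades polynomial degree for a 32-entry table of exponentials held in two registers and indexed with a two-register permute (the "perm2"), leaving only a degree-2 remainder, hence vp4 = _mm512_fmadd_ps(vt4, vc2, vc1). Both are -div variants, i.e. they finish with a true division. A minimal sketch of that finish under the identity sigmoid(-z) = exp(-z) / (exp(-z) + 1); this illustrates the standard trick, not the literal kernel body (names are illustrative):

    #include <immintrin.h>

    // Given ve = exp(-|x|), build sigmoid(x) with one division and a
    // masked flip for positive inputs: sigmoid(x) = 1 - sigmoid(-x).
    static __m512 sigmoid_div(__m512 vx, __m512 ve) {
      const __m512 vone = _mm512_set1_ps(1.0f);
      __m512 vd = _mm512_add_ps(ve, vone);              // exp(-|x|) + 1
      __m512 vf = _mm512_div_ps(ve, vd);                // sigmoid(-|x|)
      __mmask16 vpos = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_GT_OQ);
      return _mm512_mask_sub_ps(vf, vpos, vone, vf);    // flip where x > 0
    }
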
/external/XNNPACK/src/f32-raddextexp/gen/
D | avx512f-p5-scalef-x128.c:89 | __m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4); | local in xnn_f32_raddextexp_ukernel__avx512f_p5_scalef_x128()
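
f32-raddextexp sums exponentials without subtracting a running maximum first: "extexp" (extended exponent) means the partial sum is carried as a value/exponent pair vaccv * 2^vacce and renormalized with scalef whenever a larger exponent arrives, so it cannot overflow. A sketch of one accumulation step under that scheme, with vp4/vn4 as the per-vector polynomial value and exponent; this shows the idea, not the generated file's exact update order:

    #include <immintrin.h>

    // Fold one term vp4 * 2^vn4 into the running sum vaccv * 2^vacce.
    static void extexp_accumulate(__m512* vaccv, __m512* vacce,
                                  __m512 vp4, __m512 vn4) {
      __m512 vmax_e = _mm512_max_ps(*vacce, vn4);  // new common exponent
      __m512 vs = _mm512_scalef_ps(*vaccv, _mm512_sub_ps(*vacce, vmax_e));
      *vaccv = _mm512_add_ps(vs, _mm512_scalef_ps(vp4, _mm512_sub_ps(vn4, vmax_e)));
      *vacce = vmax_e;
    }
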
/external/XNNPACK/src/f32-raddexpminusmax/gen/
D | avx512f-p5-scalef-x128-acc2.c:98 | __m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4); | local in xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_x128_acc2()
D | avx512f-p5-scalef-x128.c:97      | __m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4); | local in xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_x128()
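
f32-raddexpminusmax is the conventional counterpart: the row maximum is subtracted before exponentiation, so plain float accumulators suffice. The -acc2 file differs from its sibling only in keeping two partial sums to shorten the floating-point add dependency chain. A minimal sketch of the per-vector tail and final merge, assuming the scalef reconstruction the file names indicate (names are illustrative):

    #include <immintrin.h>

    // Accumulate 2^n * p(t) per vector; with two accumulators (the -acc2
    // variant) back-to-back adds do not serialize on a single register.
    static float radd_tail(__m512 vacc0, __m512 vacc1, __m512 vp4, __m512 vn4) {
      vacc0 = _mm512_add_ps(vacc0, _mm512_scalef_ps(vp4, vn4));
      vacc0 = _mm512_add_ps(vacc0, vacc1);  // merge the partial sums
      return _mm512_reduce_add_ps(vacc0);   // horizontal sum of 16 lanes
    }
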