/external/XNNPACK/src/f32-vscaleextexp/gen/
D | avx512f-p5-scalef-x48.c | 97 const __m512 ve2 = _mm512_add_ps(vn2, vscalee); in xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x48() local
D | avx512f-p5-scalef-x64.c | 107 const __m512 ve2 = _mm512_add_ps(vn2, vscalee); in xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x64() local
D | avx2-p5-x24.c | 103 __m256 ve2 = _mm256_add_ps(vn2, vscalee); in xnn_f32_vscaleextexp_ukernel__avx2_p5_x24() local
D | avx2-p5-x32.c | 113 __m256 ve2 = _mm256_add_ps(vn2, vscalee); in xnn_f32_vscaleextexp_ukernel__avx2_p5_x32() local
D | avx512f-p5-scalef-x80.c | 117 const __m512 ve2 = _mm512_add_ps(vn2, vscalee); in xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x80() local
D | avx512f-p5-scalef-x96.c | 127 const __m512 ve2 = _mm512_add_ps(vn2, vscalee); in xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x96() local
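
In each of these f32-vscaleextexp definitions, ve2 is the exponent part of a result kept in the unevaluated form mantissa * 2^e: the kernels add the scale factor's exponent (vscalee) to n = round(x / ln 2) from the range reduction. A minimal scalar sketch of that step, with illustrative names rather than the XNNPACK ones:

#include <math.h>
#include <stdio.h>

int main(void) {
  const float x = 3.5f;        /* input element */
  const float scale_e = 2.0f;  /* exponent part of the external scale factor (assumed value) */

  const float n = rintf(x * 1.442695f);  /* n = round(x / ln 2) */
  const float t = x - n * 0.6931472f;    /* reduced argument t in [-ln2/2, ln2/2] */
  const float m = expf(t);               /* mantissa part, roughly in [0.71, 1.41] */
  const float e = n + scale_e;           /* the "ve2 = vn2 + vscalee" step */

  printf("value = %g * 2^%g = %g\n", m, e, m * exp2f(e));
  return 0;
}
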
/external/XNNPACK/src/f32-velu/gen/
D | velu-scalar-rr2-p6-x3.c | 120 const float ve2 = (vp2 + vs2) * valpha; in xnn_f32_velu_ukernel__scalar_rr2_p6_x3() local
D | velu-wasm-rr2-p6-x3.c | 108 const float ve2 = (vp2 + vs2) * valpha; in xnn_f32_velu_ukernel__wasm_rr2_p6_x3() local
D | velu-scalar-rr2-p6-x4.c | 139 const float ve2 = (vp2 + vs2) * valpha; in xnn_f32_velu_ukernel__scalar_rr2_p6_x4() local
D | velu-wasm-rr2-p6-x4.c | 123 const float ve2 = (vp2 + vs2) * valpha; in xnn_f32_velu_ukernel__wasm_rr2_p6_x4() local
D | velu-wasm-rr2-lut16-p3-x3.c | 101 const float ve2 = (vp2 + vs2) * valpha; in xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x3() local
D | velu-scalar-rr2-lut16-p3-x3.c | 113 const float ve2 = (vp2 + vs2) * valpha; in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x3() local
D | velu-avx2-rr1-p6-x24.c | 101 const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2); in xnn_f32_velu_ukernel__avx2_rr1_p6_x24() local
D | velu-scalar-rr2-p6-x5.c | 158 const float ve2 = (vp2 + vs2) * valpha; in xnn_f32_velu_ukernel__scalar_rr2_p6_x5() local
D | velu-scalar-rr2-lut16-p3-x4.c | 131 const float ve2 = (vp2 + vs2) * valpha; in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x4() local
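
In the f32-velu definitions, ve2 is the negative-branch ELU result: by the time these lines run, vs2 holds s - 1 (with s = 2^n from the range reduction) and vp2 holds the polynomial remainder already folded with t and s, so vp2 + vs2 ~= exp(x) - 1 and ve2 ~= alpha * expm1(x). In the AVX2 rr1 variant, valpha appears to be pre-multiplied into vs2, which is why a single fmadd suffices there. A minimal scalar sketch, with illustrative names and expf() standing in for the kernel's polynomial:

#include <math.h>
#include <stdio.h>

/* ELU negative branch, alpha * (exp(x) - 1), assembled the way the kernels
 * do it: (p + (s - 1)) * alpha, where s = 2^round(x / ln 2). */
static float elu_negative_branch(float x, float alpha) {
  const float s = exp2f(rintf(x * 1.442695f)); /* s = 2^n */
  const float p = expf(x) - s;                 /* stand-in for the evaluated polynomial: p + s == exp(x) */
  return (p + (s - 1.0f)) * alpha;             /* the "ve2 = (vp2 + vs2) * valpha" step */
}

int main(void) {
  const float x = -1.25f, alpha = 1.0f;
  printf("elu(%g) ~= %g, expm1 reference: %g\n",
         x, elu_negative_branch(x, alpha), alpha * expm1f(x));
  return 0;
}
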
/external/XNNPACK/src/f32-sigmoid/gen/
D | avx512f-rr1-p5-scalef-div-x48.c | 81 const __m512 ve2 = _mm512_scalef_ps(vp2, vn2); in xnn_f32_sigmoid_ukernel__avx512f_rr1_p5_scalef_div_x48() local
D | avx512f-rr1-lut16-p3-perm-scalef-div-x48.c | 84 const __m512 ve2 = _mm512_scalef_ps(vp2, vn2); in xnn_f32_sigmoid_ukernel__avx512f_rr1_lut16_p3_perm_scalef_div_x48() local
D | avx512f-rr2-lut32-p2-perm2-scalef-div-x48.c | 90 const __m512 ve2 = _mm512_scalef_ps(vp2, vn2); in xnn_f32_sigmoid_ukernel__avx512f_rr2_lut32_p2_perm2_scalef_div_x48() local
D | scalar-p5-div-x4.c | 103 const float ve2 = vt2 * vp2 + vs2; in xnn_f32_sigmoid_ukernel__scalar_p5_div_x4() local
D | avx512f-rr1-p5-scalef-div-x64.c | 91 const __m512 ve2 = _mm512_scalef_ps(vp2, vn2); in xnn_f32_sigmoid_ukernel__avx512f_rr1_p5_scalef_div_x64() local
D | avx512f-rr1-p5-scalef-nr1fma-x48.c | 81 const __m512 ve2 = _mm512_scalef_ps(vp2, vn2); in xnn_f32_sigmoid_ukernel__avx512f_rr1_p5_scalef_nr1fma_x48() local
D | avx512f-rr2-lut32-p2-perm2-scalef-nr1fma-x48.c | 90 const __m512 ve2 = _mm512_scalef_ps(vp2, vn2); in xnn_f32_sigmoid_ukernel__avx512f_rr2_lut32_p2_perm2_scalef_nr1fma_x48() local
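
In the f32-sigmoid definitions, ve2 reassembles e ~= exp(z) for the non-positive argument z = -|x|: the AVX-512 kernels use _mm512_scalef_ps(vp2, vn2), i.e. p * 2^n, while the scalar p5 kernel builds the same quantity as s + t*p. The sigmoid is then e / (e + 1), via a division or a Newton-Raphson (nr1fma) reciprocal. A minimal scalar reference of that structure, with ldexpf standing in for scalef and expf for the polynomial (illustrative names):

#include <math.h>
#include <stdio.h>

static float sigmoid_ref(float x) {
  const float z = -fabsf(x);             /* evaluate exp on the non-positive branch only */
  const float n = rintf(z * 1.442695f);  /* n = round(z / ln 2) */
  const float t = z - n * 0.6931472f;    /* reduced argument */
  const float p = expf(t);               /* stand-in for the degree-5 polynomial in t */
  const float e = ldexpf(p, (int) n);    /* the "ve2 = scalef(vp2, vn2)" step: p * 2^n */
  const float f = e / (e + 1.0f);        /* sigmoid(z) */
  return x >= 0.0f ? 1.0f - f : f;       /* mirror back for positive inputs */
}

int main(void) {
  printf("sigmoid(0.5) ~= %g\n", sigmoid_ref(0.5f));
  return 0;
}
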
/external/XNNPACK/src/f32-raddstoreexpminusmax/gen/ |
D | scalar-lut64-p2-x4.c | 81 const uint32_t ve2 = (fp32_to_bits(vn2) & UINT32_C(0xFFFFFFC0)) << 17; in xnn_f32_raddstoreexpminusmax_ukernel__scalar_lut64_p2_x4() local
|
D | scalar-lut64-p2-x4-acc2.c | 82 const uint32_t ve2 = (fp32_to_bits(vn2) & UINT32_C(0xFFFFFFC0)) << 17; in xnn_f32_raddstoreexpminusmax_ukernel__scalar_lut64_p2_x4_acc2() local
|
D | scalar-lut64-p2-x4-acc4.c | 84 const uint32_t ve2 = (fp32_to_bits(vn2) & UINT32_C(0xFFFFFFC0)) << 17; in xnn_f32_raddstoreexpminusmax_ukernel__scalar_lut64_p2_x4_acc4() local
|
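
In the lut64 kernels, vn2 holds the range-reduction count with a magic bias, so its low 6 mantissa bits index the 64-entry 2^(i/64) table while the bits above them, moved into the IEEE-754 exponent field by the << 17 (23 - 6 = 17), become ve2, the exponent increment later added to the table entry's bit pattern to rebuild s. A minimal sketch of that bit trick, simplified to compute 2^x for x already in log2 units; the magic-bias value 0x1.800000p17f and the on-the-fly exp2f() table entries are assumptions for illustration only:

#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t fp32_to_bits(float f) { uint32_t u; memcpy(&u, &f, sizeof u); return u; }
static float fp32_from_bits(uint32_t u) { float f; memcpy(&f, &u, sizeof f); return f; }

/* Compute 2^x (x a multiple of 1/64 here) via the lut64 bit trick. */
static float exp2_via_lut64(float x) {
  const float vmagic_bias = 0x1.800000p17f;      /* assumed bias: ulp is 2^-6 near 1.5*2^17 */
  const float vn = x + vmagic_bias;              /* low 6 mantissa bits now hold frac(x)*64 */
  const uint32_t bits = fp32_to_bits(vn);
  const uint32_t vidx = bits & 0x3F;                        /* table index */
  const uint32_t ve = (bits & UINT32_C(0xFFFFFFC0)) << 17;  /* the "ve2" step: exponent bits */
  const uint32_t table_entry = fp32_to_bits(exp2f((float) vidx * 0x1.0p-6f)); /* stand-in for the static 2^(i/64) table */
  return fp32_from_bits(table_entry + ve);       /* exponent rebuilt by integer bit addition */
}

int main(void) {
  const float x = -2.34375f; /* -150/64, exactly representable after biasing */
  printf("lut64: %g  exp2f: %g\n", exp2_via_lut64(x), exp2f(x));
  return 0;
}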