/external/XNNPACK/src/u8-rmax/
D | scalar.c | 22 | const uint8_t vt1 = x[1]; | in xnn_u8_rmax_ukernel__scalar() | local

/external/XNNPACK/src/u8-clamp/
D | scalar-x4.c | 24 | uint8_t vt1 = x[1]; | in xnn_u8_clamp_ukernel__scalar_x4() | local

/external/XNNPACK/src/x8-lut/
D | scalar.c | 30 | const uint8_t vt1 = t[vx1]; | in xnn_x8_lut_ukernel__scalar() | local

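Across the three byte kernels above, vt1 is just the second lane of a two-per-iteration scalar unroll: a plain load in u8-rmax and u8-clamp, and a 256-entry table lookup t[vx1] in x8-lut. A minimal sketch in the same style (hypothetical helper, not the XNNPACK source):

  #include <stddef.h>
  #include <stdint.h>

  /* Sketch of the x8-lut unroll pattern: two lookups per iteration,
   * with vt1 holding the translated second byte. */
  static void x8_lut_sketch(size_t n, const uint8_t* x, const uint8_t t[256], uint8_t* y) {
    for (; n >= 2; n -= 2) {
      const uint8_t vx0 = x[0];
      const uint8_t vx1 = x[1];
      x += 2;
      const uint8_t vt0 = t[vx0];
      const uint8_t vt1 = t[vx1];  /* the "vt1" definition indexed above */
      y[0] = vt0;
      y[1] = vt1;
      y += 2;
    }
    if (n != 0) {
      *y = t[*x];
    }
  }
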
/external/XNNPACK/src/x32-packx/
D | x4-wasmsimd.c | 49 | const v128_t vt1 = wasm_v32x4_shuffle(vx0, vx1, 2, 6, 3, 7); | in xnn_x32_packx_ukernel_4x__wasmsimd() | local
D | x4-sse.c | 50 | const __m128 vt1 = _mm_unpackhi_ps(vx0, vx1); | in xnn_x32_packx_ukernel_4x__sse() | local

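In both packx rows above, vt1 is the high-half interleave step of a 4x4 transpose that repacks four input rows into column-major panels; the wasm_v32x4_shuffle(vx0, vx1, 2, 6, 3, 7) lane pattern is the WebAssembly spelling of SSE's _mm_unpackhi_ps(vx0, vx1). A standalone SSE sketch (illustrative only, not the kernel itself):

  #include <stdio.h>
  #include <xmmintrin.h>

  int main(void) {
    const float row0[4] = {0.f, 1.f, 2.f, 3.f};
    const float row1[4] = {4.f, 5.f, 6.f, 7.f};
    const __m128 vx0 = _mm_loadu_ps(row0);
    const __m128 vx1 = _mm_loadu_ps(row1);
    /* Interleave low and high halves of the two rows. */
    const __m128 vt0 = _mm_unpacklo_ps(vx0, vx1);  /* {0, 4, 1, 5} */
    const __m128 vt1 = _mm_unpackhi_ps(vx0, vx1);  /* {2, 6, 3, 7} */
    float out[8];
    _mm_storeu_ps(out, vt0);
    _mm_storeu_ps(out + 4, vt1);
    for (int i = 0; i < 8; i++) printf("%.0f ", out[i]);  /* 0 4 1 5 2 6 3 7 */
    printf("\n");
    return 0;
  }
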
/external/XNNPACK/src/f32-raddstoreexpminusmax/gen/
D | scalar-p5-x2.c | 72 | float vt1 = vn1 * vminus_ln2_hi + vx1; | in xnn_f32_raddstoreexpminusmax_ukernel__scalar_p5_x2() | local
D | scalar-p5-x2-acc2.c | 73 | float vt1 = vn1 * vminus_ln2_hi + vx1; | in xnn_f32_raddstoreexpminusmax_ukernel__scalar_p5_x2_acc2() | local
D | scalar-lut64-p2-x2.c | 90 | float vt1 = vn1 * vminus_ln2_o64_hi + vx1; | in xnn_f32_raddstoreexpminusmax_ukernel__scalar_lut64_p2_x2() | local
D | scalar-lut64-p2-x2-acc2.c | 91 | float vt1 = vn1 * vminus_ln2_o64_hi + vx1; | in xnn_f32_raddstoreexpminusmax_ukernel__scalar_lut64_p2_x2_acc2() | local

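In every exp-family row above, vt1 is the first step of a Cody-Waite range reduction: with vn = round(x / ln2) (or x / (ln2/64) in the lut64 variants, hence vminus_ln2_o64_hi), ln(2) is split into hi and lo parts so that vn * ln2_hi rounds exactly in float and the residual error is confined to the lo step. The same definition recurs in the f32-sigmoid, f32-velu, f32-vscaleexpminusmax, and f32-vscaleextexp rows below. A scalar sketch with illustrative constants (not XNNPACK's exact splits or polynomials):

  #include <math.h>
  #include <stdio.h>

  /* exp(x) ~= 2^n * p(t), valid away from overflow/underflow bounds. */
  static float exp_sketch(float vx) {
    const float vlog2e        =  1.442695041f;    /* 1/ln(2) */
    const float vminus_ln2_hi = -0.693359375f;    /* -ln(2) high part, exact in float */
    const float vminus_ln2_lo =  2.12194440e-4f;  /* -(ln(2) - ln2_hi) */
    const float vn = nearbyintf(vx * vlog2e);     /* n = round(x / ln2) */
    float vt = vn * vminus_ln2_hi + vx;           /* <- the "vt1" definition above */
    vt = vn * vminus_ln2_lo + vt;                 /* second (lo) reduction step */
    /* Degree-5 Taylor polynomial on |t| <= ln(2)/2, standing in for the
     * tuned p5/p6 minimax polynomials of the generated kernels. */
    const float vp = 1.0f + vt * (1.0f + vt * (0.5f + vt * (0x1.555556p-3f
                   + vt * (0x1.555556p-5f + vt * 0x1.111112p-7f))));
    return ldexpf(vp, (int) vn);
  }

  int main(void) {
    printf("%f vs %f\n", exp_sketch(1.0f), expf(1.0f));  /* ~2.718282 both */
    return 0;
  }
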
/external/XNNPACK/src/f32-sigmoid/gen/
D | scalar-p5-div-x2.c | 57 | float vt1 = vn1 * vln2_hi + vz1; | in xnn_f32_sigmoid_ukernel__scalar_p5_div_x2() | local
D | scalar-lut64-p2-div-x2.c | 62 | float vt1 = vn1 * vln2_hi + vz1; | in xnn_f32_sigmoid_ukernel__scalar_lut64_p2_div_x2() | local
D | avx512f-rr1-p5-scalef-div-x32.c | 52 | __m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vz1); | in xnn_f32_sigmoid_ukernel__avx512f_rr1_p5_scalef_div_x32() | local
D | scalar-lut2048-p1-div-x2.c | 62 | float vt1 = vn1 * vln2_hi + vz1; | in xnn_f32_sigmoid_ukernel__scalar_lut2048_p1_div_x2() | local

/external/XNNPACK/src/f32-velu/gen/
D | velu-wasm-rr2-p6-x2.c | 60 | float vt1 = vn1 * vminus_ln2_hi + vz1; | in xnn_f32_velu_ukernel__wasm_rr2_p6_x2() | local
D | velu-scalar-rr2-p6-x2.c | 60 | float vt1 = vn1 * vminus_ln2_hi + vz1; | in xnn_f32_velu_ukernel__scalar_rr2_p6_x2() | local
D | velu-wasm-rr2-lut16-p3-x2.c | 63 | float vt1 = vn1 * vminus_ln2_hi + vz1; | in xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x2() | local
D | velu-scalar-rr2-lut16-p3-x2.c | 63 | float vt1 = vn1 * vminus_ln2_hi + vz1; | in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x2() | local
D | velu-scalar-rr2-p6-x3.c | 65 | float vt1 = vn1 * vminus_ln2_hi + vz1; | in xnn_f32_velu_ukernel__scalar_rr2_p6_x3() | local

/external/XNNPACK/src/f32-vscaleexpminusmax/gen/
D | avx512f-p5-scalef-x32.c | 58 | __m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vx1); | in xnn_f32_vscaleexpminusmax_ukernel__avx512f_p5_scalef_x32() | local
D | avx2-p5-x16.c | 71 | __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_hi, vx1); | in xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x16() | local
D | avx512f-p5-scalef-x48.c | 61 | __m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vx1); | in xnn_f32_vscaleexpminusmax_ukernel__avx512f_p5_scalef_x48() | local

/external/XNNPACK/src/f32-vscaleextexp/gen/
D | avx512f-p5-scalef-x32.c | 55 | __m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vx1); | in xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x32() | local
D | avx512f-p5-scalef-x48.c | 57 | __m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vx1); | in xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x48() | local
D | avx2-p5-x16.c | 61 | __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_hi, vx1); | in xnn_f32_vscaleextexp_ukernel__avx2_p5_x16() | local

/external/XNNPACK/src/f32-ibilinear/gen/
D | scalar-c2.c | 61 | const float vt1 = vtl1 + vtd1 * valphah; | in xnn_f32_ibilinear_ukernel__scalar_c2() | local

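Here vt1 is the horizontal lerp along the top edge of a 2x2 pixel neighborhood: with vtd1 = vtr1 - vtl1 precomputed, vtl1 + vtd1 * valphah blends top-left toward top-right; a matching bottom lerp and one vertical lerp complete the bilinear sample. A minimal sketch (hypothetical helper, not the kernel):

  #include <stdio.h>

  static float bilinear_sketch(float tl, float tr, float bl, float br,
                               float alphah, float alphav) {
    const float vt = tl + (tr - tl) * alphah;  /* the "vt1" pattern above */
    const float vb = bl + (br - bl) * alphah;  /* bottom-edge lerp */
    return vt + (vb - vt) * alphav;            /* vertical lerp */
  }

  int main(void) {
    /* Sampling the center of a cell with corner values 0, 1, 2, 3. */
    printf("%f\n", bilinear_sketch(0.f, 1.f, 2.f, 3.f, 0.5f, 0.5f));  /* 1.5 */
    return 0;
  }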