/external/XNNPACK/src/u8-clamp/ |
D | scalar-x4.c | 26 uint8_t vt3 = x[3]; in xnn_u8_clamp_ukernel__scalar_x4() local
|
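Note: the match above sits in XNNPACK's scalar U8 clamp micro-kernel, which loads four bytes per iteration (vt0..vt3) and clamps each against runtime bounds. A minimal self-contained sketch of that pattern follows; the real kernel reads its bounds from an xnn params struct, so the plain vmin/vmax arguments and the u8_clamp_scalar_x4 name here are assumptions for illustration.

    #include <stddef.h>
    #include <stdint.h>

    /* Hedged sketch of a 4-element-per-iteration scalar U8 clamp kernel.
     * The real XNNPACK kernel takes its bounds from a params struct;
     * plain min/max arguments are used here for illustration. */
    static void u8_clamp_scalar_x4(size_t n, const uint8_t* x, uint8_t* y,
                                   uint8_t vmin, uint8_t vmax) {
      for (; n >= 4; n -= 4) {
        uint8_t vt0 = x[0];
        uint8_t vt1 = x[1];
        uint8_t vt2 = x[2];
        uint8_t vt3 = x[3];  /* the local flagged in the index above */
        x += 4;

        vt0 = vt0 < vmin ? vmin : vt0 > vmax ? vmax : vt0;
        vt1 = vt1 < vmin ? vmin : vt1 > vmax ? vmax : vt1;
        vt2 = vt2 < vmin ? vmin : vt2 > vmax ? vmax : vt2;
        vt3 = vt3 < vmin ? vmin : vt3 > vmax ? vmax : vt3;

        y[0] = vt0; y[1] = vt1; y[2] = vt2; y[3] = vt3;
        y += 4;
      }
      for (; n != 0; n--) {  /* scalar tail for n not divisible by 4 */
        uint8_t vt = *x++;
        vt = vt < vmin ? vmin : vt > vmax ? vmax : vt;
        *y++ = vt;
      }
    }
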
/external/XNNPACK/src/x8-lut/ |
D | scalar.c | 32 const uint8_t vt3 = t[vx3]; in xnn_x8_lut_ukernel__scalar() local
|
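Note: x8-lut applies a 256-entry byte lookup table elementwise; each input byte indexes the table, which is exactly the flagged line vt3 = t[vx3]. A hedged sketch of the unrolled-by-4 shape (the x8_lut_scalar name is illustrative):

    #include <stddef.h>
    #include <stdint.h>

    /* Hedged sketch of a scalar 8-bit LUT kernel: every output byte is
     * the 256-entry table value selected by the input byte, unrolled by 4. */
    static void x8_lut_scalar(size_t n, const uint8_t* x, uint8_t* y,
                              const uint8_t t[256]) {
      for (; n >= 4; n -= 4) {
        const size_t vx0 = x[0];
        const size_t vx1 = x[1];
        const size_t vx2 = x[2];
        const size_t vx3 = x[3];
        x += 4;

        y[0] = t[vx0];
        y[1] = t[vx1];
        y[2] = t[vx2];
        y[3] = t[vx3];  /* vt3 = t[vx3] in the indexed line */
        y += 4;
      }
      for (; n != 0; n--) {
        *y++ = t[*x++];
      }
    }
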
/external/XNNPACK/src/x32-packx/ |
D | x4-wasmsimd.c | 51 const v128_t vt3 = wasm_v32x4_shuffle(vx2, vx3, 2, 6, 3, 7); in xnn_x32_packx_ukernel_4x__wasmsimd() local
|
D | x4-sse.c | 52 const __m128 vt3 = _mm_unpackhi_ps(vx2, vx3); in xnn_x32_packx_ukernel_4x__sse() local
|
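Note: both packx matches are one interleave step of a 4x4 32-bit transpose: _mm_unpackhi_ps(vx2, vx3) merges the high halves of two rows, and the wasm_v32x4_shuffle with lanes (2, 6, 3, 7) is the same operation. The sketch below completes the full transpose from those interleaves, equivalent to the classic _MM_TRANSPOSE4_PS idiom; the transpose4x4_sse name and flat in/out buffers are illustrative, since the real packx kernel streams the transposed rows into a packed buffer instead.

    #include <xmmintrin.h>

    /* Hedged sketch: transpose a 4x4 block of floats with SSE unpacks.
     * vt0..vt3 hold interleaved row pairs; a second round of moves
     * produces the transposed rows, as in _MM_TRANSPOSE4_PS. */
    static void transpose4x4_sse(const float in[16], float out[16]) {
      const __m128 vx0 = _mm_loadu_ps(in + 0);
      const __m128 vx1 = _mm_loadu_ps(in + 4);
      const __m128 vx2 = _mm_loadu_ps(in + 8);
      const __m128 vx3 = _mm_loadu_ps(in + 12);

      const __m128 vt0 = _mm_unpacklo_ps(vx0, vx1);  /* x00 x10 x01 x11 */
      const __m128 vt1 = _mm_unpackhi_ps(vx0, vx1);  /* x02 x12 x03 x13 */
      const __m128 vt2 = _mm_unpacklo_ps(vx2, vx3);  /* x20 x30 x21 x31 */
      const __m128 vt3 = _mm_unpackhi_ps(vx2, vx3);  /* the flagged line */

      _mm_storeu_ps(out + 0,  _mm_movelh_ps(vt0, vt2));  /* column 0 as row */
      _mm_storeu_ps(out + 4,  _mm_movehl_ps(vt2, vt0));  /* column 1 as row */
      _mm_storeu_ps(out + 8,  _mm_movelh_ps(vt1, vt3));  /* column 2 as row */
      _mm_storeu_ps(out + 12, _mm_movehl_ps(vt3, vt1));  /* column 3 as row */
    }
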
/external/XNNPACK/src/f32-raddstoreexpminusmax/gen/ |
D | scalar-p5-x4.c | 84 float vt3 = vn3 * vminus_ln2_hi + vx3; in xnn_f32_raddstoreexpminusmax_ukernel__scalar_p5_x4() local
|
D | scalar-p5-x4-acc2.c | 85 float vt3 = vn3 * vminus_ln2_hi + vx3; in xnn_f32_raddstoreexpminusmax_ukernel__scalar_p5_x4_acc2() local
|
D | scalar-p5-x4-acc4.c | 87 float vt3 = vn3 * vminus_ln2_hi + vx3; in xnn_f32_raddstoreexpminusmax_ukernel__scalar_p5_x4_acc4() local
|
D | scalar-lut64-p2-x4.c | 106 float vt3 = vn3 * vminus_ln2_o64_hi + vx3; in xnn_f32_raddstoreexpminusmax_ukernel__scalar_lut64_p2_x4() local
|
D | scalar-lut64-p2-x4-acc2.c | 107 float vt3 = vn3 * vminus_ln2_o64_hi + vx3; in xnn_f32_raddstoreexpminusmax_ukernel__scalar_lut64_p2_x4_acc2() local
|
D | scalar-lut64-p2-x4-acc4.c | 109 float vt3 = vn3 * vminus_ln2_o64_hi + vx3; in xnn_f32_raddstoreexpminusmax_ukernel__scalar_lut64_p2_x4_acc4() local
|
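Note: all six matches in this directory are the same step of Cody-Waite range reduction inside exp(x - max): n = round(x / ln2) has already been computed, and t = n * (-ln2_hi) + x reconstructs the reduced argument in two pieces, because ln2 is split into a high part with trailing zero bits plus a low correction so each multiply-add stays nearly exact. A scalar sketch of the two-step reduction feeding a degree-5 polynomial follows; the scalar_exp_p5 name is hypothetical, and the coefficients are the commonly used minimax values for this formulation, which may differ in the last bits from the generated kernels.

    #include <math.h>

    /* Hedged sketch of one lane of the p5 exp(x - max) path: Cody-Waite
     * range reduction with ln2 split into hi/lo parts, a degree-5
     * polynomial in the reduced argument t, then scaling by 2^n. */
    static float scalar_exp_p5(float vx) {          /* vx = x - max, vx <= 0 */
      const float vlog2e        =  0x1.715476p+0f;  /* 1/ln(2) */
      const float vminus_ln2_hi = -0x1.62E400p-1f;  /* -ln(2), high bits */
      const float vminus_ln2_lo = -0x1.7F7D1Cp-20f; /* -ln(2), low correction */

      const float vn = nearbyintf(vx * vlog2e);     /* n = round(x / ln2) */
      float vt = vn * vminus_ln2_hi + vx;           /* the line indexed above */
      vt = vn * vminus_ln2_lo + vt;                 /* second reduction step */

      /* exp(t) ~= 1 + t * p(t), with minimax-tuned Horner coefficients. */
      float vp = 0x1.0F9F9Cp-7f;
      vp = vp * vt + 0x1.573A1Ap-5f;
      vp = vp * vt + 0x1.555A80p-3f;
      vp = vp * vt + 0x1.FFFDC6p-2f;
      vp = vp * vt + 0x1.FFFFF6p-1f;
      const float vf = vt * vp + 1.0f;

      return ldexpf(vf, (int) vn);                  /* apply the 2^n factor */
    }

The full raddstoreexpminusmax kernels additionally store every exp value and accumulate their running sum (the "raddstore" part), which becomes the softmax denominator; the acc2/acc4 variants just split that sum across 2 or 4 accumulators to shorten the dependency chain.
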
/external/XNNPACK/src/f32-vscaleextexp/gen/ |
D | avx512f-p5-scalef-x64.c | 61 __m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_hi, vx3); in xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x64() local
|
D | avx2-p5-x32.c | 67 __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_hi, vx3); in xnn_f32_vscaleextexp_ukernel__avx2_p5_x32() local
|
D | avx512f-p5-scalef-x80.c | 63 __m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_hi, vx3); in xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x80() local
|
D | avx512f-p5-scalef-x96.c | 65 __m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_hi, vx3); in xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x96() local
|
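Note: the vscaleextexp kernels run the same reduction as above, vectorized with one fused multiply-add per step, but keep results in extended-exponent form (mantissa vf plus a separate exponent vector vn) so values far outside float range survive; AVX512 can then apply the exponent with _mm512_scalef_ps, which computes x * 2^floor(y) per lane. A hedged sketch of both halves, with the ext_exp_reduce and apply_ext_exp helper names being assumptions:

    #include <immintrin.h>

    /* Hedged sketch of the AVX512 variant of the Cody-Waite step:
     * one FMA per reduction, with the result kept as vf * 2^vn. */
    static __m512 ext_exp_reduce(__m512 vx, __m512* out_n) {
      const __m512 vlog2e        = _mm512_set1_ps(0x1.715476p+0f);
      const __m512 vminus_ln2_hi = _mm512_set1_ps(-0x1.62E400p-1f);
      const __m512 vminus_ln2_lo = _mm512_set1_ps(-0x1.7F7D1Cp-20f);

      /* n = round(x / ln2), round-to-nearest-even. */
      const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);

      /* t = n * -ln2_hi + x, then the low-part correction: the exact
       * shape of the _mm512_fmadd_ps lines flagged above. */
      __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
      vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);

      *out_n = vn;
      return vt;
    }

    /* After the polynomial turns vt into vf ~= exp(t), VSCALEFPS applies
     * the 2^vn factor without ever materializing 2^vn as a float, which
     * would overflow for large vn. */
    static __m512 apply_ext_exp(__m512 vf, __m512 vn) {
      return _mm512_scalef_ps(vf, vn);  /* vf * 2^floor(vn) per lane */
    }
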
/external/XNNPACK/src/f32-vscaleexpminusmax/gen/ |
D | avx512f-p5-scalef-x64.c | 66 __m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_hi, vx3); in xnn_f32_vscaleexpminusmax_ukernel__avx512f_p5_scalef_x64() local
|
D | avx512f-p5-scalef-x80.c | 69 __m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_hi, vx3); in xnn_f32_vscaleexpminusmax_ukernel__avx512f_p5_scalef_x80() local
|
D | avx2-p5-x32.c | 83 __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_hi, vx3); in xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x32() local
|
D | avx512f-p5-scalef-x96.c | 72 __m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_hi, vx3); in xnn_f32_vscaleexpminusmax_ukernel__avx512f_p5_scalef_x96() local
|
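Note: vscaleexpminusmax is the fused softmax-numerator form of the same code: the flagged lines are again the reduction FMA, and the kernel's job as a whole is y = scale * exp(x - max). A plain-C restatement of that contract, assuming the kernel-style signature that receives scale and max as arguments (the vscaleexpminusmax_ref name is illustrative):

    #include <math.h>
    #include <stddef.h>

    /* Hedged reference for the operation the vectorized kernels implement.
     * Subtracting the precomputed maximum keeps every exp argument <= 0,
     * so the exponential cannot overflow and softmax stays well
     * conditioned. */
    static void vscaleexpminusmax_ref(size_t n, const float* x, float* y,
                                      float scale, float vmax) {
      for (size_t i = 0; i < n; i++) {
        y[i] = scale * expf(x[i] - vmax);  /* vectorized 16/8-wide above */
      }
    }
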
/external/XNNPACK/src/f32-velu/gen/ |
D | velu-scalar-rr2-p6-x4.c | 72 float vt3 = vn3 * vminus_ln2_hi + vz3; in xnn_f32_velu_ukernel__scalar_rr2_p6_x4() local
|
D | velu-wasm-rr2-p6-x4.c | 72 float vt3 = vn3 * vminus_ln2_hi + vz3; in xnn_f32_velu_ukernel__wasm_rr2_p6_x4() local
|
D | velu-scalar-rr2-p6-x5.c | 77 float vt3 = vn3 * vminus_ln2_hi + vz3; in xnn_f32_velu_ukernel__scalar_rr2_p6_x5() local
|
D | velu-scalar-rr2-lut16-p3-x4.c | 79 float vt3 = vn3 * vminus_ln2_hi + vz3; in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x4() local
|
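Note: the velu kernels vectorize ELU, y = x for x > 0 and alpha * (exp(x) - 1) otherwise; "rr2" names the same two-step hi/lo reduction seen in the flagged lines, and "p6" vs "lut16-p3" names the polynomial degree vs a 16-entry 2^(k/16) table used for exp. A hedged scalar reference, with the velu_ref name illustrative and the kernels' extra scale parameters omitted:

    #include <math.h>
    #include <stddef.h>

    /* Hedged reference for ELU: only the non-positive side feeds the
     * exponential, which is what makes the rr2 reduction safe here. */
    static void velu_ref(size_t n, const float* x, float* y, float alpha) {
      for (size_t i = 0; i < n; i++) {
        const float vx = x[i];
        const float vz = vx < 0.0f ? vx : 0.0f;  /* z <= 0 */
        const float ve = alpha * expm1f(vz);     /* alpha * (exp(z) - 1) */
        y[i] = vx > 0.0f ? vx : ve;
      }
    }
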
/external/libvpx/libvpx/vp8/common/mips/msa/ |
D | idct_msa.c | 90 v4i32 in0, in1, in2, in3, hz0, hz1, hz2, hz3, vt0, vt1, vt2, vt3; in idct4x4_addblk_msa() local
                183 v4i32 hz0_w, hz1_w, hz2_w, hz3_w, vt0, vt1, vt2, vt3, res0, res1, res2, res3; in dequant_idct4x4_addblk_msa() local
                219 v8i16 hz0, hz1, hz2, hz3, vt0, vt1, vt2, vt3, res0, res1, res2, res3; in dequant_idct4x4_addblk_2x_msa() local
|
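Note: in the libvpx matches the hz*/vt* names mark the two passes of a separable 4x4 inverse transform: a horizontal 1-D pass produces hz0..hz3, a vertical pass over the transposed data produces vt0..vt3, and the residual is added to the predictor block with clamping. A structure-only sketch follows; idct4_1d stands in for the real VP8 butterfly, and the per-pass rounding shifts are omitted, so treat this as the shape of the code rather than its arithmetic.

    #include <stdint.h>

    /* Hedged, structure-only sketch of the separable IDCT-and-add
     * pattern behind the MSA kernels above. idct4_1d is hypothetical. */
    typedef void (*idct4_1d_fn)(const int32_t in[4], int32_t out[4]);

    static uint8_t clamp_u8(int32_t v) {
      return (uint8_t) (v < 0 ? 0 : v > 255 ? 255 : v);
    }

    static void idct4x4_addblk_ref(const int16_t coeffs[16],
                                   idct4_1d_fn idct4_1d,
                                   uint8_t* dst, int stride) {
      int32_t hz[4][4], vt[4][4], tmp[4], col[4];

      for (int r = 0; r < 4; r++) {            /* horizontal pass -> hz */
        for (int c = 0; c < 4; c++) tmp[c] = coeffs[r * 4 + c];
        idct4_1d(tmp, hz[r]);
      }
      for (int c = 0; c < 4; c++) {            /* vertical pass -> vt */
        for (int r = 0; r < 4; r++) tmp[r] = hz[r][c];
        idct4_1d(tmp, col);
        for (int r = 0; r < 4; r++) vt[r][c] = col[r];
      }
      for (int r = 0; r < 4; r++) {            /* add to prediction, clamp */
        for (int c = 0; c < 4; c++) {
          dst[r * stride + c] = clamp_u8(dst[r * stride + c] + vt[r][c]);
        }
      }
    }
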
/external/XNNPACK/src/f32-sigmoid/gen/ |
D | scalar-p5-div-x4.c | 69 float vt3 = vn3 * vln2_hi + vz3; in xnn_f32_sigmoid_ukernel__scalar_p5_div_x4() local
|
D | avx512f-rr1-p5-scalef-div-x64.c | 62 __m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2, vz3); in xnn_f32_sigmoid_ukernel__avx512f_rr1_p5_scalef_div_x64() local
|
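Note: the sigmoid kernels reuse the exp machinery above: for z = -|x| they evaluate e = exp(z) with the same reduction and polynomial, form e / (e + 1) with a true division (the "div" variants), and mirror the result for positive inputs. A hedged scalar reference, with the sigmoid_ref name illustrative:

    #include <math.h>
    #include <stddef.h>

    /* Hedged reference for the sigmoid kernels: exp is only evaluated on
     * the non-positive half-line (no overflow), and positive inputs are
     * recovered by the identity sigmoid(x) = 1 - sigmoid(-x). The real
     * kernels replace expf with the p5 polynomial paths named above. */
    static void sigmoid_ref(size_t n, const float* x, float* y) {
      for (size_t i = 0; i < n; i++) {
        const float vz = -fabsf(x[i]);       /* z <= 0 */
        const float ve = expf(vz);
        const float vf = ve / (ve + 1.0f);   /* sigmoid(z), the "div" step */
        y[i] = x[i] > 0.0f ? 1.0f - vf : vf;
      }
    }
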