| /external/XNNPACK/src/u8-rmax/ |
| D | scalar.c | 22 const uint8_t vt1 = x[1]; in xnn_u8_rmax_ukernel__scalar() local
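Here vt1 is the second of several input bytes that the scalar rmax microkernel compares against a running maximum. A minimal sketch of that reduction pattern, written from the line above rather than taken from the XNNPACK source:

    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative running-max reduction over uint8_t, unrolled by 4
     * (a simplified sketch, not the actual xnn_u8_rmax_ukernel__scalar). */
    static uint8_t rmax_u8(size_t n, const uint8_t* x) {
      uint8_t vmax = 0;
      for (; n >= 4; n -= 4) {
        const uint8_t vt0 = x[0];
        const uint8_t vt1 = x[1];   /* the vt1 referenced above */
        const uint8_t vt2 = x[2];
        const uint8_t vt3 = x[3];
        x += 4;
        vmax = vt0 > vmax ? vt0 : vmax;
        vmax = vt1 > vmax ? vt1 : vmax;
        vmax = vt2 > vmax ? vt2 : vmax;
        vmax = vt3 > vmax ? vt3 : vmax;
      }
      for (; n != 0; --n) {
        const uint8_t vt = *x++;
        vmax = vt > vmax ? vt : vmax;
      }
      return vmax;
    }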
|
| /external/XNNPACK/src/x8-lut/gen/ |
| D | lut-scalar-x2.c | 32 const uint32_t vt1 = (uint32_t) t[vx1]; in xnn_x8_lut_ukernel__scalar_x2() local
|
| D | lut-scalar-x4.c | 34 const uint32_t vt1 = (uint32_t) t[vx1]; in xnn_x8_lut_ukernel__scalar_x4() local
|
| D | lut-scalar-x8.c | 38 const uint32_t vt1 = (uint32_t) t[vx1]; in xnn_x8_lut_ukernel__scalar_x8() local
|
| D | lut-scalar-x16.c | 46 const uint32_t vt1 = (uint32_t) t[vx1]; in xnn_x8_lut_ukernel__scalar_x16() local
|
| D | lut-ssse3-x16.c | 30 const __m128i vt1 = _mm_load_si128((const __m128i*) (t + 16)); in xnn_x8_lut_ukernel__ssse3_x16() local
|
| D | lut-avx512skx-vpshufb-x64.c | 30 const __m512i vt1 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (t + 16))); in xnn_x8_lut_ukernel__avx512skx_vpshufb_x64() local
|
| D | lut-avx-x16.c | 30 const __m128i vt1 = _mm_load_si128((const __m128i*) (t + 16)); in xnn_x8_lut_ukernel__avx_x16() local
|
| D | lut-avx2-x32.c | 30 const __m256i vt1 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (t + 16))); in xnn_x8_lut_ukernel__avx2_x32() local
|
| D | lut-avx2-x64.c | 30 const __m256i vt1 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (t + 16))); in xnn_x8_lut_ukernel__avx2_x64() local
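In all of the x8-lut microkernels above, vt1 holds data drawn from a 256-entry byte table t: the scalar variants index the table directly (vt1 = t[vx1]), while the SSSE3/AVX/AVX2/AVX-512 variants load the second 16-byte block of the table (t + 16) into a vector register so the lookup can be performed with in-register shuffles. A minimal scalar sketch of the lookup loop, simplified from the pattern shown above (not the actual XNNPACK kernel):

    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative scalar x8-lut loop: map each input byte through a
     * 256-entry table, two elements per iteration (simplified sketch). */
    static void lut_u8_x2(size_t n, const uint8_t* x, uint8_t* y,
                          const uint8_t t[256]) {
      for (; n >= 2; n -= 2) {
        const size_t vx0 = x[0];
        const size_t vx1 = x[1];
        x += 2;
        const uint32_t vt0 = (uint32_t) t[vx0];
        const uint32_t vt1 = (uint32_t) t[vx1];   /* the vt1 seen above */
        y[0] = (uint8_t) vt0;
        y[1] = (uint8_t) vt1;
        y += 2;
      }
      if (n != 0) {
        y[0] = t[x[0]];
      }
    }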
|
| /external/XNNPACK/src/u8-vclamp/ |
| D | scalar-x4.c | 25 uint32_t vt1 = (uint32_t) x[1]; in xnn_u8_vclamp_ukernel__scalar_x4() local
|
| /external/XNNPACK/src/s8-vclamp/ |
| D | scalar-x4.c | 25 int32_t vt1 = (int32_t) x[1]; in xnn_s8_vclamp_ukernel__scalar_x4() local
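In both vclamp kernels, vt1 is the second of four elements widened to 32 bits so it can be clamped against the kernel's minimum and maximum before being narrowed back. A sketch of the unsigned variant; the vmin/vmax parameter names are placeholders, not the params struct the real kernels read from:

    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative u8 clamp, unrolled by 4 (simplified sketch). */
    static void clamp_u8_x4(size_t n, const uint8_t* x, uint8_t* y,
                            uint32_t vmin, uint32_t vmax) {
      for (; n >= 4; n -= 4) {
        uint32_t vt0 = (uint32_t) x[0];
        uint32_t vt1 = (uint32_t) x[1];   /* the vt1 seen above */
        uint32_t vt2 = (uint32_t) x[2];
        uint32_t vt3 = (uint32_t) x[3];
        x += 4;
        vt0 = vt0 < vmin ? vmin : vt0;  vt0 = vt0 > vmax ? vmax : vt0;
        vt1 = vt1 < vmin ? vmin : vt1;  vt1 = vt1 > vmax ? vmax : vt1;
        vt2 = vt2 < vmin ? vmin : vt2;  vt2 = vt2 > vmax ? vmax : vt2;
        vt3 = vt3 < vmin ? vmin : vt3;  vt3 = vt3 > vmax ? vmax : vt3;
        y[0] = (uint8_t) vt0;  y[1] = (uint8_t) vt1;
        y[2] = (uint8_t) vt2;  y[3] = (uint8_t) vt3;
        y += 4;
      }
      for (; n != 0; --n) {
        uint32_t vt = *x++;
        vt = vt < vmin ? vmin : vt;  vt = vt > vmax ? vmax : vt;
        *y++ = (uint8_t) vt;
      }
    }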
|
| /external/XNNPACK/src/x32-packx/ |
| D | x4-wasmsimd.c | 49 const v128_t vt1 = wasm_v32x4_shuffle(vx0, vx1, 2, 6, 3, 7); in xnn_x32_packx_ukernel_4x__wasmsimd() local
|
| D | x4-sse.c | 50 const __m128 vt1 = _mm_unpackhi_ps(vx0, vx1); in xnn_x32_packx_ukernel_4x__sse() local
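In the packx kernels, vt1 is an intermediate of a 4x4 32-bit transpose: both _mm_unpackhi_ps(vx0, vx1) and wasm_v32x4_shuffle(vx0, vx1, 2, 6, 3, 7) interleave the upper halves of two rows. A scalar equivalent of that interleave, for illustration only:

    #include <stdint.h>

    /* Scalar equivalent of _mm_unpackhi_ps(vx0, vx1) /
     * wasm_v32x4_shuffle(vx0, vx1, 2, 6, 3, 7): interleave the upper
     * halves of two 4-lane rows, as used when transposing a 4x4 tile. */
    static void unpackhi_u32x4(const uint32_t vx0[4], const uint32_t vx1[4],
                               uint32_t vt1[4]) {
      vt1[0] = vx0[2];  /* lane 2 of row 0 */
      vt1[1] = vx1[2];  /* lane 2 of row 1 (shuffle index 6) */
      vt1[2] = vx0[3];  /* lane 3 of row 0 */
      vt1[3] = vx1[3];  /* lane 3 of row 1 (shuffle index 7) */
    }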
|
| /external/XNNPACK/src/f32-raddstoreexpminusmax/gen/ |
| D | scalar-rr2-p5-x2.c | 70 float vt1 = vn1 * vminus_ln2_hi + vx1; in xnn_f32_raddstoreexpminusmax_ukernel__scalar_rr2_p5_x2() local
|
| D | scalar-rr2-p5-x2-acc2.c | 71 float vt1 = vn1 * vminus_ln2_hi + vx1; in xnn_f32_raddstoreexpminusmax_ukernel__scalar_rr2_p5_x2_acc2() local
|
| D | scalar-rr2-lut64-p2-x2-acc2.c | 88 float vt1 = vn1 * vminus_ln2_hi + vx1; in xnn_f32_raddstoreexpminusmax_ukernel__scalar_rr2_lut64_p2_x2_acc2() local
|
| D | scalar-rr2-lut64-p2-x2.c | 87 float vt1 = vn1 * vminus_ln2_hi + vx1; in xnn_f32_raddstoreexpminusmax_ukernel__scalar_rr2_lut64_p2_x2() local
|
| /external/XNNPACK/src/f32-vscaleextexp/gen/ |
| D | avx512f-p5-scalef-x32.c | 55 __m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vx1); in xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x32() local
|
| /external/XNNPACK/src/f32-vscaleexpminusmax/gen/ |
| D | avx512f-p5-scalef-x32.c | 58 __m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vx1); in xnn_f32_vscaleexpminusmax_ukernel__avx512f_p5_scalef_x32() local
|
| /external/XNNPACK/src/f32-velu/gen/ |
| D | velu-scalar-rr2-p6-x2.c | 58 float vt1 = vn1 * vminus_ln2_hi + vz1; in xnn_f32_velu_ukernel__scalar_rr2_p6_x2() local
|
| D | velu-wasm-rr2-p6-x2.c | 58 float vt1 = vn1 * vminus_ln2_hi + vz1; in xnn_f32_velu_ukernel__wasm_rr2_p6_x2() local
|
| /external/XNNPACK/src/f32-vsigmoid/gen/ |
| D | vsigmoid-scalar-rr2-p5-div-x2.c | 56 float vt1 = vn1 * vln2_hi + vz1; in xnn_f32_vsigmoid_ukernel__scalar_rr2_p5_div_x2() local
|
| D | vsigmoid-scalar-rr2-lut64-p2-div-x2.c | 61 float vt1 = vn1 * vln2_hi + vz1; in xnn_f32_vsigmoid_ukernel__scalar_rr2_lut64_p2_div_x2() local
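The vt1 lines in the f32-raddstoreexpminusmax, f32-vscaleextexp, f32-vscaleexpminusmax, f32-velu, and f32-vsigmoid groups above are all the same step of a Cody-Waite style ("rr2") range reduction: after n = round(x * log2(e)) is computed, the remainder t = x - n*ln2 is formed in two multiply-adds, first with the high bits of ln(2) (the line defining vt1) and then with the low bits, so the reduction stays accurate in single precision. A scalar sketch of the reduction for exp(x); the constants are a standard float split of ln(2) and log2(e), and the fixed-degree polynomial of the real kernels is replaced by expf for brevity:

    #include <math.h>

    /* Illustrative two-step ("rr2") argument reduction for exp(x).
     * Simplified sketch: the real kernels evaluate a polynomial in vt
     * instead of calling expf, and the ELU/sigmoid kernels apply the
     * same reduction to their own pre-transformed argument vz. */
    static float exp_rr2(float vx) {
      const float vlog2e        = 0x1.715476p+0f;   /* log2(e) rounded to float */
      const float vminus_ln2_hi = -0x1.62E400p-1f;  /* high part of -ln(2) */
      const float vminus_ln2_lo = -0x1.7F7D1Cp-20f; /* remaining low bits */

      const float vn = rintf(vx * vlog2e);          /* n = round(x / ln2) */

      float vt = vn * vminus_ln2_hi + vx;           /* the vt1 lines above */
      vt = vn * vminus_ln2_lo + vt;                 /* fold in the low bits */

      return ldexpf(expf(vt), (int) vn);            /* exp(x) = 2^n * exp(t) */
    }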
|
| /external/XNNPACK/src/f32-ibilinear/gen/ |
| D | scalar-c2.c | 61 const float vt1 = vtl1 + vtd1 * valphah; in xnn_f32_ibilinear_ukernel__scalar_c2() local
|