/external/XNNPACK/src/x32-packx/ |
D | x4-psimd.c | 44 const psimd_u32 vx3 = psimd_load_u32(x3); in xnn_x32_packx_ukernel_4x__psimd() local
D | x4-psimd.c | 74 const psimd_u32 vx3 = psimd_load1_u32(x3); in xnn_x32_packx_ukernel_4x__psimd() local
|
D | x4-sse.c | 46 const __m128 vx3 = _mm_loadu_ps(x3); in xnn_x32_packx_ukernel_4x__sse() local
D | x4-sse.c | 76 const __m128 vx3 = _mm_load_ss(x3); in xnn_x32_packx_ukernel_4x__sse() local
|
D | x4-scalar.c | 41 const float vx3 = *x3++; in xnn_x32_packx_ukernel_4x__scalar() local
|
D | x4-neon-st4.c | 41 const uint32x4_t vx3 = vld1q_u32(x3); x3 += 4; in xnn_x32_packx_ukernel_4x__neon_st4() local
|
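Every x32-packx hit above loads one element (or one vector) per row pointer x0..x3 before interleaving the four rows into the packed buffer. A minimal scalar sketch of that 4-row interleave follows; the function name, signature, and loop structure are simplified assumptions, not the actual xnn_x32_packx_ukernel_4x code.

#include <stddef.h>
#include <stdint.h>

// Hypothetical simplified 4-row pack: interleaves k elements from four
// row pointers into the output as [x0[i], x1[i], x2[i], x3[i]] groups.
static void packx_4x_scalar_sketch(
    size_t k,
    const uint32_t* x0, const uint32_t* x1,
    const uint32_t* x2, const uint32_t* x3,
    uint32_t* y)
{
  for (size_t i = 0; i < k; i++) {
    // Mirrors the vx0..vx3 locals in the listing: one load per row.
    const uint32_t vx0 = x0[i];
    const uint32_t vx1 = x1[i];
    const uint32_t vx2 = x2[i];
    const uint32_t vx3 = x3[i];
    y[0] = vx0;
    y[1] = vx1;
    y[2] = vx2;
    y[3] = vx3;
    y += 4;
  }
}
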
/external/XNNPACK/src/x8-lut/ |
D | scalar.c | 26 const size_t vx3 = x[3]; in xnn_x8_lut_ukernel__scalar() local
|
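The x8-lut scalar hit reads the input bytes into size_t indices (vx0..vx3) before the table lookups. A hedged sketch of that index-then-lookup pattern; the signature and remainder handling are assumptions, not the real xnn_x8_lut_ukernel__scalar.

#include <stddef.h>
#include <stdint.h>

// Hypothetical simplified byte LUT kernel: y[i] = t[x[i]], unrolled by 4
// in the spirit of the vx0..vx3 locals above.
static void x8_lut_scalar_sketch(size_t n, const uint8_t* x, const uint8_t* t, uint8_t* y)
{
  for (; n >= 4; n -= 4) {
    const size_t vx0 = x[0];
    const size_t vx1 = x[1];
    const size_t vx2 = x[2];
    const size_t vx3 = x[3];
    x += 4;
    y[0] = t[vx0];
    y[1] = t[vx1];
    y[2] = t[vx2];
    y[3] = t[vx3];
    y += 4;
  }
  while (n-- != 0) {
    *y++ = t[*x++];
  }
}
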
/external/XNNPACK/src/f32-rmax/ |
D | sse.c | 30 const __m128 vx3 = _mm_loadu_ps(x + 12); in xnn_f32_rmax_ukernel__sse() local
|
D | avx.c | 29 const __m256 vx3 = _mm256_loadu_ps(x + 24); in xnn_f32_rmax_ukernel__avx() local
|
D | scalar.c | 28 const float vx3 = x[3]; in xnn_f32_rmax_ukernel__scalar() local
|
D | psimd.c | 30 const psimd_f32 vx3 = psimd_load_f32(x + 12); in xnn_f32_rmax_ukernel__psimd() local
|
D | avx512f.c | 30 const __m512 vx3 = _mm512_loadu_ps(x + 48); in xnn_f32_rmax_ukernel__avx512f() local
|
D | neon.c | 30 const float32x4_t vx3 = vld1q_f32(x); x += 4; in xnn_f32_rmax_ukernel__neon() local
|
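Each f32-rmax variant keeps four running maxima and folds in four freshly loaded vectors (vx0..vx3) per iteration before a final horizontal reduction. A minimal SSE sketch of that pattern, with a simplified signature and no remainder loop (assumptions, not the actual xnn_f32_rmax_ukernel__sse):

#include <stddef.h>
#include <xmmintrin.h>

// Hypothetical simplified running-max reduction; n is assumed to be a
// nonzero multiple of 16 so no remainder path is needed.
static float f32_rmax_sse_sketch(size_t n, const float* x)
{
  __m128 vmax0 = _mm_set1_ps(*x);
  __m128 vmax1 = vmax0, vmax2 = vmax0, vmax3 = vmax0;
  for (; n >= 16; n -= 16) {
    // Four loads per iteration, matching the vx0..vx3 locals in the listing.
    const __m128 vx0 = _mm_loadu_ps(x);
    const __m128 vx1 = _mm_loadu_ps(x + 4);
    const __m128 vx2 = _mm_loadu_ps(x + 8);
    const __m128 vx3 = _mm_loadu_ps(x + 12);
    x += 16;
    vmax0 = _mm_max_ps(vmax0, vx0);
    vmax1 = _mm_max_ps(vmax1, vx1);
    vmax2 = _mm_max_ps(vmax2, vx2);
    vmax3 = _mm_max_ps(vmax3, vx3);
  }
  // Fold the four accumulators, then reduce across the 4 lanes.
  __m128 vmax = _mm_max_ps(_mm_max_ps(vmax0, vmax1), _mm_max_ps(vmax2, vmax3));
  vmax = _mm_max_ps(vmax, _mm_movehl_ps(vmax, vmax));
  vmax = _mm_max_ss(vmax, _mm_shuffle_ps(vmax, vmax, _MM_SHUFFLE(1, 1, 1, 1)));
  float out;
  _mm_store_ss(&out, vmax);
  return out;
}
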
/external/XNNPACK/src/u8-clamp/ |
D | sse2.c | 30 const __m128i vx3 = _mm_loadu_si128((const __m128i*) x + 3); in xnn_u8_clamp_ukernel__sse2() local
|
D | neon.c | 31 const uint8x16_t vx3 = vld1q_u8(x); x += 16; in xnn_u8_clamp_ukernel__neon() local
|
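The u8-clamp hits load four 16-byte blocks (vx0..vx3) per iteration and clamp every byte to [output_min, output_max]. A hedged SSE2 sketch with hypothetical lo/hi parameters in place of the kernel's params struct, and n assumed to be a multiple of 64:

#include <stddef.h>
#include <stdint.h>
#include <emmintrin.h>

// Hypothetical simplified byte clamp: y[i] = min(max(x[i], lo), hi),
// unrolled by four 16-byte vectors as in the listing.
static void u8_clamp_sse2_sketch(size_t n, const uint8_t* x, uint8_t* y, uint8_t lo, uint8_t hi)
{
  const __m128i voutput_min = _mm_set1_epi8((char) lo);
  const __m128i voutput_max = _mm_set1_epi8((char) hi);
  for (; n >= 64; n -= 64) {
    const __m128i vx0 = _mm_loadu_si128((const __m128i*) x);
    const __m128i vx1 = _mm_loadu_si128((const __m128i*) x + 1);
    const __m128i vx2 = _mm_loadu_si128((const __m128i*) x + 2);
    const __m128i vx3 = _mm_loadu_si128((const __m128i*) x + 3);
    x += 64;
    // _mm_max_epu8 / _mm_min_epu8 operate on unsigned bytes, so the clamp
    // needs no sign fixups.
    const __m128i vy0 = _mm_min_epu8(_mm_max_epu8(vx0, voutput_min), voutput_max);
    const __m128i vy1 = _mm_min_epu8(_mm_max_epu8(vx1, voutput_min), voutput_max);
    const __m128i vy2 = _mm_min_epu8(_mm_max_epu8(vx2, voutput_min), voutput_max);
    const __m128i vy3 = _mm_min_epu8(_mm_max_epu8(vx3, voutput_min), voutput_max);
    _mm_storeu_si128((__m128i*) y, vy0);
    _mm_storeu_si128((__m128i*) y + 1, vy1);
    _mm_storeu_si128((__m128i*) y + 2, vy2);
    _mm_storeu_si128((__m128i*) y + 3, vy3);
    y += 64;
  }
}
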
/external/XNNPACK/src/f32-hswish/gen/ |
D | scalar-x4.c | 36 const float vx3 = x[3]; in xnn_f32_hswish_ukernel__scalar_x4() local
|
D | wasm-x4.c | 36 const float vx3 = x[3]; in xnn_f32_hswish_ukernel__wasm_x4() local
|
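The scalar hswish kernels read four inputs (vx0..vx3) and apply hswish(x) = x * min(max(x + 3, 0), 6) / 6 to each. A single-element sketch of that formula; the generated x4 kernels unroll it by four and take the constants from a params struct:

#include <math.h>

// Hypothetical single-element hswish; the listed x4 kernels apply this to
// vx0..vx3 per iteration.
static float hswish_scalar_sketch(float vx)
{
  const float vsixth = 0x1.555556p-3f;  // ~1/6
  const float vthree = 3.0f;
  const float vsix   = 6.0f;
  float vacc = vx + vthree;
  vacc = fmaxf(vacc, 0.0f);   // relu6(x + 3) ...
  vacc = fminf(vacc, vsix);
  return vx * vacc * vsixth;  // ... times x / 6
}
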
/external/XNNPACK/src/f32-vscale/ |
D | avx-unroll32.c | 28 const __m256 vx3 = _mm256_loadu_ps(x + 24); in xnn_f32_vscale_ukernel__avx_unroll32() local
|
D | avx512f-unroll64.c | 29 const __m512 vx3 = _mm512_loadu_ps(x + 48); in xnn_f32_vscale_ukernel__avx512f_unroll64() local
|
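f32-vscale is an elementwise multiply by one scalar, unrolled so that four wide vectors, including vx3, are in flight per iteration. A hedged AVX sketch of the unroll-by-32 variant with a simplified signature and no remainder path (assumptions, not the actual xnn_f32_vscale_ukernel__avx_unroll32):

#include <stddef.h>
#include <immintrin.h>

// Hypothetical simplified vscale: y[i] = x[i] * c, 32 floats per iteration;
// n is assumed to be a multiple of 32.
static void f32_vscale_avx_sketch(size_t n, const float* x, float* y, float c)
{
  const __m256 vc = _mm256_set1_ps(c);
  for (; n >= 32; n -= 32) {
    const __m256 vx0 = _mm256_loadu_ps(x);
    const __m256 vx1 = _mm256_loadu_ps(x + 8);
    const __m256 vx2 = _mm256_loadu_ps(x + 16);
    const __m256 vx3 = _mm256_loadu_ps(x + 24);
    x += 32;
    _mm256_storeu_ps(y,      _mm256_mul_ps(vx0, vc));
    _mm256_storeu_ps(y + 8,  _mm256_mul_ps(vx1, vc));
    _mm256_storeu_ps(y + 16, _mm256_mul_ps(vx2, vc));
    _mm256_storeu_ps(y + 24, _mm256_mul_ps(vx3, vc));
    y += 32;
  }
}
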
/external/XNNPACK/src/f32-raddstoreexpminusmax/gen/ |
D | scalar-p5-x4.c | 54 const float vx3 = vi3 - vi_max; in xnn_f32_raddstoreexpminusmax_ukernel__scalar_p5_x4() local
|
D | scalar-p5-x4-acc2.c | 55 const float vx3 = vi3 - vi_max; in xnn_f32_raddstoreexpminusmax_ukernel__scalar_p5_x4_acc2() local
|
D | scalar-p5-x4-acc4.c | 57 const float vx3 = vi3 - vi_max; in xnn_f32_raddstoreexpminusmax_ukernel__scalar_p5_x4_acc4() local
|
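All three scalar raddstoreexpminusmax hits compute vx3 = vi3 - vi_max: the running maximum is subtracted so the exponentials cannot overflow, and the results are both stored and summed. A scalar sketch of that subtract/exp/store/accumulate flow using libm expf in place of the generated degree-5 polynomial (signature simplified, not the actual kernel):

#include <stddef.h>
#include <math.h>

// Hypothetical simplified raddstoreexpminusmax: writes exp(input[i] - max)
// to output and returns the sum of the stored values.
static float raddstoreexpminusmax_scalar_sketch(
    size_t n, const float* input, float* output, float vi_max)
{
  float vacc = 0.0f;
  for (size_t i = 0; i < n; i++) {
    // Same shift as "vx3 = vi3 - vi_max" in the listing: subtracting the
    // maximum keeps expf() away from overflow.
    const float vx = input[i] - vi_max;
    const float vf = expf(vx);
    output[i] = vf;
    vacc += vf;
  }
  return vacc;
}
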
/external/XNNPACK/src/f32-vscaleextexp/gen/ |
D | avx512f-p5-scalef-x64.c | 47 const __m512 vx3 = _mm512_loadu_ps(x + 48); in xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x64() local
|
D | avx2-p5-x32.c | 53 const __m256 vx3 = _mm256_loadu_ps(x + 24); in xnn_f32_vscaleextexp_ukernel__avx2_p5_x32() local
|
/external/XNNPACK/src/f32-vscaleexpminusmax/gen/ |
D | avx512f-p5-scalef-x64.c | 53 const __m512 vx3 = _mm512_sub_ps(vi3, vi_max); in xnn_f32_vscaleexpminusmax_ukernel__avx512f_p5_scalef_x64() local
|
D | avx512f-p5-scalef-x80.c | 54 const __m512 vx3 = _mm512_sub_ps(vi3, vi_max); in xnn_f32_vscaleexpminusmax_ukernel__avx512f_p5_scalef_x80() local
|
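The avx512f-p5-scalef vscaleexpminusmax kernels do the same max subtraction in vector form (vx3 = _mm512_sub_ps(vi3, vi_max)) and reconstruct exp(x) as poly(r) * 2^n via _mm512_scalef_ps. A hedged single-vector sketch of that idea; the plain Taylor coefficients below are a placeholder for the kernel's tuned degree-5 polynomial, and the function is an illustration, not the generated code:

#include <immintrin.h>

// Hypothetical single-vector "scale * exp(x - max)" in the scalef style:
// x is reduced as x = n*ln2 + r, exp(r) is approximated by a polynomial,
// and 2^n is applied exactly with _mm512_scalef_ps.
static __m512 scaleexpminusmax_avx512_sketch(__m512 vi, __m512 vi_max, __m512 vscale)
{
  const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);  // log2(e)
  const __m512 vln2   = _mm512_set1_ps(0x1.62E43p-1f);   // ln(2)
  // Taylor coefficients of exp(r) around 0 (placeholder for the tuned set).
  const __m512 vc5  = _mm512_set1_ps(1.0f / 120.0f);
  const __m512 vc4  = _mm512_set1_ps(1.0f / 24.0f);
  const __m512 vc3  = _mm512_set1_ps(1.0f / 6.0f);
  const __m512 vc2  = _mm512_set1_ps(0.5f);
  const __m512 vone = _mm512_set1_ps(1.0f);

  const __m512 vx = _mm512_sub_ps(vi, vi_max);                            // as in the listing
  const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);   // n = round(x / ln2)
  const __m512 vt = _mm512_fnmadd_ps(vn, vln2, vx);                       // r = x - n*ln2

  __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);   // Horner evaluation of exp(r)
  vp = _mm512_fmadd_ps(vp, vt, vc3);
  vp = _mm512_fmadd_ps(vp, vt, vc2);
  vp = _mm512_fmadd_ps(vp, vt, vone);
  vp = _mm512_fmadd_ps(vp, vt, vone);

  const __m512 vf = _mm512_scalef_ps(vp, vn);  // exp(x) = exp(r) * 2^n
  return _mm512_mul_ps(vf, vscale);
}
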
/external/XNNPACK/src/f32-sigmoid/gen/ |
D | scalar-p5-div-x4.c | 47 const float vx3 = x[3]; in xnn_f32_sigmoid_ukernel__scalar_p5_div_x4() local
|
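The scalar sigmoid kernel loads x[0..3] into vx0..vx3 and evaluates sigmoid with an exp followed by a division (the "div" in the file name). A hedged single-element sketch using libm expf instead of the generated p5 polynomial:

#include <math.h>

// Hypothetical single-element sigmoid in the exp-then-divide style suggested
// by the scalar-p5-div kernel name: for x <= 0, sigmoid(x) = e / (e + 1)
// with e = exp(x); positive inputs use symmetry to avoid overflow.
static float sigmoid_scalar_div_sketch(float vx)
{
  const float vz = fabsf(vx);
  const float ve = expf(-vz);    // exp of the non-positive magnitude
  float vf = ve / (ve + 1.0f);   // sigmoid(-|x|)
  if (vx > 0.0f) {
    vf = 1.0f - vf;              // mirror for positive inputs
  }
  return vf;
}
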