/external/XNNPACK/src/f32-gemm/gen/
D | 4x2-minmax-neonfma-lane-ld64.c | 84 const float32x2_t va3c0 = vdup_lane_f32(va3, 0); in xnn_f32_gemm_minmax_ukernel_4x2__neonfma_lane_ld64() local
D | 4x8-minmax-neonfma-dup-ld64.c | 83 const float32x4_t va3c0 = vdupq_lane_f32(va3, 0); in xnn_f32_gemm_minmax_ukernel_4x8__neonfma_dup_ld64() local
D | 6x2-minmax-neonfma-lane-ld64.c | 102 const float32x2_t va3c0 = vdup_lane_f32(va3, 0); in xnn_f32_gemm_minmax_ukernel_6x2__neonfma_lane_ld64() local
D | 4x8-minmax-neon-dup-ld64.c | 83 const float32x4_t va3c0 = vdupq_lane_f32(va3, 0); in xnn_f32_gemm_minmax_ukernel_4x8__neon_dup_ld64() local
D | 4x8-wasmrelaxedsimd-fma-splat.c | 84 const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0); in xnn_f32_gemm_ukernel_4x8__wasmrelaxedsimd_fma_splat() local
D | 4x8-wasmsimd-splat.c | 84 const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0); in xnn_f32_gemm_ukernel_4x8__wasmsimd_splat() local
D | 4x8-relu-wasmrelaxedsimd-fma-splat.c | 84 const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0); in xnn_f32_gemm_relu_ukernel_4x8__wasmrelaxedsimd_fma_splat() local
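
The f32 rows above all record the same one-lane broadcast: the generated kernel splats element 0 of the loaded va3 vector so it can be multiplied against a full row of packed weights. A minimal standalone sketch of the NEON form, assuming AArch64 with arm_neon.h (the helper name broadcast_lane0 is illustrative, not XNNPACK's):

    #include <arm_neon.h>

    // Illustrative sketch only: replicate lane 0 of a two-lane vector into
    // all four lanes, as the *-dup-ld64 kernels do when forming va3c0.
    static float32x4_t broadcast_lane0(float32x2_t va3) {
      const float32x4_t va3c0 = vdupq_lane_f32(va3, 0);
      return va3c0;
    }

The 4x2/6x2 *-lane-ld64 kernels use the narrower vdup_lane_f32 for their two-column tiles, and the wasm kernels get the same effect with wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0).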

/external/XNNPACK/src/f32-igemm/gen/
D | 4x2-minmax-neonfma-lane-ld64.c | 106 const float32x2_t va3c0 = vdup_lane_f32(va3, 0); in xnn_f32_igemm_minmax_ukernel_4x2__neonfma_lane_ld64() local
D | 4x8-minmax-neonfma-dup-ld64.c | 105 const float32x4_t va3c0 = vdupq_lane_f32(va3, 0); in xnn_f32_igemm_minmax_ukernel_4x8__neonfma_dup_ld64() local
D | 4x8-minmax-neon-dup-ld64.c | 105 const float32x4_t va3c0 = vdupq_lane_f32(va3, 0); in xnn_f32_igemm_minmax_ukernel_4x8__neon_dup_ld64() local
D | 6x2-minmax-neonfma-lane-ld64.c | 130 const float32x2_t va3c0 = vdup_lane_f32(va3, 0); in xnn_f32_igemm_minmax_ukernel_6x2__neonfma_lane_ld64() local

/external/XNNPACK/src/f32-gemm/gen-inc/
D | 4x8inc-minmax-neonfma-dup-ld64.c | 85 const float32x4_t va3c0 = vdupq_lane_f32(va3, 0); in xnn_f32_gemminc_minmax_ukernel_4x8__neonfma_dup_ld64() local
D | 4x8inc-minmax-neon-dup-ld64.c | 85 const float32x4_t va3c0 = vdupq_lane_f32(va3, 0); in xnn_f32_gemminc_minmax_ukernel_4x8__neon_dup_ld64() local

/external/XNNPACK/src/f16-gemm/gen/
D | 4x8-minmax-neonfp16arith-ld64.c | 86 const float16x8_t va3c0 = vdupq_lane_f16(va3, 0); in xnn_f16_gemm_minmax_ukernel_4x8__neonfp16arith_ld64() local
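
The f16 row is the half-precision form of the same splat; a sketch assuming a toolchain with FP16 vector support (e.g. -march=armv8.2-a+fp16):

    #include <arm_neon.h>

    // Illustrative sketch only: replicate lane 0 of four loaded halves
    // into all eight lanes of the multiplicand.
    static float16x8_t broadcast_f16_lane0(float16x4_t va3) {
      const float16x8_t va3c0 = vdupq_lane_f16(va3, 0);
      return va3c0;
    }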

/external/XNNPACK/src/qs8-gemm/gen/
D | 4x8c2-minmax-rndnu-neon-mull-ld4r.c | 91 const int8x8_t va3c0 = vreinterpret_s8_s16(va3.val[0]); in xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld4r() local
D | 4x8c2-minmax-rndnu-neon-mull-ld4r.c | 200 const int8x8_t va3c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 0)); in xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld4r() local
D | 4x8c4-minmax-rndnu-neon-mull-dup.c | 100 const int8x8_t va3c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va3), 0)); in xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4__neon_mull_dup() local
D | 4x8c4-minmax-rndnu-neon-mull-dup.c | 213 const int8x8_t va3c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va3), 0)); in xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4__neon_mull_dup() local
D | 4x8c2-minmax-rndnu-neon-mull-ld1r.c | 103 const int8x8_t va3c0 = vreinterpret_s8_s16(va30); in xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r() local
D | 4x8c2-minmax-rndnu-neon-mull-ld1r.c | 212 const int8x8_t va3c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 0)); in xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r() local
D | 4x8c2-minmax-rndnu-neon-mull-ld2r.c | 95 const int8x8_t va3c0 = vreinterpret_s8_s16(va30.val[0]); in xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r() local
D | 4x8c2-minmax-rndnu-neon-mull-ld2r.c | 204 const int8x8_t va3c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 0)); in xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r() local
D | 4x8c4-minmax-rndnu-neon-mull-ld2r.c | 100 const int8x8_t va3c0 = vreinterpret_s8_s32(va3.val[0]); in xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4__neon_mull_ld2r() local
D | 4x8c4-minmax-rndnu-neon-mull-ld2r.c | 213 const int8x8_t va3c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va3), 0)); in xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4__neon_mull_ld2r() local
D | 4x8c2-minmax-rndnu-neon-mull-dup.c | 91 const int8x8_t va3c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 0)); in xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_dup() local
D | 4x8c2-minmax-rndnu-neon-mull-dup.c | 200 const int8x8_t va3c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 0)); in xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_dup() local
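
The qs8 *-dup rows broadcast a group of int8 elements rather than a single lane: NEON has no group-wise duplicate for int8x8_t, so the c2 kernels reinterpret the eight int8 lanes as four int16 lanes, duplicate lane 0, and reinterpret back (the c4 kernels do the same through int32 lanes, four int8 elements at a time). A hedged sketch with an illustrative helper name:

    #include <arm_neon.h>

    // Illustrative sketch only: repeat the first pair of int8 elements four
    // times, matching the va3c0 expression in the c2 mull-dup kernels.
    static int8x8_t broadcast_s8_pair0(int8x8_t va3) {
      return vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 0));
    }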

/external/XNNPACK/src/qs8-igemm/gen/
D | 4x8c2-minmax-rndnu-neon-mull-ld4r.c | 108 const int8x8_t va3c0 = vreinterpret_s8_s16(va3.val[0]); in xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld4r() local
D | 4x8c2-minmax-rndnu-neon-mull-ld4r.c | 217 const int8x8_t va3c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 0)); in xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld4r() local
D | 4x8c2-minmax-rndnu-neon-mull-dup.c | 108 const int8x8_t va3c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 0)); in xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_dup() local
D | 4x8c2-minmax-rndnu-neon-mull-dup.c | 217 const int8x8_t va3c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 0)); in xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_dup() local
D | 4x8c2-minmax-rndnu-neon-mull-ld2r.c | 112 const int8x8_t va3c0 = vreinterpret_s8_s16(va30.val[0]); in xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r() local
D | 4x8c2-minmax-rndnu-neon-mull-ld2r.c | 221 const int8x8_t va3c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 0)); in xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r() local
D | 4x8c2-minmax-rndnu-neon-mull-ld1r.c | 120 const int8x8_t va3c0 = vreinterpret_s8_s16(va30); in xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r() local
D | 4x8c2-minmax-rndnu-neon-mull-ld1r.c | 229 const int8x8_t va3c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 0)); in xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r() local
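
The ld1r/ld2r/ld4r variants listed above instead get their broadcast operands (va30, va3.val[0]) from duplicating structured loads in the main loop, which is presumably why only their second hit shows the explicit vdup_lane expression. A sketch of the ld2r case under that assumption:

    #include <arm_neon.h>

    // Illustrative sketch only: vld2_dup_s16 broadcasts the first int16
    // (one int8 pair) into .val[0] and the second pair into .val[1]; the
    // reinterpret recovers the int8 view consumed by the c2 kernels.
    static int8x8_t load_broadcast_pair0(const int8_t* a3) {
      const int16x4x2_t va30 = vld2_dup_s16((const int16_t*) a3);
      return vreinterpret_s8_s16(va30.val[0]);
    }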

/external/XNNPACK/src/f16-gemm/gen-inc/
D | 4x8inc-minmax-neonfp16arith-ld64.c | 88 const float16x8_t va3c0 = vdupq_lane_f16(va3, 0); in xnn_f16_gemminc_minmax_ukernel_4x8__neonfp16arith_ld64() local