/external/XNNPACK/src/qs8-gemm/gen/

| File | Line | Definition of `va1c2` (local) | Function |
|------|------|-------------------------------|----------|
| 2x8c2-minmax-rndnu-neon-mull-ld2r.c | 95 | `const int8x8_t va1c2 = vreinterpret_s8_s16(va11.val[0]);` | xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r() |
| 2x8c2-minmax-rndnu-neon-mull-ld2r.c | 162 | `const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));` | xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r() |
| 2x8c2-minmax-rndnu-neon-mull-ld4r.c | 93 | `const int8x8_t va1c2 = vreinterpret_s8_s16(va1.val[2]);` | xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld4r() |
| 2x8c2-minmax-rndnu-neon-mull-ld4r.c | 160 | `const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));` | xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld4r() |
| 2x8c2-minmax-rndnu-neon-mull-dup.c | 93 | `const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));` | xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_dup() |
| 2x8c2-minmax-rndnu-neon-mull-dup.c | 160 | `const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));` | xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_dup() |
| 2x8c2-minmax-rndnu-neon-mull-ld1r.c | 99 | `const int8x8_t va1c2 = vreinterpret_s8_s16(va12);` | xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r() |
| 2x8c2-minmax-rndnu-neon-mull-ld1r.c | 166 | `const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));` | xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r() |
| 3x8c2-minmax-rndnu-neon-mull-dup.c | 112 | `const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));` | xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_dup() |
| 3x8c2-minmax-rndnu-neon-mull-dup.c | 200 | `const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));` | xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_dup() |
| 3x8c2-minmax-rndnu-neon-mull-ld2r.c | 115 | `const int8x8_t va1c2 = vreinterpret_s8_s16(va11.val[0]);` | xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r() |
| 3x8c2-minmax-rndnu-neon-mull-ld2r.c | 203 | `const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));` | xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r() |
| 3x8c2-minmax-rndnu-neon-mull-ld4r.c | 112 | `const int8x8_t va1c2 = vreinterpret_s8_s16(va1.val[2]);` | xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld4r() |
| 3x8c2-minmax-rndnu-neon-mull-ld4r.c | 200 | `const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));` | xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld4r() |
| 3x8c2-minmax-rndnu-neon-mull-ld1r.c | 121 | `const int8x8_t va1c2 = vreinterpret_s8_s16(va12);` | xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r() |
| 3x8c2-minmax-rndnu-neon-mull-ld1r.c | 209 | `const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));` | xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r() |
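Every NEON file above defines `va1c2` twice: the first definition sits in the main loop, produced by that variant's load scheme (dup-lane, ld1r, ld2r, or ld4r), while the second sits in the remainder path, which always falls back to the dup-lane form. In every case the value is the same: the third 16-bit pair of row-1 activations (bytes 4..5 of `va1`), broadcast across all eight int8 lanes. A minimal standalone sketch of the dup form (illustrative only, not XNNPACK source):

```c
#include <arm_neon.h>
#include <stdio.h>

int main(void) {
  // Eight int8 activations: four 16-bit pairs c0..c3.
  const int8_t a1[8] = {10, 11, 20, 21, 30, 31, 40, 41};
  const int8x8_t va1 = vld1_s8(a1);

  // Reinterpret as 4 x int16, duplicate lane 2 (the c2 pair, bytes 4..5)
  // across all lanes, then reinterpret back to 8 x int8.
  const int8x8_t va1c2 =
      vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));

  int8_t out[8];
  vst1_s8(out, va1c2);
  for (int i = 0; i < 8; i++) printf("%d ", out[i]);
  printf("\n");  // prints: 30 31 30 31 30 31 30 31
  return 0;
}
```

Building with any compiler that provides `arm_neon.h` should print `30 31 30 31 30 31 30 31`; broadcasting the pair this way lets one `vmull_s8` against packed weights consume two activation columns at once, which is what the `c2` in the kernel names refers to.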
|
/external/XNNPACK/src/qs8-igemm/gen/

| File | Line | Definition of `va1c2` (local) | Function |
|------|------|-------------------------------|----------|
| 2x8c2-minmax-rndnu-neon-mull-dup.c | 106 | `const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));` | xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_dup() |
| 2x8c2-minmax-rndnu-neon-mull-dup.c | 173 | `const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));` | xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_dup() |
| 2x8c2-minmax-rndnu-neon-mull-ld4r.c | 106 | `const int8x8_t va1c2 = vreinterpret_s8_s16(va1.val[2]);` | xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld4r() |
| 2x8c2-minmax-rndnu-neon-mull-ld4r.c | 173 | `const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));` | xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld4r() |
| 2x8c2-minmax-rndnu-neon-mull-ld1r.c | 112 | `const int8x8_t va1c2 = vreinterpret_s8_s16(va12);` | xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r() |
| 2x8c2-minmax-rndnu-neon-mull-ld1r.c | 179 | `const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));` | xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r() |
| 2x8c2-minmax-rndnu-neon-mull-ld2r.c | 108 | `const int8x8_t va1c2 = vreinterpret_s8_s16(va11.val[0]);` | xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r() |
| 2x8c2-minmax-rndnu-neon-mull-ld2r.c | 175 | `const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));` | xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r() |
| 3x8c2-minmax-rndnu-neon-mull-dup.c | 127 | `const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));` | xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_dup() |
| 3x8c2-minmax-rndnu-neon-mull-dup.c | 215 | `const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));` | xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_dup() |
| 3x8c2-minmax-rndnu-neon-mull-ld4r.c | 127 | `const int8x8_t va1c2 = vreinterpret_s8_s16(va1.val[2]);` | xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld4r() |
| 3x8c2-minmax-rndnu-neon-mull-ld4r.c | 215 | `const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));` | xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld4r() |
| 3x8c2-minmax-rndnu-neon-mull-ld2r.c | 130 | `const int8x8_t va1c2 = vreinterpret_s8_s16(va11.val[0]);` | xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r() |
| 3x8c2-minmax-rndnu-neon-mull-ld2r.c | 218 | `const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));` | xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r() |
| 3x8c2-minmax-rndnu-neon-mull-ld1r.c | 136 | `const int8x8_t va1c2 = vreinterpret_s8_s16(va12);` | xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r() |
| 3x8c2-minmax-rndnu-neon-mull-ld1r.c | 224 | `const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));` | xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r() |
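The igemm variants use the same broadcast schemes as their gemm counterparts; only the surrounding indirection logic differs. For the ld2r flavor, the `va11.val[0]` seen above comes from a load-and-replicate instruction that materializes two broadcasts (the c2 and c3 pairs) in a single load. A sketch of that load, under the assumption that `a1` points at row 1 of the activations (illustrative only, not XNNPACK source):

```c
#include <arm_neon.h>
#include <stdio.h>

int main(void) {
  const int8_t a1[8] = {10, 11, 20, 21, 30, 31, 40, 41};

  // VLD2 (single structure to all lanes) at bytes 4..7: the first 16-bit
  // element is replicated into val[0] (the c2 pair) and the second into
  // val[1] (the c3 pair). The pointer cast mirrors the reinterpreting
  // load the generated kernels perform.
  const int16x4x2_t va11 = vld2_dup_s16((const int16_t*)(a1 + 4));
  const int8x8_t va1c2 = vreinterpret_s8_s16(va11.val[0]);
  const int8x8_t va1c3 = vreinterpret_s8_s16(va11.val[1]);

  int8_t c2[8], c3[8];
  vst1_s8(c2, va1c2);
  vst1_s8(c3, va1c3);
  for (int i = 0; i < 8; i++) printf("%d ", c2[i]);
  printf("\n");  // prints: 30 31 30 31 30 31 30 31
  for (int i = 0; i < 8; i++) printf("%d ", c3[i]);
  printf("\n");  // prints: 40 41 40 41 40 41 40 41
  return 0;
}
```

The point of the ld1r/ld2r/ld4r variants is to fold the broadcast into the load itself instead of issuing separate dup-lane operations; which flavor wins depends on the target core, which is why the generator emits all of them.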
|
/external/XNNPACK/src/f32-gemm/gen/

| File | Line | Definition of `va1c2` (local) | Function |
|------|------|-------------------------------|----------|
| 3x8-wasmsimd-splat.c | 98 | `const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);` | xnn_f32_gemm_ukernel_3x8__wasmsimd_splat() |
| 3x8-wasmrelaxedsimd-fma-splat.c | 98 | `const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);` | xnn_f32_gemm_ukernel_3x8__wasmrelaxedsimd_fma_splat() |
| 3x8-relu-wasmrelaxedsimd-fma-splat.c | 98 | `const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);` | xnn_f32_gemm_relu_ukernel_3x8__wasmrelaxedsimd_fma_splat() |
| 3x8-relu-wasmsimd-splat.c | 98 | `const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);` | xnn_f32_gemm_relu_ukernel_3x8__wasmsimd_splat() |
| 3x8-minmax-wasmrelaxedsimd-fma-splat.c | 100 | `const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);` | xnn_f32_gemm_minmax_ukernel_3x8__wasmrelaxedsimd_fma_splat() |
|
/external/XNNPACK/src/f32-gemm/gen-inc/

| File | Line | Definition of `va1c2` (local) | Function |
|------|------|-------------------------------|----------|
| 3x8inc-minmax-wasmsimd-arm-splat.c | 102 | `const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);` | xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_arm_splat() |
| 3x8inc-minmax-wasmrelaxedsimd-splat.c | 102 | `const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);` | xnn_f32_gemminc_minmax_ukernel_3x8__wasmrelaxedsimd_splat() |
|
/external/XNNPACK/src/f32-igemm/gen/

| File | Line | Definition of `va1c2` (local) | Function |
|------|------|-------------------------------|----------|
| 3x8-wasmsimd-splat.c | 117 | `const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);` | xnn_f32_igemm_ukernel_3x8__wasmsimd_splat() |
| 3x8-wasmrelaxedsimd-fma-splat.c | 117 | `const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);` | xnn_f32_igemm_ukernel_3x8__wasmrelaxedsimd_fma_splat() |
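All of the f32 hits are one and the same pattern: the WebAssembly SIMD splat kernels broadcast lane 2 of `va1` (the third float of row 1) to all four lanes with a self-shuffle, which is what the `splat` suffix in the kernel names refers to. A standalone sketch (illustrative only; build with a WebAssembly toolchain, e.g. `clang --target=wasm32-wasi -msimd128`):

```c
#include <stdio.h>
#include <wasm_simd128.h>

int main(void) {
  const float a1[4] = {0.0f, 1.0f, 2.0f, 3.0f};
  const v128_t va1 = wasm_v128_load(a1);

  // Self-shuffle selecting lane 2 of va1 for every output lane.
  const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);

  float out[4];
  wasm_v128_store(out, va1c2);
  for (int i = 0; i < 4; i++) printf("%g ", out[i]);
  printf("\n");  // prints: 2 2 2 2
  return 0;
}
```

The relaxed-SIMD and fma variants in the table differ only in the multiply-accumulate that consumes `va1c2`; the broadcast itself is identical across all of them, and the lane indices to `wasm_v32x4_shuffle` must be compile-time constants, which is why the kernels spell out `2, 2, 2, 2` per column.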
|