
Searched defs:va3c2 (Results 1 – 25 of 125) sorted by relevance


/external/XNNPACK/src/f16-gemm/gen/
4x8-minmax-neonfp16arith-ld64.c:122  const float16x8_t va3c2 = vdupq_lane_f16(va3, 2);  in xnn_f16_gemm_minmax_ukernel_4x8__neonfp16arith_ld64() local
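
All of the f16 hits use the same lane-splat idiom: four half-precision activations sit in a float16x4_t, and vdupq_lane_f16 broadcasts lane 2 across a float16x8_t so it can be fused-multiply-added against eight weights at once. The sketch below shows that step in isolation; the function and array names are hypothetical (not from XNNPACK), and it assumes an AArch64 toolchain with ARMv8.2-A FP16 arithmetic (e.g. -march=armv8.2-a+fp16).

#include <arm_neon.h>

// Sketch of the lane-splat FMA step from the f16 GEMM kernels above.
// a: 4 fp16 activations for one row, w: 8 fp16 weights for one column group,
// acc: 8 fp16 partial sums. Names are illustrative, not from XNNPACK.
void f16_splat_fma_step(const float16_t a[4], const float16_t w[8], float16_t acc[8]) {
  const float16x4_t va3 = vld1_f16(a);               // load 4 fp16 activations
  const float16x8_t vb  = vld1q_f16(w);              // load 8 fp16 weights
  float16x8_t vacc      = vld1q_f16(acc);            // load partial sums

  const float16x8_t va3c2 = vdupq_lane_f16(va3, 2);  // broadcast lane 2 (the hit above)
  vacc = vfmaq_f16(vacc, va3c2, vb);                 // acc += a[2] * w[0..7]

  vst1q_f16(acc, vacc);
}

The generated kernels repeat this for lanes 0, 1 and 3 of each activation row, which is why sibling values such as va3c0 and va3c1 appear alongside va3c2 in these files.
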
/external/XNNPACK/src/qs8-gemm/gen/
4x8c2-minmax-rndnu-neon-mull-ld1r.c:145  const int8x8_t va3c2 = vreinterpret_s8_s16(va32);  in xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r() local
4x8c2-minmax-rndnu-neon-mull-ld1r.c:262  const int8x8_t va3c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 2));  in xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r() local
4x8c2-minmax-rndnu-neon-mull-ld4r.c:133  const int8x8_t va3c2 = vreinterpret_s8_s16(va3.val[2]);  in xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld4r() local
4x8c2-minmax-rndnu-neon-mull-ld4r.c:250  const int8x8_t va3c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 2));  in xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld4r() local
4x8c2-minmax-rndnu-neon-mull-dup.c:133  const int8x8_t va3c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 2));  in xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_dup() local
4x8c2-minmax-rndnu-neon-mull-dup.c:250  const int8x8_t va3c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 2));  in xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_dup() local
4x8c2-minmax-rndnu-neon-mull-ld2r.c:137  const int8x8_t va3c2 = vreinterpret_s8_s16(va31.val[0]);  in xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r() local
4x8c2-minmax-rndnu-neon-mull-ld2r.c:254  const int8x8_t va3c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 2));  in xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r() local
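
The qs8 variants reach va3c2 through different load strategies (ld1r, ld2r, ld4r, or a plain dup), but the value is the same in every case: the 8 signed activation bytes are viewed as four 16-bit lanes, the lane holding the third byte pair is duplicated, and the result is reinterpreted back to int8x8_t for a widening multiply. Below is a self-contained sketch of the dup form feeding a vmull_s8/vpadalq_s16 accumulation; the function and array names are illustrative, not the generated kernel, and it assumes an AArch64 NEON toolchain.

#include <arm_neon.h>

// Sketch of the c2 lane-splat used by the qs8 GEMM kernels above (dup variant).
// a: 8 int8 activations (4 pairs of 2), b: 8 int8 weights for the c2 group,
// acc: 4 int32 partial sums. Names are illustrative, not from XNNPACK.
void qs8_c2_splat_step(const int8_t a[8], const int8_t b[8], int32_t acc[4]) {
  const int8x8_t va3 = vld1_s8(a);
  const int8x8_t vb  = vld1_s8(b);
  int32x4_t vacc     = vld1q_s32(acc);

  // Duplicate 16-bit lane 2 (bytes 4..5) across all four lanes,
  // then view the result as int8 again -- this is the hit above.
  const int8x8_t va3c2 =
      vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 2));

  // Widening 8x8 -> 16-bit multiply, then pairwise-accumulate into 32-bit sums.
  const int16x8_t vprod = vmull_s8(va3c2, vb);
  vacc = vpadalq_s16(vacc, vprod);

  vst1q_s32(acc, vacc);
}
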
/external/XNNPACK/src/qs8-igemm/gen/
4x8c2-minmax-rndnu-neon-mull-ld4r.c:150  const int8x8_t va3c2 = vreinterpret_s8_s16(va3.val[2]);  in xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld4r() local
4x8c2-minmax-rndnu-neon-mull-ld4r.c:267  const int8x8_t va3c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 2));  in xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld4r() local
4x8c2-minmax-rndnu-neon-mull-ld2r.c:154  const int8x8_t va3c2 = vreinterpret_s8_s16(va31.val[0]);  in xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r() local
4x8c2-minmax-rndnu-neon-mull-ld2r.c:271  const int8x8_t va3c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 2));  in xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r() local
4x8c2-minmax-rndnu-neon-mull-ld1r.c:162  const int8x8_t va3c2 = vreinterpret_s8_s16(va32);  in xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r() local
4x8c2-minmax-rndnu-neon-mull-ld1r.c:279  const int8x8_t va3c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 2));  in xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r() local
4x8c2-minmax-rndnu-neon-mull-dup.c:150  const int8x8_t va3c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 2));  in xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_dup() local
4x8c2-minmax-rndnu-neon-mull-dup.c:267  const int8x8_t va3c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 2));  in xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_dup() local
/external/XNNPACK/src/f16-igemm/gen/
4x8-minmax-neonfp16arith-ld64.c:142  const float16x8_t va3c2 = vdupq_lane_f16(va3, 2);  in xnn_f16_igemm_minmax_ukernel_4x8__neonfp16arith_ld64() local
/external/XNNPACK/src/f32-gemm/gen/
4x8-relu-wasmrelaxedsimd-fma-splat.c:116  const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);  in xnn_f32_gemm_relu_ukernel_4x8__wasmrelaxedsimd_fma_splat() local
4x8-wasmsimd-splat.c:116  const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);  in xnn_f32_gemm_ukernel_4x8__wasmsimd_splat() local
4x8-wasmrelaxedsimd-fma-splat.c:116  const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);  in xnn_f32_gemm_ukernel_4x8__wasmrelaxedsimd_fma_splat() local
4x8-relu-wasmsimd-splat.c:116  const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);  in xnn_f32_gemm_relu_ukernel_4x8__wasmsimd_splat() local
4x8-minmax-wasmsimd-arm-splat.c:118  const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);  in xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_arm_splat() local
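
The f32 WebAssembly SIMD kernels express the same broadcast without a dedicated splat-lane instruction: wasm_v32x4_shuffle with indices 2, 2, 2, 2 replicates 32-bit lane 2 of va3 into every lane of a v128_t, and the result feeds a multiply-add against one column group of weights. A minimal sketch of that step follows; the function and array names are hypothetical, and it assumes a wasm32 target built with clang's wasm_simd128.h and -msimd128.

#include <wasm_simd128.h>

// Sketch of the c2 lane-splat used by the f32 wasmsimd GEMM kernels above.
// a: 4 float activations, b: 4 floats of one weight column group,
// acc: 4 float partial sums. Names are illustrative, not from XNNPACK.
void f32_wasm_c2_splat_step(const float a[4], const float b[4], float acc[4]) {
  const v128_t va3 = wasm_v128_load(a);
  const v128_t vb  = wasm_v128_load(b);
  v128_t vacc      = wasm_v128_load(acc);

  // Replicate 32-bit lane 2 of va3 into every lane -- this is the hit above.
  const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);

  // acc += a[2] * b[0..3]
  vacc = wasm_f32x4_add(vacc, wasm_f32x4_mul(va3c2, vb));

  wasm_v128_store(acc, vacc);
}
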
/external/XNNPACK/src/f16-gemm/gen-inc/
4x8inc-minmax-neonfp16arith-ld64.c:124  const float16x8_t va3c2 = vdupq_lane_f16(va3, 2);  in xnn_f16_gemminc_minmax_ukernel_4x8__neonfp16arith_ld64() local
/external/XNNPACK/src/f32-igemm/gen/
4x8-relu-wasmrelaxedsimd-fma-splat.c:138  const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);  in xnn_f32_igemm_relu_ukernel_4x8__wasmrelaxedsimd_fma_splat() local
4x8-minmax-wasmsimd-arm-splat.c:140  const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);  in xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_arm_splat() local
4x8-wasmrelaxedsimd-fma-splat.c:138  const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);  in xnn_f32_igemm_ukernel_4x8__wasmrelaxedsimd_fma_splat() local
4x8-relu-wasmsimd-splat.c:138  const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);  in xnn_f32_igemm_relu_ukernel_4x8__wasmsimd_splat() local
4x8-minmax-wasmrelaxedsimd-fma-splat.c:140  const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);  in xnn_f32_igemm_minmax_ukernel_4x8__wasmrelaxedsimd_fma_splat() local
4x8-wasmsimd-splat.c:138  const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);  in xnn_f32_igemm_ukernel_4x8__wasmsimd_splat() local
/external/XNNPACK/src/f32-gemm/gen-inc/
4x8inc-minmax-wasmrelaxedsimd-splat.c:120  const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);  in xnn_f32_gemminc_minmax_ukernel_4x8__wasmrelaxedsimd_splat() local
4x8inc-minmax-relaxedwasmsimd-splat.c:120  const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);  in xnn_f32_gemminc_minmax_ukernel_4x8__relaxedwasmsimd_splat() local
4x8inc-minmax-wasmsimd-x86-splat.c:120  const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);  in xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_x86_splat() local
