Home
last modified time | relevance | path

Searched refs: va0c13 (Results 1 – 24 of 24) sorted by relevance

/external/XNNPACK/src/qc8-gemm/gen/
D1x2c4-minmax-fp32-armsimd32.c52 const int16x2_t va0c13 = __sxtb16(__ror(va0, 8)); in xnn_qc8_gemm_minmax_fp32_ukernel_1x2c4__armsimd32() local
60 vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0); in xnn_qc8_gemm_minmax_fp32_ukernel_1x2c4__armsimd32()
67 vacc0x1 = __smlad(va0c13, vb1c13, vacc0x1); in xnn_qc8_gemm_minmax_fp32_ukernel_1x2c4__armsimd32()
D2x2c4-minmax-fp32-armsimd32.c61 const int16x2_t va0c13 = __sxtb16(__ror(va0, 8)); in xnn_qc8_gemm_minmax_fp32_ukernel_2x2c4__armsimd32() local
72 vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0); in xnn_qc8_gemm_minmax_fp32_ukernel_2x2c4__armsimd32()
81 vacc0x1 = __smlad(va0c13, vb1c13, vacc0x1); in xnn_qc8_gemm_minmax_fp32_ukernel_2x2c4__armsimd32()
D1x1c4-minmax-fp32-armsimd32.c51 const int16x2_t va0c13 = __sxtb16(__ror(va0, 8)); in xnn_qc8_gemm_minmax_fp32_ukernel_1x1c4__armsimd32() local
59 vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0); in xnn_qc8_gemm_minmax_fp32_ukernel_1x1c4__armsimd32()
D2x1c4-minmax-fp32-armsimd32.c59 const int16x2_t va0c13 = __sxtb16(__ror(va0, 8)); in xnn_qc8_gemm_minmax_fp32_ukernel_2x1c4__armsimd32() local
70 vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0); in xnn_qc8_gemm_minmax_fp32_ukernel_2x1c4__armsimd32()
/external/XNNPACK/src/qs8-gemm/gen/
D1x2c4-minmax-fp32-armsimd32.c53 const int16x2_t va0c13 = __sxtb16(__ror(va0, 8)); in xnn_qs8_gemm_minmax_fp32_ukernel_1x2c4__armsimd32() local
61 vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0); in xnn_qs8_gemm_minmax_fp32_ukernel_1x2c4__armsimd32()
68 vacc0x1 = __smlad(va0c13, vb1c13, vacc0x1); in xnn_qs8_gemm_minmax_fp32_ukernel_1x2c4__armsimd32()
D2x2c4-minmax-fp32-armsimd32.c62 const int16x2_t va0c13 = __sxtb16(__ror(va0, 8)); in xnn_qs8_gemm_minmax_fp32_ukernel_2x2c4__armsimd32() local
73 vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0); in xnn_qs8_gemm_minmax_fp32_ukernel_2x2c4__armsimd32()
82 vacc0x1 = __smlad(va0c13, vb1c13, vacc0x1); in xnn_qs8_gemm_minmax_fp32_ukernel_2x2c4__armsimd32()
D1x1c4-minmax-fp32-armsimd32.c52 const int16x2_t va0c13 = __sxtb16(__ror(va0, 8)); in xnn_qs8_gemm_minmax_fp32_ukernel_1x1c4__armsimd32() local
60 vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0); in xnn_qs8_gemm_minmax_fp32_ukernel_1x1c4__armsimd32()
D2x1c4-minmax-fp32-armsimd32.c60 const int16x2_t va0c13 = __sxtb16(__ror(va0, 8)); in xnn_qs8_gemm_minmax_fp32_ukernel_2x1c4__armsimd32() local
71 vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0); in xnn_qs8_gemm_minmax_fp32_ukernel_2x1c4__armsimd32()
/external/XNNPACK/src/qu8-gemm/gen/
D1x2c4-minmax-fp32-armsimd32.c54 const int16x2_t va0c13 = __uxtb16(__ror(va0, 8)); in xnn_qu8_gemm_minmax_fp32_ukernel_1x2c4__armsimd32() local
62 vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0); in xnn_qu8_gemm_minmax_fp32_ukernel_1x2c4__armsimd32()
69 vacc0x1 = __smlad(va0c13, vb1c13, vacc0x1); in xnn_qu8_gemm_minmax_fp32_ukernel_1x2c4__armsimd32()
D2x2c4-minmax-fp32-armsimd32.c63 const int16x2_t va0c13 = __uxtb16(__ror(va0, 8)); in xnn_qu8_gemm_minmax_fp32_ukernel_2x2c4__armsimd32() local
74 vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0); in xnn_qu8_gemm_minmax_fp32_ukernel_2x2c4__armsimd32()
83 vacc0x1 = __smlad(va0c13, vb1c13, vacc0x1); in xnn_qu8_gemm_minmax_fp32_ukernel_2x2c4__armsimd32()
D1x1c4-minmax-fp32-armsimd32.c53 const int16x2_t va0c13 = __uxtb16(__ror(va0, 8)); in xnn_qu8_gemm_minmax_fp32_ukernel_1x1c4__armsimd32() local
61 vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0); in xnn_qu8_gemm_minmax_fp32_ukernel_1x1c4__armsimd32()
D2x1c4-minmax-fp32-armsimd32.c61 const int16x2_t va0c13 = __uxtb16(__ror(va0, 8)); in xnn_qu8_gemm_minmax_fp32_ukernel_2x1c4__armsimd32() local
72 vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0); in xnn_qu8_gemm_minmax_fp32_ukernel_2x1c4__armsimd32()
/external/XNNPACK/src/qc8-igemm/gen/
D1x2c4-minmax-fp32-armsimd32.c67 const int16x2_t va0c13 = __sxtb16(__ror(va0, 8)); in xnn_qc8_igemm_minmax_fp32_ukernel_1x2c4__armsimd32() local
75 vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0); in xnn_qc8_igemm_minmax_fp32_ukernel_1x2c4__armsimd32()
82 vacc0x1 = __smlad(va0c13, vb1c13, vacc0x1); in xnn_qc8_igemm_minmax_fp32_ukernel_1x2c4__armsimd32()
D2x2c4-minmax-fp32-armsimd32.c79 const int16x2_t va0c13 = __sxtb16(__ror(va0, 8)); in xnn_qc8_igemm_minmax_fp32_ukernel_2x2c4__armsimd32() local
90 vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0); in xnn_qc8_igemm_minmax_fp32_ukernel_2x2c4__armsimd32()
99 vacc0x1 = __smlad(va0c13, vb1c13, vacc0x1); in xnn_qc8_igemm_minmax_fp32_ukernel_2x2c4__armsimd32()
D1x1c4-minmax-fp32-armsimd32.c66 const int16x2_t va0c13 = __sxtb16(__ror(va0, 8)); in xnn_qc8_igemm_minmax_fp32_ukernel_1x1c4__armsimd32() local
74 vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0); in xnn_qc8_igemm_minmax_fp32_ukernel_1x1c4__armsimd32()
D2x1c4-minmax-fp32-armsimd32.c77 const int16x2_t va0c13 = __sxtb16(__ror(va0, 8)); in xnn_qc8_igemm_minmax_fp32_ukernel_2x1c4__armsimd32() local
88 vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0); in xnn_qc8_igemm_minmax_fp32_ukernel_2x1c4__armsimd32()
/external/XNNPACK/src/qs8-igemm/gen/
D1x2c4-minmax-fp32-armsimd32.c68 const int16x2_t va0c13 = __sxtb16(__ror(va0, 8)); in xnn_qs8_igemm_minmax_fp32_ukernel_1x2c4__armsimd32() local
76 vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0); in xnn_qs8_igemm_minmax_fp32_ukernel_1x2c4__armsimd32()
83 vacc0x1 = __smlad(va0c13, vb1c13, vacc0x1); in xnn_qs8_igemm_minmax_fp32_ukernel_1x2c4__armsimd32()
D2x2c4-minmax-fp32-armsimd32.c80 const int16x2_t va0c13 = __sxtb16(__ror(va0, 8)); in xnn_qs8_igemm_minmax_fp32_ukernel_2x2c4__armsimd32() local
91 vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0); in xnn_qs8_igemm_minmax_fp32_ukernel_2x2c4__armsimd32()
100 vacc0x1 = __smlad(va0c13, vb1c13, vacc0x1); in xnn_qs8_igemm_minmax_fp32_ukernel_2x2c4__armsimd32()
D1x1c4-minmax-fp32-armsimd32.c67 const int16x2_t va0c13 = __sxtb16(__ror(va0, 8)); in xnn_qs8_igemm_minmax_fp32_ukernel_1x1c4__armsimd32() local
75 vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0); in xnn_qs8_igemm_minmax_fp32_ukernel_1x1c4__armsimd32()
D2x1c4-minmax-fp32-armsimd32.c78 const int16x2_t va0c13 = __sxtb16(__ror(va0, 8)); in xnn_qs8_igemm_minmax_fp32_ukernel_2x1c4__armsimd32() local
89 vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0); in xnn_qs8_igemm_minmax_fp32_ukernel_2x1c4__armsimd32()
/external/XNNPACK/src/qu8-igemm/gen/
D1x2c4-minmax-fp32-armsimd32.c69 const int16x2_t va0c13 = __uxtb16(__ror(va0, 8)); in xnn_qu8_igemm_minmax_fp32_ukernel_1x2c4__armsimd32() local
77 vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0); in xnn_qu8_igemm_minmax_fp32_ukernel_1x2c4__armsimd32()
84 vacc0x1 = __smlad(va0c13, vb1c13, vacc0x1); in xnn_qu8_igemm_minmax_fp32_ukernel_1x2c4__armsimd32()
D2x2c4-minmax-fp32-armsimd32.c81 const int16x2_t va0c13 = __uxtb16(__ror(va0, 8)); in xnn_qu8_igemm_minmax_fp32_ukernel_2x2c4__armsimd32() local
92 vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0); in xnn_qu8_igemm_minmax_fp32_ukernel_2x2c4__armsimd32()
101 vacc0x1 = __smlad(va0c13, vb1c13, vacc0x1); in xnn_qu8_igemm_minmax_fp32_ukernel_2x2c4__armsimd32()
D1x1c4-minmax-fp32-armsimd32.c68 const int16x2_t va0c13 = __uxtb16(__ror(va0, 8)); in xnn_qu8_igemm_minmax_fp32_ukernel_1x1c4__armsimd32() local
76 vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0); in xnn_qu8_igemm_minmax_fp32_ukernel_1x1c4__armsimd32()
D2x1c4-minmax-fp32-armsimd32.c79 const int16x2_t va0c13 = __uxtb16(__ror(va0, 8)); in xnn_qu8_igemm_minmax_fp32_ukernel_2x1c4__armsimd32() local
90 vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0); in xnn_qu8_igemm_minmax_fp32_ukernel_2x1c4__armsimd32()