/external/XNNPACK/src/qc8-gemm/gen/
D | 2x1c4-minmax-fp32-armsimd32.c | in xnn_qc8_gemm_minmax_fp32_ukernel_2x1c4__armsimd32():
      69  const int16x2_t vb0c13 = __sxtb16(__ror(vb0, 8));  (local)
      70  vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0);
      71  vacc1x0 = __smlad(va1c13, vb0c13, vacc1x0);
|
D | 2x2c4-minmax-fp32-armsimd32.c | in xnn_qc8_gemm_minmax_fp32_ukernel_2x2c4__armsimd32():
      71  const int16x2_t vb0c13 = __sxtb16(__ror(vb0, 8));  (local)
      72  vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0);
      73  vacc1x0 = __smlad(va1c13, vb0c13, vacc1x0);
|
D | 1x1c4-minmax-fp32-armsimd32.c | in xnn_qc8_gemm_minmax_fp32_ukernel_1x1c4__armsimd32():
      58  const int16x2_t vb0c13 = __sxtb16(__ror(vb0, 8));  (local)
      59  vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0);
|
D | 1x2c4-minmax-fp32-armsimd32.c | in xnn_qc8_gemm_minmax_fp32_ukernel_1x2c4__armsimd32():
      59  const int16x2_t vb0c13 = __sxtb16(__ror(vb0, 8));  (local)
      60  vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0);
|
/external/XNNPACK/src/qs8-gemm/gen/
D | 2x1c4-minmax-fp32-armsimd32.c | in xnn_qs8_gemm_minmax_fp32_ukernel_2x1c4__armsimd32():
      70  const int16x2_t vb0c13 = __sxtb16(__ror(vb0, 8));  (local)
      71  vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0);
      72  vacc1x0 = __smlad(va1c13, vb0c13, vacc1x0);
|
D | 2x2c4-minmax-fp32-armsimd32.c | in xnn_qs8_gemm_minmax_fp32_ukernel_2x2c4__armsimd32():
      72  const int16x2_t vb0c13 = __sxtb16(__ror(vb0, 8));  (local)
      73  vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0);
      74  vacc1x0 = __smlad(va1c13, vb0c13, vacc1x0);
|
D | 1x1c4-minmax-fp32-armsimd32.c | in xnn_qs8_gemm_minmax_fp32_ukernel_1x1c4__armsimd32():
      59  const int16x2_t vb0c13 = __sxtb16(__ror(vb0, 8));  (local)
      60  vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0);
|
D | 1x2c4-minmax-fp32-armsimd32.c | in xnn_qs8_gemm_minmax_fp32_ukernel_1x2c4__armsimd32():
      60  const int16x2_t vb0c13 = __sxtb16(__ror(vb0, 8));  (local)
      61  vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0);
|
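Every qc8 and qs8 hit above is the same Armv6 SIMD32 idiom: four consecutive int8 weights are loaded as a single 32-bit word vb0, __sxtb16(__ror(vb0, 8)) sign-extends bytes 1 and 3 into the two 16-bit lanes of vb0c13 (ACLE's int16x2_t is one 32-bit word holding two 16-bit lanes; the companion vb0c02, which these matches do not show, takes bytes 0 and 2 without the rotate), and __smlad multiplies the lanes of va0c13 and vb0c13 pairwise and adds both products into the 32-bit accumulator. The following host-runnable C sketch models those ACLE intrinsics in portable code; the model_* helpers and the sample values are illustrative only, not XNNPACK source.

    #include <stdint.h>
    #include <stdio.h>

    /* Portable model of the ACLE __ror intrinsic: rotate right (n in 1..31). */
    static uint32_t model_ror(uint32_t x, unsigned n) {
      return (x >> n) | (x << (32u - n));
    }

    /* Portable model of __sxtb16: sign-extend bytes 0 and 2 of x into two
       16-bit lanes. Applied after __ror(x, 8), it picks up bytes 1 and 3. */
    static uint32_t model_sxtb16(uint32_t x) {
      const uint16_t lo = (uint16_t) (int16_t) (int8_t) (x & 0xFF);
      const uint16_t hi = (uint16_t) (int16_t) (int8_t) ((x >> 16) & 0xFF);
      return (uint32_t) lo | ((uint32_t) hi << 16);
    }

    /* Portable model of __smlad: dual signed 16x16 multiply; both products
       are added to the running 32-bit accumulator. */
    static int32_t model_smlad(uint32_t a, uint32_t b, int32_t acc) {
      const int32_t p0 = (int16_t) (a & 0xFFFF) * (int16_t) (b & 0xFFFF);
      const int32_t p1 = (int16_t) (a >> 16) * (int16_t) (b >> 16);
      return acc + p0 + p1;
    }

    int main(void) {
      /* Four consecutive int8 weights, packed little-endian into one word
         (illustrative values, not from XNNPACK). */
      const int8_t b[4] = { 5, -3, 7, -9 };
      const uint32_t vb0 = (uint32_t) (uint8_t) b[0]
                         | ((uint32_t) (uint8_t) b[1] << 8)
                         | ((uint32_t) (uint8_t) b[2] << 16)
                         | ((uint32_t) (uint8_t) b[3] << 24);
      /* vb0c13 <- sign-extended bytes 1 and 3, as in the kernels above. */
      const uint32_t vb0c13 = model_sxtb16(model_ror(vb0, 8));
      /* Matching activation lanes a[1] and a[3], already widened to 16 bits. */
      const int8_t a[4] = { 2, 4, -6, 8 };
      const uint32_t va0c13 = (uint32_t) (uint16_t) (int16_t) a[1]
                            | ((uint32_t) (uint16_t) (int16_t) a[3] << 16);
      int32_t vacc0x0 = 0;
      vacc0x0 = model_smlad(va0c13, vb0c13, vacc0x0);
      /* Expected: a[1]*b[1] + a[3]*b[3] = 4*(-3) + 8*(-9) = -84. */
      printf("vacc0x0 = %d\n", (int) vacc0x0);
      return 0;
    }

Built for an Armv6+ target with the DSP extension, the same inputs fed through arm_acle.h's __ror/__sxtb16/__smlad should yield the identical accumulator value.
|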
/external/XNNPACK/src/qu8-gemm/gen/
D | 2x1c4-minmax-fp32-armsimd32.c | in xnn_qu8_gemm_minmax_fp32_ukernel_2x1c4__armsimd32():
      71  const int16x2_t vb0c13 = __uxtab16(vb_minus_zero_point, __ror(vb0, 8));  (local)
      72  vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0);
      73  vacc1x0 = __smlad(va1c13, vb0c13, vacc1x0);
|
D | 2x2c4-minmax-fp32-armsimd32.c | in xnn_qu8_gemm_minmax_fp32_ukernel_2x2c4__armsimd32():
      73  const int16x2_t vb0c13 = __uxtab16(vb_minus_zero_point, __ror(vb0, 8));  (local)
      74  vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0);
      75  vacc1x0 = __smlad(va1c13, vb0c13, vacc1x0);
|
D | 1x1c4-minmax-fp32-armsimd32.c | in xnn_qu8_gemm_minmax_fp32_ukernel_1x1c4__armsimd32():
      60  const int16x2_t vb0c13 = __uxtab16(vb_minus_zero_point, __ror(vb0, 8));  (local)
      61  vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0);
|
D | 1x2c4-minmax-fp32-armsimd32.c | in xnn_qu8_gemm_minmax_fp32_ukernel_1x2c4__armsimd32():
      61  const int16x2_t vb0c13 = __uxtab16(vb_minus_zero_point, __ror(vb0, 8));  (local)
      62  vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0);
|
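The qu8 kernels (both the gemm hits above and the igemm hits further down) differ from the signed paths only in the extraction step: the weights are unsigned, so __uxtab16(vb_minus_zero_point, __ror(vb0, 8)) zero-extends bytes 1 and 3 and, in the same instruction, adds each to a lane of vb_minus_zero_point, which by its name appears to carry the negated kernel zero point in both 16-bit halves. The zero-point subtraction is thus folded into the byte extraction, and the resulting signed lanes feed __smlad exactly as in the qs8/qc8 kernels. Below is a portable-C model of __uxtab16 under that assumption; the zero point of 128 and the byte values are made up for illustration.

    #include <stdint.h>
    #include <stdio.h>

    /* Portable model of the ACLE __ror intrinsic. */
    static uint32_t model_ror(uint32_t x, unsigned n) {
      return (x >> n) | (x << (32u - n));
    }

    /* Portable model of __uxtab16: zero-extend bytes 0 and 2 of x and add
       them lane-wise (mod 2^16) to the two 16-bit halves of acc. */
    static uint32_t model_uxtab16(uint32_t acc, uint32_t x) {
      const uint16_t lo = (uint16_t) ((acc & 0xFFFF) + (x & 0xFF));
      const uint16_t hi = (uint16_t) ((acc >> 16) + ((x >> 16) & 0xFF));
      return (uint32_t) lo | ((uint32_t) hi << 16);
    }

    int main(void) {
      /* Hypothetical kernel zero point of 128, negated and replicated into
         both 16-bit lanes (an assumption based on the parameter's name). */
      const uint32_t vb_minus_zero_point =
          (uint32_t) (uint16_t) -128 | ((uint32_t) (uint16_t) -128 << 16);
      /* Four consecutive uint8 weights packed little-endian (made-up values). */
      const uint8_t b[4] = { 130, 125, 140, 120 };
      const uint32_t vb0 = (uint32_t) b[0]
                         | ((uint32_t) b[1] << 8)
                         | ((uint32_t) b[2] << 16)
                         | ((uint32_t) b[3] << 24);
      /* One instruction extracts bytes 1 and 3 AND subtracts the zero point:
         vb0c13 = (b[1] - 128, b[3] - 128) as two signed 16-bit lanes. */
      const uint32_t vb0c13 = model_uxtab16(vb_minus_zero_point, model_ror(vb0, 8));
      /* Expected: -3 and -8; these lanes then feed __smlad as in the qs8 path. */
      printf("lanes: %d %d\n", (int16_t) (vb0c13 & 0xFFFF), (int16_t) (vb0c13 >> 16));
      return 0;
    }
|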
/external/XNNPACK/src/qs8-igemm/gen/
D | 2x1c4-minmax-fp32-armsimd32.c | in xnn_qs8_igemm_minmax_fp32_ukernel_2x1c4__armsimd32():
      88  const int16x2_t vb0c13 = __sxtb16(__ror(vb0, 8));  (local)
      89  vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0);
      90  vacc1x0 = __smlad(va1c13, vb0c13, vacc1x0);
|
D | 2x2c4-minmax-fp32-armsimd32.c | in xnn_qs8_igemm_minmax_fp32_ukernel_2x2c4__armsimd32():
      90  const int16x2_t vb0c13 = __sxtb16(__ror(vb0, 8));  (local)
      91  vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0);
      92  vacc1x0 = __smlad(va1c13, vb0c13, vacc1x0);
|
D | 1x1c4-minmax-fp32-armsimd32.c | in xnn_qs8_igemm_minmax_fp32_ukernel_1x1c4__armsimd32():
      74  const int16x2_t vb0c13 = __sxtb16(__ror(vb0, 8));  (local)
      75  vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0);
|
D | 1x2c4-minmax-fp32-armsimd32.c | in xnn_qs8_igemm_minmax_fp32_ukernel_1x2c4__armsimd32():
      75  const int16x2_t vb0c13 = __sxtb16(__ror(vb0, 8));  (local)
      76  vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0);
|
/external/XNNPACK/src/qc8-igemm/gen/
D | 2x1c4-minmax-fp32-armsimd32.c | in xnn_qc8_igemm_minmax_fp32_ukernel_2x1c4__armsimd32():
      87  const int16x2_t vb0c13 = __sxtb16(__ror(vb0, 8));  (local)
      88  vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0);
      89  vacc1x0 = __smlad(va1c13, vb0c13, vacc1x0);
|
D | 2x2c4-minmax-fp32-armsimd32.c | in xnn_qc8_igemm_minmax_fp32_ukernel_2x2c4__armsimd32():
      89  const int16x2_t vb0c13 = __sxtb16(__ror(vb0, 8));  (local)
      90  vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0);
      91  vacc1x0 = __smlad(va1c13, vb0c13, vacc1x0);
|
D | 1x1c4-minmax-fp32-armsimd32.c | in xnn_qc8_igemm_minmax_fp32_ukernel_1x1c4__armsimd32():
      73  const int16x2_t vb0c13 = __sxtb16(__ror(vb0, 8));  (local)
      74  vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0);
|
D | 1x2c4-minmax-fp32-armsimd32.c | in xnn_qc8_igemm_minmax_fp32_ukernel_1x2c4__armsimd32():
      74  const int16x2_t vb0c13 = __sxtb16(__ror(vb0, 8));  (local)
      75  vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0);
|
/external/XNNPACK/src/qu8-igemm/gen/
D | 2x1c4-minmax-fp32-armsimd32.c | in xnn_qu8_igemm_minmax_fp32_ukernel_2x1c4__armsimd32():
      89  const int16x2_t vb0c13 = __uxtab16(vb_minus_zero_point, __ror(vb0, 8));  (local)
      90  vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0);
      91  vacc1x0 = __smlad(va1c13, vb0c13, vacc1x0);
|
D | 2x2c4-minmax-fp32-armsimd32.c | in xnn_qu8_igemm_minmax_fp32_ukernel_2x2c4__armsimd32():
      91  const int16x2_t vb0c13 = __uxtab16(vb_minus_zero_point, __ror(vb0, 8));  (local)
      92  vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0);
      93  vacc1x0 = __smlad(va1c13, vb0c13, vacc1x0);
|
D | 1x1c4-minmax-fp32-armsimd32.c | in xnn_qu8_igemm_minmax_fp32_ukernel_1x1c4__armsimd32():
      75  const int16x2_t vb0c13 = __uxtab16(vb_minus_zero_point, __ror(vb0, 8));  (local)
      76  vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0);
|
D | 1x2c4-minmax-fp32-armsimd32.c | in xnn_qu8_igemm_minmax_fp32_ukernel_1x2c4__armsimd32():
      76  const int16x2_t vb0c13 = __uxtab16(vb_minus_zero_point, __ror(vb0, 8));  (local)
      77  vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0);
|