/external/XNNPACK/src/qc8-gemm/gen/

D | 1x2c4-minmax-fp32-armsimd32.c
    52  const int16x2_t va0c13 = __sxtb16(__ror(va0, 8));  in xnn_qc8_gemm_minmax_fp32_ukernel_1x2c4__armsimd32() local
    60  vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0);  in xnn_qc8_gemm_minmax_fp32_ukernel_1x2c4__armsimd32()
    67  vacc0x1 = __smlad(va0c13, vb1c13, vacc0x1);  in xnn_qc8_gemm_minmax_fp32_ukernel_1x2c4__armsimd32()

D | 2x2c4-minmax-fp32-armsimd32.c
    61  const int16x2_t va0c13 = __sxtb16(__ror(va0, 8));  in xnn_qc8_gemm_minmax_fp32_ukernel_2x2c4__armsimd32() local
    72  vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0);  in xnn_qc8_gemm_minmax_fp32_ukernel_2x2c4__armsimd32()
    81  vacc0x1 = __smlad(va0c13, vb1c13, vacc0x1);  in xnn_qc8_gemm_minmax_fp32_ukernel_2x2c4__armsimd32()

D | 1x1c4-minmax-fp32-armsimd32.c
    51  const int16x2_t va0c13 = __sxtb16(__ror(va0, 8));  in xnn_qc8_gemm_minmax_fp32_ukernel_1x1c4__armsimd32() local
    59  vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0);  in xnn_qc8_gemm_minmax_fp32_ukernel_1x1c4__armsimd32()

D | 2x1c4-minmax-fp32-armsimd32.c
    59  const int16x2_t va0c13 = __sxtb16(__ror(va0, 8));  in xnn_qc8_gemm_minmax_fp32_ukernel_2x1c4__armsimd32() local
    70  vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0);  in xnn_qc8_gemm_minmax_fp32_ukernel_2x1c4__armsimd32()
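Every hit above (and in the qs8/qu8 and igemm listings that follow) is one instance of the same byte-pair trick in these ARMv6 SIMD32 ("armsimd32") microkernels: va0 holds four packed 8-bit input channels in one 32-bit word, __ror(va0, 8) rotates the word right by one byte, and __sxtb16 sign-extends bytes 0 and 2 of the rotated word, so va0c13 carries channels 1 and 3 as a pair of int16 lanes (the even pair, va0c02 in the surrounding generated source, is extracted without the rotate; only the va0c13 hits are indexed here). Each __smlad then performs a dual signed 16x16 multiply and adds both products to a 32-bit accumulator, so two SMLADs reduce all four channels. The following is a minimal, portable C sketch of that dataflow; the emu_* helpers are hypothetical scalar stand-ins for the <arm_acle.h> intrinsics, not XNNPACK code:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical scalar stand-ins for the <arm_acle.h> intrinsics used in the
     * kernels above; the int16x2_t there is a 32-bit word holding two 16-bit
     * lanes, modeled here as uint32_t. */
    static uint32_t emu_ror(uint32_t x, unsigned n) {  /* __ror, valid for 0 < n < 32 */
      return (x >> n) | (x << (32u - n));
    }

    static uint32_t emu_sxtb16(uint32_t x) {  /* __sxtb16 (SXTB16) */
      /* Sign-extend bytes 0 and 2 into the low and high 16-bit lanes. */
      uint16_t lo = (uint16_t)(int16_t)(int8_t)(x & 0xFF);
      uint16_t hi = (uint16_t)(int16_t)(int8_t)((x >> 16) & 0xFF);
      return (uint32_t)lo | ((uint32_t)hi << 16);
    }

    static int32_t emu_smlad(uint32_t x, uint32_t y, int32_t acc) {  /* __smlad (SMLAD) */
      /* Dual signed 16x16 multiply; both products added to the accumulator. */
      return acc + (int16_t)(x & 0xFFFF) * (int16_t)(y & 0xFFFF)
                 + (int16_t)(x >> 16) * (int16_t)(y >> 16);
    }

    int main(void) {
      /* Four int8 activations and four int8 weights, packed LSB-first into
       * 32-bit words, the way the c4 (channels-of-4) kernels load them. */
      const int8_t a[4] = { 10, -3, 7, -128 };
      const int8_t b[4] = { 1, 2, 3, 4 };
      uint32_t va0 = 0, vb0 = 0;
      for (int i = 0; i < 4; i++) {
        va0 |= (uint32_t)(uint8_t)a[i] << (8 * i);
        vb0 |= (uint32_t)(uint8_t)b[i] << (8 * i);
      }

      /* Even pair (channels 0, 2) and odd pair (channels 1, 3). */
      const uint32_t va0c02 = emu_sxtb16(va0);
      const uint32_t va0c13 = emu_sxtb16(emu_ror(va0, 8));
      const uint32_t vb0c02 = emu_sxtb16(vb0);
      const uint32_t vb0c13 = emu_sxtb16(emu_ror(vb0, 8));

      /* Two dual MACs cover the whole 4-channel dot product. */
      int32_t vacc0x0 = 0;
      vacc0x0 = emu_smlad(va0c02, vb0c02, vacc0x0);
      vacc0x0 = emu_smlad(va0c13, vb0c13, vacc0x0);

      int32_t ref = 0;
      for (int i = 0; i < 4; i++) ref += (int32_t)a[i] * b[i];
      printf("smlad dot = %ld, reference = %ld\n", (long)vacc0x0, (long)ref);
      return 0;
    }

Both values print -487. Built for an ARMv6-DSP (or later) target with the real intrinsics, each emu_* call maps essentially one-to-one onto the ROR, SXTB16, and SMLAD instructions visible in the listing.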
/external/XNNPACK/src/qs8-gemm/gen/

D | 1x2c4-minmax-fp32-armsimd32.c
    53  const int16x2_t va0c13 = __sxtb16(__ror(va0, 8));  in xnn_qs8_gemm_minmax_fp32_ukernel_1x2c4__armsimd32() local
    61  vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0);  in xnn_qs8_gemm_minmax_fp32_ukernel_1x2c4__armsimd32()
    68  vacc0x1 = __smlad(va0c13, vb1c13, vacc0x1);  in xnn_qs8_gemm_minmax_fp32_ukernel_1x2c4__armsimd32()

D | 2x2c4-minmax-fp32-armsimd32.c
    62  const int16x2_t va0c13 = __sxtb16(__ror(va0, 8));  in xnn_qs8_gemm_minmax_fp32_ukernel_2x2c4__armsimd32() local
    73  vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0);  in xnn_qs8_gemm_minmax_fp32_ukernel_2x2c4__armsimd32()
    82  vacc0x1 = __smlad(va0c13, vb1c13, vacc0x1);  in xnn_qs8_gemm_minmax_fp32_ukernel_2x2c4__armsimd32()

D | 1x1c4-minmax-fp32-armsimd32.c
    52  const int16x2_t va0c13 = __sxtb16(__ror(va0, 8));  in xnn_qs8_gemm_minmax_fp32_ukernel_1x1c4__armsimd32() local
    60  vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0);  in xnn_qs8_gemm_minmax_fp32_ukernel_1x1c4__armsimd32()

D | 2x1c4-minmax-fp32-armsimd32.c
    60  const int16x2_t va0c13 = __sxtb16(__ror(va0, 8));  in xnn_qs8_gemm_minmax_fp32_ukernel_2x1c4__armsimd32() local
    71  vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0);  in xnn_qs8_gemm_minmax_fp32_ukernel_2x1c4__armsimd32()
/external/XNNPACK/src/qu8-gemm/gen/

D | 1x2c4-minmax-fp32-armsimd32.c
    54  const int16x2_t va0c13 = __uxtb16(__ror(va0, 8));  in xnn_qu8_gemm_minmax_fp32_ukernel_1x2c4__armsimd32() local
    62  vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0);  in xnn_qu8_gemm_minmax_fp32_ukernel_1x2c4__armsimd32()
    69  vacc0x1 = __smlad(va0c13, vb1c13, vacc0x1);  in xnn_qu8_gemm_minmax_fp32_ukernel_1x2c4__armsimd32()

D | 2x2c4-minmax-fp32-armsimd32.c
    63  const int16x2_t va0c13 = __uxtb16(__ror(va0, 8));  in xnn_qu8_gemm_minmax_fp32_ukernel_2x2c4__armsimd32() local
    74  vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0);  in xnn_qu8_gemm_minmax_fp32_ukernel_2x2c4__armsimd32()
    83  vacc0x1 = __smlad(va0c13, vb1c13, vacc0x1);  in xnn_qu8_gemm_minmax_fp32_ukernel_2x2c4__armsimd32()

D | 1x1c4-minmax-fp32-armsimd32.c
    53  const int16x2_t va0c13 = __uxtb16(__ror(va0, 8));  in xnn_qu8_gemm_minmax_fp32_ukernel_1x1c4__armsimd32() local
    61  vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0);  in xnn_qu8_gemm_minmax_fp32_ukernel_1x1c4__armsimd32()

D | 2x1c4-minmax-fp32-armsimd32.c
    61  const int16x2_t va0c13 = __uxtb16(__ror(va0, 8));  in xnn_qu8_gemm_minmax_fp32_ukernel_2x1c4__armsimd32() local
    72  vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0);  in xnn_qu8_gemm_minmax_fp32_ukernel_2x1c4__armsimd32()
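The qu8 rows differ from the signed qc8/qs8 kernels above in exactly one step: the activations are unsigned bytes, so __uxtb16 zero-extends the rotated byte pair where __sxtb16 sign-extends it. The __smlad accumulation is unchanged and stays exact, because a zero-extended byte (0..255) is still a valid non-negative int16 lane. In the same hypothetical emulation style as the sketch above:

    /* Stand-in for __uxtb16 (UXTB16): zero-extend bytes 0 and 2 into the two
     * 16-bit lanes, which reduces to masking off bytes 1 and 3. */
    static uint32_t emu_uxtb16(uint32_t x) {
      return x & 0x00FF00FFu;
    }

    /* For uint8 inputs the odd pair is then:
     *   va0c13 = emu_uxtb16(emu_ror(va0, 8));  // lanes hold channels 1 and 3, each 0..255
     */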
/external/XNNPACK/src/qc8-igemm/gen/

D | 1x2c4-minmax-fp32-armsimd32.c
    67  const int16x2_t va0c13 = __sxtb16(__ror(va0, 8));  in xnn_qc8_igemm_minmax_fp32_ukernel_1x2c4__armsimd32() local
    75  vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0);  in xnn_qc8_igemm_minmax_fp32_ukernel_1x2c4__armsimd32()
    82  vacc0x1 = __smlad(va0c13, vb1c13, vacc0x1);  in xnn_qc8_igemm_minmax_fp32_ukernel_1x2c4__armsimd32()

D | 2x2c4-minmax-fp32-armsimd32.c
    79  const int16x2_t va0c13 = __sxtb16(__ror(va0, 8));  in xnn_qc8_igemm_minmax_fp32_ukernel_2x2c4__armsimd32() local
    90  vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0);  in xnn_qc8_igemm_minmax_fp32_ukernel_2x2c4__armsimd32()
    99  vacc0x1 = __smlad(va0c13, vb1c13, vacc0x1);  in xnn_qc8_igemm_minmax_fp32_ukernel_2x2c4__armsimd32()

D | 1x1c4-minmax-fp32-armsimd32.c
    66  const int16x2_t va0c13 = __sxtb16(__ror(va0, 8));  in xnn_qc8_igemm_minmax_fp32_ukernel_1x1c4__armsimd32() local
    74  vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0);  in xnn_qc8_igemm_minmax_fp32_ukernel_1x1c4__armsimd32()

D | 2x1c4-minmax-fp32-armsimd32.c
    77  const int16x2_t va0c13 = __sxtb16(__ror(va0, 8));  in xnn_qc8_igemm_minmax_fp32_ukernel_2x1c4__armsimd32() local
    88  vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0);  in xnn_qc8_igemm_minmax_fp32_ukernel_2x1c4__armsimd32()
/external/XNNPACK/src/qs8-igemm/gen/

D | 1x2c4-minmax-fp32-armsimd32.c
    68  const int16x2_t va0c13 = __sxtb16(__ror(va0, 8));  in xnn_qs8_igemm_minmax_fp32_ukernel_1x2c4__armsimd32() local
    76  vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0);  in xnn_qs8_igemm_minmax_fp32_ukernel_1x2c4__armsimd32()
    83  vacc0x1 = __smlad(va0c13, vb1c13, vacc0x1);  in xnn_qs8_igemm_minmax_fp32_ukernel_1x2c4__armsimd32()

D | 2x2c4-minmax-fp32-armsimd32.c
    80  const int16x2_t va0c13 = __sxtb16(__ror(va0, 8));  in xnn_qs8_igemm_minmax_fp32_ukernel_2x2c4__armsimd32() local
    91  vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0);  in xnn_qs8_igemm_minmax_fp32_ukernel_2x2c4__armsimd32()
    100 vacc0x1 = __smlad(va0c13, vb1c13, vacc0x1);  in xnn_qs8_igemm_minmax_fp32_ukernel_2x2c4__armsimd32()

D | 1x1c4-minmax-fp32-armsimd32.c
    67  const int16x2_t va0c13 = __sxtb16(__ror(va0, 8));  in xnn_qs8_igemm_minmax_fp32_ukernel_1x1c4__armsimd32() local
    75  vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0);  in xnn_qs8_igemm_minmax_fp32_ukernel_1x1c4__armsimd32()

D | 2x1c4-minmax-fp32-armsimd32.c
    78  const int16x2_t va0c13 = __sxtb16(__ror(va0, 8));  in xnn_qs8_igemm_minmax_fp32_ukernel_2x1c4__armsimd32() local
    89  vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0);  in xnn_qs8_igemm_minmax_fp32_ukernel_2x1c4__armsimd32()
/external/XNNPACK/src/qu8-igemm/gen/

D | 1x2c4-minmax-fp32-armsimd32.c
    69  const int16x2_t va0c13 = __uxtb16(__ror(va0, 8));  in xnn_qu8_igemm_minmax_fp32_ukernel_1x2c4__armsimd32() local
    77  vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0);  in xnn_qu8_igemm_minmax_fp32_ukernel_1x2c4__armsimd32()
    84  vacc0x1 = __smlad(va0c13, vb1c13, vacc0x1);  in xnn_qu8_igemm_minmax_fp32_ukernel_1x2c4__armsimd32()

D | 2x2c4-minmax-fp32-armsimd32.c
    81  const int16x2_t va0c13 = __uxtb16(__ror(va0, 8));  in xnn_qu8_igemm_minmax_fp32_ukernel_2x2c4__armsimd32() local
    92  vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0);  in xnn_qu8_igemm_minmax_fp32_ukernel_2x2c4__armsimd32()
    101 vacc0x1 = __smlad(va0c13, vb1c13, vacc0x1);  in xnn_qu8_igemm_minmax_fp32_ukernel_2x2c4__armsimd32()

D | 1x1c4-minmax-fp32-armsimd32.c
    68  const int16x2_t va0c13 = __uxtb16(__ror(va0, 8));  in xnn_qu8_igemm_minmax_fp32_ukernel_1x1c4__armsimd32() local
    76  vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0);  in xnn_qu8_igemm_minmax_fp32_ukernel_1x1c4__armsimd32()

D | 2x1c4-minmax-fp32-armsimd32.c
    79  const int16x2_t va0c13 = __uxtb16(__ror(va0, 8));  in xnn_qu8_igemm_minmax_fp32_ukernel_2x1c4__armsimd32() local
    90  vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0);  in xnn_qu8_igemm_minmax_fp32_ukernel_2x1c4__armsimd32()