/external/XNNPACK/src/qs8-gemm/gen/ |
D | 3x8c8-minmax-rndnu-neon-mlal.c |
    140  const int8x8_t vb4x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c8__neon_mlal() local
    144  vprod0x4 = vmlal_s8(vprod0x4, vb4x1, va0x1);  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c8__neon_mlal()
    145  vprod1x4 = vmlal_s8(vprod1x4, vb4x1, va1x1);  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c8__neon_mlal()
    146  vprod2x4 = vmlal_s8(vprod2x4, vb4x1, va2x1);  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c8__neon_mlal()
|
D | 4x8c8-minmax-rndnu-neon-mlal.c |
    168  const int8x8_t vb4x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  in xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c8__neon_mlal() local
    173  vprod0x4 = vmlal_s8(vprod0x4, vb4x1, va0x1);  in xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c8__neon_mlal()
    174  vprod1x4 = vmlal_s8(vprod1x4, vb4x1, va1x1);  in xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c8__neon_mlal()
    175  vprod2x4 = vmlal_s8(vprod2x4, vb4x1, va2x1);  in xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c8__neon_mlal()
    176  vprod3x4 = vmlal_s8(vprod3x4, vb4x1, va3x1);  in xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c8__neon_mlal()
|
D | 2x8c8-minmax-fp32-neonv8-mlal.c |
    113  const int8x8_t vb4x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  in xnn_qs8_gemm_minmax_fp32_ukernel_2x8c8__neonv8_mlal() local
    116  vprod0x4 = vmlal_s8(vprod0x4, vb4x1, va0x1);  in xnn_qs8_gemm_minmax_fp32_ukernel_2x8c8__neonv8_mlal()
    117  vprod1x4 = vmlal_s8(vprod1x4, vb4x1, va1x1);  in xnn_qs8_gemm_minmax_fp32_ukernel_2x8c8__neonv8_mlal()
|
D | 2x8c8-minmax-fp32-neon-mlal.c |
    112  const int8x8_t vb4x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  in xnn_qs8_gemm_minmax_fp32_ukernel_2x8c8__neon_mlal() local
    115  vprod0x4 = vmlal_s8(vprod0x4, vb4x1, va0x1);  in xnn_qs8_gemm_minmax_fp32_ukernel_2x8c8__neon_mlal()
    116  vprod1x4 = vmlal_s8(vprod1x4, vb4x1, va1x1);  in xnn_qs8_gemm_minmax_fp32_ukernel_2x8c8__neon_mlal()
|
D | 2x8c8-minmax-rndnu-neon-mlal.c |
    112  const int8x8_t vb4x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  in xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c8__neon_mlal() local
    115  vprod0x4 = vmlal_s8(vprod0x4, vb4x1, va0x1);  in xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c8__neon_mlal()
    116  vprod1x4 = vmlal_s8(vprod1x4, vb4x1, va1x1);  in xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c8__neon_mlal()
|
D | 1x8c8-minmax-rndnu-neon-mlal.c |
    84  const int8x8_t vb4x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  in xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c8__neon_mlal() local
    86  vprod0x4 = vmlal_s8(vprod0x4, vb4x1, va0x1);  in xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c8__neon_mlal()
|
D | 1x8c8-minmax-fp32-neon-mlal.c |
    84  const int8x8_t vb4x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  in xnn_qs8_gemm_minmax_fp32_ukernel_1x8c8__neon_mlal() local
    86  vprod0x4 = vmlal_s8(vprod0x4, vb4x1, va0x1);  in xnn_qs8_gemm_minmax_fp32_ukernel_1x8c8__neon_mlal()
|
D | 1x8c8-minmax-fp32-neonv8-mlal.c |
    85  const int8x8_t vb4x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  in xnn_qs8_gemm_minmax_fp32_ukernel_1x8c8__neonv8_mlal() local
    87  vprod0x4 = vmlal_s8(vprod0x4, vb4x1, va0x1);  in xnn_qs8_gemm_minmax_fp32_ukernel_1x8c8__neonv8_mlal()
|
/external/XNNPACK/src/qs8-igemm/gen/ |
D | 4x8c8-minmax-rndnu-neon-mlal.c |
    185  const int8x8_t vb4x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  in xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c8__neon_mlal() local
    190  vprod0x4 = vmlal_s8(vprod0x4, vb4x1, va0x1);  in xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c8__neon_mlal()
    191  vprod1x4 = vmlal_s8(vprod1x4, vb4x1, va1x1);  in xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c8__neon_mlal()
    192  vprod2x4 = vmlal_s8(vprod2x4, vb4x1, va2x1);  in xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c8__neon_mlal()
    193  vprod3x4 = vmlal_s8(vprod3x4, vb4x1, va3x1);  in xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c8__neon_mlal()
|
D | 2x8c8-minmax-rndnu-neon-mlal.c |
    125  const int8x8_t vb4x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  in xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c8__neon_mlal() local
    128  vprod0x4 = vmlal_s8(vprod0x4, vb4x1, va0x1);  in xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c8__neon_mlal()
    129  vprod1x4 = vmlal_s8(vprod1x4, vb4x1, va1x1);  in xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c8__neon_mlal()
|
D | 2x8c8-minmax-fp32-neon-mlal.c |
    125  const int8x8_t vb4x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  in xnn_qs8_igemm_minmax_fp32_ukernel_2x8c8__neon_mlal() local
    128  vprod0x4 = vmlal_s8(vprod0x4, vb4x1, va0x1);  in xnn_qs8_igemm_minmax_fp32_ukernel_2x8c8__neon_mlal()
    129  vprod1x4 = vmlal_s8(vprod1x4, vb4x1, va1x1);  in xnn_qs8_igemm_minmax_fp32_ukernel_2x8c8__neon_mlal()
|
D | 2x8c8-minmax-fp32-neonv8-mlal.c |
    126  const int8x8_t vb4x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  in xnn_qs8_igemm_minmax_fp32_ukernel_2x8c8__neonv8_mlal() local
    129  vprod0x4 = vmlal_s8(vprod0x4, vb4x1, va0x1);  in xnn_qs8_igemm_minmax_fp32_ukernel_2x8c8__neonv8_mlal()
    130  vprod1x4 = vmlal_s8(vprod1x4, vb4x1, va1x1);  in xnn_qs8_igemm_minmax_fp32_ukernel_2x8c8__neonv8_mlal()
|
D | 3x8c8-minmax-rndnu-neon-mlal.c |
    155  const int8x8_t vb4x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  in xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c8__neon_mlal() local
    159  vprod0x4 = vmlal_s8(vprod0x4, vb4x1, va0x1);  in xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c8__neon_mlal()
    160  vprod1x4 = vmlal_s8(vprod1x4, vb4x1, va1x1);  in xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c8__neon_mlal()
    161  vprod2x4 = vmlal_s8(vprod2x4, vb4x1, va2x1);  in xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c8__neon_mlal()
|
D | 1x8c8-minmax-fp32-neonv8-mlal.c |
    96  const int8x8_t vb4x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  in xnn_qs8_igemm_minmax_fp32_ukernel_1x8c8__neonv8_mlal() local
    98  vprod0x4 = vmlal_s8(vprod0x4, vb4x1, va0x1);  in xnn_qs8_igemm_minmax_fp32_ukernel_1x8c8__neonv8_mlal()
|
D | 1x8c8-minmax-rndnu-neon-mlal.c |
    95  const int8x8_t vb4x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  in xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c8__neon_mlal() local
    97  vprod0x4 = vmlal_s8(vprod0x4, vb4x1, va0x1);  in xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c8__neon_mlal()
|
D | 1x8c8-minmax-fp32-neon-mlal.c |
    95  const int8x8_t vb4x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  in xnn_qs8_igemm_minmax_fp32_ukernel_1x8c8__neon_mlal() local
    97  vprod0x4 = vmlal_s8(vprod0x4, vb4x1, va0x1);  in xnn_qs8_igemm_minmax_fp32_ukernel_1x8c8__neon_mlal()
|
D | 3x16c8-minmax-rndnu-neon-mlal.c |
    187  const int8x8_t vb4x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c8__neon_mlal() local
    191  vprod0x4 = vmlal_s8(vprod0x4, vb4x1, va0x1);  in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c8__neon_mlal()
    192  vprod1x4 = vmlal_s8(vprod1x4, vb4x1, va1x1);  in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c8__neon_mlal()
    193  vprod2x4 = vmlal_s8(vprod2x4, vb4x1, va2x1);  in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c8__neon_mlal()
|
/external/XNNPACK/src/qc8-gemm/gen/ |
D | 2x8c8-minmax-fp32-neon-mlal.c |
    112  const int8x8_t vb4x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  in xnn_qc8_gemm_minmax_fp32_ukernel_2x8c8__neon_mlal() local
    115  vprod0x4 = vmlal_s8(vprod0x4, vb4x1, va0x1);  in xnn_qc8_gemm_minmax_fp32_ukernel_2x8c8__neon_mlal()
    116  vprod1x4 = vmlal_s8(vprod1x4, vb4x1, va1x1);  in xnn_qc8_gemm_minmax_fp32_ukernel_2x8c8__neon_mlal()
|
D | 2x8c8-minmax-fp32-neonv8-mlal.c |
    113  const int8x8_t vb4x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  in xnn_qc8_gemm_minmax_fp32_ukernel_2x8c8__neonv8_mlal() local
    116  vprod0x4 = vmlal_s8(vprod0x4, vb4x1, va0x1);  in xnn_qc8_gemm_minmax_fp32_ukernel_2x8c8__neonv8_mlal()
    117  vprod1x4 = vmlal_s8(vprod1x4, vb4x1, va1x1);  in xnn_qc8_gemm_minmax_fp32_ukernel_2x8c8__neonv8_mlal()
|
D | 1x8c8-minmax-fp32-neon-mlal.c |
    84  const int8x8_t vb4x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  in xnn_qc8_gemm_minmax_fp32_ukernel_1x8c8__neon_mlal() local
    86  vprod0x4 = vmlal_s8(vprod0x4, vb4x1, va0x1);  in xnn_qc8_gemm_minmax_fp32_ukernel_1x8c8__neon_mlal()
|
D | 1x8c8-minmax-fp32-neonv8-mlal.c |
    85  const int8x8_t vb4x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  in xnn_qc8_gemm_minmax_fp32_ukernel_1x8c8__neonv8_mlal() local
    87  vprod0x4 = vmlal_s8(vprod0x4, vb4x1, va0x1);  in xnn_qc8_gemm_minmax_fp32_ukernel_1x8c8__neonv8_mlal()
|
/external/XNNPACK/src/qc8-igemm/gen/ |
D | 2x8c8-minmax-fp32-neonv8-mlal.c |
    126  const int8x8_t vb4x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  in xnn_qc8_igemm_minmax_fp32_ukernel_2x8c8__neonv8_mlal() local
    129  vprod0x4 = vmlal_s8(vprod0x4, vb4x1, va0x1);  in xnn_qc8_igemm_minmax_fp32_ukernel_2x8c8__neonv8_mlal()
    130  vprod1x4 = vmlal_s8(vprod1x4, vb4x1, va1x1);  in xnn_qc8_igemm_minmax_fp32_ukernel_2x8c8__neonv8_mlal()
|
D | 2x8c8-minmax-fp32-neon-mlal.c |
    125  const int8x8_t vb4x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  in xnn_qc8_igemm_minmax_fp32_ukernel_2x8c8__neon_mlal() local
    128  vprod0x4 = vmlal_s8(vprod0x4, vb4x1, va0x1);  in xnn_qc8_igemm_minmax_fp32_ukernel_2x8c8__neon_mlal()
    129  vprod1x4 = vmlal_s8(vprod1x4, vb4x1, va1x1);  in xnn_qc8_igemm_minmax_fp32_ukernel_2x8c8__neon_mlal()
|
D | 1x8c8-minmax-fp32-neon-mlal.c |
    95  const int8x8_t vb4x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  in xnn_qc8_igemm_minmax_fp32_ukernel_1x8c8__neon_mlal() local
    97  vprod0x4 = vmlal_s8(vprod0x4, vb4x1, va0x1);  in xnn_qc8_igemm_minmax_fp32_ukernel_1x8c8__neon_mlal()
|
D | 1x8c8-minmax-fp32-neonv8-mlal.c |
    96  const int8x8_t vb4x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  in xnn_qc8_igemm_minmax_fp32_ukernel_1x8c8__neonv8_mlal() local
    98  vprod0x4 = vmlal_s8(vprod0x4, vb4x1, va0x1);  in xnn_qc8_igemm_minmax_fp32_ukernel_1x8c8__neonv8_mlal()
|
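Note: every hit above follows the same c8 MLAL pattern: the kernel loads a second 8-byte slice of the weight column (vb4x1) and folds it into the int16 partial products with vmlal_s8. The standalone sketch below illustrates that step under the assumption that, as in these kernels, the partial product is then widened into the int32 accumulator with vpadalq_s16; the helper name dot8_mlal and the va_x0/va_x1/vb_x0/vb_x1 parameters are illustrative and do not come from the XNNPACK sources.

#include <arm_neon.h>

// Illustrative sketch only (not taken from XNNPACK): accumulate one 16-byte
// (2 x 8-byte) slice of an int8 dot product the way the c8 MLAL kernels do.
static inline int32x4_t dot8_mlal(int32x4_t vacc,
                                  int8x8_t va_x0, int8x8_t va_x1,
                                  int8x8_t vb_x0, int8x8_t vb_x1) {
  int16x8_t vprod = vmull_s8(vb_x0, va_x0);  // first 8 int8 products, widened to int16
  vprod = vmlal_s8(vprod, vb_x1, va_x1);     // second 8 products into the same int16 lanes (the vb4x1 step)
  return vpadalq_s16(vacc, vprod);           // pairwise-add int16 lanes into the int32 accumulator
}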