/external/XNNPACK/src/qs8-igemm/gen/

D | 2x16c8-minmax-neon-mlal-padal.c
      179  int16x8_t vprod1x8 = vmull_s8(vb8x0, va1x0);  in xnn_qs8_igemm_minmax_ukernel_2x16c8__neon_mlal_padal() local
      181  vprod1x8 = vmlal_s8(vprod1x8, vb8x1, va1x1);  in xnn_qs8_igemm_minmax_ukernel_2x16c8__neon_mlal_padal()
      183  vacc1x8 = vpadalq_s16(vacc1x8, vprod1x8);  in xnn_qs8_igemm_minmax_ukernel_2x16c8__neon_mlal_padal()
      284  const int16x8_t vprod1x8 = vmull_s8(vb8, va1);  in xnn_qs8_igemm_minmax_ukernel_2x16c8__neon_mlal_padal() local
      286  vacc1x8 = vpadalq_s16(vacc1x8, vprod1x8);  in xnn_qs8_igemm_minmax_ukernel_2x16c8__neon_mlal_padal()

D | 2x16c16-minmax-neon-mlal-padal.c
      168  int16x8_t vprod1x8 = vmull_s8(vget_low_s8(vb8), vget_low_s8(va1));  in xnn_qs8_igemm_minmax_ukernel_2x16c16__neon_mlal_padal() local
      170  vprod1x8 = vmlal_s8(vprod1x8, vget_high_s8(vb8), vget_high_s8(va1));  in xnn_qs8_igemm_minmax_ukernel_2x16c16__neon_mlal_padal()
      172  vacc1x8 = vpadalq_s16(vacc1x8, vprod1x8);  in xnn_qs8_igemm_minmax_ukernel_2x16c16__neon_mlal_padal()

D | 3x16c8-minmax-neon-mlal-padal.c
      229  int16x8_t vprod1x8 = vmull_s8(vb8x0, va1x0);  in xnn_qs8_igemm_minmax_ukernel_3x16c8__neon_mlal_padal() local
      232  vprod1x8 = vmlal_s8(vprod1x8, vb8x1, va1x1);  in xnn_qs8_igemm_minmax_ukernel_3x16c8__neon_mlal_padal()
      235  vacc1x8 = vpadalq_s16(vacc1x8, vprod1x8);  in xnn_qs8_igemm_minmax_ukernel_3x16c8__neon_mlal_padal()
      375  const int16x8_t vprod1x8 = vmull_s8(vb8, va1);  in xnn_qs8_igemm_minmax_ukernel_3x16c8__neon_mlal_padal() local
      378  vacc1x8 = vpadalq_s16(vacc1x8, vprod1x8);  in xnn_qs8_igemm_minmax_ukernel_3x16c8__neon_mlal_padal()

D | 3x16c16-minmax-neon-mlal-padal.c
      217  int16x8_t vprod1x8 = vmull_s8(vget_low_s8(vb8), vget_low_s8(va1));  in xnn_qs8_igemm_minmax_ukernel_3x16c16__neon_mlal_padal() local
      220  vprod1x8 = vmlal_s8(vprod1x8, vget_high_s8(vb8), vget_high_s8(va1));  in xnn_qs8_igemm_minmax_ukernel_3x16c16__neon_mlal_padal()
      223  vacc1x8 = vpadalq_s16(vacc1x8, vprod1x8);  in xnn_qs8_igemm_minmax_ukernel_3x16c16__neon_mlal_padal()

D | 4x16c8-minmax-neon-mlal-padal.c
      279  int16x8_t vprod1x8 = vmull_s8(vb8x0, va1x0);  in xnn_qs8_igemm_minmax_ukernel_4x16c8__neon_mlal_padal() local
      283  vprod1x8 = vmlal_s8(vprod1x8, vb8x1, va1x1);  in xnn_qs8_igemm_minmax_ukernel_4x16c8__neon_mlal_padal()
      287  vacc1x8 = vpadalq_s16(vacc1x8, vprod1x8);  in xnn_qs8_igemm_minmax_ukernel_4x16c8__neon_mlal_padal()
      466  const int16x8_t vprod1x8 = vmull_s8(vb8, va1);  in xnn_qs8_igemm_minmax_ukernel_4x16c8__neon_mlal_padal() local
      470  vacc1x8 = vpadalq_s16(vacc1x8, vprod1x8);  in xnn_qs8_igemm_minmax_ukernel_4x16c8__neon_mlal_padal()

D | 2x16c8-minmax-neon-mull-padal.c
      145  const int16x8_t vprod1x8 = vmull_s8(vb8, va1);  in xnn_qs8_igemm_minmax_ukernel_2x16c8__neon_mull_padal() local
      147  vacc1x8 = vpadalq_s16(vacc1x8, vprod1x8);  in xnn_qs8_igemm_minmax_ukernel_2x16c8__neon_mull_padal()

D | 4x16c16-minmax-neon-mlal-padal.c
      266  int16x8_t vprod1x8 = vmull_s8(vget_low_s8(vb8), vget_low_s8(va1));  in xnn_qs8_igemm_minmax_ukernel_4x16c16__neon_mlal_padal() local
      270  vprod1x8 = vmlal_s8(vprod1x8, vget_high_s8(vb8), vget_high_s8(va1));  in xnn_qs8_igemm_minmax_ukernel_4x16c16__neon_mlal_padal()
      274  vacc1x8 = vpadalq_s16(vacc1x8, vprod1x8);  in xnn_qs8_igemm_minmax_ukernel_4x16c16__neon_mlal_padal()

D | 3x16c8-minmax-neon-mull-padal.c
      186  const int16x8_t vprod1x8 = vmull_s8(vb8, va1);  in xnn_qs8_igemm_minmax_ukernel_3x16c8__neon_mull_padal() local
      189  vacc1x8 = vpadalq_s16(vacc1x8, vprod1x8);  in xnn_qs8_igemm_minmax_ukernel_3x16c8__neon_mull_padal()

D | 4x16c8-minmax-neon-mull-padal.c
      227  const int16x8_t vprod1x8 = vmull_s8(vb8, va1);  in xnn_qs8_igemm_minmax_ukernel_4x16c8__neon_mull_padal() local
      231  vacc1x8 = vpadalq_s16(vacc1x8, vprod1x8);  in xnn_qs8_igemm_minmax_ukernel_4x16c8__neon_mull_padal()
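Every hit above follows the same widening multiply-accumulate idiom: vmull_s8 widens an int8x8_t product to int16x8_t, vmlal_s8 optionally folds a second int8 block into the same int16 register, and vpadalq_s16 pairwise-adds the int16 lanes into the int32x4_t accumulator (vacc1x8). The following is only a rough, self-contained sketch of that sequence as it appears in the c16 kernels; the function name dot16_s8 and the main() driver are illustrative, not XNNPACK symbols, and it compiles only for an ARM target with NEON.

#include <arm_neon.h>
#include <stdint.h>
#include <stdio.h>

// Illustrative (not XNNPACK): dot product of 16 signed 8-bit values using the
// vmull_s8 -> vmlal_s8 -> vpadalq_s16 sequence seen at the listed lines.
static int32_t dot16_s8(const int8_t a[16], const int8_t b[16]) {
  const int8x16_t va = vld1q_s8(a);
  const int8x16_t vb = vld1q_s8(b);

  // Widening multiply of the low 8 lanes: int8 x int8 -> int16.
  int16x8_t vprod = vmull_s8(vget_low_s8(vb), vget_low_s8(va));
  // Fused widening multiply-accumulate of the high 8 lanes.
  vprod = vmlal_s8(vprod, vget_high_s8(vb), vget_high_s8(va));

  // Pairwise add the eight int16 products into four int32 lanes ("padal").
  int32x4_t vacc = vdupq_n_s32(0);
  vacc = vpadalq_s16(vacc, vprod);

  // Horizontal reduction of the four partial sums.
  int32x2_t vsum = vadd_s32(vget_low_s32(vacc), vget_high_s32(vacc));
  vsum = vpadd_s32(vsum, vsum);
  return vget_lane_s32(vsum, 0);
}

int main(void) {
  int8_t a[16], b[16];
  int32_t ref = 0;
  for (int i = 0; i < 16; i++) {
    a[i] = (int8_t) (i - 8);
    b[i] = (int8_t) (3 * i - 20);
    ref += (int32_t) a[i] * (int32_t) b[i];
  }
  printf("neon=%d ref=%d\n", dot16_s8(a, b), ref);
  return 0;
}

The real kernels keep sixteen such int32x4_t accumulators live (one per output column group), which is why the accumulation stays in registers rather than reducing after every block.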
/external/XNNPACK/src/qs8-gemm/gen/

D | 2x16c8-minmax-neon-mlal-padal.c
      166  int16x8_t vprod1x8 = vmull_s8(vb8x0, va1x0);  in xnn_qs8_gemm_minmax_ukernel_2x16c8__neon_mlal_padal() local
      168  vprod1x8 = vmlal_s8(vprod1x8, vb8x1, va1x1);  in xnn_qs8_gemm_minmax_ukernel_2x16c8__neon_mlal_padal()
      170  vacc1x8 = vpadalq_s16(vacc1x8, vprod1x8);  in xnn_qs8_gemm_minmax_ukernel_2x16c8__neon_mlal_padal()
      271  const int16x8_t vprod1x8 = vmull_s8(vb8, va1);  in xnn_qs8_gemm_minmax_ukernel_2x16c8__neon_mlal_padal() local
      273  vacc1x8 = vpadalq_s16(vacc1x8, vprod1x8);  in xnn_qs8_gemm_minmax_ukernel_2x16c8__neon_mlal_padal()

D | 2x16c16-minmax-neon-mlal-padal.c
      155  int16x8_t vprod1x8 = vmull_s8(vget_low_s8(vb8), vget_low_s8(va1));  in xnn_qs8_gemm_minmax_ukernel_2x16c16__neon_mlal_padal() local
      157  vprod1x8 = vmlal_s8(vprod1x8, vget_high_s8(vb8), vget_high_s8(va1));  in xnn_qs8_gemm_minmax_ukernel_2x16c16__neon_mlal_padal()
      159  vacc1x8 = vpadalq_s16(vacc1x8, vprod1x8);  in xnn_qs8_gemm_minmax_ukernel_2x16c16__neon_mlal_padal()

D | 3x16c8-minmax-neon-mlal-padal.c
      214  int16x8_t vprod1x8 = vmull_s8(vb8x0, va1x0);  in xnn_qs8_gemm_minmax_ukernel_3x16c8__neon_mlal_padal() local
      217  vprod1x8 = vmlal_s8(vprod1x8, vb8x1, va1x1);  in xnn_qs8_gemm_minmax_ukernel_3x16c8__neon_mlal_padal()
      220  vacc1x8 = vpadalq_s16(vacc1x8, vprod1x8);  in xnn_qs8_gemm_minmax_ukernel_3x16c8__neon_mlal_padal()
      360  const int16x8_t vprod1x8 = vmull_s8(vb8, va1);  in xnn_qs8_gemm_minmax_ukernel_3x16c8__neon_mlal_padal() local
      363  vacc1x8 = vpadalq_s16(vacc1x8, vprod1x8);  in xnn_qs8_gemm_minmax_ukernel_3x16c8__neon_mlal_padal()

D | 3x16c16-minmax-neon-mlal-padal.c
      202  int16x8_t vprod1x8 = vmull_s8(vget_low_s8(vb8), vget_low_s8(va1));  in xnn_qs8_gemm_minmax_ukernel_3x16c16__neon_mlal_padal() local
      205  vprod1x8 = vmlal_s8(vprod1x8, vget_high_s8(vb8), vget_high_s8(va1));  in xnn_qs8_gemm_minmax_ukernel_3x16c16__neon_mlal_padal()
      208  vacc1x8 = vpadalq_s16(vacc1x8, vprod1x8);  in xnn_qs8_gemm_minmax_ukernel_3x16c16__neon_mlal_padal()

D | 4x16c8-minmax-neon-mlal-padal.c
      262  int16x8_t vprod1x8 = vmull_s8(vb8x0, va1x0);  in xnn_qs8_gemm_minmax_ukernel_4x16c8__neon_mlal_padal() local
      266  vprod1x8 = vmlal_s8(vprod1x8, vb8x1, va1x1);  in xnn_qs8_gemm_minmax_ukernel_4x16c8__neon_mlal_padal()
      270  vacc1x8 = vpadalq_s16(vacc1x8, vprod1x8);  in xnn_qs8_gemm_minmax_ukernel_4x16c8__neon_mlal_padal()
      449  const int16x8_t vprod1x8 = vmull_s8(vb8, va1);  in xnn_qs8_gemm_minmax_ukernel_4x16c8__neon_mlal_padal() local
      453  vacc1x8 = vpadalq_s16(vacc1x8, vprod1x8);  in xnn_qs8_gemm_minmax_ukernel_4x16c8__neon_mlal_padal()

D | 2x16c8-minmax-neon-mull-padal.c
      132  const int16x8_t vprod1x8 = vmull_s8(vb8, va1);  in xnn_qs8_gemm_minmax_ukernel_2x16c8__neon_mull_padal() local
      134  vacc1x8 = vpadalq_s16(vacc1x8, vprod1x8);  in xnn_qs8_gemm_minmax_ukernel_2x16c8__neon_mull_padal()

D | 4x16c16-minmax-neon-mlal-padal.c
      249  int16x8_t vprod1x8 = vmull_s8(vget_low_s8(vb8), vget_low_s8(va1));  in xnn_qs8_gemm_minmax_ukernel_4x16c16__neon_mlal_padal() local
      253  vprod1x8 = vmlal_s8(vprod1x8, vget_high_s8(vb8), vget_high_s8(va1));  in xnn_qs8_gemm_minmax_ukernel_4x16c16__neon_mlal_padal()
      257  vacc1x8 = vpadalq_s16(vacc1x8, vprod1x8);  in xnn_qs8_gemm_minmax_ukernel_4x16c16__neon_mlal_padal()

D | 3x16c8-minmax-neon-mull-padal.c
      171  const int16x8_t vprod1x8 = vmull_s8(vb8, va1);  in xnn_qs8_gemm_minmax_ukernel_3x16c8__neon_mull_padal() local
      174  vacc1x8 = vpadalq_s16(vacc1x8, vprod1x8);  in xnn_qs8_gemm_minmax_ukernel_3x16c8__neon_mull_padal()

D | 4x16c8-minmax-neon-mull-padal.c
      210  const int16x8_t vprod1x8 = vmull_s8(vb8, va1);  in xnn_qs8_gemm_minmax_ukernel_4x16c8__neon_mull_padal() local
      214  vacc1x8 = vpadalq_s16(vacc1x8, vprod1x8);  in xnn_qs8_gemm_minmax_ukernel_4x16c8__neon_mull_padal()
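The mull_padal files in both directories, and the second cluster of hits in each mlal file (the KC-remainder path), skip the vmlal_s8 fusion: each 8-element block is widened with vmull_s8 and folded into the int32 accumulator immediately with vpadalq_s16. Below is a minimal sketch of that simpler loop under stated assumptions; accumulate_c8_mull is an illustrative name, not an XNNPACK symbol, and kc is assumed to be a multiple of 8.

#include <arm_neon.h>
#include <stddef.h>
#include <stdint.h>

// Illustrative (not XNNPACK): the "mull" flavour processes one 8-element block
// per step, matching the vmull_s8 + vpadalq_s16 pairs at the listed lines.
static int32x4_t accumulate_c8_mull(int32x4_t vacc,
                                    const int8_t* a, const int8_t* w,
                                    size_t kc /* multiple of 8 */) {
  for (size_t k = 0; k < kc; k += 8) {
    const int8x8_t va = vld1_s8(a + k);
    const int8x8_t vb = vld1_s8(w + k);
    const int16x8_t vprod = vmull_s8(vb, va);  // int8 x int8 -> int16
    vacc = vpadalq_s16(vacc, vprod);           // pairwise add into int32 lanes
  }
  return vacc;
}

Compared with this, the mlal variants unroll the K loop by two and fuse the second block with vmlal_s8 before a single vpadalq_s16, halving the number of pairwise-accumulate instructions per 16 elements of K.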