/external/XNNPACK/src/qs8-gemm/gen/
  2x8c8-minmax-fp32-neonv8-mlal.c  (local in xnn_qs8_gemm_minmax_fp32_ukernel_2x8c8__neonv8_mlal)
    122: int16x8_t vprod1x5 = vmull_s8(vb5x0, va1x0);
    177: const int16x8_t vprod1x5 = vmull_s8(vb5, va1);
  2x8c8-minmax-fp32-neon-mlal.c  (local in xnn_qs8_gemm_minmax_fp32_ukernel_2x8c8__neon_mlal)
    121: int16x8_t vprod1x5 = vmull_s8(vb5x0, va1x0);
    176: const int16x8_t vprod1x5 = vmull_s8(vb5, va1);
  2x8c8-minmax-rndnu-neon-mlal.c  (local in xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c8__neon_mlal)
    121: int16x8_t vprod1x5 = vmull_s8(vb5x0, va1x0);
    176: const int16x8_t vprod1x5 = vmull_s8(vb5, va1);
  3x8c8-minmax-rndnu-neon-mlal.c  (local in xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c8__neon_mlal)
    152: int16x8_t vprod1x5 = vmull_s8(vb5x0, va1x0);
    227: const int16x8_t vprod1x5 = vmull_s8(vb5, va1);
  2x8c16-minmax-rndnu-neon-mlal.c  (local in xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c16__neon_mlal)
    113: int16x8_t vprod1x5 = vmull_s8(vget_low_s8(vb5), vget_low_s8(va1));
  2x8c8-minmax-rndnu-neon-mull.c  (local in xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c8__neon_mull)
    101: const int16x8_t vprod1x5 = vmull_s8(vb5, va1);
  4x8c8-minmax-rndnu-neon-mlal.c  (local in xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c8__neon_mlal)
    183: int16x8_t vprod1x5 = vmull_s8(vb5x0, va1x0);
    278: const int16x8_t vprod1x5 = vmull_s8(vb5, va1);
  2x16c8-minmax-rndnu-neon-mlal.c  (local in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c8__neon_mlal)
    145: int16x8_t vprod1x5 = vmull_s8(vb5x0, va1x0);
    256: const int16x8_t vprod1x5 = vmull_s8(vb5, va1);
  3x8c8-minmax-rndnu-neon-mull.c  (local in xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c8__neon_mull)
    126: const int16x8_t vprod1x5 = vmull_s8(vb5, va1);
  3x8c16-minmax-rndnu-neon-mlal.c  (local in xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c16__neon_mlal)
    143: int16x8_t vprod1x5 = vmull_s8(vget_low_s8(vb5), vget_low_s8(va1));
/external/XNNPACK/src/qs8-igemm/gen/
  2x8c8-minmax-rndnu-neon-mlal.c  (local in xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c8__neon_mlal)
    134: int16x8_t vprod1x5 = vmull_s8(vb5x0, va1x0);
    189: const int16x8_t vprod1x5 = vmull_s8(vb5, va1);
  2x8c8-minmax-fp32-neonv8-mlal.c  (local in xnn_qs8_igemm_minmax_fp32_ukernel_2x8c8__neonv8_mlal)
    135: int16x8_t vprod1x5 = vmull_s8(vb5x0, va1x0);
    190: const int16x8_t vprod1x5 = vmull_s8(vb5, va1);
  2x8c8-minmax-fp32-neon-mlal.c  (local in xnn_qs8_igemm_minmax_fp32_ukernel_2x8c8__neon_mlal)
    134: int16x8_t vprod1x5 = vmull_s8(vb5x0, va1x0);
    189: const int16x8_t vprod1x5 = vmull_s8(vb5, va1);
  3x8c8-minmax-rndnu-neon-mlal.c  (local in xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c8__neon_mlal)
    167: int16x8_t vprod1x5 = vmull_s8(vb5x0, va1x0);
    242: const int16x8_t vprod1x5 = vmull_s8(vb5, va1);
  4x8c8-minmax-rndnu-neon-mlal.c  (local in xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c8__neon_mlal)
    200: int16x8_t vprod1x5 = vmull_s8(vb5x0, va1x0);
    295: const int16x8_t vprod1x5 = vmull_s8(vb5, va1);
  2x8c8-minmax-rndnu-neon-mull.c  (local in xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c8__neon_mull)
    114: const int16x8_t vprod1x5 = vmull_s8(vb5, va1);
  2x8c16-minmax-rndnu-neon-mlal.c  (local in xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c16__neon_mlal)
    126: int16x8_t vprod1x5 = vmull_s8(vget_low_s8(vb5), vget_low_s8(va1));
  2x16c8-minmax-rndnu-neon-mlal.c  (local in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c8__neon_mlal)
    158: int16x8_t vprod1x5 = vmull_s8(vb5x0, va1x0);
    269: const int16x8_t vprod1x5 = vmull_s8(vb5, va1);
  3x8c16-minmax-rndnu-neon-mlal.c  (local in xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c16__neon_mlal)
    158: int16x8_t vprod1x5 = vmull_s8(vget_low_s8(vb5), vget_low_s8(va1));
  3x8c8-minmax-rndnu-neon-mull.c  (local in xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c8__neon_mull)
    141: const int16x8_t vprod1x5 = vmull_s8(vb5, va1);
  3x16c8-minmax-rndnu-neon-mlal.c  (local in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c8__neon_mlal)
    199: int16x8_t vprod1x5 = vmull_s8(vb5x0, va1x0);
    354: const int16x8_t vprod1x5 = vmull_s8(vb5, va1);
/external/XNNPACK/src/qc8-gemm/gen/
  2x8c8-minmax-fp32-neon-mlal.c  (local in xnn_qc8_gemm_minmax_fp32_ukernel_2x8c8__neon_mlal)
    121: int16x8_t vprod1x5 = vmull_s8(vb5x0, va1x0);
    176: const int16x8_t vprod1x5 = vmull_s8(vb5, va1);
  2x8c8-minmax-fp32-neonv8-mlal.c  (local in xnn_qc8_gemm_minmax_fp32_ukernel_2x8c8__neonv8_mlal)
    122: int16x8_t vprod1x5 = vmull_s8(vb5x0, va1x0);
    177: const int16x8_t vprod1x5 = vmull_s8(vb5, va1);
/external/XNNPACK/src/qc8-igemm/gen/
  2x8c8-minmax-fp32-neonv8-mlal.c  (local in xnn_qc8_igemm_minmax_fp32_ukernel_2x8c8__neonv8_mlal)
    135: int16x8_t vprod1x5 = vmull_s8(vb5x0, va1x0);
    190: const int16x8_t vprod1x5 = vmull_s8(vb5, va1);
  2x8c8-minmax-fp32-neon-mlal.c  (local in xnn_qc8_igemm_minmax_fp32_ukernel_2x8c8__neon_mlal)
    134: int16x8_t vprod1x5 = vmull_s8(vb5x0, va1x0);
    189: const int16x8_t vprod1x5 = vmull_s8(vb5, va1);
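Every vprod1x5 definition listed above is the same NEON widening-multiply idiom: an int8x8_t slice of the packed weights (vb5, vb5x0, ...) is multiplied with an int8x8_t slice of the activations (va1, va1x0, ...) into 16-bit lanes, the *-mlal kernels fold in a second slice, and the products are then pairwise-accumulated into 32-bit lanes. The sketch below illustrates that idiom with standard Arm NEON intrinsics (vmull_s8, vmlal_s8, vpadalq_s16); the helper function, its name, and its arguments are hypothetical and are not part of the XNNPACK ukernels listed here.

  #include <arm_neon.h>
  #include <stdint.h>

  // Hypothetical helper (not an XNNPACK ukernel): accumulate one 16-element
  // int8 dot product into four int32 lanes, using the same
  // vmull_s8 / vmlal_s8 / vpadalq_s16 pattern as the vprod1x5 lines above.
  static inline int32x4_t dot_c16_accumulate(int32x4_t vacc,
                                              const int8_t* a,   // 16 int8 activations
                                              const int8_t* b)   // 16 int8 weights
  {
    const int8x8_t va_lo = vld1_s8(a);
    const int8x8_t va_hi = vld1_s8(a + 8);
    const int8x8_t vb_lo = vld1_s8(b);
    const int8x8_t vb_hi = vld1_s8(b + 8);

    // Widening multiply: int8 x int8 -> int16 (the vmull_s8 in the listing).
    int16x8_t vprod = vmull_s8(vb_lo, va_lo);
    // Second slice folded in by the *-mlal kernels: widening multiply-accumulate.
    vprod = vmlal_s8(vprod, vb_hi, va_hi);
    // Pairwise-add the int16 products into the 32-bit accumulators.
    return vpadalq_s16(vacc, vprod);
  }

The *-mull kernels stop after the single vmull_s8 (no second slice), and the c16 variants obtain their two 8-byte slices with vget_low_s8/vget_high_s8 from one 16-byte load instead of two separate loads, but the accumulation structure is the same.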