
Lines Matching refs:vget_low_s8

106 int16x8_t vprod0x0 = vmull_s8(vget_low_s8(vb0), vget_low_s8(va0)); in xnn_qs8_gemm_minmax_ukernel_2x16c16__neon_mlal_padal()
107 int16x8_t vprod1x0 = vmull_s8(vget_low_s8(vb0), vget_low_s8(va1)); in xnn_qs8_gemm_minmax_ukernel_2x16c16__neon_mlal_padal()
112 int16x8_t vprod0x1 = vmull_s8(vget_low_s8(vb1), vget_low_s8(va0)); in xnn_qs8_gemm_minmax_ukernel_2x16c16__neon_mlal_padal()
113 int16x8_t vprod1x1 = vmull_s8(vget_low_s8(vb1), vget_low_s8(va1)); in xnn_qs8_gemm_minmax_ukernel_2x16c16__neon_mlal_padal()
118 int16x8_t vprod0x2 = vmull_s8(vget_low_s8(vb2), vget_low_s8(va0)); in xnn_qs8_gemm_minmax_ukernel_2x16c16__neon_mlal_padal()
119 int16x8_t vprod1x2 = vmull_s8(vget_low_s8(vb2), vget_low_s8(va1)); in xnn_qs8_gemm_minmax_ukernel_2x16c16__neon_mlal_padal()
124 int16x8_t vprod0x3 = vmull_s8(vget_low_s8(vb3), vget_low_s8(va0)); in xnn_qs8_gemm_minmax_ukernel_2x16c16__neon_mlal_padal()
125 int16x8_t vprod1x3 = vmull_s8(vget_low_s8(vb3), vget_low_s8(va1)); in xnn_qs8_gemm_minmax_ukernel_2x16c16__neon_mlal_padal()
130 int16x8_t vprod0x4 = vmull_s8(vget_low_s8(vb4), vget_low_s8(va0)); in xnn_qs8_gemm_minmax_ukernel_2x16c16__neon_mlal_padal()
131 int16x8_t vprod1x4 = vmull_s8(vget_low_s8(vb4), vget_low_s8(va1)); in xnn_qs8_gemm_minmax_ukernel_2x16c16__neon_mlal_padal()
136 int16x8_t vprod0x5 = vmull_s8(vget_low_s8(vb5), vget_low_s8(va0)); in xnn_qs8_gemm_minmax_ukernel_2x16c16__neon_mlal_padal()
137 int16x8_t vprod1x5 = vmull_s8(vget_low_s8(vb5), vget_low_s8(va1)); in xnn_qs8_gemm_minmax_ukernel_2x16c16__neon_mlal_padal()
142 int16x8_t vprod0x6 = vmull_s8(vget_low_s8(vb6), vget_low_s8(va0)); in xnn_qs8_gemm_minmax_ukernel_2x16c16__neon_mlal_padal()
143 int16x8_t vprod1x6 = vmull_s8(vget_low_s8(vb6), vget_low_s8(va1)); in xnn_qs8_gemm_minmax_ukernel_2x16c16__neon_mlal_padal()
148 int16x8_t vprod0x7 = vmull_s8(vget_low_s8(vb7), vget_low_s8(va0)); in xnn_qs8_gemm_minmax_ukernel_2x16c16__neon_mlal_padal()
149 int16x8_t vprod1x7 = vmull_s8(vget_low_s8(vb7), vget_low_s8(va1)); in xnn_qs8_gemm_minmax_ukernel_2x16c16__neon_mlal_padal()
154 int16x8_t vprod0x8 = vmull_s8(vget_low_s8(vb8), vget_low_s8(va0)); in xnn_qs8_gemm_minmax_ukernel_2x16c16__neon_mlal_padal()
155 int16x8_t vprod1x8 = vmull_s8(vget_low_s8(vb8), vget_low_s8(va1)); in xnn_qs8_gemm_minmax_ukernel_2x16c16__neon_mlal_padal()
160 int16x8_t vprod0x9 = vmull_s8(vget_low_s8(vb9), vget_low_s8(va0)); in xnn_qs8_gemm_minmax_ukernel_2x16c16__neon_mlal_padal()
161 int16x8_t vprod1x9 = vmull_s8(vget_low_s8(vb9), vget_low_s8(va1)); in xnn_qs8_gemm_minmax_ukernel_2x16c16__neon_mlal_padal()
166 int16x8_t vprod0x10 = vmull_s8(vget_low_s8(vb10), vget_low_s8(va0)); in xnn_qs8_gemm_minmax_ukernel_2x16c16__neon_mlal_padal()
167 int16x8_t vprod1x10 = vmull_s8(vget_low_s8(vb10), vget_low_s8(va1)); in xnn_qs8_gemm_minmax_ukernel_2x16c16__neon_mlal_padal()
172 int16x8_t vprod0x11 = vmull_s8(vget_low_s8(vb11), vget_low_s8(va0)); in xnn_qs8_gemm_minmax_ukernel_2x16c16__neon_mlal_padal()
173 int16x8_t vprod1x11 = vmull_s8(vget_low_s8(vb11), vget_low_s8(va1)); in xnn_qs8_gemm_minmax_ukernel_2x16c16__neon_mlal_padal()
178 int16x8_t vprod0x12 = vmull_s8(vget_low_s8(vb12), vget_low_s8(va0)); in xnn_qs8_gemm_minmax_ukernel_2x16c16__neon_mlal_padal()
179 int16x8_t vprod1x12 = vmull_s8(vget_low_s8(vb12), vget_low_s8(va1)); in xnn_qs8_gemm_minmax_ukernel_2x16c16__neon_mlal_padal()
184 int16x8_t vprod0x13 = vmull_s8(vget_low_s8(vb13), vget_low_s8(va0)); in xnn_qs8_gemm_minmax_ukernel_2x16c16__neon_mlal_padal()
185 int16x8_t vprod1x13 = vmull_s8(vget_low_s8(vb13), vget_low_s8(va1)); in xnn_qs8_gemm_minmax_ukernel_2x16c16__neon_mlal_padal()
190 int16x8_t vprod0x14 = vmull_s8(vget_low_s8(vb14), vget_low_s8(va0)); in xnn_qs8_gemm_minmax_ukernel_2x16c16__neon_mlal_padal()
191 int16x8_t vprod1x14 = vmull_s8(vget_low_s8(vb14), vget_low_s8(va1)); in xnn_qs8_gemm_minmax_ukernel_2x16c16__neon_mlal_padal()
196 int16x8_t vprod0x15 = vmull_s8(vget_low_s8(vb15), vget_low_s8(va0)); in xnn_qs8_gemm_minmax_ukernel_2x16c16__neon_mlal_padal()
197 int16x8_t vprod1x15 = vmull_s8(vget_low_s8(vb15), vget_low_s8(va1)); in xnn_qs8_gemm_minmax_ukernel_2x16c16__neon_mlal_padal()
358 …int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vget_low_s8(vout0x0123456789ABCDEF), vget_low_s8 in xnn_qs8_gemm_minmax_ukernel_2x16c16__neon_mlal_padal()
360 vst1_s8(c0, vget_low_s8(vout0x01234567_1x01234567)); c0 += 8; in xnn_qs8_gemm_minmax_ukernel_2x16c16__neon_mlal_padal()
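
All of the matches above use the same NEON idiom: vget_low_s8() extracts the low 8 int8 lanes of a 128-bit register, vmull_s8() multiplies two such halves into widened int16 products (lines 106-197), and the output path packs and stores low halves with vcombine_s8() and vst1_s8() (lines 358/360). The sketch below is only an illustration of that intrinsic pattern, not the XNNPACK microkernel itself; the function names are made up for this example, and the vpadalq_s16() accumulation step is an assumption drawn from the "_mlal_padal" suffix of the kernel name rather than from the matched lines.

/* Minimal sketch, assuming a C compiler targeting ARM NEON (arm_neon.h).
 * Not the actual XNNPACK kernel; names and the accumulation step are
 * illustrative assumptions, as noted above. */
#include <arm_neon.h>
#include <stdint.h>

/* Widening multiply of the low 8 int8 lanes of va and vb, i.e. the
 * vmull_s8(vget_low_s8(...), vget_low_s8(...)) pattern in lines 106-197,
 * followed by a pairwise add-accumulate-long into four int32 lanes. */
int32x4_t dot_low_halves(int8x16_t va, int8x16_t vb, int32x4_t vacc) {
  const int16x8_t vprod = vmull_s8(vget_low_s8(vb), vget_low_s8(va));
  return vpadalq_s16(vacc, vprod);  /* assumed accumulation step (padal) */
}

/* Output-path pattern of lines 358/360: pack the low halves of two 16-lane
 * results with vcombine_s8(), then store the low 8 lanes with vst1_s8(). */
void store_low_halves(int8_t* c0, int8x16_t vout0, int8x16_t vout1) {
  const int8x16_t vout01 = vcombine_s8(vget_low_s8(vout0), vget_low_s8(vout1));
  vst1_s8(c0, vget_low_s8(vout01));
}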