Searched refs:vb11x0 (Results 1 – 8 of 8) sorted by relevance
/external/XNNPACK/src/qs8-igemm/gen/

3x16c8-minmax-rndnu-neon-mlal.c  (in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c8__neon_mlal)
  141  … const int8x8_t vb11x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  (local)
  258  int16x8_t vprod0x11 = vmull_s8(vb11x0, va0x0);
  259  int16x8_t vprod1x11 = vmull_s8(vb11x0, va1x0);
  260  int16x8_t vprod2x11 = vmull_s8(vb11x0, va2x0);

2x16c8-minmax-rndnu-neon-mlal.c  (in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c8__neon_mlal)
  115  … const int8x8_t vb11x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  (local)
  199  int16x8_t vprod0x11 = vmull_s8(vb11x0, va0x0);
  200  int16x8_t vprod1x11 = vmull_s8(vb11x0, va1x0);

1x16c8-minmax-rndnu-neon-mlal.c  (in xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c8__neon_mlal)
   89  … const int8x8_t vb11x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  (local)
  140  int16x8_t vprod0x11 = vmull_s8(vb11x0, va0x0);

4x16c8-minmax-rndnu-neon-mlal.c  (in xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c8__neon_mlal)
  167  … const int8x8_t vb11x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  (local)
  317  int16x8_t vprod0x11 = vmull_s8(vb11x0, va0x0);
  318  int16x8_t vprod1x11 = vmull_s8(vb11x0, va1x0);
  319  int16x8_t vprod2x11 = vmull_s8(vb11x0, va2x0);
  320  int16x8_t vprod3x11 = vmull_s8(vb11x0, va3x0);
/external/XNNPACK/src/qs8-gemm/gen/

3x16c8-minmax-rndnu-neon-mlal.c  (in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c8__neon_mlal)
  126  const int8x8_t vb11x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  (local)
  243  int16x8_t vprod0x11 = vmull_s8(vb11x0, va0x0);
  244  int16x8_t vprod1x11 = vmull_s8(vb11x0, va1x0);
  245  int16x8_t vprod2x11 = vmull_s8(vb11x0, va2x0);

2x16c8-minmax-rndnu-neon-mlal.c  (in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c8__neon_mlal)
  102  const int8x8_t vb11x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  (local)
  186  int16x8_t vprod0x11 = vmull_s8(vb11x0, va0x0);
  187  int16x8_t vprod1x11 = vmull_s8(vb11x0, va1x0);

1x16c8-minmax-rndnu-neon-mlal.c  (in xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c8__neon_mlal)
   78  const int8x8_t vb11x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  (local)
  129  int16x8_t vprod0x11 = vmull_s8(vb11x0, va0x0);

4x16c8-minmax-rndnu-neon-mlal.c  (in xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c8__neon_mlal)
  150  const int8x8_t vb11x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  (local)
  300  int16x8_t vprod0x11 = vmull_s8(vb11x0, va0x0);
  301  int16x8_t vprod1x11 = vmull_s8(vb11x0, va1x0);
  302  int16x8_t vprod2x11 = vmull_s8(vb11x0, va2x0);
  303  int16x8_t vprod3x11 = vmull_s8(vb11x0, va3x0);
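All eight hits share the same shape: vb11x0 holds eight signed 8-bit weights loaded from the packed weight pointer w, and vmull_s8 widen-multiplies them against each row's activation vector (va0x0, va1x0, …) into int16 partial products. Below is a minimal, self-contained sketch (not taken from the repository; the buffers and variable names are illustrative) of that vld1_s8 + vmull_s8 pattern, compilable on an AArch32/AArch64 target with NEON:

  #include <arm_neon.h>
  #include <stdint.h>
  #include <stdio.h>

  int main(void) {
    // Toy stand-ins for one 8-byte block of packed weights (w) and one
    // 8-byte slice of an activation row (a0) in the kernels above.
    const int8_t weights[8]     = {1, -2, 3, -4, 5, -6, 7, -8};
    const int8_t activations[8] = {10, 20, 30, 40, 50, 60, 70, 80};

    // Same operation as the search hits: load 8 signed bytes of weights,
    // then widen-multiply against 8 signed bytes of activations to get
    // 8 int16 partial products (int8*int8 cannot overflow int16).
    const int8x8_t vb = vld1_s8(weights);      // cf. vb11x0 = vld1_s8(w)
    const int8x8_t va = vld1_s8(activations);  // cf. va0x0
    int16x8_t vprod = vmull_s8(vb, va);        // cf. vprod0x11

    // The real mlal kernels accumulate a second 8-byte subgroup into vprod
    // with vmlal_s8 and then fold into int32 accumulators via vpadalq_s16;
    // here we just store and print the widened products.
    int16_t out[8];
    vst1q_s16(out, vprod);
    for (int i = 0; i < 8; i++) {
      printf("%d ", out[i]);
    }
    printf("\n");
    return 0;
  }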