Searched refs:vb14x1 (Results 1 – 8 of 8) sorted by relevance
/external/XNNPACK/src/qs8-igemm/gen/ |
D | 3x16c8-minmax-rndnu-neon-mlal.c | 287 … const int8x8_t vb14x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c8__neon_mlal() local 291 vprod0x14 = vmlal_s8(vprod0x14, vb14x1, va0x1); in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c8__neon_mlal() 292 vprod1x14 = vmlal_s8(vprod1x14, vb14x1, va1x1); in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c8__neon_mlal() 293 vprod2x14 = vmlal_s8(vprod2x14, vb14x1, va2x1); in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c8__neon_mlal()
|
D | 2x16c8-minmax-rndnu-neon-mlal.c | 219 … const int8x8_t vb14x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c8__neon_mlal() local 222 vprod0x14 = vmlal_s8(vprod0x14, vb14x1, va0x1); in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c8__neon_mlal() 223 vprod1x14 = vmlal_s8(vprod1x14, vb14x1, va1x1); in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c8__neon_mlal()
|
D | 1x16c8-minmax-rndnu-neon-mlal.c | 151 … const int8x8_t vb14x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); in xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c8__neon_mlal() local 153 vprod0x14 = vmlal_s8(vprod0x14, vb14x1, va0x1); in xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c8__neon_mlal()
|
D | 4x16c8-minmax-rndnu-neon-mlal.c | 355 … const int8x8_t vb14x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); in xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c8__neon_mlal() local 360 vprod0x14 = vmlal_s8(vprod0x14, vb14x1, va0x1); in xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c8__neon_mlal() 361 vprod1x14 = vmlal_s8(vprod1x14, vb14x1, va1x1); in xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c8__neon_mlal() 362 vprod2x14 = vmlal_s8(vprod2x14, vb14x1, va2x1); in xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c8__neon_mlal() 363 vprod3x14 = vmlal_s8(vprod3x14, vb14x1, va3x1); in xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c8__neon_mlal()
|
/external/XNNPACK/src/qs8-gemm/gen/ |
D | 3x16c8-minmax-rndnu-neon-mlal.c | 272 const int8x8_t vb14x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c8__neon_mlal() local 276 vprod0x14 = vmlal_s8(vprod0x14, vb14x1, va0x1); in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c8__neon_mlal() 277 vprod1x14 = vmlal_s8(vprod1x14, vb14x1, va1x1); in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c8__neon_mlal() 278 vprod2x14 = vmlal_s8(vprod2x14, vb14x1, va2x1); in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c8__neon_mlal()
|
D | 2x16c8-minmax-rndnu-neon-mlal.c | 206 const int8x8_t vb14x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c8__neon_mlal() local 209 vprod0x14 = vmlal_s8(vprod0x14, vb14x1, va0x1); in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c8__neon_mlal() 210 vprod1x14 = vmlal_s8(vprod1x14, vb14x1, va1x1); in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c8__neon_mlal()
|
D | 1x16c8-minmax-rndnu-neon-mlal.c | 140 const int8x8_t vb14x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); in xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c8__neon_mlal() local 142 vprod0x14 = vmlal_s8(vprod0x14, vb14x1, va0x1); in xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c8__neon_mlal()
|
D | 4x16c8-minmax-rndnu-neon-mlal.c | 338 const int8x8_t vb14x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); in xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c8__neon_mlal() local 343 vprod0x14 = vmlal_s8(vprod0x14, vb14x1, va0x1); in xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c8__neon_mlal() 344 vprod1x14 = vmlal_s8(vprod1x14, vb14x1, va1x1); in xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c8__neon_mlal() 345 vprod2x14 = vmlal_s8(vprod2x14, vb14x1, va2x1); in xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c8__neon_mlal() 346 vprod3x14 = vmlal_s8(vprod3x14, vb14x1, va3x1); in xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c8__neon_mlal()
|