
Searched refs:vb15x0 (Results 1 – 8 of 8) sorted by relevance

/external/XNNPACK/src/qs8-igemm/gen/
1x16c8-minmax-neon-mlal-padal.c
   93  … const int8x8_t vb15x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); in xnn_qs8_igemm_minmax_ukernel_1x16c8__neon_mlal_padal() local
  156  int16x8_t vprod0x15 = vmull_s8(vb15x0, va0x0); in xnn_qs8_igemm_minmax_ukernel_1x16c8__neon_mlal_padal()
2x16c8-minmax-neon-mlal-padal.c
  119  … const int8x8_t vb15x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); in xnn_qs8_igemm_minmax_ukernel_2x16c8__neon_mlal_padal() local
  227  int16x8_t vprod0x15 = vmull_s8(vb15x0, va0x0); in xnn_qs8_igemm_minmax_ukernel_2x16c8__neon_mlal_padal()
  228  int16x8_t vprod1x15 = vmull_s8(vb15x0, va1x0); in xnn_qs8_igemm_minmax_ukernel_2x16c8__neon_mlal_padal()
4x16c8-minmax-neon-mlal-padal.c
  171  … const int8x8_t vb15x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); in xnn_qs8_igemm_minmax_ukernel_4x16c8__neon_mlal_padal() local
  369  int16x8_t vprod0x15 = vmull_s8(vb15x0, va0x0); in xnn_qs8_igemm_minmax_ukernel_4x16c8__neon_mlal_padal()
  370  int16x8_t vprod1x15 = vmull_s8(vb15x0, va1x0); in xnn_qs8_igemm_minmax_ukernel_4x16c8__neon_mlal_padal()
  371  int16x8_t vprod2x15 = vmull_s8(vb15x0, va2x0); in xnn_qs8_igemm_minmax_ukernel_4x16c8__neon_mlal_padal()
  372  int16x8_t vprod3x15 = vmull_s8(vb15x0, va3x0); in xnn_qs8_igemm_minmax_ukernel_4x16c8__neon_mlal_padal()
3x16c8-minmax-neon-mlal-padal.c
  145  … const int8x8_t vb15x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); in xnn_qs8_igemm_minmax_ukernel_3x16c8__neon_mlal_padal() local
  298  int16x8_t vprod0x15 = vmull_s8(vb15x0, va0x0); in xnn_qs8_igemm_minmax_ukernel_3x16c8__neon_mlal_padal()
  299  int16x8_t vprod1x15 = vmull_s8(vb15x0, va1x0); in xnn_qs8_igemm_minmax_ukernel_3x16c8__neon_mlal_padal()
  300  int16x8_t vprod2x15 = vmull_s8(vb15x0, va2x0); in xnn_qs8_igemm_minmax_ukernel_3x16c8__neon_mlal_padal()
/external/XNNPACK/src/qs8-gemm/gen/
1x16c8-minmax-neon-mlal-padal.c
   82  const int8x8_t vb15x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); in xnn_qs8_gemm_minmax_ukernel_1x16c8__neon_mlal_padal() local
  145  int16x8_t vprod0x15 = vmull_s8(vb15x0, va0x0); in xnn_qs8_gemm_minmax_ukernel_1x16c8__neon_mlal_padal()
2x16c8-minmax-neon-mlal-padal.c
  106  const int8x8_t vb15x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); in xnn_qs8_gemm_minmax_ukernel_2x16c8__neon_mlal_padal() local
  214  int16x8_t vprod0x15 = vmull_s8(vb15x0, va0x0); in xnn_qs8_gemm_minmax_ukernel_2x16c8__neon_mlal_padal()
  215  int16x8_t vprod1x15 = vmull_s8(vb15x0, va1x0); in xnn_qs8_gemm_minmax_ukernel_2x16c8__neon_mlal_padal()
3x16c8-minmax-neon-mlal-padal.c
  130  const int8x8_t vb15x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); in xnn_qs8_gemm_minmax_ukernel_3x16c8__neon_mlal_padal() local
  283  int16x8_t vprod0x15 = vmull_s8(vb15x0, va0x0); in xnn_qs8_gemm_minmax_ukernel_3x16c8__neon_mlal_padal()
  284  int16x8_t vprod1x15 = vmull_s8(vb15x0, va1x0); in xnn_qs8_gemm_minmax_ukernel_3x16c8__neon_mlal_padal()
  285  int16x8_t vprod2x15 = vmull_s8(vb15x0, va2x0); in xnn_qs8_gemm_minmax_ukernel_3x16c8__neon_mlal_padal()
4x16c8-minmax-neon-mlal-padal.c
  154  const int8x8_t vb15x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); in xnn_qs8_gemm_minmax_ukernel_4x16c8__neon_mlal_padal() local
  352  int16x8_t vprod0x15 = vmull_s8(vb15x0, va0x0); in xnn_qs8_gemm_minmax_ukernel_4x16c8__neon_mlal_padal()
  353  int16x8_t vprod1x15 = vmull_s8(vb15x0, va1x0); in xnn_qs8_gemm_minmax_ukernel_4x16c8__neon_mlal_padal()
  354  int16x8_t vprod2x15 = vmull_s8(vb15x0, va2x0); in xnn_qs8_gemm_minmax_ukernel_4x16c8__neon_mlal_padal()
  355  int16x8_t vprod3x15 = vmull_s8(vb15x0, va3x0); in xnn_qs8_gemm_minmax_ukernel_4x16c8__neon_mlal_padal()
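All eight hits are instances of the same generated pattern: vb15x0 holds eight signed 8-bit weights for output channel 15, loaded from the packed weight pointer w with vld1_s8 and multiplied against each row's activation vector (va0x0 … va3x0) with the widening vmull_s8; in these kernels the resulting 16-bit products are then folded into 32-bit accumulators with vpadalq_s16, which appears to be where the "mlal-padal" suffix in the kernel names comes from. Below is a minimal, self-contained sketch of that load / widen-multiply / pairwise-accumulate step; dot8_s8 is a hypothetical helper written for illustration, not code taken from XNNPACK.

#include <arm_neon.h>
#include <stdint.h>

/* Illustrative sketch only (assumed helper, not XNNPACK source): one
 * 8-element int8 dot product using the same intrinsics as the hits above:
 * vld1_s8 to load, vmull_s8 to widen-multiply, vpadalq_s16 to accumulate. */
static int32_t dot8_s8(const int8_t a[8], const int8_t b[8]) {
  const int8x8_t va = vld1_s8(a);            /* 8 activations                    */
  const int8x8_t vb = vld1_s8(b);            /* 8 weights, cf. vb15x0            */
  const int16x8_t vprod = vmull_s8(vb, va);  /* widening 8x8 -> 16-bit products  */
  int32x4_t vacc = vdupq_n_s32(0);
  vacc = vpadalq_s16(vacc, vprod);           /* pairwise add into 32-bit lanes   */
#if defined(__aarch64__)
  return vaddvq_s32(vacc);                   /* horizontal sum of the 4 lanes    */
#else
  const int32x2_t vsum = vadd_s32(vget_low_s32(vacc), vget_high_s32(vacc));
  return vget_lane_s32(vpadd_s32(vsum, vsum), 0);
#endif
}

The real kernels differ mainly in scale: they keep one such accumulator per (row, output channel) pair, hence the 1x16/2x16/3x16/4x16 variants above, and they also load a second weight vector (vb15x1 in the same files) that is folded in with vmlal_s8 before the pairwise accumulation.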