/external/XNNPACK/src/qs8-gemm/gen/

D | 1x16c4s2-minmax-rndnu-neon-mlal.c    |  73, 162 | const int8x8_t vbEFc1x0 = vld1_s8(w); w = (const int8_t*) w + 8; | in xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal() local
D | 2x16c4s2-minmax-rndnu-neon-mlal.c    |  89, 229 | const int8x8_t vbEFc1x0 = vld1_s8(w); w = (const int8_t*) w + 8; | in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mlal() local
D | 1x16c4s2-minmax-rndnu-neon-mull.c    |  72      | const int8x8_t vbEFc1x0 = vld1_s8(w); w = (const int8_t*) w + 8; | in xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull() local
D | 3x16c4s2-minmax-rndnu-neon-mlal.c    | 105, 296 | const int8x8_t vbEFc1x0 = vld1_s8(w); w = (const int8_t*) w + 8; | in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mlal() local
D | 2x16c4s2-minmax-rndnu-neon-mull.c    |  87      | const int8x8_t vbEFc1x0 = vld1_s8(w); w = (const int8_t*) w + 8; | in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull() local
D | 4x16c4s2-minmax-rndnu-neon-mlal.c    | 121, 363 | const int8x8_t vbEFc1x0 = vld1_s8(w); w = (const int8_t*) w + 8; | in xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4s2__neon_mlal() local
D | 1x16c4-minmax-rndnu-neon-mlal-ld1r.c |  76      | const int8x8_t vbEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); | in xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4__neon_mlal_ld1r() local
D | 1x16c4-minmax-rndnu-neon-mlal-ld2r.c |  74      | const int8x8_t vbEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); | in xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4__neon_mlal_ld2r() local
D | 1x16c4-minmax-rndnu-neon-mlal-dup.c  |  74      | const int8x8_t vbEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); | in xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4__neon_mlal_dup() local
D | 3x16c4s2-minmax-rndnu-neon-mull.c    | 102      | const int8x8_t vbEFc1x0 = vld1_s8(w); w = (const int8_t*) w + 8; | in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull() local
D | 4x16c4s2-minmax-rndnu-neon-mull.c    | 117      | const int8x8_t vbEFc1x0 = vld1_s8(w); w = (const int8_t*) w + 8; | in xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4s2__neon_mull() local
D | 2x16c4-minmax-rndnu-neon-mlal-dup.c  |  90      | const int8x8_t vbEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); | in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4__neon_mlal_dup() local
D | 2x16c4-minmax-rndnu-neon-mlal-ld2r.c |  90      | const int8x8_t vbEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); | in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4__neon_mlal_ld2r() local
/external/XNNPACK/src/qs8-igemm/gen/

D | 1x16c4s2-minmax-rndnu-neon-mlal.c    |  84, 173 | const int8x8_t vbEFc1x0 = vld1_s8(w); w = (const int8_t*) w + 8; | in xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal() local
D | 2x16c4s2-minmax-rndnu-neon-mlal.c    | 102, 242 | const int8x8_t vbEFc1x0 = vld1_s8(w); w = (const int8_t*) w + 8; | in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mlal() local
D | 1x16c4s2-minmax-rndnu-neon-mull.c    |  83      | const int8x8_t vbEFc1x0 = vld1_s8(w); w = (const int8_t*) w + 8; | in xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull() local
D | 3x16c4s2-minmax-rndnu-neon-mlal.c    | 120, 311 | const int8x8_t vbEFc1x0 = vld1_s8(w); w = (const int8_t*) w + 8; | in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4s2__neon_mlal() local
D | 2x16c4s2-minmax-rndnu-neon-mull.c    | 100      | const int8x8_t vbEFc1x0 = vld1_s8(w); w = (const int8_t*) w + 8; | in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull() local
D | 4x16c4s2-minmax-rndnu-neon-mlal.c    | 138, 380 | const int8x8_t vbEFc1x0 = vld1_s8(w); w = (const int8_t*) w + 8; | in xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c4s2__neon_mlal() local
D | 1x16c4-minmax-rndnu-neon-mlal-ld1r.c |  87      | const int8x8_t vbEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); | in xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4__neon_mlal_ld1r() local
D | 1x16c4-minmax-rndnu-neon-mlal-ld2r.c |  85      | const int8x8_t vbEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); | in xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4__neon_mlal_ld2r() local
D | 1x16c4-minmax-rndnu-neon-mlal-dup.c  |  85      | const int8x8_t vbEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); | in xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4__neon_mlal_dup() local
D | 3x16c4s2-minmax-rndnu-neon-mull.c    | 117      | const int8x8_t vbEFc1x0 = vld1_s8(w); w = (const int8_t*) w + 8; | in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull() local
D | 2x16c4-minmax-rndnu-neon-mlal-dup.c  | 103      | const int8x8_t vbEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); | in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4__neon_mlal_dup() local
D | 2x16c4-minmax-rndnu-neon-mlal-ld2r.c | 103      | const int8x8_t vbEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); | in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4__neon_mlal_ld2r() local
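
Every definition site listed above uses the same weight-loading idiom: vld1_s8 reads eight packed int8 filter values into a NEON d-register, and the weight pointer w is then advanced by 8 bytes. The c4s2 kernels do the advance with a plain (const int8_t*) cast, while the c4 ld1r/ld2r/dup kernels use uintptr_t arithmetic; since sizeof(int8_t) is 1, the two forms step the pointer by the same amount. The sketch below is not XNNPACK code, only a minimal, self-contained illustration of that load-and-advance pattern under those assumptions; the helper name load8_and_advance and the sample buffer are hypothetical.

#include <arm_neon.h>
#include <stdint.h>
#include <stdio.h>

/* Minimal sketch (not XNNPACK code): load 8 packed int8 weights with vld1_s8
   and advance the opaque weight pointer by 8 bytes, mirroring the idiom at
   the definition sites listed above. */
static const void* load8_and_advance(const void* w, int8x8_t* out) {
  *out = vld1_s8((const int8_t*) w);                          /* load 8 consecutive int8 values */
  return (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  /* advance by 8 bytes */
}

int main(void) {
  int8_t packed_w[16];                       /* hypothetical packed-weight buffer */
  for (int i = 0; i < 16; i++) {
    packed_w[i] = (int8_t) i;
  }

  const void* w = packed_w;
  int8x8_t vb0, vb1;
  w = load8_and_advance(w, &vb0);            /* weights 0..7  */
  w = load8_and_advance(w, &vb1);            /* weights 8..15 */

  printf("first lane of second load: %d\n", vget_lane_s8(vb1, 0));  /* prints 8 */
  return 0;
}

In the generated kernels this pattern repeats for each group of output channels, so w walks linearly through the packed weight blob as the microkernel accumulates into its 16 output channels.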