/external/XNNPACK/src/qs8-gemm/gen/
1x8c2-minmax-rndnu-neon-mull-ld2r.c (in xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r):
    50  const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);    [local declaration]
    62  const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
    68  const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);

1x16c2-minmax-rndnu-neon-mull-ld2r.c (in xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r):
    52  const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);    [local declaration]
    72  const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
    82  const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);

1x8c2-minmax-rndnu-neon-mlal-ld2r.c (in xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r):
   111  const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);    [local declaration]
   123  const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
   129  const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);

2x8c2-minmax-rndnu-neon-mull-ld2r.c (in xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r):
    58  const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);    [local declaration]
    72  const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
    83  const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);

1x8c2-minmax-fp32-neon-mlal-ld2r.c (in xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r):
   111  const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);    [local declaration]
   123  const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
   129  const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);

1x8c2-minmax-fp32-neonv8-mlal-ld2r.c (in xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r):
   112  const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);    [local declaration]
   124  const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
   130  const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);

1x8c2-minmax-rndnu-neon-mull-ld1r.c (in xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r):
    50  const int16x4_t va00 = vld1_dup_s16((const void*)a0);    [local declaration]
    64  const int8x8_t va0c0 = vreinterpret_s8_s16(va00);

1x8c4-minmax-rndnu-neon-mull-ld1r.c (in xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4__neon_mull_ld1r):
    53  const int32x2_t va00 = vld1_dup_s32((const void*)a0);    [local declaration]
    65  const int8x8_t va0c0 = vreinterpret_s8_s32(va00);

3x8c2-minmax-rndnu-neon-mull-ld2r.c (in xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r):
    66  const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);    [local declaration]
    82  const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
    98  const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
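All of the ld2r hits above are the same load idiom: vld2_dup_s16 pulls two consecutive 16-bit lanes of the A row (each lane holding a pair of adjacent int8 activations) and broadcasts each pair across a vector, and vreinterpret_s8_s16 then recovers the int8 view that vmull_s8 expects. As a reading aid, here is a minimal hedged sketch of one c2 K-step for four output columns; the function name and the simplified weight layout are invented for illustration, this is not the XNNPACK source:

    #include <arm_neon.h>
    #include <stdint.h>

    /* One c2 step: 4 int8 activations (k = 0..3) against 16 c2-packed
     * int8 weights for 4 output columns (layout assumed for this sketch). */
    static int32x4_t qs8_c2_step(const int8_t* a, const int8_t* w, int32x4_t acc)
    {
      const int16x4x2_t va = vld2_dup_s16((const void*) a);
      const int8x8_t va_c0 = vreinterpret_s8_s16(va.val[0]);  /* {a0,a1} x 4 */
      const int8x8_t va_c1 = vreinterpret_s8_s16(va.val[1]);  /* {a2,a3} x 4 */

      const int8x8_t vb_c0 = vld1_s8(w);      /* k = 0,1 for columns 0..3 */
      const int8x8_t vb_c1 = vld1_s8(w + 8);  /* k = 2,3 for columns 0..3 */

      /* Adjacent int16 products belong to the same column, so the pairwise
       * widening accumulate folds them straight into the int32 lanes. */
      acc = vpadalq_s16(acc, vmull_s8(vb_c0, va_c0));
      acc = vpadalq_s16(acc, vmull_s8(vb_c1, va_c1));
      return acc;
    }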
/external/XNNPACK/src/qs8-igemm/gen/
1x8c2-minmax-rndnu-neon-mull-ld2r.c (in xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r):
    61  const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);    [local declaration]
    73  const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
    79  const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);

1x8c2-minmax-fp32-neon-mlal-ld2r.c (in xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r):
   123  const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);    [local declaration]
   135  const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
   141  const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);

1x8c2-minmax-fp32-neonv8-mlal-ld2r.c (in xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r):
   124  const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);    [local declaration]
   136  const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
   142  const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);

2x8c2-minmax-rndnu-neon-mull-ld2r.c (in xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r):
    71  const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);    [local declaration]
    85  const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
    96  const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);

1x8c2-minmax-rndnu-neon-mlal-ld2r.c (in xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r):
   123  const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);    [local declaration]
   135  const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
   141  const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);

1x16c2-minmax-rndnu-neon-mull-ld2r.c (in xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r):
    63  const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);    [local declaration]
    83  const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
    93  const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);

1x8c4-minmax-rndnu-neon-mull-ld1r.c (in xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4__neon_mull_ld1r):
    64  const int32x2_t va00 = vld1_dup_s32((const void*)a0);    [local declaration]
    76  const int8x8_t va0c0 = vreinterpret_s8_s32(va00);

1x8c2-minmax-rndnu-neon-mull-ld1r.c (in xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r):
    61  const int16x4_t va00 = vld1_dup_s16((const void*)a0);    [local declaration]
    75  const int8x8_t va0c0 = vreinterpret_s8_s16(va00);

3x8c2-minmax-rndnu-neon-mull-ld2r.c (in xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r):
    81  const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);    [local declaration]
    97  const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
   113  const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);

2x8c2-minmax-fp32-neonv8-mlal-ld2r.c (in xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r):
   170  const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);    [local declaration]
   184  const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
   195  const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);

2x8c2-minmax-rndnu-neon-mlal-ld2r.c (in xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld2r):
   169  const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);    [local declaration]
   183  const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
   194  const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
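The ld1r entries above (1x8c2-minmax-rndnu-neon-mull-ld1r.c and 1x8c4-minmax-rndnu-neon-mull-ld1r.c) differ from the ld2r kernels only in the load: a single broadcast of one K group instead of vld2's de-interleaving double broadcast. A minimal hedged sketch of the two broadcasts (illustrative function name, not XNNPACK source):

    #include <arm_neon.h>
    #include <stdint.h>

    /* c2 kernels broadcast one pair of int8 activations with a 16-bit load
     * (ld1r {v.4h}); c4 kernels broadcast four with a 32-bit load
     * (ld1r {v.2s}). The reinterpret is free: it only relabels the lanes
     * so the vector can feed vmull_s8. */
    void ld1r_broadcasts(const int8_t* a0)
    {
      const int16x4_t va_pair = vld1_dup_s16((const void*) a0);
      const int8x8_t va_c2 = vreinterpret_s8_s16(va_pair);   /* a0,a1,a0,a1,... */

      const int32x2_t va_quad = vld1_dup_s32((const void*) a0);
      const int8x8_t va_c4 = vreinterpret_s8_s32(va_quad);   /* a0..a3,a0..a3 */

      (void) va_c2;
      (void) va_c4;
    }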
/external/XNNPACK/src/qc8-igemm/gen/
1x8c2-minmax-fp32-neonv8-mlal-ld2r.c (in xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r):
   124  const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);    [local declaration]
   136  const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
   142  const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);

1x8c2-minmax-fp32-neon-mlal-ld2r.c (in xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r):
   123  const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);    [local declaration]
   135  const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
   141  const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);

2x8c2-minmax-fp32-neon-mlal-ld2r.c (in xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r):
   169  const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);    [local declaration]
   183  const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
   194  const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
/external/XNNPACK/src/qc8-gemm/gen/
1x8c2-minmax-fp32-neon-mlal-ld2r.c (in xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r):
   111  const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);    [local declaration]
   123  const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
   129  const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);

1x8c2-minmax-fp32-neonv8-mlal-ld2r.c (in xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r):
   112  const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);    [local declaration]
   124  const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
   130  const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
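The qc8 hits repeat the same ld2r idiom; the remaining split in the file names is mull vs. mlal. A mull kernel issues an independent widening multiply per K pair, while an mlal kernel unrolls K by two and fuses the second product into the int16 intermediate before the widening add. A minimal hedged sketch of the mlal shape (invented helper name, not XNNPACK source):

    #include <arm_neon.h>

    /* One c2 accumulation step, mlal-style: two K pairs for the same four
     * columns multiply into a shared int16 intermediate, then fold into
     * the int32 accumulators with a single pairwise widening add. */
    static int32x4_t c2_step_mlal(int32x4_t acc,
                                  int8x8_t va_p0, int8x8_t vb_p0,
                                  int8x8_t va_p1, int8x8_t vb_p1)
    {
      int16x8_t vprod = vmull_s8(vb_p0, va_p0);  /* first K pair   */
      vprod = vmlal_s8(vprod, vb_p1, va_p1);     /* second, fused  */
      return vpadalq_s16(acc, vprod);
    }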