/external/XNNPACK/src/qc8-gemm/gen/
D | 1x8c8-xw-minmax-fp32-avx2.c | 53 __m256i vacc0x45 = _mm256_inserti128_si256(_mm256_castsi128_si256(vbias0x4), vbias0x5, 1); in xnn_qc8_gemm_xw_minmax_fp32_ukernel_1x8c8__avx2() local
D | 1x8c8-minmax-fp32-avx2.c | 53 __m256i vacc0x45 = _mm256_inserti128_si256(_mm256_castsi128_si256(vbias0x4), vbias0x5, 1); in xnn_qc8_gemm_minmax_fp32_ukernel_1x8c8__avx2() local
D | 2x8c8-xw-minmax-fp32-avx2.c | 59 __m256i vacc0x45 = _mm256_inserti128_si256(_mm256_castsi128_si256(vbias0x4), vbias0x5, 1); in xnn_qc8_gemm_xw_minmax_fp32_ukernel_2x8c8__avx2() local
D | 2x8c8-minmax-fp32-avx2.c | 59 __m256i vacc0x45 = _mm256_inserti128_si256(_mm256_castsi128_si256(vbias0x4), vbias0x5, 1); in xnn_qc8_gemm_minmax_fp32_ukernel_2x8c8__avx2() local
D | 3x8c8-xw-minmax-fp32-avx2.c | 65 __m256i vacc0x45 = _mm256_inserti128_si256(_mm256_castsi128_si256(vbias0x4), vbias0x5, 1); in xnn_qc8_gemm_xw_minmax_fp32_ukernel_3x8c8__avx2() local
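
All of the AVX2 hits, here and in the qs8/qu8 listings below, share one idiom: the accumulator pair vacc0x45 is seeded by packing two 128-bit bias vectors, vbias0x4 and vbias0x5, into a single 256-bit register. Below is a minimal standalone sketch of that idiom; the bias array, the _mm_cvtsi32_si128 loads, and the main() scaffolding are illustrative stand-ins for XNNPACK's packed-weights stream `w`, not the generated kernels' exact load sequence.

#include <immintrin.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  /* Hypothetical bias values standing in for the packed-weights stream `w`. */
  const int32_t bias[8] = {10, 20, 30, 40, 50, 60, 70, 80};

  /* Each vbias0xN holds one 32-bit bias in the low lane of a 128-bit vector,
     mirroring the vbias0x4/vbias0x5 temporaries named in the hits above. */
  const __m128i vbias0x4 = _mm_cvtsi32_si128(bias[4]);
  const __m128i vbias0x5 = _mm_cvtsi32_si128(bias[5]);

  /* The idiom from the listing: the cast retypes vbias0x4 as the low 128-bit
     half of a 256-bit register, then the insert places vbias0x5 into the
     high half, yielding one accumulator covering output channels 4 and 5. */
  const __m256i vacc0x45 =
      _mm256_inserti128_si256(_mm256_castsi128_si256(vbias0x4), vbias0x5, 1);

  int32_t out[8];
  _mm256_storeu_si256((__m256i*) out, vacc0x45);
  printf("lane0 = %d, lane4 = %d\n", out[0], out[4]);  /* prints 50 and 60 */
  return 0;
}

Note that _mm256_castsi128_si256 generates no instruction (it only retypes the register), so each two-channel accumulator pair costs a single vinserti128.
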

/external/XNNPACK/src/qs8-gemm/gen/
D | 1x8c8-xw-minmax-fp32-avx2.c | 53 __m256i vacc0x45 = _mm256_inserti128_si256(_mm256_castsi128_si256(vbias0x4), vbias0x5, 1); in xnn_qs8_gemm_xw_minmax_fp32_ukernel_1x8c8__avx2() local
D | 1x8c8-minmax-fp32-avx2.c | 53 __m256i vacc0x45 = _mm256_inserti128_si256(_mm256_castsi128_si256(vbias0x4), vbias0x5, 1); in xnn_qs8_gemm_minmax_fp32_ukernel_1x8c8__avx2() local
D | 1x8c4s2-minmax-rndnu-neon-mull.c | 46 int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2; in xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull() local
D | 2x8c8-xw-minmax-fp32-avx2.c | 59 __m256i vacc0x45 = _mm256_inserti128_si256(_mm256_castsi128_si256(vbias0x4), vbias0x5, 1); in xnn_qs8_gemm_xw_minmax_fp32_ukernel_2x8c8__avx2() local
D | 2x8c8-minmax-fp32-avx2.c | 59 __m256i vacc0x45 = _mm256_inserti128_si256(_mm256_castsi128_si256(vbias0x4), vbias0x5, 1); in xnn_qs8_gemm_minmax_fp32_ukernel_2x8c8__avx2() local
D | 1x8c4-minmax-rndnu-neon-mull-ld1r.c | 46 …int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t)… in xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4__neon_mull_ld1r() local
D | 1x8c4-minmax-rndnu-neon-mull-ld2r.c | 46 …int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t)… in xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4__neon_mull_ld2r() local
D | 1x8c4-minmax-rndnu-neon-mull-dup.c | 46 …int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t)… in xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4__neon_mull_dup() local
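
The NEON rndnu hits in this group (and in the qs8-igemm group below) use a different seeding idiom: vld1_u32 pulls two consecutive 32-bit biases from `w`, vmovl_u32 zero-extends each to 64 bits, and vreinterpretq_s32_u64 views the result as four 32-bit lanes, so each bias lands in an even lane with a zero above it. A minimal sketch, assuming a little-endian NEON-capable target and a hypothetical two-entry bias stream in place of XNNPACK's packed weights:

#include <arm_neon.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  /* Hypothetical bias stream standing in for XNNPACK's packed weights `w`. */
  const uint32_t w[2] = {7, 9};

  /* The idiom from the listing: load two u32 biases, widen each to u64 by
     zero-extension, and reinterpret as s32x4. On a little-endian target the
     lanes come out as {7, 0, 9, 0}: each bias in an even lane, zero above. */
  int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w)));

  int32_t out[4];
  vst1q_s32(out, vacc0x45);
  printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]);  /* 7 0 9 0 */
  return 0;
}

The `w = (const int32_t*) w + 2` bump visible in the non-truncated hits simply advances the weights pointer past the two biases just consumed.
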

/external/XNNPACK/src/qu8-gemm/gen/
D | 1x8c8-minmax-fp32-avx2.c | 53 __m256i vacc0x45 = _mm256_inserti128_si256(_mm256_castsi128_si256(vbias0x4), vbias0x5, 1); in xnn_qu8_gemm_minmax_fp32_ukernel_1x8c8__avx2() local
D | 2x8c8-minmax-fp32-avx2.c | 59 __m256i vacc0x45 = _mm256_inserti128_si256(_mm256_castsi128_si256(vbias0x4), vbias0x5, 1); in xnn_qu8_gemm_minmax_fp32_ukernel_2x8c8__avx2() local

/external/XNNPACK/src/qc8-igemm/gen/
D | 1x8c8-minmax-fp32-avx2.c | 56 __m256i vacc0x45 = _mm256_inserti128_si256(_mm256_castsi128_si256(vbias0x4), vbias0x5, 1); in xnn_qc8_igemm_minmax_fp32_ukernel_1x8c8__avx2() local
D | 2x8c8-minmax-fp32-avx2.c | 60 __m256i vacc0x45 = _mm256_inserti128_si256(_mm256_castsi128_si256(vbias0x4), vbias0x5, 1); in xnn_qc8_igemm_minmax_fp32_ukernel_2x8c8__avx2() local

/external/XNNPACK/src/qs8-igemm/gen/
D | 1x8c8-minmax-fp32-avx2.c | 56 __m256i vacc0x45 = _mm256_inserti128_si256(_mm256_castsi128_si256(vbias0x4), vbias0x5, 1); in xnn_qs8_igemm_minmax_fp32_ukernel_1x8c8__avx2() local
D | 1x8c4s2-minmax-rndnu-neon-mull.c | 49 int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2; in xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull() local
D | 2x8c8-minmax-fp32-avx2.c | 60 __m256i vacc0x45 = _mm256_inserti128_si256(_mm256_castsi128_si256(vbias0x4), vbias0x5, 1); in xnn_qs8_igemm_minmax_fp32_ukernel_2x8c8__avx2() local
D | 1x8c4-minmax-rndnu-neon-mull-dup.c | 49 …int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t)… in xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4__neon_mull_dup() local
D | 1x8c4-minmax-rndnu-neon-mull-ld2r.c | 49 …int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t)… in xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4__neon_mull_ld2r() local
D | 1x8c4-minmax-rndnu-neon-mull-ld1r.c | 49 …int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t)… in xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4__neon_mull_ld1r() local

/external/XNNPACK/src/qu8-igemm/gen/
D | 1x8c8-minmax-fp32-avx2.c | 56 __m256i vacc0x45 = _mm256_inserti128_si256(_mm256_castsi128_si256(vbias0x4), vbias0x5, 1); in xnn_qu8_igemm_minmax_fp32_ukernel_1x8c8__avx2() local
D | 2x8c8-minmax-fp32-avx2.c | 60 __m256i vacc0x45 = _mm256_inserti128_si256(_mm256_castsi128_si256(vbias0x4), vbias0x5, 1); in xnn_qu8_igemm_minmax_fp32_ukernel_2x8c8__avx2() local