/external/XNNPACK/src/qs8-gavgpool/gen/ |
D | 7p7x-minmax-ssse3-c16-acc2.c |
     57  const __m128i vi4x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i4 + 8));  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__ssse3_c16_acc2() local
     75  …st __m128i vxi4x89ABCDEF = _mm_unpacklo_epi8(vi4x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vi4…  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__ssse3_c16_acc2()
    136  const __m128i vi4x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i4 + 8));  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__ssse3_c16_acc2() local
    154  …st __m128i vxi4x89ABCDEF = _mm_unpacklo_epi8(vi4x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vi4…  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__ssse3_c16_acc2()
    234  const __m128i vi4x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i4 + 8));  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__ssse3_c16_acc2() local
    252  …st __m128i vxi4x89ABCDEF = _mm_unpacklo_epi8(vi4x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vi4…  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__ssse3_c16_acc2()
|
D | 7p7x-minmax-sse2-c16-acc2.c |
     57  const __m128i vi4x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i4 + 8));  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__sse2_c16_acc2() local
     75  …st __m128i vxi4x89ABCDEF = _mm_unpacklo_epi8(vi4x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vi4…  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__sse2_c16_acc2()
    136  const __m128i vi4x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i4 + 8));  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__sse2_c16_acc2() local
    154  …st __m128i vxi4x89ABCDEF = _mm_unpacklo_epi8(vi4x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vi4…  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__sse2_c16_acc2()
    234  const __m128i vi4x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i4 + 8));  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__sse2_c16_acc2() local
    252  …st __m128i vxi4x89ABCDEF = _mm_unpacklo_epi8(vi4x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vi4…  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__sse2_c16_acc2()
|
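For context on the truncated vxi4x89ABCDEF lines in the sse2/ssse3 entries above: those kernels sign-extend eight signed 8-bit inputs to 16 bits by interleaving the value bytes with a compare-generated sign mask. The sketch below is a minimal standalone reconstruction of that idiom only; the array names, values, and main() are made up for illustration and are not XNNPACK code.

/* SSE2/SSSE3 int8 -> int16 sign-extension idiom. */
#include <emmintrin.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  const int8_t input[16] = {-1, 2, -3, 4, -5, 6, -7, 8, 0, 0, 0, 0, 0, 0, 0, 0};
  /* Load the low 8 bytes, as the kernels do with _mm_loadl_epi64. */
  const __m128i vi = _mm_loadl_epi64((const __m128i*) input);
  /* 0xFF in lanes where the byte is negative, 0x00 elsewhere. */
  const __m128i vsign = _mm_cmpgt_epi8(_mm_setzero_si128(), vi);
  /* Interleave value bytes with sign bytes: each pair is a sign-extended int16. */
  const __m128i vxi = _mm_unpacklo_epi8(vi, vsign);

  int16_t out[8];
  _mm_storeu_si128((__m128i*) out, vxi);
  for (int i = 0; i < 8; i++) {
    printf("%d ", out[i]);  /* prints -1 2 -3 4 -5 6 -7 8 */
  }
  printf("\n");
  return 0;
}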
D | 7p7x-minmax-neon-c16-acc2.c |
     53  const int8x8_t vi4x89ABCDEF = vld1_s8(i4); i4 += 8;  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c16_acc2() local
     65  vacc0x89ABCDEF = vaddw_s8(vacc0x89ABCDEF, vi4x89ABCDEF);  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c16_acc2()
    107  const int8x8_t vi4x89ABCDEF = vld1_s8(i4); i4 += 8;  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c16_acc2() local
    119  vacc0x89ABCDEF = vaddw_s8(vacc0x89ABCDEF, vi4x89ABCDEF);  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c16_acc2()
    191  const int8x8_t vi4x89ABCDEF = vld1_s8(i4); i4 += 8;  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c16_acc2() local
    203  vacc0x89ABCDEF = vaddw_s8(vacc0x89ABCDEF, vi4x89ABCDEF);  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c16_acc2()
|
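The neon gavgpool entries rely on vaddw_s8, which sign-extends int8x8_t lanes to 16 bits while adding them to an int16x8_t accumulator. A minimal standalone sketch of that accumulate step follows; the row arrays and values are made up for illustration rather than taken from the kernel's row pointers.

/* NEON widening-accumulate idiom: int16x8_t += sign-extended int8x8_t. */
#include <arm_neon.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  const int8_t row0[8] = {1, -2, 3, -4, 5, -6, 7, -8};
  const int8_t row1[8] = {10, 20, 30, 40, -10, -20, -30, -40};

  /* Start the accumulator at the first row, widened to 16 bits. */
  int16x8_t vacc = vmovl_s8(vld1_s8(row0));
  /* Add the next row; vaddw_s8 sign-extends row1 lanes to 16 bits first. */
  vacc = vaddw_s8(vacc, vld1_s8(row1));

  int16_t out[8];
  vst1q_s16(out, vacc);
  for (int i = 0; i < 8; i++) {
    printf("%d ", out[i]);  /* prints 11 18 33 36 -5 -26 -23 -48 */
  }
  printf("\n");
  return 0;
}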
D | 7p7x-minmax-sse2-c24-acc2.c |
     61  const __m128i vi4x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i4 + 8));  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__sse2_c24_acc2() local
     86  …st __m128i vxi4x89ABCDEF = _mm_unpacklo_epi8(vi4x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vi4…  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__sse2_c24_acc2()
    211  const __m128i vi4x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i4 + 8));  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__sse2_c24_acc2() local
    236  …st __m128i vxi4x89ABCDEF = _mm_unpacklo_epi8(vi4x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vi4…  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__sse2_c24_acc2()
    380  const __m128i vi4x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i4 + 8));  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__sse2_c24_acc2() local
    405  …st __m128i vxi4x89ABCDEF = _mm_unpacklo_epi8(vi4x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vi4…  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__sse2_c24_acc2()
|
D | 7p7x-minmax-ssse3-c24-acc2.c |
     61  const __m128i vi4x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i4 + 8));  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__ssse3_c24_acc2() local
     86  …st __m128i vxi4x89ABCDEF = _mm_unpacklo_epi8(vi4x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vi4…  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__ssse3_c24_acc2()
    211  const __m128i vi4x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i4 + 8));  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__ssse3_c24_acc2() local
    236  …st __m128i vxi4x89ABCDEF = _mm_unpacklo_epi8(vi4x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vi4…  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__ssse3_c24_acc2()
    380  const __m128i vi4x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i4 + 8));  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__ssse3_c24_acc2() local
    405  …st __m128i vxi4x89ABCDEF = _mm_unpacklo_epi8(vi4x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vi4…  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__ssse3_c24_acc2()
|
D | 7p7x-minmax-neon-c24-acc2.c |
     57  const int8x8_t vi4x89ABCDEF = vld1_s8(i4); i4 += 8;  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c24_acc2() local
     74  vacc0x89ABCDEF = vaddw_s8(vacc0x89ABCDEF, vi4x89ABCDEF);  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c24_acc2()
    157  const int8x8_t vi4x89ABCDEF = vld1_s8(i4); i4 += 8;  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c24_acc2() local
    174  vacc0x89ABCDEF = vaddw_s8(vacc0x89ABCDEF, vi4x89ABCDEF);  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c24_acc2()
    292  const int8x8_t vi4x89ABCDEF = vld1_s8(i4); i4 += 8;  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c24_acc2() local
    309  vacc0x89ABCDEF = vaddw_s8(vacc0x89ABCDEF, vi4x89ABCDEF);  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c24_acc2()
|
D | 7x-minmax-ssse3-c16-acc2.c |
     74  const __m128i vi4x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i4 + 8));  in xnn_qs8_gavgpool_minmax_ukernel_7x__ssse3_c16_acc2() local
     92  …st __m128i vxi4x89ABCDEF = _mm_unpacklo_epi8(vi4x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vi4…  in xnn_qs8_gavgpool_minmax_ukernel_7x__ssse3_c16_acc2()
|
D | 7x-minmax-sse2-c16-acc2.c |
     74  const __m128i vi4x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i4 + 8));  in xnn_qs8_gavgpool_minmax_ukernel_7x__sse2_c16_acc2() local
     92  …st __m128i vxi4x89ABCDEF = _mm_unpacklo_epi8(vi4x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vi4…  in xnn_qs8_gavgpool_minmax_ukernel_7x__sse2_c16_acc2()
|
D | 7p7x-minmax-neon-c32-acc2.c |
     61  const int8x8_t vi4x89ABCDEF = vld1_s8(i4); i4 += 8;  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c32_acc2() local
     83  vacc0x89ABCDEF = vaddw_s8(vacc0x89ABCDEF, vi4x89ABCDEF);  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c32_acc2()
    178  const int8x8_t vi4x89ABCDEF = vld1_s8(i4); i4 += 8;  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c32_acc2() local
    200  vacc0x89ABCDEF = vaddw_s8(vacc0x89ABCDEF, vi4x89ABCDEF);  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c32_acc2()
    332  const int8x8_t vi4x89ABCDEF = vld1_s8(i4); i4 += 8;  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c32_acc2() local
    354  vacc0x89ABCDEF = vaddw_s8(vacc0x89ABCDEF, vi4x89ABCDEF);  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c32_acc2()
|
D | 7x-minmax-ssse3-c24-acc2.c |
     78  const __m128i vi4x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i4 + 8));  in xnn_qs8_gavgpool_minmax_ukernel_7x__ssse3_c24_acc2() local
    103  …st __m128i vxi4x89ABCDEF = _mm_unpacklo_epi8(vi4x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vi4…  in xnn_qs8_gavgpool_minmax_ukernel_7x__ssse3_c24_acc2()
|
D | 7x-minmax-sse2-c24-acc2.c |
     78  const __m128i vi4x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i4 + 8));  in xnn_qs8_gavgpool_minmax_ukernel_7x__sse2_c24_acc2() local
    103  …st __m128i vxi4x89ABCDEF = _mm_unpacklo_epi8(vi4x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vi4…  in xnn_qs8_gavgpool_minmax_ukernel_7x__sse2_c24_acc2()
|
D | 7x-minmax-neon-c16-acc2.c |
     76  const int8x8_t vi4x89ABCDEF = vld1_s8(i4); i4 += 8;  in xnn_qs8_gavgpool_minmax_ukernel_7x__neon_c16_acc2() local
     88  vacc0x89ABCDEF = vaddw_s8(vacc0x89ABCDEF, vi4x89ABCDEF);  in xnn_qs8_gavgpool_minmax_ukernel_7x__neon_c16_acc2()
|
D | 7x-minmax-neon-c24-acc2.c |
     80  const int8x8_t vi4x89ABCDEF = vld1_s8(i4); i4 += 8;  in xnn_qs8_gavgpool_minmax_ukernel_7x__neon_c24_acc2() local
     97  vacc0x89ABCDEF = vaddw_s8(vacc0x89ABCDEF, vi4x89ABCDEF);  in xnn_qs8_gavgpool_minmax_ukernel_7x__neon_c24_acc2()
|
/external/XNNPACK/src/qs8-dwconv/gen/ |
D | up16x9-minmax-neon-mul16.c |
    137  const int16x8_t vi4x89ABCDEF = vmovl_s8(vld1_s8(i4)); i4 += 8;  in xnn_qs8_dwconv_minmax_ukernel_up16x9__neon_mul16() local
    142  vacc89AB = vmlal_s16(vacc89AB, vget_low_s16(vi4x89ABCDEF), vget_low_s16(vk4x89ABCDEF));  in xnn_qs8_dwconv_minmax_ukernel_up16x9__neon_mul16()
    143  vaccCDEF = vmlal_s16(vaccCDEF, vget_high_s16(vi4x89ABCDEF), vget_high_s16(vk4x89ABCDEF));  in xnn_qs8_dwconv_minmax_ukernel_up16x9__neon_mul16()
|
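The neon-mul16 dwconv entries widen both the input and the kernel tap to int16 with vmovl_s8, then accumulate into two int32x4_t halves via vmlal_s16 on the low and high parts. A minimal sketch of one such multiply-accumulate is below; the arrays and values are illustrative, and the accumulators start at zero here whereas the generated kernels are typically initialized from a bias in the packed weights.

/* mul16 multiply-accumulate: int8 inputs widened to int16, vmlal_s16 into int32. */
#include <arm_neon.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  const int8_t input[8]  = {1, 2, 3, 4, 5, 6, 7, 8};
  const int8_t kernel[8] = {-1, -1, 2, 2, -3, -3, 4, 4};

  const int16x8_t vi = vmovl_s8(vld1_s8(input));
  const int16x8_t vk = vmovl_s8(vld1_s8(kernel));

  /* Two 32-bit accumulators cover the 8 channels. */
  int32x4_t vacc_lo = vdupq_n_s32(0);
  int32x4_t vacc_hi = vdupq_n_s32(0);
  vacc_lo = vmlal_s16(vacc_lo, vget_low_s16(vi), vget_low_s16(vk));
  vacc_hi = vmlal_s16(vacc_hi, vget_high_s16(vi), vget_high_s16(vk));

  int32_t out[8];
  vst1q_s32(out, vacc_lo);
  vst1q_s32(out + 4, vacc_hi);
  for (int i = 0; i < 8; i++) {
    printf("%d ", out[i]);  /* prints -1 -2 6 8 -15 -18 28 32 */
  }
  printf("\n");
  return 0;
}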
D | up24x9-minmax-neon-mul16.c |
    155  const int16x8_t vi4x89ABCDEF = vmovl_s8(vld1_s8(i4)); i4 += 8;  in xnn_qs8_dwconv_minmax_ukernel_up24x9__neon_mul16() local
    162  vacc89AB = vmlal_s16(vacc89AB, vget_low_s16(vi4x89ABCDEF), vget_low_s16(vk4x89ABCDEF));  in xnn_qs8_dwconv_minmax_ukernel_up24x9__neon_mul16()
    163  vaccCDEF = vmlal_s16(vaccCDEF, vget_high_s16(vi4x89ABCDEF), vget_high_s16(vk4x89ABCDEF));  in xnn_qs8_dwconv_minmax_ukernel_up24x9__neon_mul16()
|
D | up16x9-minmax-avx2-mul32.c |
    125  const __m256i vi4x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i4 + 8)));  in xnn_qs8_dwconv_minmax_ukernel_up16x9__avx2_mul32() local
    130  vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi4x89ABCDEF, vk4x89ABCDEF));  in xnn_qs8_dwconv_minmax_ukernel_up16x9__avx2_mul32()
|
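The avx2-mul32 entries instead sign-extend straight to 32-bit lanes with _mm256_cvtepi8_epi32 and accumulate with _mm256_mullo_epi32 plus _mm256_add_epi32. A minimal sketch under the same simplification (zero accumulator standing in for the kernel's bias; array names and values are illustrative); compile with -mavx2.

/* mul32 multiply-accumulate: int8 inputs sign-extended to int32 lanes. */
#include <immintrin.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  const int8_t input[8]  = {1, -2, 3, -4, 5, -6, 7, -8};
  const int8_t kernel[8] = {2, 2, 2, 2, -1, -1, -1, -1};

  const __m256i vi = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) input));
  const __m256i vk = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) kernel));

  /* Accumulator started at zero here; the real kernels load a bias first. */
  __m256i vacc = _mm256_setzero_si256();
  vacc = _mm256_add_epi32(vacc, _mm256_mullo_epi32(vi, vk));

  int32_t out[8];
  _mm256_storeu_si256((__m256i*) out, vacc);
  for (int i = 0; i < 8; i++) {
    printf("%d ", out[i]);  /* prints 2 -4 6 -8 -5 6 -7 8 */
  }
  printf("\n");
  return 0;
}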
D | up32x9-minmax-neon-mul16.c |
    173  const int16x8_t vi4x89ABCDEF = vmovl_s8(vld1_s8(i4)); i4 += 8;  in xnn_qs8_dwconv_minmax_ukernel_up32x9__neon_mul16() local
    182  vacc89AB = vmlal_s16(vacc89AB, vget_low_s16(vi4x89ABCDEF), vget_low_s16(vk4x89ABCDEF));  in xnn_qs8_dwconv_minmax_ukernel_up32x9__neon_mul16()
    183  vaccCDEF = vmlal_s16(vaccCDEF, vget_high_s16(vi4x89ABCDEF), vget_high_s16(vk4x89ABCDEF));  in xnn_qs8_dwconv_minmax_ukernel_up32x9__neon_mul16()
|
D | up24x9-minmax-avx2-mul32.c |
    138  const __m256i vi4x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i4 + 8)));  in xnn_qs8_dwconv_minmax_ukernel_up24x9__avx2_mul32() local
    145  vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi4x89ABCDEF, vk4x89ABCDEF));  in xnn_qs8_dwconv_minmax_ukernel_up24x9__avx2_mul32()
|
D | up16x9-minmax-wasmsimd-mul16.c |
    147  const v128_t vi4x89ABCDEF = wasm_i16x8_load_8x8(i4 + 8);  in xnn_qs8_dwconv_minmax_ukernel_up16x9__wasmsimd_mul16() local
    152  const v128_t vprod4x89ABCDEF = wasm_i16x8_mul(vi4x89ABCDEF, vk4x89ABCDEF);  in xnn_qs8_dwconv_minmax_ukernel_up16x9__wasmsimd_mul16()
|
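The wasmsimd-mul16 entry loads and sign-extends eight bytes to 16-bit lanes with wasm_i16x8_load_8x8 and forms the per-channel products with wasm_i16x8_mul. A minimal sketch using the intrinsic names as they appear in the listing (newer wasm_simd128.h headers may spell the load wasm_i16x8_load8x8); arrays and values are illustrative; compile with clang --target=wasm32 -msimd128.

/* WAsm SIMD mul16 product of sign-extended int8 inputs and kernel taps. */
#include <wasm_simd128.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  const int8_t input[8]  = {1, -2, 3, -4, 5, -6, 7, -8};
  const int8_t kernel[8] = {3, 3, 3, 3, -2, -2, -2, -2};

  const v128_t vi = wasm_i16x8_load_8x8(input);
  const v128_t vk = wasm_i16x8_load_8x8(kernel);
  const v128_t vprod = wasm_i16x8_mul(vi, vk);

  int16_t out[8];
  wasm_v128_store(out, vprod);
  for (int i = 0; i < 8; i++) {
    printf("%d ", out[i]);  /* prints 3 -6 9 -12 -10 12 -14 16 */
  }
  printf("\n");
  return 0;
}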
/external/XNNPACK/src/f32-dwconv/gen/ |
D | up16x9-minmax-fma3.c |
    128  const __m256 vi4x89ABCDEF = _mm256_loadu_ps(i4 + 8);  in xnn_f32_dwconv_minmax_ukernel_up16x9__fma3() local
    134  vacc89ABCDEFp0 = _mm256_fmadd_ps(vi4x89ABCDEF, vk4x89ABCDEF, vacc89ABCDEFp0);  in xnn_f32_dwconv_minmax_ukernel_up16x9__fma3()
|
D | up16x9-minmax-fma3-acc2.c |
    128  const __m256 vi4x89ABCDEF = _mm256_loadu_ps(i4 + 8);  in xnn_f32_dwconv_minmax_ukernel_up16x9__fma3_acc2() local
    134  vacc89ABCDEFp0 = _mm256_fmadd_ps(vi4x89ABCDEF, vk4x89ABCDEF, vacc89ABCDEFp0);  in xnn_f32_dwconv_minmax_ukernel_up16x9__fma3_acc2()
|
D | up16x9-minmax-avx-acc2.c |
    128  const __m256 vi4x89ABCDEF = _mm256_loadu_ps(i4 + 8);  in xnn_f32_dwconv_minmax_ukernel_up16x9__avx_acc2() local
    134  vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, _mm256_mul_ps(vi4x89ABCDEF, vk4x89ABCDEF));  in xnn_f32_dwconv_minmax_ukernel_up16x9__avx_acc2()
|
D | up16x9-minmax-avx.c |
    128  const __m256 vi4x89ABCDEF = _mm256_loadu_ps(i4 + 8);  in xnn_f32_dwconv_minmax_ukernel_up16x9__avx() local
    134  vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, _mm256_mul_ps(vi4x89ABCDEF, vk4x89ABCDEF));  in xnn_f32_dwconv_minmax_ukernel_up16x9__avx()
|
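The f32 dwconv entries above differ only in how the multiply-add is issued: the fma3 variants use the fused _mm256_fmadd_ps, while the plain avx variants use a separate _mm256_mul_ps and _mm256_add_ps. A minimal sketch of the two flavours follows, with a zero accumulator standing in for the kernel's bias and illustrative array values; compile with -mfma (or -mavx for the non-fused path).

/* f32 depthwise-conv style multiply-accumulate, fused vs. non-fused. */
#include <immintrin.h>
#include <stdio.h>

int main(void) {
  const float input[8]  = {1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f};
  const float kernel[8] = {0.5f, 0.5f, 0.5f, 0.5f, -1.f, -1.f, -1.f, -1.f};

  const __m256 vi = _mm256_loadu_ps(input);
  const __m256 vk = _mm256_loadu_ps(kernel);
  __m256 vacc = _mm256_setzero_ps();  /* the real kernels start from a bias */

  /* FMA3 path: one fused instruction per tap. */
  vacc = _mm256_fmadd_ps(vi, vk, vacc);
  /* The AVX-only path would instead be:
   *   vacc = _mm256_add_ps(vacc, _mm256_mul_ps(vi, vk));
   */

  float out[8];
  _mm256_storeu_ps(out, vacc);
  for (int i = 0; i < 8; i++) {
    printf("%.1f ", out[i]);  /* prints 0.5 1.0 1.5 2.0 -5.0 -6.0 -7.0 -8.0 */
  }
  printf("\n");
  return 0;
}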
/external/XNNPACK/src/f16-dwconv/gen/ |
D | up16x9-minmax-neonfp16arith-acc2.c |
    120  const float16x8_t vi4x89ABCDEF = vld1q_f16(i4); i4 += 8;  in xnn_f16_dwconv_minmax_ukernel_up16x9__neonfp16arith_acc2() local
    124  vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi4x89ABCDEF, vk4x89ABCDEF);  in xnn_f16_dwconv_minmax_ukernel_up16x9__neonfp16arith_acc2()
|
D | up16x9-minmax-neonfp16arith.c |
    120  const float16x8_t vi4x89ABCDEF = vld1q_f16(i4); i4 += 8;  in xnn_f16_dwconv_minmax_ukernel_up16x9__neonfp16arith() local
    124  vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi4x89ABCDEF, vk4x89ABCDEF);  in xnn_f16_dwconv_minmax_ukernel_up16x9__neonfp16arith()
|
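The f16 dwconv entries perform the same multiply-accumulate directly on float16x8_t vectors with vfmaq_f16, which requires the neonfp16arith extension. A minimal sketch, again with a zero accumulator in place of the bias and illustrative array values; builds with, for example, -march=armv8.2-a+fp16.

/* float16 fused multiply-accumulate on 8 lanes. */
#include <arm_neon.h>
#include <stdio.h>

int main(void) {
  const __fp16 input[8]  = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f};
  const __fp16 kernel[8] = {0.5f, 0.5f, 0.5f, 0.5f, -1.0f, -1.0f, -1.0f, -1.0f};

  const float16x8_t vi = vld1q_f16(input);
  const float16x8_t vk = vld1q_f16(kernel);
  float16x8_t vacc = vdupq_n_f16(0.0f);  /* the real kernels start from a bias */
  /* vfmaq_f16(acc, a, b) computes acc + a * b lane-wise. */
  vacc = vfmaq_f16(vacc, vi, vk);

  __fp16 out[8];
  vst1q_f16(out, vacc);
  for (int i = 0; i < 8; i++) {
    printf("%.1f ", (float) out[i]);  /* prints 0.5 1.0 1.5 2.0 -5.0 -6.0 -7.0 -8.0 */
  }
  printf("\n");
  return 0;
}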