/external/XNNPACK/src/f32-dwconv/gen/ |
D | up16x9-minmax-fma3.c |
  131 const __m256 vk4x01234567 = _mm256_load_ps(w + 80); in xnn_f32_dwconv_minmax_ukernel_up16x9__fma3() local
  133 vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0); in xnn_f32_dwconv_minmax_ukernel_up16x9__fma3()
  214 const __m256 vk4x01234567 = _mm256_load_ps(w + 80); in xnn_f32_dwconv_minmax_ukernel_up16x9__fma3() local
  215 vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0); in xnn_f32_dwconv_minmax_ukernel_up16x9__fma3()
  274 const __m256 vk4x01234567 = _mm256_load_ps(w + 80); in xnn_f32_dwconv_minmax_ukernel_up16x9__fma3() local
  275 vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0); in xnn_f32_dwconv_minmax_ukernel_up16x9__fma3()
|
D | up16x9-minmax-fma3-acc2.c |
  131 const __m256 vk4x01234567 = _mm256_load_ps(w + 80); in xnn_f32_dwconv_minmax_ukernel_up16x9__fma3_acc2() local
  133 vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0); in xnn_f32_dwconv_minmax_ukernel_up16x9__fma3_acc2()
  217 const __m256 vk4x01234567 = _mm256_load_ps(w + 80); in xnn_f32_dwconv_minmax_ukernel_up16x9__fma3_acc2() local
  218 vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0); in xnn_f32_dwconv_minmax_ukernel_up16x9__fma3_acc2()
  279 const __m256 vk4x01234567 = _mm256_load_ps(w + 80); in xnn_f32_dwconv_minmax_ukernel_up16x9__fma3_acc2() local
  280 vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0); in xnn_f32_dwconv_minmax_ukernel_up16x9__fma3_acc2()
|
D | up16x9-minmax-avx-acc2.c |
  131 const __m256 vk4x01234567 = _mm256_load_ps(w + 80); in xnn_f32_dwconv_minmax_ukernel_up16x9__avx_acc2() local
  133 vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567)); in xnn_f32_dwconv_minmax_ukernel_up16x9__avx_acc2()
  217 const __m256 vk4x01234567 = _mm256_load_ps(w + 80); in xnn_f32_dwconv_minmax_ukernel_up16x9__avx_acc2() local
  218 vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567)); in xnn_f32_dwconv_minmax_ukernel_up16x9__avx_acc2()
  279 const __m256 vk4x01234567 = _mm256_load_ps(w + 80); in xnn_f32_dwconv_minmax_ukernel_up16x9__avx_acc2() local
  280 vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567)); in xnn_f32_dwconv_minmax_ukernel_up16x9__avx_acc2()
|
D | up16x9-minmax-avx.c |
  131 const __m256 vk4x01234567 = _mm256_load_ps(w + 80); in xnn_f32_dwconv_minmax_ukernel_up16x9__avx() local
  133 vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567)); in xnn_f32_dwconv_minmax_ukernel_up16x9__avx()
  214 const __m256 vk4x01234567 = _mm256_load_ps(w + 80); in xnn_f32_dwconv_minmax_ukernel_up16x9__avx() local
  215 vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567)); in xnn_f32_dwconv_minmax_ukernel_up16x9__avx()
  274 const __m256 vk4x01234567 = _mm256_load_ps(w + 80); in xnn_f32_dwconv_minmax_ukernel_up16x9__avx() local
  275 vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567)); in xnn_f32_dwconv_minmax_ukernel_up16x9__avx()
|
D | up8x9-minmax-avx.c |
  117 const __m256 vk4x01234567 = _mm256_load_ps(w + 40); in xnn_f32_dwconv_minmax_ukernel_up8x9__avx() local
  118 vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567)); in xnn_f32_dwconv_minmax_ukernel_up8x9__avx()
  177 const __m256 vk4x01234567 = _mm256_load_ps(w + 40); in xnn_f32_dwconv_minmax_ukernel_up8x9__avx() local
  178 vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567)); in xnn_f32_dwconv_minmax_ukernel_up8x9__avx()
|
D | up8x9-minmax-fma3-acc2.c |
  117 const __m256 vk4x01234567 = _mm256_load_ps(w + 40); in xnn_f32_dwconv_minmax_ukernel_up8x9__fma3_acc2() local
  118 vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0); in xnn_f32_dwconv_minmax_ukernel_up8x9__fma3_acc2()
  179 const __m256 vk4x01234567 = _mm256_load_ps(w + 40); in xnn_f32_dwconv_minmax_ukernel_up8x9__fma3_acc2() local
  180 vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0); in xnn_f32_dwconv_minmax_ukernel_up8x9__fma3_acc2()
|
D | up8x9-minmax-fma3.c |
  117 const __m256 vk4x01234567 = _mm256_load_ps(w + 40); in xnn_f32_dwconv_minmax_ukernel_up8x9__fma3() local
  118 vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0); in xnn_f32_dwconv_minmax_ukernel_up8x9__fma3()
  177 const __m256 vk4x01234567 = _mm256_load_ps(w + 40); in xnn_f32_dwconv_minmax_ukernel_up8x9__fma3() local
  178 vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0); in xnn_f32_dwconv_minmax_ukernel_up8x9__fma3()
|
D | up8x9-minmax-avx-acc2.c |
  117 const __m256 vk4x01234567 = _mm256_load_ps(w + 40); in xnn_f32_dwconv_minmax_ukernel_up8x9__avx_acc2() local
  118 vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567)); in xnn_f32_dwconv_minmax_ukernel_up8x9__avx_acc2()
  179 const __m256 vk4x01234567 = _mm256_load_ps(w + 40); in xnn_f32_dwconv_minmax_ukernel_up8x9__avx_acc2() local
  180 vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567)); in xnn_f32_dwconv_minmax_ukernel_up8x9__avx_acc2()
|
D | up8x25-minmax-fma3-acc2.c |
  197 const __m256 vk4x01234567 = _mm256_load_ps(w + 40); in xnn_f32_dwconv_minmax_ukernel_up8x25__fma3_acc2() local
  198 vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0); in xnn_f32_dwconv_minmax_ukernel_up8x25__fma3_acc2()
  355 const __m256 vk4x01234567 = _mm256_load_ps(w + 40); in xnn_f32_dwconv_minmax_ukernel_up8x25__fma3_acc2() local
  356 vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0); in xnn_f32_dwconv_minmax_ukernel_up8x25__fma3_acc2()
|
D | up8x25-minmax-fma3.c |
  197 const __m256 vk4x01234567 = _mm256_load_ps(w + 40); in xnn_f32_dwconv_minmax_ukernel_up8x25__fma3() local
  198 vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0); in xnn_f32_dwconv_minmax_ukernel_up8x25__fma3()
  353 const __m256 vk4x01234567 = _mm256_load_ps(w + 40); in xnn_f32_dwconv_minmax_ukernel_up8x25__fma3() local
  354 vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0); in xnn_f32_dwconv_minmax_ukernel_up8x25__fma3()
|
D | up8x25-minmax-avx.c |
  197 const __m256 vk4x01234567 = _mm256_load_ps(w + 40); in xnn_f32_dwconv_minmax_ukernel_up8x25__avx() local
  198 vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567)); in xnn_f32_dwconv_minmax_ukernel_up8x25__avx()
  353 const __m256 vk4x01234567 = _mm256_load_ps(w + 40); in xnn_f32_dwconv_minmax_ukernel_up8x25__avx() local
  354 vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567)); in xnn_f32_dwconv_minmax_ukernel_up8x25__avx()
|
D | up16x25-minmax-fma3.c |
  211 const __m256 vk4x01234567 = _mm256_load_ps(w + 80); in xnn_f32_dwconv_minmax_ukernel_up16x25__fma3() local
  213 vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0); in xnn_f32_dwconv_minmax_ukernel_up16x25__fma3()
  438 const __m256 vk4x01234567 = _mm256_load_ps(w + 80); in xnn_f32_dwconv_minmax_ukernel_up16x25__fma3() local
  439 vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0); in xnn_f32_dwconv_minmax_ukernel_up16x25__fma3()
  594 const __m256 vk4x01234567 = _mm256_load_ps(w + 80); in xnn_f32_dwconv_minmax_ukernel_up16x25__fma3() local
  595 vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0); in xnn_f32_dwconv_minmax_ukernel_up16x25__fma3()
|
D | up16x25-minmax-fma3-acc2.c |
  211 const __m256 vk4x01234567 = _mm256_load_ps(w + 80); in xnn_f32_dwconv_minmax_ukernel_up16x25__fma3_acc2() local
  213 vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0); in xnn_f32_dwconv_minmax_ukernel_up16x25__fma3_acc2()
  441 const __m256 vk4x01234567 = _mm256_load_ps(w + 80); in xnn_f32_dwconv_minmax_ukernel_up16x25__fma3_acc2() local
  442 vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0); in xnn_f32_dwconv_minmax_ukernel_up16x25__fma3_acc2()
  599 const __m256 vk4x01234567 = _mm256_load_ps(w + 80); in xnn_f32_dwconv_minmax_ukernel_up16x25__fma3_acc2() local
  600 vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0); in xnn_f32_dwconv_minmax_ukernel_up16x25__fma3_acc2()
|
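
All of the f32 entries above are instances of one generated pattern: for filter tap 4 the kernel loads eight packed weights for the current channel block and multiply-accumulates them against the matching inputs, using a fused multiply-add in the fma3 variants and a separate multiply plus add in the plain avx variants. The sketch below shows just that step; the pointer names w and i4, the fixed w + 40 offset, and the wrapper function are illustrative assumptions, not the generated code itself.

#include <immintrin.h>

/* Minimal sketch of the tap-4 accumulation matched above, for one block of
 * 8 channels.  Assumes `w` points into the packed weight block of an up8x9
 * kernel (tap-4 weights at w + 40; the up16x9 variants read from w + 80)
 * and `i4` at the tap-4 input row; both names are illustrative. */
static inline __m256 dwconv_tap4_acc(const float* w, const float* i4, __m256 vacc01234567p0) {
  const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
  const __m256 vk4x01234567 = _mm256_load_ps(w + 40);  /* packed weights are 32-byte aligned */
#if defined(__FMA__)
  /* fma3 variants: one fused multiply-add per tap */
  return _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);
#else
  /* avx variants: separate multiply and add */
  return _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
#endif
}

The acc2 files differ only in keeping two partial accumulators and summing them at the end, which shortens the dependency chain on the accumulator register.
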
/external/XNNPACK/src/f16-dwconv/gen/ |
D | up16x9-minmax-neonfp16arith-acc2.c |
  121 const float16x8_t vk4x01234567 = vld1q_f16(w); w += 8; in xnn_f16_dwconv_minmax_ukernel_up16x9__neonfp16arith_acc2() local
  123 vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567); in xnn_f16_dwconv_minmax_ukernel_up16x9__neonfp16arith_acc2()
  187 const float16x8_t vk4x01234567 = vld1q_f16(w + 72); in xnn_f16_dwconv_minmax_ukernel_up16x9__neonfp16arith_acc2() local
  188 vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567); in xnn_f16_dwconv_minmax_ukernel_up16x9__neonfp16arith_acc2()
  235 const float16x8_t vk4x01234567 = vld1q_f16(w + 80); in xnn_f16_dwconv_minmax_ukernel_up16x9__neonfp16arith_acc2() local
  236 vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567); in xnn_f16_dwconv_minmax_ukernel_up16x9__neonfp16arith_acc2()
|
D | up16x9-minmax-neonfp16arith.c |
  121 const float16x8_t vk4x01234567 = vld1q_f16(w); w += 8; in xnn_f16_dwconv_minmax_ukernel_up16x9__neonfp16arith() local
  123 vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567); in xnn_f16_dwconv_minmax_ukernel_up16x9__neonfp16arith()
  184 const float16x8_t vk4x01234567 = vld1q_f16(w + 72); in xnn_f16_dwconv_minmax_ukernel_up16x9__neonfp16arith() local
  185 vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567); in xnn_f16_dwconv_minmax_ukernel_up16x9__neonfp16arith()
  230 const float16x8_t vk4x01234567 = vld1q_f16(w + 80); in xnn_f16_dwconv_minmax_ukernel_up16x9__neonfp16arith() local
  231 vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567); in xnn_f16_dwconv_minmax_ukernel_up16x9__neonfp16arith()
|
D | up8x9-minmax-neonfp16arith-acc2.c |
  107 const float16x8_t vk4x01234567 = vld1q_f16(w); w += 8; in xnn_f16_dwconv_minmax_ukernel_up8x9__neonfp16arith_acc2() local
  108 vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567); in xnn_f16_dwconv_minmax_ukernel_up8x9__neonfp16arith_acc2()
  155 const float16x8_t vk4x01234567 = vld1q_f16(w); w += 8; in xnn_f16_dwconv_minmax_ukernel_up8x9__neonfp16arith_acc2() local
  156 vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567); in xnn_f16_dwconv_minmax_ukernel_up8x9__neonfp16arith_acc2()
|
D | up8x9-minmax-neonfp16arith.c |
  107 const float16x8_t vk4x01234567 = vld1q_f16(w); w += 8; in xnn_f16_dwconv_minmax_ukernel_up8x9__neonfp16arith() local
  108 vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567); in xnn_f16_dwconv_minmax_ukernel_up8x9__neonfp16arith()
  153 const float16x8_t vk4x01234567 = vld1q_f16(w); w += 8; in xnn_f16_dwconv_minmax_ukernel_up8x9__neonfp16arith() local
  154 vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567); in xnn_f16_dwconv_minmax_ukernel_up8x9__neonfp16arith()
|
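
The f16 kernels above follow the same per-tap shape with NEON half-precision arithmetic: load eight fp16 weights and fuse them into the accumulator with vfmaq_f16. A rough equivalent of the matched lines, assuming an AArch64 target built with the FP16 arithmetic extension (for example -march=armv8.2-a+fp16) and illustrative pointer names:

#include <arm_neon.h>  /* requires a target with the fp16 arithmetic extension */

/* Sketch of the tap-4 step from the f16 entries above; `w` and `i4` are
 * illustrative names for the packed-weight pointer and the tap-4 input row. */
static inline float16x8_t f16_dwconv_tap4_acc(const float16_t* w, const float16_t* i4,
                                              float16x8_t vacc01234567p0) {
  const float16x8_t vi4x01234567 = vld1q_f16(i4);
  const float16x8_t vk4x01234567 = vld1q_f16(w);  /* generated code loads from w, w + 72, or w + 80 */
  return vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567);  /* vacc += vi4 * vk4 */
}
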
/external/XNNPACK/src/qs8-dwconv/gen/ |
D | up8x9-minmax-neon-mul16.c |
  118 …const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((uintptr_t) w + 8 * sizeof… in xnn_qs8_dwconv_minmax_ukernel_up8x9__neon_mul16() local
  120 vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567)); in xnn_qs8_dwconv_minmax_ukernel_up8x9__neon_mul16()
  121 vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567)); in xnn_qs8_dwconv_minmax_ukernel_up8x9__neon_mul16()
  198 …const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8((const void*) ((uintptr_t) w + 32 * sizeof(int8_t)… in xnn_qs8_dwconv_minmax_ukernel_up8x9__neon_mul16() local
  200 vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567)); in xnn_qs8_dwconv_minmax_ukernel_up8x9__neon_mul16()
  201 vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567)); in xnn_qs8_dwconv_minmax_ukernel_up8x9__neon_mul16()
|
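
The *-neon-mul16 kernels here (this entry and the up16x9/up24x9 variants further down) widen the int8 weights and inputs to int16 with vmovl_s8, then multiply-accumulate the low and high halves into two int32x4 accumulators with vmlal_s16, which is what the vacc0123/vacc4567 lines show. A hedged sketch of that single step, with illustrative pointer names:

#include <arm_neon.h>

/* Sketch of the tap-4 step in the qs8 neon-mul16 kernels listed here: widen
 * 8 int8 inputs and weights to int16, then accumulate the low/high halves
 * into two int32x4 accumulators.  `i4` and `w8` are illustrative names. */
static inline void qs8_neon_mul16_tap4_acc(const int8_t* i4, const int8_t* w8,
                                           int32x4_t* vacc0123, int32x4_t* vacc4567) {
  const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4));
  const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w8));
  *vacc0123 = vmlal_s16(*vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
  *vacc4567 = vmlal_s16(*vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
}
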
D | up16x9-minmax-neon-mul16.c |
  136 …const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((uintptr_t) w + 8 * sizeof… in xnn_qs8_dwconv_minmax_ukernel_up16x9__neon_mul16() local
  140 vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567)); in xnn_qs8_dwconv_minmax_ukernel_up16x9__neon_mul16()
  141 vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567)); in xnn_qs8_dwconv_minmax_ukernel_up16x9__neon_mul16()
  245 const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8((const void*) (k + 56))); in xnn_qs8_dwconv_minmax_ukernel_up16x9__neon_mul16() local
  247 vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567)); in xnn_qs8_dwconv_minmax_ukernel_up16x9__neon_mul16()
  248 vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567)); in xnn_qs8_dwconv_minmax_ukernel_up16x9__neon_mul16()
|
D | up8x9-minmax-avx2-mul32.c |
  111 …const __m256i vk4x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w … in xnn_qs8_dwconv_minmax_ukernel_up8x9__avx2_mul32() local
  114 vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi4x01234567, vk4x01234567)); in xnn_qs8_dwconv_minmax_ukernel_up8x9__avx2_mul32()
  202 …const __m256i vk4x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w … in xnn_qs8_dwconv_minmax_ukernel_up8x9__avx2_mul32() local
  204 … vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi4x01234567, vk4x01234567)); in xnn_qs8_dwconv_minmax_ukernel_up8x9__avx2_mul32()
|
D | up8x9-minmax-sse2-mul16.c |
  140 …const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t)… in xnn_qs8_dwconv_minmax_ukernel_up8x9__sse2_mul16() local
  144 …st __m128i vxk4x01234567 = _mm_unpacklo_epi8(vk4x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk4… in xnn_qs8_dwconv_minmax_ukernel_up8x9__sse2_mul16()
  325 …const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t)… in xnn_qs8_dwconv_minmax_ukernel_up8x9__sse2_mul16() local
  328 …st __m128i vxk4x01234567 = _mm_unpacklo_epi8(vk4x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk4… in xnn_qs8_dwconv_minmax_ukernel_up8x9__sse2_mul16()
|
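
The *-sse2-mul16 entry above (and the *-ssse3-mul16 one further down) is where the truncated vxk4x01234567 lines come from: SSE2 has no direct int8-to-int16 widening, so the kernel sign-extends by interleaving each byte with a computed sign mask. _mm_cmpgt_epi8(zero, v) is 0xFF exactly for negative bytes, and _mm_unpacklo_epi8(v, sign) therefore yields the int16 extension of the low eight lanes. A small sketch of that trick alone; the function name is illustrative:

#include <emmintrin.h>  /* SSE2 */

/* Sign-extend the low 8 int8 lanes of vk4x01234567 to int16, the way the
 * sse2/ssse3 mul16 kernels do before their 16-bit multiplies. */
static inline __m128i sign_extend_lo_epi8_to_epi16(__m128i vk4x01234567) {
  const __m128i vsign = _mm_cmpgt_epi8(_mm_setzero_si128(), vk4x01234567);  /* 0xFF where negative */
  return _mm_unpacklo_epi8(vk4x01234567, vsign);  /* value byte + sign byte = int16 lane */
}
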
D | up24x9-minmax-neon-mul16.c |
  154 …const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((uintptr_t) w + 8 * sizeof… in xnn_qs8_dwconv_minmax_ukernel_up24x9__neon_mul16() local
  160 vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567)); in xnn_qs8_dwconv_minmax_ukernel_up24x9__neon_mul16()
  161 vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567)); in xnn_qs8_dwconv_minmax_ukernel_up24x9__neon_mul16()
  296 const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8((const void*) (k + 88))); in xnn_qs8_dwconv_minmax_ukernel_up24x9__neon_mul16() local
  298 vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567)); in xnn_qs8_dwconv_minmax_ukernel_up24x9__neon_mul16()
  299 vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567)); in xnn_qs8_dwconv_minmax_ukernel_up24x9__neon_mul16()
|
D | up8x9-minmax-ssse3-mul16.c |
  140 …const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t)… in xnn_qs8_dwconv_minmax_ukernel_up8x9__ssse3_mul16() local
  144 …st __m128i vxk4x01234567 = _mm_unpacklo_epi8(vk4x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk4… in xnn_qs8_dwconv_minmax_ukernel_up8x9__ssse3_mul16()
  325 …const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t)… in xnn_qs8_dwconv_minmax_ukernel_up8x9__ssse3_mul16() local
  328 …st __m128i vxk4x01234567 = _mm_unpacklo_epi8(vk4x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk4… in xnn_qs8_dwconv_minmax_ukernel_up8x9__ssse3_mul16()
|
D | up8x9-minmax-wasmsimd-mul16.c |
  124 …const v128_t vk4x01234567 = wasm_i16x8_load_8x8((const void*) ((uintptr_t) w + 8 * sizeof(int32_t)… in xnn_qs8_dwconv_minmax_ukernel_up8x9__wasmsimd_mul16() local
  127 const v128_t vprod4x01234567 = wasm_i16x8_mul(vi4x01234567, vk4x01234567); in xnn_qs8_dwconv_minmax_ukernel_up8x9__wasmsimd_mul16()
  246 …const v128_t vk4x01234567 = wasm_i16x8_load_8x8((const void*) ((uintptr_t) w + 8 * sizeof(int32_t)… in xnn_qs8_dwconv_minmax_ukernel_up8x9__wasmsimd_mul16() local
  248 const v128_t vprod4x01234567 = wasm_i16x8_mul(vi4x01234567, vk4x01234567); in xnn_qs8_dwconv_minmax_ukernel_up8x9__wasmsimd_mul16()
|
D | up16x9-minmax-avx2-mul32.c |
  124 …const __m256i vk4x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w … in xnn_qs8_dwconv_minmax_ukernel_up16x9__avx2_mul32() local
  129 vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi4x01234567, vk4x01234567)); in xnn_qs8_dwconv_minmax_ukernel_up16x9__avx2_mul32()
  245 … const __m256i vk4x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 64))); in xnn_qs8_dwconv_minmax_ukernel_up16x9__avx2_mul32() local
  248 … vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi4x01234567, vk4x01234567)); in xnn_qs8_dwconv_minmax_ukernel_up16x9__avx2_mul32()
|
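
Finally, the *-avx2-mul32 entries skip the int16 stage: each group of eight int8 values is widened straight to int32 with _mm256_cvtepi8_epi32 and accumulated with a 32-bit multiply and add, which is what the matched vacc01234567 lines show. A minimal sketch under the same illustrative naming assumptions:

#include <immintrin.h>  /* AVX2 */

/* Sketch of the tap-4 step in the qs8 avx2-mul32 kernels above: widen 8 int8
 * inputs and weights to int32, multiply element-wise, and add into the int32
 * accumulator.  `i4` and `w8` are illustrative pointers. */
static inline __m256i qs8_avx2_mul32_tap4_acc(const int8_t* i4, const int8_t* w8,
                                              __m256i vacc01234567) {
  const __m256i vi4x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i4));
  const __m256i vk4x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) w8));
  return _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi4x01234567, vk4x01234567));
}
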