/external/XNNPACK/src/qs8-dwconv/gen/

up16x25-minmax-rndnu-neon-mul16.c  (in xnn_qs8_dwconv_minmax_rndnu_ukernel_up16x25__neon_mul16)
    277:  const int16x8_t vi10x89ABCDEF = vmovl_s8(vld1_s8(i10)); i10 += 8;
    282:  vacc89AB = vmlal_s16(vacc89AB, vget_low_s16(vi10x89ABCDEF), vget_low_s16(vk10x89ABCDEF));
    283:  vaccCDEF = vmlal_s16(vaccCDEF, vget_high_s16(vi10x89ABCDEF), vget_high_s16(vk10x89ABCDEF));

up16x25-minmax-fp32-neonv8-mul16.c  (in xnn_qs8_dwconv_minmax_fp32_ukernel_up16x25__neonv8_mul16)
    276:  const int16x8_t vi10x89ABCDEF = vmovl_s8(vld1_s8(i10)); i10 += 8;
    281:  vacc89AB = vmlal_s16(vacc89AB, vget_low_s16(vi10x89ABCDEF), vget_low_s16(vk10x89ABCDEF));
    282:  vaccCDEF = vmlal_s16(vaccCDEF, vget_high_s16(vi10x89ABCDEF), vget_high_s16(vk10x89ABCDEF));

up16x25-minmax-fp32-neon-mul16.c  (in xnn_qs8_dwconv_minmax_fp32_ukernel_up16x25__neon_mul16)
    276:  const int16x8_t vi10x89ABCDEF = vmovl_s8(vld1_s8(i10)); i10 += 8;
    281:  vacc89AB = vmlal_s16(vacc89AB, vget_low_s16(vi10x89ABCDEF), vget_low_s16(vk10x89ABCDEF));
    282:  vaccCDEF = vmlal_s16(vaccCDEF, vget_high_s16(vi10x89ABCDEF), vget_high_s16(vk10x89ABCDEF));

up16x25-minmax-fp32-wasmsimd-mul16-add16.c  (in xnn_qs8_dwconv_minmax_fp32_ukernel_up16x25__wasmsimd_mul16_add16)
    291:  const v128_t vi10x89ABCDEF = wasm_i16x8_load8x8(i10 + 8);
    296:  vprod89ABCDEF = wasm_i16x8_mul(vi10x89ABCDEF, vk10x89ABCDEF);

up16x25-minmax-rndnu-neon-mla8-ld64.c  (in xnn_qs8_dwconv_minmax_rndnu_ukernel_up16x25__neon_mla8_ld64)
    276:  const int8x8_t vi10x89ABCDEF = vld1_s8(i10); i10 += 8;
    280:  vprod89ABCDEF = vmull_s8(vi10x89ABCDEF, vk10x89ABCDEF);

up16x25-minmax-fp32-avx2-mul32.c  (in xnn_qs8_dwconv_minmax_fp32_ukernel_up16x25__avx2_mul32)
    260:  const __m256i vi10x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i10 + 8)));
    265:  vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi10x89ABCDEF, vk10x89ABCDEF));
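Every mul16 entry above follows the same per-tap pattern for this 8-channel slice: widen the signed 8-bit inputs and weights to 16 bits, then multiply-accumulate into two int32x4 accumulators with vmlal_s16. A minimal standalone sketch of that pattern, with a hypothetical helper name (this is not the XNNPACK source):

#include <arm_neon.h>

/* Hypothetical helper: one 8-channel, one-tap step of the qs8 mul16 scheme. */
static void qs8_dwconv_mul16_step(const int8_t* input, const int8_t* weights,
                                  int32x4_t* acc_lo, int32x4_t* acc_hi) {
  const int16x8_t vi = vmovl_s8(vld1_s8(input));    /* widen 8 x s8 -> 8 x s16 */
  const int16x8_t vk = vmovl_s8(vld1_s8(weights));  /* widen 8 x s8 -> 8 x s16 */
  /* 16 x 16 -> 32-bit multiply-accumulate, four lanes per call */
  *acc_lo = vmlal_s16(*acc_lo, vget_low_s16(vi), vget_low_s16(vk));
  *acc_hi = vmlal_s16(*acc_hi, vget_high_s16(vi), vget_high_s16(vk));
}

The mla8-ld64 and avx2-mul32 entries reach 32-bit accumulators by different routes: the former multiplies at 8-bit width with vmull_s8 to form 16-bit products first, while the latter widens straight to 32 bits with _mm256_cvtepi8_epi32 and uses a 32-bit _mm256_mullo_epi32.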
/external/XNNPACK/src/qc8-dwconv/gen/

up16x25-minmax-fp32-neon-mul16.c  (in xnn_qc8_dwconv_minmax_fp32_ukernel_up16x25__neon_mul16)
    275:  const int16x8_t vi10x89ABCDEF = vmovl_s8(vld1_s8(i10)); i10 += 8;
    280:  vacc89AB = vmlal_s16(vacc89AB, vget_low_s16(vi10x89ABCDEF), vget_low_s16(vk10x89ABCDEF));
    281:  vaccCDEF = vmlal_s16(vaccCDEF, vget_high_s16(vi10x89ABCDEF), vget_high_s16(vk10x89ABCDEF));

up16x25-minmax-fp32-neonv8-mul16.c  (in xnn_qc8_dwconv_minmax_fp32_ukernel_up16x25__neonv8_mul16)
    275:  const int16x8_t vi10x89ABCDEF = vmovl_s8(vld1_s8(i10)); i10 += 8;
    280:  vacc89AB = vmlal_s16(vacc89AB, vget_low_s16(vi10x89ABCDEF), vget_low_s16(vk10x89ABCDEF));
    281:  vaccCDEF = vmlal_s16(vaccCDEF, vget_high_s16(vi10x89ABCDEF), vget_high_s16(vk10x89ABCDEF));

up16x25-minmax-fp32-avx2-mul32.c  (in xnn_qc8_dwconv_minmax_fp32_ukernel_up16x25__avx2_mul32)
    260:  const __m256i vi10x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i10 + 8)));
    265:  vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi10x89ABCDEF, vk10x89ABCDEF));

up16x25-minmax-fp32-neon-mla8-ld64.c  (in xnn_qc8_dwconv_minmax_fp32_ukernel_up16x25__neon_mla8_ld64)
    274:  const int8x8_t vi10x89ABCDEF = vld1_s8(i10); i10 += 8;
    278:  vprod89ABCDEF = vmull_s8(vi10x89ABCDEF, vk10x89ABCDEF);

up16x25-minmax-fp32-neonv8-mla8-ld64.c  (in xnn_qc8_dwconv_minmax_fp32_ukernel_up16x25__neonv8_mla8_ld64)
    274:  const int8x8_t vi10x89ABCDEF = vld1_s8(i10); i10 += 8;
    278:  vprod89ABCDEF = vmull_s8(vi10x89ABCDEF, vk10x89ABCDEF);

up16x25-minmax-fp32-wasmsimd-mul16-add16.c  (in xnn_qc8_dwconv_minmax_fp32_ukernel_up16x25__wasmsimd_mul16_add16)
    291:  const v128_t vi10x89ABCDEF = wasm_i16x8_load8x8(i10 + 8);
    296:  vprod89ABCDEF = wasm_i16x8_mul(vi10x89ABCDEF, vk10x89ABCDEF);

up16x25-minmax-fp32-sse2-mul16-add16.c  (in xnn_qc8_dwconv_minmax_fp32_ukernel_up16x25__sse2_mul16_add16)
    352:  const __m128i vi10x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i10 + 8));
    358:  const __m128i vxi10x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi10x89ABCDEF, vi10x89ABCDEF), 8);
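The sse2-mul16-add16 entry is the one with a non-obvious step: without SSE4.1's _mm_cvtepi8_epi16, it sign-extends the 8-bit values by interleaving the byte vector with itself and arithmetic-shifting every 16-bit lane right by 8. A sketch of just that step, with a hypothetical helper name:

#include <emmintrin.h>  /* SSE2 only */
#include <stdint.h>

/* Hypothetical helper: load 8 signed bytes and sign-extend them to 8 x int16.
 * Unpacking the vector with itself places each byte in the high half of a
 * 16-bit lane, so the arithmetic shift drags its sign bit down. */
static __m128i sse2_widen_s8_to_s16(const int8_t* p) {
  const __m128i v = _mm_loadl_epi64((const __m128i*) p);
  return _mm_srai_epi16(_mm_unpacklo_epi8(v, v), 8);
}

The other qc8 entries mirror their qs8 counterparts; qc8 kernels differ mainly in applying per-channel (rather than per-tensor) quantization parameters during requantization, which does not change this inner loop.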
/external/XNNPACK/src/qu8-dwconv/gen/

up16x25-minmax-fp32-wasmsimd-mul16.c  (in xnn_qu8_dwconv_minmax_fp32_ukernel_up16x25__wasmsimd_mul16)
    330:  const v128_t vi10x89ABCDEF = wasm_u16x8_load8x8(i10 + 8);
    333:  vsumx89ABCDEF = wasm_i16x8_add(vsumx89ABCDEF, vi10x89ABCDEF);
    337:  vprod89ABCDEF = wasm_i16x8_mul(vi10x89ABCDEF, vk10x89ABCDEF);

up16x25-minmax-fp32-neon-mul16.c  (in xnn_qu8_dwconv_minmax_fp32_ukernel_up16x25__neon_mul16)
    277:  const int16x8_t vi10x89ABCDEF = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i10))); i10 += 8;
    282:  vacc89AB = vmlal_s16(vacc89AB, vget_low_s16(vi10x89ABCDEF), vget_low_s16(vk10x89ABCDEF));
    283:  vaccCDEF = vmlal_s16(vaccCDEF, vget_high_s16(vi10x89ABCDEF), vget_high_s16(vk10x89ABCDEF));

up16x25-minmax-fp32-neonv8-mul16.c  (in xnn_qu8_dwconv_minmax_fp32_ukernel_up16x25__neonv8_mul16)
    277:  const int16x8_t vi10x89ABCDEF = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i10))); i10 += 8;
    282:  vacc89AB = vmlal_s16(vacc89AB, vget_low_s16(vi10x89ABCDEF), vget_low_s16(vk10x89ABCDEF));
    283:  vaccCDEF = vmlal_s16(vaccCDEF, vget_high_s16(vi10x89ABCDEF), vget_high_s16(vk10x89ABCDEF));

up16x25-minmax-rndnu-neon-mul16.c  (in xnn_qu8_dwconv_minmax_rndnu_ukernel_up16x25__neon_mul16)
    278:  const int16x8_t vi10x89ABCDEF = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i10))); i10 += 8;
    283:  vacc89AB = vmlal_s16(vacc89AB, vget_low_s16(vi10x89ABCDEF), vget_low_s16(vk10x89ABCDEF));
    284:  vaccCDEF = vmlal_s16(vaccCDEF, vget_high_s16(vi10x89ABCDEF), vget_high_s16(vk10x89ABCDEF));

up16x25-minmax-rndnu-neon-mul8.c  (in xnn_qu8_dwconv_minmax_rndnu_ukernel_up16x25__neon_mul8)
    318:  const uint8x8_t vi10x89ABCDEF = vld1_u8(i10); i10 += 8;
    323:  vprod89ABCDEF = vmull_u8(vi10x89ABCDEF, vk10x89ABCDEF);
    324:  vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi10x89ABCDEF);

up16x25-minmax-fp32-avx2-mul32.c  (in xnn_qu8_dwconv_minmax_fp32_ukernel_up16x25__avx2_mul32)
    261:  const __m256i vi10x89ABCDEF = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) (i10 + 8)));
    266:  vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi10x89ABCDEF, vk10x89ABCDEF));
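For unsigned 8-bit data the mul16 kernels widen with vmovl_u8 and reinterpret the result as signed 16-bit, which is lossless because the values stay in 0..255, then reuse the same vmlal_s16 accumulation as the qs8 kernels. The mul8 and wasmsimd entries also keep a running sum of the inputs (vaddw_u8 / wasm_i16x8_add), presumably feeding the zero-point correction. A minimal sketch of the widening step only, with hypothetical names; weight widening and zero-point handling are assumed to happen elsewhere:

#include <arm_neon.h>

/* Hypothetical helper: one 8-channel, one-tap step of the qu8 mul16 scheme,
 * taking weights that were already widened (and zero-point adjusted) to s16. */
static void qu8_dwconv_mul16_step(const uint8_t* input, int16x8_t vk,
                                  int32x4_t* acc_lo, int32x4_t* acc_hi) {
  /* u8 -> u16 widen, then reinterpret as s16 (values fit in 0..255) */
  const int16x8_t vi = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(input)));
  *acc_lo = vmlal_s16(*acc_lo, vget_low_s16(vi), vget_low_s16(vk));
  *acc_hi = vmlal_s16(*acc_hi, vget_high_s16(vi), vget_high_s16(vk));
}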
/external/XNNPACK/src/f32-dwconv/gen/

up16x25-minmax-avx.c  (in xnn_f32_dwconv_minmax_ukernel_up16x25__avx)
    260:  const __m256 vi10x89ABCDEF = _mm256_loadu_ps(i10 + 8);
    266:  vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, _mm256_mul_ps(vi10x89ABCDEF, vk10x89ABCDEF));

up16x25-minmax-fma3-acc2.c  (in xnn_f32_dwconv_minmax_ukernel_up16x25__fma3_acc2)
    260:  const __m256 vi10x89ABCDEF = _mm256_loadu_ps(i10 + 8);
    266:  vacc89ABCDEFp0 = _mm256_fmadd_ps(vi10x89ABCDEF, vk10x89ABCDEF, vacc89ABCDEFp0);

up16x25-minmax-fma3.c  (in xnn_f32_dwconv_minmax_ukernel_up16x25__fma3)
    260:  const __m256 vi10x89ABCDEF = _mm256_loadu_ps(i10 + 8);
    266:  vacc89ABCDEFp0 = _mm256_fmadd_ps(vi10x89ABCDEF, vk10x89ABCDEF, vacc89ABCDEFp0);

up16x25-minmax-avx-acc2.c  (in xnn_f32_dwconv_minmax_ukernel_up16x25__avx_acc2)
    260:  const __m256 vi10x89ABCDEF = _mm256_loadu_ps(i10 + 8);
    266:  vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, _mm256_mul_ps(vi10x89ABCDEF, vk10x89ABCDEF));
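The f32 entries differ only in how the product reaches the accumulator: the avx kernels issue a separate multiply and add, while the fma3 kernels fuse the two into _mm256_fmadd_ps. A side-by-side sketch of one 8-lane step, with hypothetical helper names:

#include <immintrin.h>

/* Hypothetical helpers: one 8-lane, one-tap step of the f32 accumulation. */
static __m256 f32_dwconv_step_avx(__m256 acc, const float* input, const float* weights) {
  const __m256 vi = _mm256_loadu_ps(input);
  const __m256 vk = _mm256_loadu_ps(weights);
  return _mm256_add_ps(acc, _mm256_mul_ps(vi, vk));  /* separate mul + add (AVX) */
}

static __m256 f32_dwconv_step_fma3(__m256 acc, const float* input, const float* weights) {
  const __m256 vi = _mm256_loadu_ps(input);
  const __m256 vk = _mm256_loadu_ps(weights);
  return _mm256_fmadd_ps(vi, vk, acc);               /* fused multiply-add (FMA3) */
}

The acc2 variants split the running sum across two accumulator registers (hence the p0 suffix on the accumulators above) to shorten the dependency chain, combining the partial sums once at the end.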
/external/XNNPACK/src/f16-dwconv/gen/

up16x25-minmax-neonfp16arith-acc2.c  (in xnn_f16_dwconv_minmax_ukernel_up16x25__neonfp16arith_acc2)
    242:  const float16x8_t vi10x89ABCDEF = vld1q_f16(i10); i10 += 8;
    246:  vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi10x89ABCDEF, vk10x89ABCDEF);

up16x25-minmax-neonfp16arith.c  (in xnn_f16_dwconv_minmax_ukernel_up16x25__neonfp16arith)
    242:  const float16x8_t vi10x89ABCDEF = vld1q_f16(i10); i10 += 8;
    246:  vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi10x89ABCDEF, vk10x89ABCDEF);
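The f16 kernels stay in half precision throughout and use the ARMv8.2-A FP16 arithmetic extension (the neonfp16arith suffix) for a fused multiply-add per slice. A minimal sketch with a hypothetical helper name; it requires an FP16-capable compiler target (e.g. -march=armv8.2-a+fp16):

#include <arm_neon.h>

/* Hypothetical helper: one 8-lane, one-tap step of the f16 accumulation. */
static float16x8_t f16_dwconv_step(float16x8_t acc, const float16_t* input,
                                   const float16_t* weights) {
  const float16x8_t vi = vld1q_f16(input);
  const float16x8_t vk = vld1q_f16(weights);
  return vfmaq_f16(acc, vi, vk);  /* acc + vi * vk in half precision */
}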