/external/XNNPACK/src/f32-dwconv/gen/

up8x25-minmax-fma3.c
    240  const __m256 vi12x01234567 = _mm256_loadu_ps(i12);  in xnn_f32_dwconv_minmax_ukernel_up8x25__fma3() local
    382  const __m256 vi12x01234567 = _mm256_maskload_ps(i12, vmask);  in xnn_f32_dwconv_minmax_ukernel_up8x25__fma3() local

up8x25-minmax-avx.c
    240  const __m256 vi12x01234567 = _mm256_loadu_ps(i12);  in xnn_f32_dwconv_minmax_ukernel_up8x25__avx() local
    382  const __m256 vi12x01234567 = _mm256_maskload_ps(i12, vmask);  in xnn_f32_dwconv_minmax_ukernel_up8x25__avx() local

up8x25-minmax-fma3-acc2.c
    240  const __m256 vi12x01234567 = _mm256_loadu_ps(i12);  in xnn_f32_dwconv_minmax_ukernel_up8x25__fma3_acc2() local
    384  const __m256 vi12x01234567 = _mm256_maskload_ps(i12, vmask);  in xnn_f32_dwconv_minmax_ukernel_up8x25__fma3_acc2() local

up8x25-minmax-avx-acc2.c
    240  const __m256 vi12x01234567 = _mm256_loadu_ps(i12);  in xnn_f32_dwconv_minmax_ukernel_up8x25__avx_acc2() local
    384  const __m256 vi12x01234567 = _mm256_maskload_ps(i12, vmask);  in xnn_f32_dwconv_minmax_ukernel_up8x25__avx_acc2() local
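
The f32 kernels listed above read the row pointer i12 with a full 8-lane _mm256_loadu_ps in the main channel loop and switch to _mm256_maskload_ps for a 1..7-element tail. The fragment below is a minimal standalone sketch of that load pattern, not the XNNPACK kernel: the function name load_row_f32 and the mask_table layout are my own illustrations (XNNPACK derives the mask from its packed params instead).

#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>

/* Illustrative mask table: the 8 consecutive entries starting at index 7-c
   give c leading -1 lanes (loaded) followed by 8-c zero lanes (skipped). */
static const int32_t mask_table[14] = {
  -1, -1, -1, -1, -1, -1, -1,
   0,  0,  0,  0,  0,  0,  0,
};

/* Copy c floats from i12 to out: unmasked 8-lane loads, masked load for the tail. */
void load_row_f32(const float* i12, size_t c, float* out) {
  for (; c >= 8; c -= 8) {
    const __m256 vi12x01234567 = _mm256_loadu_ps(i12);  /* full 8-lane load */
    i12 += 8;
    _mm256_storeu_ps(out, vi12x01234567);
    out += 8;
  }
  if (c != 0) {  /* 1..7 remaining channels */
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) &mask_table[7 - c]);
    const __m256 vi12x01234567 = _mm256_maskload_ps(i12, vmask);  /* inactive lanes read as 0 */
    _mm256_maskstore_ps(out, vmask, vi12x01234567);
  }
}
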
/external/XNNPACK/src/f16-dwconv/gen/

up8x25-minmax-neonfp16arith-acc2.c
    218  const float16x8_t vi12x01234567 = vld1q_f16(i12); i12 += 8;  in xnn_f16_dwconv_minmax_ukernel_up8x25__neonfp16arith_acc2() local
    330  const float16x8_t vi12x01234567 = vld1q_f16(i12);  in xnn_f16_dwconv_minmax_ukernel_up8x25__neonfp16arith_acc2() local

up8x25-minmax-fma3-acc2.c
    243  const __m256 vi12x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i12));  in xnn_f16_dwconv_minmax_ukernel_up8x25__fma3_acc2() local
    398  const __m256 vi12x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i12));  in xnn_f16_dwconv_minmax_ukernel_up8x25__fma3_acc2() local

up8x25-minmax-fma3.c
    243  const __m256 vi12x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i12));  in xnn_f16_dwconv_minmax_ukernel_up8x25__fma3() local
    396  const __m256 vi12x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i12));  in xnn_f16_dwconv_minmax_ukernel_up8x25__fma3() local

up8x25-minmax-neonfp16arith.c
    218  const float16x8_t vi12x01234567 = vld1q_f16(i12); i12 += 8;  in xnn_f16_dwconv_minmax_ukernel_up8x25__neonfp16arith() local
    328  const float16x8_t vi12x01234567 = vld1q_f16(i12);  in xnn_f16_dwconv_minmax_ukernel_up8x25__neonfp16arith() local
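
The f16 kernels above come in two flavors: the neonfp16arith variants load eight half-precision values natively with vld1q_f16, while the fma3 variants widen each group of eight halves to f32 via F16C before the FMA. Below is a hedged sketch of the x86 load-and-widen step only; widen_row_f16 is an illustrative name, not an XNNPACK function, and tail handling is omitted.

#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>

/* Widen n half-precision values (n a multiple of 8) from i12 into f32 out. */
void widen_row_f16(const uint16_t* i12, size_t n, float* out) {
  for (; n >= 8; n -= 8) {
    /* 8 x f16 -> 8 x f32 (F16C); the NEON kernels use vld1q_f16 on float16x8_t instead */
    const __m256 vi12x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i12));
    i12 += 8;
    _mm256_storeu_ps(out, vi12x01234567);
    out += 8;
  }
}
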
/external/XNNPACK/src/qu8-dwconv/gen/

up8x25-minmax-rndnu-neon-mul16.c
    246  const int16x8_t vi12x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i12))); i12 += 8;  in xnn_qu8_dwconv_minmax_rndnu_ukernel_up8x25__neon_mul16() local
    418  const int16x8_t vi12x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i12)));  in xnn_qu8_dwconv_minmax_rndnu_ukernel_up8x25__neon_mul16() local

up8x25-minmax-fp32-neonv8-mul16.c
    245  const int16x8_t vi12x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i12))); i12 += 8;  in xnn_qu8_dwconv_minmax_fp32_ukernel_up8x25__neonv8_mul16() local
    417  const int16x8_t vi12x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i12)));  in xnn_qu8_dwconv_minmax_fp32_ukernel_up8x25__neonv8_mul16() local

up8x25-minmax-rndnu-neon-mul8.c
    271  const uint8x8_t vi12x01234567 = vld1_u8(i12); i12 += 8;  in xnn_qu8_dwconv_minmax_rndnu_ukernel_up8x25__neon_mul8() local
    469  const int16x8_t vi12x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i12)));  in xnn_qu8_dwconv_minmax_rndnu_ukernel_up8x25__neon_mul8() local

up8x25-minmax-fp32-wasmsimd-mul16.c
    287  const v128_t vi12x01234567 = wasm_u16x8_load8x8(i12);  in xnn_qu8_dwconv_minmax_fp32_ukernel_up8x25__wasmsimd_mul16() local
    564  const v128_t vi12x01234567 = wasm_u16x8_load8x8(i12);  in xnn_qu8_dwconv_minmax_fp32_ukernel_up8x25__wasmsimd_mul16() local
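
The qu8 mul16 kernels above widen eight unsigned 8-bit inputs to a signed 16-bit vector (vmovl_u8 followed by a reinterpret) so the multiply can run at 16-bit width; the wasmsimd variant gets the same effect from wasm_u16x8_load8x8, and the mul8 variant defers widening until after the raw load. A minimal NEON sketch of the widening load, under my own naming and without the surrounding accumulation:

#include <arm_neon.h>
#include <stddef.h>
#include <stdint.h>

/* Widen n u8 values (n a multiple of 8) from i12 into s16 out. */
void widen_row_qu8(const uint8_t* i12, size_t n, int16_t* out) {
  for (; n >= 8; n -= 8) {
    /* u8 -> u16 zero-extension, then reinterpret as s16 for the mul16 path */
    const int16x8_t vi12x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i12)));
    i12 += 8;
    vst1q_s16(out, vi12x01234567);
    out += 8;
  }
}
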
/external/XNNPACK/src/qs8-dwconv/gen/

up8x25-minmax-rndnu-neon-mul16.c
    245  const int16x8_t vi12x01234567 = vmovl_s8(vld1_s8(i12)); i12 += 8;  in xnn_qs8_dwconv_minmax_rndnu_ukernel_up8x25__neon_mul16() local
    417  const int16x8_t vi12x01234567 = vmovl_s8(vld1_s8(i12));  in xnn_qs8_dwconv_minmax_rndnu_ukernel_up8x25__neon_mul16() local

up8x25-minmax-fp32-neonv8-mul16.c
    244  const int16x8_t vi12x01234567 = vmovl_s8(vld1_s8(i12)); i12 += 8;  in xnn_qs8_dwconv_minmax_fp32_ukernel_up8x25__neonv8_mul16() local
    416  const int16x8_t vi12x01234567 = vmovl_s8(vld1_s8(i12));  in xnn_qs8_dwconv_minmax_fp32_ukernel_up8x25__neonv8_mul16() local

up8x25-minmax-fp32-wasmsimd-mul16-add16.c
    263  const v128_t vi12x01234567 = wasm_i16x8_load8x8(i12);  in xnn_qs8_dwconv_minmax_fp32_ukernel_up8x25__wasmsimd_mul16_add16() local
    490  const v128_t vi12x01234567 = wasm_i16x8_load8x8(i12);  in xnn_qs8_dwconv_minmax_fp32_ukernel_up8x25__wasmsimd_mul16_add16() local

up8x25-minmax-rndnu-neon-mul8-ld64.c
    256  const int8x8_t vi12x01234567 = vld1_s8(i12); i12 += 8;  in xnn_qs8_dwconv_minmax_rndnu_ukernel_up8x25__neon_mul8_ld64() local
    466  const int8x8_t vi12x01234567 = vld1_s8(i12);  in xnn_qs8_dwconv_minmax_rndnu_ukernel_up8x25__neon_mul8_ld64() local

up8x25-minmax-fp32-avx2-mul32.c
    239  const __m256i vi12x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i12));  in xnn_qs8_dwconv_minmax_fp32_ukernel_up8x25__avx2_mul32() local
    405  const __m256i vi12x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i12));  in xnn_qs8_dwconv_minmax_fp32_ukernel_up8x25__avx2_mul32() local

up8x25-minmax-fp32-wasmsimd-mul16.c
    275  const v128_t vi12x01234567 = wasm_i16x8_load8x8(i12);  in xnn_qs8_dwconv_minmax_fp32_ukernel_up8x25__wasmsimd_mul16() local
    526  const v128_t vi12x01234567 = wasm_i16x8_load8x8(i12);  in xnn_qs8_dwconv_minmax_fp32_ukernel_up8x25__wasmsimd_mul16() local
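
The qs8 kernels above show three widths for the same load of i12: vmovl_s8(vld1_s8(i12)) to int16 on NEON, wasm_i16x8_load8x8 on WASM SIMD, and a direct sign-extension to 32-bit lanes in the avx2-mul32 variant. The sketch below illustrates only the AVX2 sign-extending load; widen_row_qs8 is a hypothetical helper, not part of XNNPACK, and the tail path is left out.

#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>

/* Widen n s8 values (n a multiple of 8) from i12 into s32 out. */
void widen_row_qs8(const int8_t* i12, size_t n, int32_t* out) {
  for (; n >= 8; n -= 8) {
    /* load 8 bytes, sign-extend each to a 32-bit lane (AVX2) */
    const __m256i vi12x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i12));
    i12 += 8;
    _mm256_storeu_si256((__m256i*) out, vi12x01234567);
    out += 8;
  }
}
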
/external/XNNPACK/src/qc8-dwconv/gen/

up8x25-minmax-fp32-neon-mla8-ld64.c
    242  const int8x8_t vi12x01234567 = vld1_s8(i12); i12 += 8;  in xnn_qc8_dwconv_minmax_fp32_ukernel_up8x25__neon_mla8_ld64() local
    432  const int8x8_t vi12x01234567 = vld1_s8(i12);  in xnn_qc8_dwconv_minmax_fp32_ukernel_up8x25__neon_mla8_ld64() local

up8x25-minmax-fp32-wasmsimd-mul16-add16.c
    263  const v128_t vi12x01234567 = wasm_i16x8_load8x8(i12);  in xnn_qc8_dwconv_minmax_fp32_ukernel_up8x25__wasmsimd_mul16_add16() local
    493  const v128_t vi12x01234567 = wasm_i16x8_load8x8(i12);  in xnn_qc8_dwconv_minmax_fp32_ukernel_up8x25__wasmsimd_mul16_add16() local

up8x25-minmax-fp32-neonv8-mul16.c
    243  const int16x8_t vi12x01234567 = vmovl_s8(vld1_s8(i12)); i12 += 8;  in xnn_qc8_dwconv_minmax_fp32_ukernel_up8x25__neonv8_mul16() local
    418  const int16x8_t vi12x01234567 = vmovl_s8(vld1_s8(i12));  in xnn_qc8_dwconv_minmax_fp32_ukernel_up8x25__neonv8_mul16() local

up8x25-minmax-fp32-neon-mul16.c
    243  const int16x8_t vi12x01234567 = vmovl_s8(vld1_s8(i12)); i12 += 8;  in xnn_qc8_dwconv_minmax_fp32_ukernel_up8x25__neon_mul16() local
    419  const int16x8_t vi12x01234567 = vmovl_s8(vld1_s8(i12));  in xnn_qc8_dwconv_minmax_fp32_ukernel_up8x25__neon_mul16() local

up8x25-minmax-fp32-neonv8-mul8-ld64.c
    254  const int8x8_t vi12x01234567 = vld1_s8(i12); i12 += 8;  in xnn_qc8_dwconv_minmax_fp32_ukernel_up8x25__neonv8_mul8_ld64() local
    467  const int8x8_t vi12x01234567 = vld1_s8(i12);  in xnn_qc8_dwconv_minmax_fp32_ukernel_up8x25__neonv8_mul8_ld64() local

up8x25-minmax-fp32-avx2-mul32.c
    239  const __m256i vi12x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i12));  in xnn_qc8_dwconv_minmax_fp32_ukernel_up8x25__avx2_mul32() local
    406  const __m256i vi12x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i12));  in xnn_qc8_dwconv_minmax_fp32_ukernel_up8x25__avx2_mul32() local

up8x25-minmax-fp32-neonv8-mla8-ld64.c
    242  const int8x8_t vi12x01234567 = vld1_s8(i12); i12 += 8;  in xnn_qc8_dwconv_minmax_fp32_ukernel_up8x25__neonv8_mla8_ld64() local
    431  const int8x8_t vi12x01234567 = vld1_s8(i12);  in xnn_qc8_dwconv_minmax_fp32_ukernel_up8x25__neonv8_mla8_ld64() local
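
The qc8 mla8/mul8-ld64 kernels above keep i12 as a raw int8x8_t and let the widening multiply (vmull_s8/vmlal_s8) produce the 16-bit products, whereas the mul16 variants widen at load time with vmovl_s8. The loop below is a rough standalone sketch of that multiply-accumulate idiom under assumed names (mla8_row_qc8, w, acc); the real kernels additionally carry 32-bit accumulators and requantize with per-channel scales.

#include <arm_neon.h>
#include <stddef.h>
#include <stdint.h>

/* Accumulate w[c] * i12[c] into 16-bit acc for n channels (n a multiple of 8). */
void mla8_row_qc8(const int8_t* i12, const int8_t* w, size_t n, int16_t* acc) {
  for (; n >= 8; n -= 8) {
    const int8x8_t vi12x01234567 = vld1_s8(i12);  /* raw 8-bit load, no widening yet */
    i12 += 8;
    const int8x8_t vk12x01234567 = vld1_s8(w);
    w += 8;
    int16x8_t vacc01234567 = vld1q_s16(acc);
    /* widening multiply-accumulate: s8 x s8 products added into s16 lanes */
    vacc01234567 = vmlal_s8(vacc01234567, vi12x01234567, vk12x01234567);
    vst1q_s16(acc, vacc01234567);
    acc += 8;
  }
}
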