
Searched refs:vmovn_s64 (Results 1 – 25 of 25) sorted by relevance
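For context on the symbol being searched: vmovn_s64 narrows each 64-bit lane of an int64x2_t to its low 32 bits (plain truncation, no saturation), producing an int32x2_t. A minimal self-contained sketch of that behaviour, using illustrative values that are not taken from any of the hits below:

    #include <arm_neon.h>
    #include <stdio.h>

    int main(void) {
        const int64_t in[2] = { 0x100000002LL, -3LL };  /* illustrative inputs */
        int64x2_t wide = vld1q_s64(in);
        int32x2_t narrow = vmovn_s64(wide);             /* keeps low 32 bits: 2 and -3 */

        int32_t out[2];
        vst1_s32(out, narrow);
        printf("%d %d\n", out[0], out[1]);              /* prints: 2 -3 */
        return 0;
    }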

/external/XNNPACK/src/qs8-gavgpool/gen/
7x-minmax-neon-c32-acc2.c 247 vacc0123 = vcombine_s32(vmovn_s64(vacc01), vmovn_s64(vacc23)); in xnn_qs8_gavgpool_minmax_ukernel_7x__neon_c32_acc2()
248 vacc4567 = vcombine_s32(vmovn_s64(vacc45), vmovn_s64(vacc67)); in xnn_qs8_gavgpool_minmax_ukernel_7x__neon_c32_acc2()
249 vacc89AB = vcombine_s32(vmovn_s64(vacc89), vmovn_s64(vaccAB)); in xnn_qs8_gavgpool_minmax_ukernel_7x__neon_c32_acc2()
250 vaccCDEF = vcombine_s32(vmovn_s64(vaccCD), vmovn_s64(vaccEF)); in xnn_qs8_gavgpool_minmax_ukernel_7x__neon_c32_acc2()
251 vaccGHIJ = vcombine_s32(vmovn_s64(vaccGH), vmovn_s64(vaccIJ)); in xnn_qs8_gavgpool_minmax_ukernel_7x__neon_c32_acc2()
252 vaccKLMN = vcombine_s32(vmovn_s64(vaccKL), vmovn_s64(vaccMN)); in xnn_qs8_gavgpool_minmax_ukernel_7x__neon_c32_acc2()
253 vaccOPQR = vcombine_s32(vmovn_s64(vaccOP), vmovn_s64(vaccQR)); in xnn_qs8_gavgpool_minmax_ukernel_7x__neon_c32_acc2()
254 vaccSTUV = vcombine_s32(vmovn_s64(vaccST), vmovn_s64(vaccUV)); in xnn_qs8_gavgpool_minmax_ukernel_7x__neon_c32_acc2()
337 vacc0123 = vcombine_s32(vmovn_s64(vacc01), vmovn_s64(vacc23)); in xnn_qs8_gavgpool_minmax_ukernel_7x__neon_c32_acc2()
338 vacc4567 = vcombine_s32(vmovn_s64(vacc45), vmovn_s64(vacc67)); in xnn_qs8_gavgpool_minmax_ukernel_7x__neon_c32_acc2()
7x-minmax-neon-c24-acc2.c 207 vacc0123 = vcombine_s32(vmovn_s64(vacc01), vmovn_s64(vacc23)); in xnn_qs8_gavgpool_minmax_ukernel_7x__neon_c24_acc2()
208 vacc4567 = vcombine_s32(vmovn_s64(vacc45), vmovn_s64(vacc67)); in xnn_qs8_gavgpool_minmax_ukernel_7x__neon_c24_acc2()
209 vacc89AB = vcombine_s32(vmovn_s64(vacc89), vmovn_s64(vaccAB)); in xnn_qs8_gavgpool_minmax_ukernel_7x__neon_c24_acc2()
210 vaccCDEF = vcombine_s32(vmovn_s64(vaccCD), vmovn_s64(vaccEF)); in xnn_qs8_gavgpool_minmax_ukernel_7x__neon_c24_acc2()
211 vaccGHIJ = vcombine_s32(vmovn_s64(vaccGH), vmovn_s64(vaccIJ)); in xnn_qs8_gavgpool_minmax_ukernel_7x__neon_c24_acc2()
212 vaccKLMN = vcombine_s32(vmovn_s64(vaccKL), vmovn_s64(vaccMN)); in xnn_qs8_gavgpool_minmax_ukernel_7x__neon_c24_acc2()
294 vacc0123 = vcombine_s32(vmovn_s64(vacc01), vmovn_s64(vacc23)); in xnn_qs8_gavgpool_minmax_ukernel_7x__neon_c24_acc2()
295 vacc4567 = vcombine_s32(vmovn_s64(vacc45), vmovn_s64(vacc67)); in xnn_qs8_gavgpool_minmax_ukernel_7x__neon_c24_acc2()
7x-minmax-neon-c16-acc2.c 166 vacc0123 = vcombine_s32(vmovn_s64(vacc01), vmovn_s64(vacc23)); in xnn_qs8_gavgpool_minmax_ukernel_7x__neon_c16_acc2()
167 vacc4567 = vcombine_s32(vmovn_s64(vacc45), vmovn_s64(vacc67)); in xnn_qs8_gavgpool_minmax_ukernel_7x__neon_c16_acc2()
168 vacc89AB = vcombine_s32(vmovn_s64(vacc89), vmovn_s64(vaccAB)); in xnn_qs8_gavgpool_minmax_ukernel_7x__neon_c16_acc2()
169 vaccCDEF = vcombine_s32(vmovn_s64(vaccCD), vmovn_s64(vaccEF)); in xnn_qs8_gavgpool_minmax_ukernel_7x__neon_c16_acc2()
246 vacc0123 = vcombine_s32(vmovn_s64(vacc01), vmovn_s64(vacc23)); in xnn_qs8_gavgpool_minmax_ukernel_7x__neon_c16_acc2()
247 vacc4567 = vcombine_s32(vmovn_s64(vacc45), vmovn_s64(vacc67)); in xnn_qs8_gavgpool_minmax_ukernel_7x__neon_c16_acc2()
7x-minmax-neon-c8-acc2.c 126 vacc0123 = vcombine_s32(vmovn_s64(vacc01), vmovn_s64(vacc23)); in xnn_qs8_gavgpool_minmax_ukernel_7x__neon_c8_acc2()
127 vacc4567 = vcombine_s32(vmovn_s64(vacc45), vmovn_s64(vacc67)); in xnn_qs8_gavgpool_minmax_ukernel_7x__neon_c8_acc2()
203 vacc0123 = vcombine_s32(vmovn_s64(vacc01), vmovn_s64(vacc23)); in xnn_qs8_gavgpool_minmax_ukernel_7x__neon_c8_acc2()
204 vacc4567 = vcombine_s32(vmovn_s64(vacc45), vmovn_s64(vacc67)); in xnn_qs8_gavgpool_minmax_ukernel_7x__neon_c8_acc2()
7p7x-minmax-neon-c16-acc2.c 286 vacc0123 = vcombine_s32(vmovn_s64(vacc01), vmovn_s64(vacc23)); in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c16_acc2()
287 vacc4567 = vcombine_s32(vmovn_s64(vacc45), vmovn_s64(vacc67)); in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c16_acc2()
288 vacc89AB = vcombine_s32(vmovn_s64(vacc89), vmovn_s64(vaccAB)); in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c16_acc2()
289 vaccCDEF = vcombine_s32(vmovn_s64(vaccCD), vmovn_s64(vaccEF)); in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c16_acc2()
369 vacc0123 = vcombine_s32(vmovn_s64(vacc01), vmovn_s64(vacc23)); in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c16_acc2()
370 vacc4567 = vcombine_s32(vmovn_s64(vacc45), vmovn_s64(vacc67)); in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c16_acc2()
7p7x-minmax-neon-c32-acc2.c 504 vacc0123 = vcombine_s32(vmovn_s64(vacc01), vmovn_s64(vacc23)); in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c32_acc2()
505 vacc4567 = vcombine_s32(vmovn_s64(vacc45), vmovn_s64(vacc67)); in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c32_acc2()
506 vacc89AB = vcombine_s32(vmovn_s64(vacc89), vmovn_s64(vaccAB)); in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c32_acc2()
507 vaccCDEF = vcombine_s32(vmovn_s64(vaccCD), vmovn_s64(vaccEF)); in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c32_acc2()
508 vaccGHIJ = vcombine_s32(vmovn_s64(vaccGH), vmovn_s64(vaccIJ)); in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c32_acc2()
509 vaccKLMN = vcombine_s32(vmovn_s64(vaccKL), vmovn_s64(vaccMN)); in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c32_acc2()
510 vaccOPQR = vcombine_s32(vmovn_s64(vaccOP), vmovn_s64(vaccQR)); in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c32_acc2()
511 vaccSTUV = vcombine_s32(vmovn_s64(vaccST), vmovn_s64(vaccUV)); in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c32_acc2()
597 vacc0123 = vcombine_s32(vmovn_s64(vacc01), vmovn_s64(vacc23)); in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c32_acc2()
598 vacc4567 = vcombine_s32(vmovn_s64(vacc45), vmovn_s64(vacc67)); in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c32_acc2()
7p7x-minmax-neon-c24-acc2.c 426 vacc0123 = vcombine_s32(vmovn_s64(vacc01), vmovn_s64(vacc23)); in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c24_acc2()
427 vacc4567 = vcombine_s32(vmovn_s64(vacc45), vmovn_s64(vacc67)); in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c24_acc2()
428 vacc89AB = vcombine_s32(vmovn_s64(vacc89), vmovn_s64(vaccAB)); in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c24_acc2()
429 vaccCDEF = vcombine_s32(vmovn_s64(vaccCD), vmovn_s64(vaccEF)); in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c24_acc2()
430 vaccGHIJ = vcombine_s32(vmovn_s64(vaccGH), vmovn_s64(vaccIJ)); in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c24_acc2()
431 vaccKLMN = vcombine_s32(vmovn_s64(vaccKL), vmovn_s64(vaccMN)); in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c24_acc2()
516 vacc0123 = vcombine_s32(vmovn_s64(vacc01), vmovn_s64(vacc23)); in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c24_acc2()
517 vacc4567 = vcombine_s32(vmovn_s64(vacc45), vmovn_s64(vacc67)); in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c24_acc2()
7p7x-minmax-neon-c8-acc2.c 208 vacc0123 = vcombine_s32(vmovn_s64(vacc01), vmovn_s64(vacc23)); in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c8_acc2()
209 vacc4567 = vcombine_s32(vmovn_s64(vacc45), vmovn_s64(vacc67)); in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c8_acc2()
288 vacc0123 = vcombine_s32(vmovn_s64(vacc01), vmovn_s64(vacc23)); in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c8_acc2()
289 vacc4567 = vcombine_s32(vmovn_s64(vacc45), vmovn_s64(vacc67)); in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c8_acc2()
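The recurring step in the gavgpool kernels above is narrowing pairs of 64-bit accumulators back into one 32-bit vector. A minimal sketch of that pattern, with hypothetical variable names (acc01, acc23) rather than anything copied from the kernels:

    #include <arm_neon.h>

    /* Narrow two int64x2_t accumulators into one int32x4_t.
       vmovn_s64 keeps the low 32 bits of each 64-bit lane;
       vcombine_s32 concatenates the two halves into lanes 0..3. */
    static inline int32x4_t narrow_accumulators(int64x2_t acc01, int64x2_t acc23) {
        return vcombine_s32(vmovn_s64(acc01), vmovn_s64(acc23));
    }

The non-saturating vmovn_s64 (rather than vqmovn_s64) suffices presumably because the preceding requantization arithmetic in these kernels has already brought the 64-bit values into 32-bit range.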
/external/XNNPACK/src/qu8-requantization/
precise-neon.c 119 const int32x4_t x_scaled = vcombine_s32(vmovn_s64(x01_scaled), vmovn_s64(x23_scaled)); in xnn_qu8_requantize_precise__neon()
120 const int32x4_t y_scaled = vcombine_s32(vmovn_s64(y01_scaled), vmovn_s64(y23_scaled)); in xnn_qu8_requantize_precise__neon()
121 const int32x4_t z_scaled = vcombine_s32(vmovn_s64(z01_scaled), vmovn_s64(z23_scaled)); in xnn_qu8_requantize_precise__neon()
122 const int32x4_t w_scaled = vcombine_s32(vmovn_s64(w01_scaled), vmovn_s64(w23_scaled)); in xnn_qu8_requantize_precise__neon()
/external/XNNPACK/src/qs8-requantization/
precise-neon.c 119 const int32x4_t x_scaled = vcombine_s32(vmovn_s64(x01_scaled), vmovn_s64(x23_scaled)); in xnn_qs8_requantize_precise__neon()
120 const int32x4_t y_scaled = vcombine_s32(vmovn_s64(y01_scaled), vmovn_s64(y23_scaled)); in xnn_qs8_requantize_precise__neon()
121 const int32x4_t z_scaled = vcombine_s32(vmovn_s64(z01_scaled), vmovn_s64(z23_scaled)); in xnn_qs8_requantize_precise__neon()
122 const int32x4_t w_scaled = vcombine_s32(vmovn_s64(w01_scaled), vmovn_s64(w23_scaled)); in xnn_qs8_requantize_precise__neon()
/external/XNNPACK/src/qu8-gavgpool/
7x-minmax-neon-c8.c 122 vacc_lo = vcombine_s32(vmovn_s64(vscaled_acc01), vmovn_s64(vscaled_acc23)); in xnn_qu8_gavgpool_minmax_ukernel_7x__neon_c8()
123 vacc_hi = vcombine_s32(vmovn_s64(vscaled_acc45), vmovn_s64(vscaled_acc67)); in xnn_qu8_gavgpool_minmax_ukernel_7x__neon_c8()
192 vacc_lo = vcombine_s32(vmovn_s64(vscaled_acc01), vmovn_s64(vscaled_acc23)); in xnn_qu8_gavgpool_minmax_ukernel_7x__neon_c8()
193 vacc_hi = vcombine_s32(vmovn_s64(vscaled_acc45), vmovn_s64(vscaled_acc67)); in xnn_qu8_gavgpool_minmax_ukernel_7x__neon_c8()
7p7x-minmax-neon-c8.c 198 vacc_lo = vcombine_s32(vmovn_s64(vscaled_acc01), vmovn_s64(vscaled_acc23)); in xnn_qu8_gavgpool_minmax_ukernel_7p7x__neon_c8()
199 vacc_hi = vcombine_s32(vmovn_s64(vscaled_acc45), vmovn_s64(vscaled_acc67)); in xnn_qu8_gavgpool_minmax_ukernel_7p7x__neon_c8()
270 vacc_lo = vcombine_s32(vmovn_s64(vscaled_acc01), vmovn_s64(vscaled_acc23)); in xnn_qu8_gavgpool_minmax_ukernel_7p7x__neon_c8()
271 vacc_hi = vcombine_s32(vmovn_s64(vscaled_acc45), vmovn_s64(vscaled_acc67)); in xnn_qu8_gavgpool_minmax_ukernel_7p7x__neon_c8()
/external/XNNPACK/src/qu8-avgpool/
9x-minmax-neon-c8.c 177 vacc_lo = vcombine_s32(vmovn_s64(vscaled_acc01), vmovn_s64(vscaled_acc23)); in xnn_qu8_avgpool_minmax_ukernel_9x__neon_c8()
178 vacc_hi = vcombine_s32(vmovn_s64(vscaled_acc45), vmovn_s64(vscaled_acc67)); in xnn_qu8_avgpool_minmax_ukernel_9x__neon_c8()
250 vacc_lo = vcombine_s32(vmovn_s64(vscaled_acc01), vmovn_s64(vscaled_acc23)); in xnn_qu8_avgpool_minmax_ukernel_9x__neon_c8()
251 vacc_hi = vcombine_s32(vmovn_s64(vscaled_acc45), vmovn_s64(vscaled_acc67)); in xnn_qu8_avgpool_minmax_ukernel_9x__neon_c8()
9p8x-minmax-neon-c8.c 321 vacc_lo = vcombine_s32(vmovn_s64(vscaled_acc01), vmovn_s64(vscaled_acc23)); in xnn_qu8_avgpool_minmax_ukernel_9p8x__neon_c8()
322 vacc_hi = vcombine_s32(vmovn_s64(vscaled_acc45), vmovn_s64(vscaled_acc67)); in xnn_qu8_avgpool_minmax_ukernel_9p8x__neon_c8()
395 vacc_lo = vcombine_s32(vmovn_s64(vscaled_acc01), vmovn_s64(vscaled_acc23)); in xnn_qu8_avgpool_minmax_ukernel_9p8x__neon_c8()
396 vacc_hi = vcombine_s32(vmovn_s64(vscaled_acc45), vmovn_s64(vscaled_acc67)); in xnn_qu8_avgpool_minmax_ukernel_9p8x__neon_c8()
/external/libopus/silk/arm/
LPC_inv_pred_gain_neon_intr.c 124 t0_s32x4 = vcombine_s32( vmovn_s64( t0_s64x2 ), vmovn_s64( t1_s64x2 ) ); in LPC_inverse_pred_gain_QA_neon()
125 t1_s32x4 = vcombine_s32( vmovn_s64( t2_s64x2 ), vmovn_s64( t3_s64x2 ) ); in LPC_inverse_pred_gain_QA_neon()
/external/XNNPACK/src/qs8-gavgpool/
unipass-neon.c.in 123 … vacc${ABC[C:C+4]} = vcombine_s32(vmovn_s64(vacc${ABC[C:C+2]}), vmovn_s64(vacc${ABC[C+2:C+4]}));
220 vacc${ABC[0:4]} = vcombine_s32(vmovn_s64(vacc${ABC[0:2]}), vmovn_s64(vacc${ABC[2:4]}));
221 vacc${ABC[4:8]} = vcombine_s32(vmovn_s64(vacc${ABC[4:6]}), vmovn_s64(vacc${ABC[6:8]}));
multipass-neon.c.in 276 … vacc${ABC[C:C+4]} = vcombine_s32(vmovn_s64(vacc${ABC[C:C+2]}), vmovn_s64(vacc${ABC[C+2:C+4]}));
379 vacc${ABC[0:4]} = vcombine_s32(vmovn_s64(vacc${ABC[0:2]}), vmovn_s64(vacc${ABC[2:4]}));
380 vacc${ABC[4:8]} = vcombine_s32(vmovn_s64(vacc${ABC[4:6]}), vmovn_s64(vacc${ABC[6:8]}));
/external/libopus/silk/fixed/arm/
warped_autocorrelation_FIX_neon_intr.c 239 … corr_s32x4 = vcombine_s32( vmovn_s64( corr_QC1_s64x2 ), vmovn_s64( corr_QC0_s64x2 ) ); in silk_warped_autocorrelation_FIX_neon()
/external/tensorflow/tensorflow/lite/kernels/internal/optimized/
neon_tensor_utils.cc 1588 val4.val[0] = vcombine_s32(vmovn_s64(vrshrq_n_s64(val3_0.val[0], 10)), in NeonApplyLayerNorm()
1589 vmovn_s64(vrshrq_n_s64(val3_0.val[1], 10))); in NeonApplyLayerNorm()
1590 val4.val[1] = vcombine_s32(vmovn_s64(vrshrq_n_s64(val3_1.val[0], 10)), in NeonApplyLayerNorm()
1591 vmovn_s64(vrshrq_n_s64(val3_1.val[1], 10))); in NeonApplyLayerNorm()
1592 val4.val[2] = vcombine_s32(vmovn_s64(vrshrq_n_s64(val3_2.val[0], 10)), in NeonApplyLayerNorm()
1593 vmovn_s64(vrshrq_n_s64(val3_2.val[1], 10))); in NeonApplyLayerNorm()
1594 val4.val[3] = vcombine_s32(vmovn_s64(vrshrq_n_s64(val3_3.val[0], 10)), in NeonApplyLayerNorm()
1595 vmovn_s64(vrshrq_n_s64(val3_3.val[1], 10))); in NeonApplyLayerNorm()
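The NeonApplyLayerNorm hits above combine a rounding right shift with the narrow. A sketch of just that step, assuming hypothetical names (lo, hi) and the same shift amount of 10 as in the snippet:

    #include <arm_neon.h>

    /* Rounding-shift two int64x2_t values right by 10 bits, then narrow
       each 64-bit lane to its low 32 bits and pack into one int32x4_t. */
    static inline int32x4_t round_shift10_narrow(int64x2_t lo, int64x2_t hi) {
        return vcombine_s32(vmovn_s64(vrshrq_n_s64(lo, 10)),
                            vmovn_s64(vrshrq_n_s64(hi, 10)));
    }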
/external/pffft/
sse2neon.h 3045 int32x2_t a_lo = vmovn_s64(vreinterpretq_s64_m128i(a)); in _mm_mul_epi32()
3046 int32x2_t b_lo = vmovn_s64(vreinterpretq_s64_m128i(b)); in _mm_mul_epi32()
3752 int32x4_t ab02 = vcombine_s32(vmovn_s64(a), vmovn_s64(b)); in _mm_hsub_epi32()
/external/llvm-project/clang/test/CodeGen/
aarch64-neon-misc.c 1799 return vmovn_s64(a); in test_vmovn_s64()
arm_neon_intrinsics.c 8018 return vmovn_s64(a); in test_vmovn_s64()
/external/clang/test/CodeGen/
aarch64-neon-misc.c 1952 return vmovn_s64(a); in test_vmovn_s64()
arm_neon_intrinsics.c 9895 return vmovn_s64(a); in test_vmovn_s64()
/external/neon_2_sse/
NEON_2_SSE.h 1795 _NEON2SSESTORAGE int32x2_t vmovn_s64(int64x2_t a); // VMOVN.I64 d0,q0
12906 _NEON2SSESTORAGE int32x2_t vmovn_s64(int64x2_t a); // VMOVN.I64 d0,q0
12907 _NEON2SSE_INLINE int32x2_t vmovn_s64(int64x2_t a) in vmovn_s64() function
12923 #define vmovn_u64 vmovn_s64