/external/libvpx/libvpx/vpx_dsp/arm/ |
D | highbd_idct8x8_add_neon.c |
    244  b[0] = vmovn_s32(a[0]);  in vpx_highbd_idct8x8_12_add_neon()
    245  b[1] = vmovn_s32(a[1]);  in vpx_highbd_idct8x8_12_add_neon()
    246  b[2] = vmovn_s32(a[2]);  in vpx_highbd_idct8x8_12_add_neon()
    247  b[3] = vmovn_s32(a[3]);  in vpx_highbd_idct8x8_12_add_neon()
    318  b[0] = vcombine_s16(vmovn_s32(a[0]), vmovn_s32(a[1]));  in vpx_highbd_idct8x8_64_add_neon()
    319  b[1] = vcombine_s16(vmovn_s32(a[2]), vmovn_s32(a[3]));  in vpx_highbd_idct8x8_64_add_neon()
    320  b[2] = vcombine_s16(vmovn_s32(a[4]), vmovn_s32(a[5]));  in vpx_highbd_idct8x8_64_add_neon()
    321  b[3] = vcombine_s16(vmovn_s32(a[6]), vmovn_s32(a[7]));  in vpx_highbd_idct8x8_64_add_neon()
    322  b[4] = vcombine_s16(vmovn_s32(a[8]), vmovn_s32(a[9]));  in vpx_highbd_idct8x8_64_add_neon()
    323  b[5] = vcombine_s16(vmovn_s32(a[10]), vmovn_s32(a[11]));  in vpx_highbd_idct8x8_64_add_neon()
    [all …]
|
D | mem_neon.h |
    42  const int16x4_t s0 = vmovn_s32(v0.val[0]);  in load_tran_low_to_s16x2q()
    43  const int16x4_t s1 = vmovn_s32(v0.val[1]);  in load_tran_low_to_s16x2q()
    44  const int16x4_t s2 = vmovn_s32(v1.val[0]);  in load_tran_low_to_s16x2q()
    45  const int16x4_t s3 = vmovn_s32(v1.val[1]);  in load_tran_low_to_s16x2q()
    59  const int16x4_t s0 = vmovn_s32(v0);  in load_tran_low_to_s16q()
    60  const int16x4_t s1 = vmovn_s32(v1);  in load_tran_low_to_s16q()
    70  return vmovn_s32(v0);  in load_tran_low_to_s16d()
|
D | highbd_idct4x4_add_neon.c |
    64  a[0] = vcombine_s16(vmovn_s32(c[0]), vmovn_s32(c[1]));  in vpx_highbd_idct4x4_16_add_neon()
    65  a[1] = vcombine_s16(vmovn_s32(c[2]), vmovn_s32(c[3]));  in vpx_highbd_idct4x4_16_add_neon()
|
D | quantize_neon.c |
    30  vst1q_s16(dqcoeff, vcombine_s16(vmovn_s32(dqcoeff_0), vmovn_s32(dqcoeff_1)));  in calculate_dqcoeff_and_store()
|
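All of the hits in this directory narrow 32-bit transform data back to 16 bits: in the high-bit-depth configuration libvpx stores coefficients in 32-bit containers (tran_low_t), and the NEON code packs them into int16 lanes with vmovn_s32, usually combining two halves with vcombine_s16. vmovn_s32 keeps only the low 16 bits of each lane, with no saturation, so it is used where the values are already known to fit; vqmovn_s32 is the saturating alternative. A minimal sketch of the recurring pattern (not the library's exact code; the helper name is illustrative):

    #include <arm_neon.h>

    /* Narrow eight 32-bit coefficients that are known to fit in int16
     * into a single int16x8_t, as in load_tran_low_to_s16q() above. */
    static inline int16x8_t narrow_pack_s32(const int32_t *coeffs) {
      const int32x4_t lo = vld1q_s32(coeffs);
      const int32x4_t hi = vld1q_s32(coeffs + 4);
      /* vmovn_s32 truncates to the low 16 bits of each lane; use
       * vqmovn_s32 instead if the inputs could exceed the int16 range. */
      return vcombine_s16(vmovn_s32(lo), vmovn_s32(hi));
    }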
/external/libvpx/libvpx/vp9/common/arm/neon/ |
D | vp9_highbd_iht8x8_add_neon.c |
    185  c[0] = vcombine_s16(vmovn_s32(a[0]), vmovn_s32(a[1]));  in vp9_highbd_iht8x8_64_add_neon()
    186  c[1] = vcombine_s16(vmovn_s32(a[2]), vmovn_s32(a[3]));  in vp9_highbd_iht8x8_64_add_neon()
    187  c[2] = vcombine_s16(vmovn_s32(a[4]), vmovn_s32(a[5]));  in vp9_highbd_iht8x8_64_add_neon()
    188  c[3] = vcombine_s16(vmovn_s32(a[6]), vmovn_s32(a[7]));  in vp9_highbd_iht8x8_64_add_neon()
    189  c[4] = vcombine_s16(vmovn_s32(a[8]), vmovn_s32(a[9]));  in vp9_highbd_iht8x8_64_add_neon()
    190  c[5] = vcombine_s16(vmovn_s32(a[10]), vmovn_s32(a[11]));  in vp9_highbd_iht8x8_64_add_neon()
    191  c[6] = vcombine_s16(vmovn_s32(a[12]), vmovn_s32(a[13]));  in vp9_highbd_iht8x8_64_add_neon()
    192  c[7] = vcombine_s16(vmovn_s32(a[14]), vmovn_s32(a[15]));  in vp9_highbd_iht8x8_64_add_neon()
|
D | vp9_highbd_iht4x4_add_neon.c |
    89  a[0] = vcombine_s16(vmovn_s32(c[0]), vmovn_s32(c[1]));  in vp9_highbd_iht4x4_16_add_neon()
    90  a[1] = vcombine_s16(vmovn_s32(c[2]), vmovn_s32(c[3]));  in vp9_highbd_iht4x4_16_add_neon()
|
/external/XNNPACK/src/qs8-requantization/ |
D | fp32-neon.c |
    127  const int16x8_t xy_packed = vcombine_s16(vmovn_s32(x_biased), vmovn_s32(y_biased));  in xnn_qs8_requantize_fp32__neon()
    128  const int16x8_t zw_packed = vcombine_s16(vmovn_s32(z_biased), vmovn_s32(w_biased));  in xnn_qs8_requantize_fp32__neon()
|
/external/XNNPACK/src/qu8-requantization/ |
D | fp32-neon.c |
    103  const int16x8_t xy_packed = vcombine_s16(vmovn_s32(x_biased), vmovn_s32(y_biased));  in xnn_qu8_requantize_fp32__neon()
    104  const int16x8_t zw_packed = vcombine_s16(vmovn_s32(z_biased), vmovn_s32(w_biased));  in xnn_qu8_requantize_fp32__neon()
|
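Both requantization kernels end the same way: the scaled, zero-point-biased 32-bit values are packed to int16 lanes with vmovn_s32 + vcombine_s16 and only then saturated down to 8 bits. The non-saturating narrow is acceptable at that point because the rescaled results sit well inside the int16 range; the final saturating narrow does the clamping to 8 bits. A rough sketch of that last stage (assumed shape, not XNNPACK's exact code; the qu8 kernel would use vqmovun_s16 to produce unsigned output):

    #include <arm_neon.h>

    /* Pack four biased int32x4_t vectors into 16 saturated int8 results. */
    static inline int8x16_t pack_biased_to_s8(int32x4_t x, int32x4_t y,
                                              int32x4_t z, int32x4_t w) {
      const int16x8_t xy = vcombine_s16(vmovn_s32(x), vmovn_s32(y));
      const int16x8_t zw = vcombine_s16(vmovn_s32(z), vmovn_s32(w));
      /* vqmovn_s16 saturates each lane to [-128, 127]. */
      return vcombine_s8(vqmovn_s16(xy), vqmovn_s16(zw));
    }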
/external/webrtc/modules/audio_coding/codecs/isac/fix/source/ |
D | transform_neon.c |
    126  outre16x4x4.val[0] = vmovn_s32(inre32x4x4.val[0]);  in PreShiftW32toW16Neon()
    127  outre16x4x4.val[1] = vmovn_s32(inre32x4x4.val[1]);  in PreShiftW32toW16Neon()
    128  outre16x4x4.val[2] = vmovn_s32(inre32x4x4.val[2]);  in PreShiftW32toW16Neon()
    129  outre16x4x4.val[3] = vmovn_s32(inre32x4x4.val[3]);  in PreShiftW32toW16Neon()
    130  outim16x4x4.val[0] = vmovn_s32(inim32x4x4.val[0]);  in PreShiftW32toW16Neon()
    131  outim16x4x4.val[1] = vmovn_s32(inim32x4x4.val[1]);  in PreShiftW32toW16Neon()
    132  outim16x4x4.val[2] = vmovn_s32(inim32x4x4.val[2]);  in PreShiftW32toW16Neon()
    133  outim16x4x4.val[3] = vmovn_s32(inim32x4x4.val[3]);  in PreShiftW32toW16Neon()
    199  int16x4_t outre_0 = vmovn_s32(outr0);  in PostShiftAndSeparateNeon()
    200  int16x4_t outim_0 = vmovn_s32(outi0);  in PostShiftAndSeparateNeon()
    [all …]
|
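The iSAC fixed-point transform keeps intermediate FFT data in 32-bit Q-format and drops it back to 16 bits here. When the shift amount is only known at run time, the single-instruction narrowing shift vrshrn_n_s32 cannot be used (its immediate must be a compile-time constant); a common shape, also visible in the libgav1 film_grain_neon.cc hit further down, is to shift with vrshlq_s32 (a negative count is a rounding right shift) and then narrow with vmovn_s32. A small sketch of that idea (hypothetical helper, not the WebRTC code):

    #include <arm_neon.h>

    /* Rounding right-shift by a run-time amount, then narrow to int16.
     * The caller guarantees the shifted values fit in 16 bits. */
    static inline int16x4_t shift_and_narrow_s32(int32x4_t in, int right_shift) {
      const int32x4_t shifted = vrshlq_s32(in, vdupq_n_s32(-right_shift));
      return vmovn_s32(shifted);
    }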
/external/tensorflow/tensorflow/lite/kernels/internal/optimized/integer_ops/ |
D | add.h |
    126  const int16x4_t s11_narrowed = vmovn_s32(s11);  in AddElementwise()
    127  const int16x4_t s12_narrowed = vmovn_s32(s12);  in AddElementwise()
    128  const int16x4_t s21_narrowed = vmovn_s32(s21);  in AddElementwise()
    129  const int16x4_t s22_narrowed = vmovn_s32(s22);  in AddElementwise()
    228  const int16x4_t s1_narrowed = vmovn_s32(s1);  in AddScalarBroadcast()
    229  const int16x4_t s2_narrowed = vmovn_s32(s2);  in AddScalarBroadcast()
|
D | mean.h |
    105  int16x4_t narrowed_low_low = vmovn_s32(temp_sum.val[0]);  in MeanImpl()
    106  int16x4_t narrowed_high_low = vmovn_s32(temp_sum.val[1]);  in MeanImpl()
    107  int16x4_t narrowed_low_high = vmovn_s32(temp_sum.val[2]);  in MeanImpl()
    108  int16x4_t narrowed_high_high = vmovn_s32(temp_sum.val[3]);  in MeanImpl()
|
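In the quantized add and mean kernels the 32-bit values being narrowed are rescaled accumulators that already fit comfortably in 16 bits; the code packs them with vmovn_s32 + vcombine_s16, adds the output zero point in the 16-bit domain, saturates to int8, and clamps to the activation range. A hedged sketch of that epilogue (helper and parameter names are illustrative, not TFLite's exact code):

    #include <arm_neon.h>

    /* Final stage of a quantized int8 elementwise add. */
    static inline int8x8_t quantized_add_epilogue(int32x4_t s1, int32x4_t s2,
                                                  int16_t output_offset,
                                                  int8_t act_min, int8_t act_max) {
      const int16x8_t sum = vcombine_s16(vmovn_s32(s1), vmovn_s32(s2));
      const int16x8_t biased = vaddq_s16(sum, vdupq_n_s16(output_offset));
      int8x8_t out = vqmovn_s16(biased);       /* saturate to [-128, 127] */
      out = vmax_s8(out, vdup_n_s8(act_min));  /* clamp to activation range */
      out = vmin_s8(out, vdup_n_s8(act_max));
      return out;
    }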
/external/libaom/libaom/av1/encoder/arm/neon/ |
D | quantize_neon.c |
    129  vst1q_s16(dqcoeff, vcombine_s16(vmovn_s32(dqcoeff_0), vmovn_s32(dqcoeff_1)));  in calculate_dqcoeff_lp_and_store()
|
/external/libvpx/libvpx/vp9/encoder/arm/neon/ |
D | vp9_quantize_neon.c |
    41  vst1q_s16(dqcoeff, vcombine_s16(vmovn_s32(dqcoeff_0), vmovn_s32(dqcoeff_1)));  in calculate_dqcoeff_and_store()
|
/external/tensorflow/tensorflow/lite/kernels/internal/optimized/ |
D | neon_tensor_utils.cc |
    1739  const int16x8_t result = vcombine_s16(vmovn_s32(x_0), vmovn_s32(x_1));  in NeonCwiseMul()
    1792  vcombine_s16(vmovn_s32(temp_val.val[0]), vmovn_s32(temp_val.val[1]));  in NeonCwiseMul()
    2343  int16x4_t min0_16x4 = vmovn_s32(min0_i32x4);  in NeonSymmetricQuantizeFloats()
    2344  int16x4_t min1_16x4 = vmovn_s32(min1_i32x4);  in NeonSymmetricQuantizeFloats()
    2426  int16x4_t min0_16x4 = vmovn_s32(min0_i32x4);  in NeonAsymmetricQuantizeFloats()
    2427  int16x4_t min1_16x4 = vmovn_s32(min1_i32x4);  in NeonAsymmetricQuantizeFloats()
    2559  vst1_s16(result, vmovn_s32(prod.val[0]));  in NeonVectorBatchVectorCwiseProductAccumulate()
    2560  vst1_s16(result + 4, vmovn_s32(prod.val[1]));  in NeonVectorBatchVectorCwiseProductAccumulate()
    2561  vst1_s16(result + 8, vmovn_s32(prod.val[2]));  in NeonVectorBatchVectorCwiseProductAccumulate()
    2562  vst1_s16(result + 12, vmovn_s32(prod.val[3]));  in NeonVectorBatchVectorCwiseProductAccumulate()
|
D | optimized_ops.h |
    1915  const int16x4_t s1_narrowed = vmovn_s32(s1);  in AddElementwise()
    1916  const int16x4_t s2_narrowed = vmovn_s32(s2);  in AddElementwise()
    2013  const int16x4_t s1_narrowed = vmovn_s32(s1);  in AddScalarBroadcast()
    2014  const int16x4_t s2_narrowed = vmovn_s32(s2);  in AddScalarBroadcast()
    2496  const auto p1_narrowed = vmovn_s32(p1);  in MulSimpleBroadcast()
    2497  const auto p2_narrowed = vmovn_s32(p2);  in MulSimpleBroadcast()
    6919  const int16x4_t narrowed_val_0 = vmovn_s32(casted_val_0);  in AffineQuantize()
    6920  const int16x4_t narrowed_val_1 = vmovn_s32(casted_val_1);  in AffineQuantize()
    7034  const int16x4_t narrowed_val_0 = vmovn_s32(casted_val_0);  in AffineQuantize()
    7035  const int16x4_t narrowed_val_1 = vmovn_s32(casted_val_1);  in AffineQuantize()
|
/external/webp/src/dsp/ |
D | enc_neon.c |
    537  const int16x4_t out0 = vmovn_s32(b0);  in FTransformWHT_NEON()
    538  const int16x4_t out1 = vmovn_s32(b1);  in FTransformWHT_NEON()
    539  const int16x4_t out2 = vmovn_s32(b2);  in FTransformWHT_NEON()
    540  const int16x4_t out3 = vmovn_s32(b3);  in FTransformWHT_NEON()
|
/external/libhevc/encoder/arm/ |
D | ihevce_common_utils_neon.c |
    159  a2 = vcombine_s16(vmovn_s32(reg0[0]), vmovn_s32(reg0[1]));  in ihevce_wt_avg_2d_16x1_neon()
    160  a3 = vcombine_s16(vmovn_s32(reg0[2]), vmovn_s32(reg0[3]));  in ihevce_wt_avg_2d_16x1_neon()
    209  a0 = vcombine_s16(vmovn_s32(a8), vmovn_s32(a10));  in ihevce_wt_avg_2d_8x1_neon()
    285  a8 = vcombine_s16(vmovn_s32(reg0[0]), vmovn_s32(reg0[1]));  in ihevce_wt_avg_2d_4xn_neon()
    286  a9 = vcombine_s16(vmovn_s32(reg0[2]), vmovn_s32(reg0[3]));  in ihevce_wt_avg_2d_4xn_neon()
|
/external/libaom/libaom/av1/common/arm/ |
D | mem_neon.h |
    527  const int16x4_t s0 = vmovn_s32(v0);  in load_tran_low_to_s16q()
    528  const int16x4_t s1 = vmovn_s32(v1);  in load_tran_low_to_s16q()
|
D | warp_plane_neon.c |
    699  result_final = vcombine_s16(vmovn_s32(res_lo), vmovn_s32(res_hi));  in av1_warp_affine_neon()
|
/external/libhevc/common/arm/ |
D | ihevc_resi_trans_neon.c |
    345  vst1_s16((pi2_dst + dst_strd), vmovn_s32(src1_4x32b));  in ihevc_resi_trans_4x4_ttype1_neon()
    346  vst1_s16(pi2_dst, vmovn_s32(src0_4x32b));  in ihevc_resi_trans_4x4_ttype1_neon()
    347  vst1_s16((pi2_dst + 2 * dst_strd), vmovn_s32(src2_4x32b));  in ihevc_resi_trans_4x4_ttype1_neon()
    348  vst1_s16((pi2_dst + 3 * dst_strd), vmovn_s32(src3_4x32b));  in ihevc_resi_trans_4x4_ttype1_neon()
|
/external/libgav1/libgav1/src/dsp/arm/ |
D | film_grain_neon.cc |
    680  vmovn_s32(vrshlq_s32(upscaled_noise_lo, scaling_shift_vect));  in ScaleNoise()
    682  vmovn_s32(vrshlq_s32(upscaled_noise_hi, scaling_shift_vect));  in ScaleNoise()
|
/external/pffft/ |
D | sse2neon.h |
    3681  int16x8_t ab0246 = vcombine_s16(vmovn_s32(a), vmovn_s32(b));  in _mm_hsub_epi16()
    3702  int16x8_t ab0246 = vcombine_s16(vmovn_s32(a), vmovn_s32(b));  in _mm_hadds_epi16()
    3725  int16x8_t ab0246 = vcombine_s16(vmovn_s32(a), vmovn_s32(b));  in _mm_hsubs_epi16()
|
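These three sse2neon helpers emulate SSSE3 horizontal add/subtract on 16-bit lanes. The visible fragment shows the trick: reinterpret the vector as four 32-bit lanes, then vmovn_s32 extracts the even-indexed 16-bit elements (ab0246) while a narrowing shift by 16 would extract the odd ones. A sketch of the idea for the _mm_hsub_epi16 case (my reconstruction of the fragment, not the library verbatim):

    #include <arm_neon.h>

    /* Horizontal subtract of adjacent 16-bit pairs: a0-a1, a2-a3, ...,
     * b0-b1, b2-b3, ...  (the semantics of _mm_hsub_epi16). */
    static inline int16x8_t hsub_pairs_s16(int16x8_t a, int16x8_t b) {
      const int32x4_t a32 = vreinterpretq_s32_s16(a);
      const int32x4_t b32 = vreinterpretq_s32_s16(b);
      const int16x8_t even = vcombine_s16(vmovn_s32(a32), vmovn_s32(b32));
      const int16x8_t odd =
          vcombine_s16(vshrn_n_s32(a32, 16), vshrn_n_s32(b32, 16));
      return vsubq_s16(even, odd);
    }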
/external/libaom/libaom/aom_dsp/simd/ |
D | v64_intrinsics_arm.h |
    295  return vreinterpret_s64_s16(vmovn_s32(vshrq_n_s32(  in v64_mulhi_s16()
|
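v64_mulhi_s16 wants the high 16 bits of a signed 16x16-bit multiply. The cut-off snippet suggests the standard NEON recipe: widen with a full multiply, arithmetic-shift the 32-bit products right by 16, then keep the low half of each lane with vmovn_s32. A sketch of that recipe (an assumed completion of the truncated line, not the libaom source):

    #include <arm_neon.h>

    /* High 16 bits of each signed 16x16 -> 32-bit product, four lanes. */
    static inline int16x4_t mulhi_s16(int16x4_t a, int16x4_t b) {
      return vmovn_s32(vshrq_n_s32(vmull_s16(a, b), 16));
    }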
/external/llvm-project/clang/test/CodeGen/ |
D | aarch64-neon-misc.c |
    1791  return vmovn_s32(a);  in test_vmovn_s32()
|
/external/clang/test/CodeGen/ |
D | aarch64-neon-misc.c |
    1943  return vmovn_s32(a);  in test_vmovn_s32()
|
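The two clang codegen tests only pin down the intrinsic itself: vmovn_s32 takes an int32x4_t, keeps the low 16 bits of every lane (truncation, no saturation), and lowers to XTN on AArch64 and VMOVN on 32-bit ARM. In other words:

    #include <arm_neon.h>

    /* Mirrors the codegen test: each 32-bit lane is truncated to 16 bits,
     * e.g. 0x00012345 becomes 0x2345.  vqmovn_s32 and vqmovun_s32 are the
     * saturating counterparts. */
    int16x4_t narrow_example(int32x4_t a) {
      return vmovn_s32(a);
    }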