/external/webp/src/dsp/ |
D | cost_neon.c |
    29 const uint8x16_t eob = vcombine_u8(vqmovn_u16(eob_0), vqmovn_u16(eob_1)); in SetResidualCoeffs_NEON()
    75 const uint8x16_t F = vcombine_u8(vqmovn_u16(E0), vqmovn_u16(E1)); in GetResidualCost_NEON()
|
D | rescaler_neon.c |
    84 const uint8x8_t D = vqmovn_u16(vcombine_u16(C0, C1)); in RescalerExportRowExpand_NEON()
    104 const uint8x8_t F = vqmovn_u16(vcombine_u16(E0, E1)); in RescalerExportRowExpand_NEON()
    144 const uint8x8_t E = vqmovn_u16(vcombine_u16(D0, D1)); in RescalerExportRowShrink_NEON()
    161 const uint8x8_t C = vqmovn_u16(vcombine_u16(B0, B1)); in RescalerExportRowShrink_NEON()
|
D | yuv_neon.c | 46 return vqmovn_u16(Y2); in ConvertRGBToY_NEON()
|
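The libwebp matches above show the two usual shapes of this intrinsic: cost_neon.c narrows two uint16x8_t halves and recombines them into one uint8x16_t, while rescaler_neon.c combines two uint16x4_t halves first and narrows once. A minimal, self-contained sketch of both shapes follows; the names and input values are illustrative, not libwebp's.

    /* Sketch of the narrowing patterns in the libwebp entries above. */
    #include <arm_neon.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
      const uint16_t in[16] = {0, 1, 127, 128, 255, 256, 1000, 65535,
                               10, 20, 300, 400, 500, 600, 700, 800};
      uint8_t out[16];

      /* cost_neon.c shape: narrow each uint16x8_t half, then recombine
       * into a single uint8x16_t (lanes above 255 saturate to 255). */
      const uint16x8_t lo = vld1q_u16(in);
      const uint16x8_t hi = vld1q_u16(in + 8);
      const uint8x16_t packed = vcombine_u8(vqmovn_u16(lo), vqmovn_u16(hi));
      vst1q_u8(out, packed);

      /* rescaler_neon.c shape: combine two 4-lane halves first, narrow once. */
      const uint8x8_t narrow8 = vqmovn_u16(vcombine_u16(vget_low_u16(lo),
                                                        vget_high_u16(lo)));
      (void)narrow8;

      for (int i = 0; i < 16; ++i) printf("%5u -> %3u\n", (unsigned)in[i], out[i]);
      return 0;
    }
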
/external/libhevc/common/arm/ |
D | ihevc_weighted_pred_neon_intr.c |
    165 sto_res = vqmovn_u16(sto_res_tmp3); in ihevc_weighted_pred_uni_neonintr()
    173 sto_res = vqmovn_u16(sto_res_tmp3); in ihevc_weighted_pred_uni_neonintr()
    312 sto_res = vqmovn_u16(sto_res_tmp3); in ihevc_weighted_pred_chroma_uni_neonintr()
    320 sto_res = vqmovn_u16(sto_res_tmp3); in ihevc_weighted_pred_chroma_uni_neonintr()
    476 sto_res = vqmovn_u16(sto_res_tmp3); in ihevc_weighted_pred_bi_neonintr()
    484 sto_res = vqmovn_u16(sto_res_tmp3); in ihevc_weighted_pred_bi_neonintr()
    662 sto_res = vqmovn_u16(sto_res_tmp3); in ihevc_weighted_pred_chroma_bi_neonintr()
    670 sto_res = vqmovn_u16(sto_res_tmp3); in ihevc_weighted_pred_chroma_bi_neonintr()
    811 sto_res = vqmovn_u16(sto_res_tmp3); in ihevc_weighted_pred_bi_default_neonintr()
    819 sto_res = vqmovn_u16(sto_res_tmp3); in ihevc_weighted_pred_bi_default_neonintr()
    [all …]
|
D | ihevc_weighted_pred_uni.s |
    187 vqmovn.u16 d4,q2 @vqmovn_u16(sto_res_tmp3)
    201 vqmovn.u16 d6,q3 @vqmovn_u16(sto_res_tmp3) ii iteration
    204 vqmovn.u16 d10,q5 @vqmovn_u16(sto_res_tmp3) iii iteration
    209 vqmovn.u16 d12,q6 @vqmovn_u16(sto_res_tmp3) iv iteration
|
D | ihevc_weighted_pred_bi.s |
    233 vqmovn.u16 d4,q2 @vqmovn_u16(sto_res_tmp3)
    248 vqmovn.u16 d10,q5 @vqmovn_u16(sto_res_tmp3) ii iteration
    254 vqmovn.u16 d14,q7 @vqmovn_u16(sto_res_tmp3) iii iteration
    258 vqmovn.u16 d18,q9 @vqmovn_u16(sto_res_tmp3) iv iteration
|
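In the libhevc weighted-prediction kernels above (both the C intrinsics and the hand-written .s versions), vqmovn_u16 is the last step that clamps the weighted, rounded and offset result back to 8-bit pixels. The sketch below only mirrors that general shape under simplifying assumptions (non-negative offset, unsigned math throughout); it is not libhevc's actual code, and every name in it is made up.

    /* Simplified uni-directional weighted prediction for 8 pixels:
     * out = sat8(((in * weight + round) >> shift) + offset). */
    #include <arm_neon.h>
    #include <stdint.h>
    #include <stdio.h>

    #define WP_SHIFT 6  /* log2 of the weight denominator, illustrative */

    static void weighted_pred_8px(const uint8_t *src, uint8_t *dst,
                                  uint8_t weight, uint16_t offset) {
      const uint8x8_t px = vld1_u8(src);
      /* Widen to 16 bits while multiplying by the weight. */
      uint16x8_t acc = vmull_u8(px, vdup_n_u8(weight));
      /* Rounding shift by the weight denominator. */
      acc = vrshrq_n_u16(acc, WP_SHIFT);
      /* Add the (non-negative, in this sketch) offset. */
      acc = vaddq_u16(acc, vdupq_n_u16(offset));
      /* Saturating narrow back to 8-bit pixels: lanes above 255 clamp to 255. */
      vst1_u8(dst, vqmovn_u16(acc));
    }

    int main(void) {
      const uint8_t src[8] = {0, 16, 64, 100, 128, 200, 250, 255};
      uint8_t dst[8];
      weighted_pred_8px(src, dst, /*weight=*/80, /*offset=*/10);
      for (int i = 0; i < 8; ++i) printf("%3u -> %3u\n", src[i], dst[i]);
      return 0;
    }
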
/external/libaom/libaom/av1/common/arm/ |
D | jnt_convolve_neon.c |
    168 *t0 = vqmovn_u16(vreinterpretq_u16_s16(tmp4)); in compute_avg_4x4()
    169 *t1 = vqmovn_u16(vreinterpretq_u16_s16(tmp5)); in compute_avg_4x4()
    192 *t0 = vqmovn_u16(vreinterpretq_u16_s16(tmp4)); in compute_avg_4x4()
    193 *t1 = vqmovn_u16(vreinterpretq_u16_s16(tmp5)); in compute_avg_4x4()
    282 *t0 = vqmovn_u16(vreinterpretq_u16_s16(f0)); in compute_avg_8x4()
    283 *t1 = vqmovn_u16(vreinterpretq_u16_s16(f1)); in compute_avg_8x4()
    284 *t2 = vqmovn_u16(vreinterpretq_u16_s16(f2)); in compute_avg_8x4()
    285 *t3 = vqmovn_u16(vreinterpretq_u16_s16(f3)); in compute_avg_8x4()
    311 *t0 = vqmovn_u16(vreinterpretq_u16_s16(f0)); in compute_avg_8x4()
    312 *t1 = vqmovn_u16(vreinterpretq_u16_s16(f1)); in compute_avg_8x4()
    [all …]
|
D | convolve_neon.h | 62 res = vqmovn_u16(tmp); in wiener_convolve8_vert_4x8()
|
D | convolve_neon.c |
    191 return vqmovn_u16(res); in convolve8_vert_8x4_s32()
    1357 d01 = vqmovn_u16(dd0); in av1_convolve_2d_sr_neon()
    1358 d23 = vqmovn_u16(dd1); in av1_convolve_2d_sr_neon()
    1421 d01 = vqmovn_u16(dd0); in av1_convolve_2d_sr_neon()
|
D | selfguided_neon.c |
    1569 t0 = vqmovn_u16(r4); in av1_apply_selfguided_restoration_neon()
    1574 t0 = vqmovn_u16(r4); in av1_apply_selfguided_restoration_neon()
|
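The libaom matches combine vqmovn_u16 with vreinterpretq_u16_s16: a signed 16-bit intermediate is reinterpreted as unsigned and then saturating-narrowed, which clips the high side to 255 in one instruction. The sketch below shows that idiom on an invented compound-average step; the offsets, shifts and the explicit vmaxq_s16 guard are illustrative choices, not libaom's constants.

    /* Reinterpret-then-narrow idiom: average two 16-bit prediction rows,
     * remove an offset, round, and pack to 8-bit pixels. */
    #include <arm_neon.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint8x8_t average_and_pack(uint16x8_t ref, uint16x8_t src,
                                      int16_t offset, int round_bits) {
      const uint16x8_t avg_u = vhaddq_u16(ref, src);        /* (ref + src) >> 1 */
      int16x8_t avg = vreinterpretq_s16_u16(avg_u);
      avg = vsubq_s16(avg, vdupq_n_s16(offset));            /* may go negative  */
      avg = vrshlq_s16(avg, vdupq_n_s16(-round_bits));      /* rounding >> shift */
      avg = vmaxq_s16(avg, vdupq_n_s16(0));                 /* clamp the low side */
      /* Reinterpret as unsigned and saturating-narrow: clamps the high side. */
      return vqmovn_u16(vreinterpretq_u16_s16(avg));
    }

    int main(void) {
      uint16_t ref[8] = {100, 300, 800, 1000, 2000, 4000, 6000, 8000};
      uint16_t src[8] = {100, 200, 700, 1200, 2200, 3800, 6000, 8100};
      uint8_t out[8];
      vst1_u8(out, average_and_pack(vld1q_u16(ref), vld1q_u16(src),
                                    /*offset=*/512, /*round_bits=*/2));
      for (int i = 0; i < 8; ++i) printf("%u\n", out[i]);
      return 0;
    }
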
/external/skia/include/private/ |
D | SkNx_neon.h |
    631 return vqmovn_u16(vcombine_u16(_16, _16));
    666 return vqmovn_u16(vcombine_u16(a16, b16));
    682 return vqmovn_u16(src.fVec);
    687 return vqmovn_u16(vcombine_u16(_16, _16));
    692 return vqmovn_u16(vcombine_u16(_16, _16));
|
/external/skqp/include/private/ |
D | SkNx_neon.h |
    653 return vqmovn_u16(vcombine_u16(_16, _16));
    688 return vqmovn_u16(vcombine_u16(a16, b16));
    704 return vqmovn_u16(src.fVec);
    709 return vqmovn_u16(vcombine_u16(_16, _16));
    714 return vqmovn_u16(vcombine_u16(_16, _16));
|
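Skia's SkNx_neon.h (and the identical skqp copy) repeatedly duplicates a uint16x4_t into both halves of a uint16x8_t purely so that vqmovn_u16 can be applied, then keeps only the low four bytes of the result. A small stand-alone sketch of that duplication trick, with hypothetical names rather than Skia's:

    /* Narrow four 16-bit lanes to four bytes by duplicating the half
     * vector to satisfy vqmovn_u16's input type. */
    #include <arm_neon.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static void narrow4(const uint16_t in[4], uint8_t out[4]) {
      const uint16x4_t h = vld1_u16(in);
      const uint8x8_t n = vqmovn_u16(vcombine_u16(h, h));  /* lanes 0-3 == lanes 4-7 */
      uint8_t tmp[8];
      vst1_u8(tmp, n);
      memcpy(out, tmp, 4);  /* only the low four bytes are meaningful */
    }

    int main(void) {
      const uint16_t px[4] = {0, 128, 255, 4096};
      uint8_t b[4];
      narrow4(px, b);
      printf("%u %u %u %u\n", b[0], b[1], b[2], b[3]);  /* 0 128 255 255 */
      return 0;
    }
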
/external/tensorflow/tensorflow/core/kernels/ |
D | quantized_instance_norm.cc |
    239 vqmovn_u16(vcombine_u16(normed_uint16[3], normed_uint16[2]))); in InstanceNorm()
    241 vqmovn_u16(vcombine_u16(normed_uint16[1], normed_uint16[0]))); in InstanceNorm()
|
/external/tensorflow/tensorflow/lite/kernels/internal/optimized/ |
D | optimized_ops.h |
    3340 uint8x8_t buf8 = vqmovn_u16(vld1q_u16(buf)); \ in AveragePool()
    3354 uint8x8_t buf8 = vqmovn_u16(vld1q_u16(buf)); in AveragePool()
    3897 vcombine_u8(vqmovn_u16(result_2), vqmovn_u16(result_1)); in StoreValue()
    5828 vcombine_u8(vqmovn_u16(result_1), vqmovn_u16(result_2)); in Quantize()
    6173 const uint8x8_t narrowed_first_half = vqmovn_u16(output_first_half);
    6174 const uint8x8_t narrowed_second_half = vqmovn_u16(output_second_half);
    6415 const uint8x8_t narrowed_first_half = vqmovn_u16(output_first_half);
    6416 const uint8x8_t narrowed_second_half = vqmovn_u16(output_second_half);
|
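In the TensorFlow kernels above, vqmovn_u16 closes out integer pooling and quantization loops: 16-bit intermediates are loaded, saturating-narrowed to uint8, and stored, sometimes two halves at a time via vcombine_u8. The loop below follows the AveragePool pattern, vqmovn_u16(vld1q_u16(buf)), with illustrative buffer names and a scalar tail:

    /* Narrow a uint16 intermediate buffer to uint8 with saturation,
     * 8 lanes per iteration plus a scalar tail. */
    #include <arm_neon.h>
    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    static void narrow_buffer(const uint16_t *in, uint8_t *out, size_t n) {
      size_t i = 0;
      for (; i + 8 <= n; i += 8) {
        vst1_u8(out + i, vqmovn_u16(vld1q_u16(in + i)));
      }
      for (; i < n; ++i) {                       /* scalar tail */
        out[i] = in[i] > 255 ? 255 : (uint8_t)in[i];
      }
    }

    int main(void) {
      uint16_t acc[10] = {0, 50, 100, 200, 255, 256, 300, 1024, 65535, 7};
      uint8_t q[10];
      narrow_buffer(acc, q, 10);
      for (int i = 0; i < 10; ++i) printf("%u ", q[i]);
      printf("\n");  /* 0 50 100 200 255 255 255 255 255 7 */
      return 0;
    }
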
/external/llvm-project/clang/test/CodeGen/ |
D | aarch64-neon-misc.c | 1995 return vqmovn_u16(a); in test_vqmovn_u16()
|
D | arm_neon_intrinsics.c | 10205 return vqmovn_u16(a); in test_vqmovn_u16()
|
/external/clang/test/CodeGen/ |
D | aarch64-neon-misc.c | 2178 return vqmovn_u16(a); in test_vqmovn_u16()
|
D | arm_neon_intrinsics.c | 12306 return vqmovn_u16(a); in test_vqmovn_u16()
|
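Both generations of clang codegen tests above simply return vqmovn_u16(a) from a wrapper function and check the emitted code. A stand-alone equivalent is below; on AArch64 it typically lowers to a single uqxtn, and on 32-bit ARM to vqmovn.u16 (the tests' exact CHECK lines are not reproduced here).

    /* The whole function should compile to one narrowing instruction:
     * uqxtn v0.8b, v0.8h on AArch64, or vqmovn.u16 d0, q0 on 32-bit ARM. */
    #include <arm_neon.h>

    uint8x8_t test_vqmovn_u16(uint16x8_t a) {
      return vqmovn_u16(a);
    }
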
/external/libgav1/libgav1/src/dsp/arm/ |
D | loop_restoration_neon.cc |
    252 return vqmovn_u16(vcombine_u16(sum_lo_16, sum_hi_16)); in WienerVertical()
    1015 const uint8x8_t idx = vqmovn_u16(z01); in CalculateIntermediate()
|
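libgav1 uses the familiar combine-then-narrow shape for the Wiener vertical output, and at line 1015 narrows 16-bit values into a byte vector named idx, presumably for use as table-lookup indices. The sketch below shows 16-bit indices narrowed with vqmovn_u16 and fed to vtbl1_u8; the table and index values are invented and are not libgav1's.

    /* Narrow 16-bit indices to bytes and use them with a NEON table lookup. */
    #include <arm_neon.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
      const uint8_t table_bytes[8] = {10, 20, 30, 40, 50, 60, 70, 80};
      const uint16_t idx16[8]      = {0, 2, 4, 6, 7, 5, 3, 1};

      const uint8x8_t table = vld1_u8(table_bytes);
      /* Indices already fit in a byte, so saturation is a no-op here;
       * vqmovn_u16 is simply the cheapest 16->8 narrowing. */
      const uint8x8_t idx = vqmovn_u16(vld1q_u16(idx16));
      const uint8x8_t looked_up = vtbl1_u8(table, idx);

      uint8_t out[8];
      vst1_u8(out, looked_up);
      for (int i = 0; i < 8; ++i) printf("%u ", out[i]);
      printf("\n");  /* 10 30 50 70 80 60 40 20 */
      return 0;
    }
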
/external/arm-neon-tests/ |
D | ref-rvct-neon-nofp16.txt |
    5207 VQMOVN:3:vqmovn_u16 Neon cumulative saturation 0
    5239 VQMOVN:31:vqmovn_u16 Neon cumulative saturation 1
|
D | ref-rvct-neon.txt |
    6055 VQMOVN:3:vqmovn_u16 Neon cumulative saturation 0
    6089 VQMOVN:33:vqmovn_u16 Neon cumulative saturation 1
|
D | ref-rvct-all.txt |
    6055 VQMOVN:3:vqmovn_u16 Neon cumulative saturation 0
    6089 VQMOVN:33:vqmovn_u16 Neon cumulative saturation 1
|
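The reference files above record, for each vqmovn_u16 test vector, whether the operation saturated ("Neon cumulative saturation" 0 or 1); on 32-bit ARM, VQMOVN sets the QC bit in FPSCR when any lane is clamped. The check below detects saturation in the same spirit but by re-widening and comparing rather than by reading FPSCR; the input values are my own, not the suite's.

    /* Report whether vqmovn_u16 clamped any lane of its input. */
    #include <arm_neon.h>
    #include <stdint.h>
    #include <stdio.h>

    static int narrows_with_saturation(uint16x8_t a) {
      const uint8x8_t  narrowed = vqmovn_u16(a);
      const uint16x8_t widened  = vmovl_u8(narrowed);   /* zero-extend back */
      const uint16x8_t diff     = veorq_u16(widened, a);
      /* Reduce: any nonzero lane means at least one value was clamped to 255. */
      uint16x4_t m = vmax_u16(vget_low_u16(diff), vget_high_u16(diff));
      m = vpmax_u16(m, m);
      m = vpmax_u16(m, m);
      return vget_lane_u16(m, 0) != 0;
    }

    int main(void) {
      const uint16_t no_sat[8]  = {0, 1, 2, 3, 100, 200, 254, 255};
      const uint16_t has_sat[8] = {0, 1, 2, 3, 100, 200, 255, 256};
      printf("%d\n", narrows_with_saturation(vld1q_u16(no_sat)));   /* 0 */
      printf("%d\n", narrows_with_saturation(vld1q_u16(has_sat)));  /* 1 */
      return 0;
    }
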
/external/neon_2_sse/ |
D | NEON_2_SSE.h |
    1810 _NEON2SSESTORAGE uint8x8_t vqmovn_u16(uint16x8_t a); // VQMOVN.U16 d0,q0
    12998 _NEON2SSESTORAGE uint8x8_t vqmovn_u16(uint16x8_t a); // VQMOVN.s16 d0,q0
    12999 _NEON2SSE_INLINE uint8x8_t vqmovn_u16(uint16x8_t a) // VQMOVN.s16 d0,q0 in vqmovn_u16() function
|
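NEON_2_SSE.h declares vqmovn_u16 at line 1810 and defines an SSE implementation at 12999; the listing does not show that body. One straightforward way to emulate unsigned 16-to-8 saturating narrowing on x86 is to clamp to 255 first and then use the signed pack, as sketched below. This is an assumption about one possible approach (SSE4.1 for _mm_min_epu16), not the header's actual code; a bare _mm_packus_epi16 alone would misread lanes above 32767 as negative and produce 0.

    /* Possible x86 emulation of vqmovn_u16: clamp each unsigned 16-bit lane
     * to 255, then pack; after clamping, the signed interpretation used by
     * _mm_packus_epi16 can no longer go wrong. Not NEON_2_SSE's actual body. */
    #include <smmintrin.h>  /* SSE4.1: _mm_min_epu16 */
    #include <stdint.h>
    #include <stdio.h>

    static __m128i emulate_vqmovn_u16(__m128i a /* 8 x uint16 */) {
      const __m128i clamped = _mm_min_epu16(a, _mm_set1_epi16(255));
      /* Low 8 bytes hold the narrowed lanes; the high 8 bytes are a copy. */
      return _mm_packus_epi16(clamped, clamped);
    }

    int main(void) {
      const uint16_t in[8] = {0, 1, 255, 256, 1000, 32767, 32768, 65535};
      uint8_t out[16];
      _mm_storeu_si128((__m128i *)out,
                       emulate_vqmovn_u16(_mm_loadu_si128((const __m128i *)in)));
      for (int i = 0; i < 8; ++i) printf("%5u -> %3u\n", in[i], out[i]);
      /* Everything above 255, including 32768 and 65535, narrows to 255. */
      return 0;
    }
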