/external/libvpx/libvpx/vpx_dsp/arm/ |
D | sum_neon.h |
      29  return vadd_u32(vreinterpret_u32_u64(vget_low_u64(c)),  in horizontal_add_uint16x8()
      30                  vreinterpret_u32_u64(vget_high_u64(c)));  in horizontal_add_uint16x8()
      35  return vadd_u32(vreinterpret_u32_u64(vget_low_u64(b)),  in horizontal_add_uint32x4()
      36                  vreinterpret_u32_u64(vget_high_u64(b)));  in horizontal_add_uint32x4()
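(For context, a minimal sketch of the reduction pattern these sum_neon.h helpers implement, assuming a little-endian target and partial sums that fit in 32 bits; the function name is illustrative, not libvpx's:)

    #include <arm_neon.h>

    /* Widen with pairwise adds, then view the two 64-bit partial sums as
     * 32-bit lanes so one vadd_u32 + vget_lane_u32 yields the scalar total.
     * Correct only while each partial sum fits in 32 bits. */
    static uint32_t horizontal_add_u16x8_sketch(const uint16x8_t v) {
      const uint32x4_t a = vpaddlq_u16(v); /* 8 x u16 -> 4 x u32 */
      const uint64x2_t b = vpaddlq_u32(a); /* 4 x u32 -> 2 x u64 */
      const uint32x2_t c = vadd_u32(vreinterpret_u32_u64(vget_low_u64(b)),
                                    vreinterpret_u32_u64(vget_high_u64(b)));
      return vget_lane_u32(c, 0); /* low lane carries the sum */
    }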
|
D | intrapred_neon.c |
      274  const uint32x2_t r1 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 8));  in vpx_d45_predictor_4x4_neon()
      275  const uint32x2_t r2 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 16));  in vpx_d45_predictor_4x4_neon()
      276  const uint32x2_t r3 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 24));  in vpx_d45_predictor_4x4_neon()
      399  const uint32x2_t r2 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 8));  in vpx_d135_predictor_4x4_neon()
      400  const uint32x2_t r1 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 16));  in vpx_d135_predictor_4x4_neon()
      401  const uint32x2_t r0 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 24));  in vpx_d135_predictor_4x4_neon()
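(The d45/d135 snippets above rely on a byte-sliding trick; below is a simplified sketch of just that step, with illustrative names and without the surrounding setup in the real predictors:)

    #include <arm_neon.h>
    #include <stddef.h>
    #include <stdint.h>

    /* The filtered pixels sit in one 8-byte vector. Viewing it as a u64 lets
     * vshr_n_u64 slide the whole row right by 1, 2 or 3 pixels, and the low
     * 32-bit lane of each shifted copy is stored as one 4-pixel output row. */
    static void d45_4x4_sketch(uint8_t *dst, ptrdiff_t stride,
                               const uint8x8_t avg2) {
      const uint64x1_t avg2_u64 = vreinterpret_u64_u8(avg2);
      const uint32x2_t r0 = vreinterpret_u32_u8(avg2);
      const uint32x2_t r1 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 8));
      const uint32x2_t r2 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 16));
      const uint32x2_t r3 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 24));
      vst1_lane_u32((uint32_t *)(dst + 0 * stride), r0, 0);
      vst1_lane_u32((uint32_t *)(dst + 1 * stride), r1, 0);
      vst1_lane_u32((uint32_t *)(dst + 2 * stride), r2, 0);
      vst1_lane_u32((uint32_t *)(dst + 3 * stride), r3, 0);
    }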
|
D | highbd_loopfilter_neon.c |
      38  const uint64x1_t t1 = vpaddl_u32(vreinterpret_u32_u64(t0));  in calc_flat_status()
      39  return vget_lane_u32(vreinterpret_u32_u64(t1), 0);  in calc_flat_status()
|
D | loopfilter_neon.c | 51 vreinterpret_u32_u64(vpaddl_u32(vreinterpret_u32_u8(flat))), 0); in calc_flat_status_8()
|
/external/libaom/libaom/aom_dsp/arm/ |
D | sad_neon.c |
      43  d5 = vadd_u32(vreinterpret_u32_u64(vget_low_u64(q3)),  in aom_sad8x16_neon()
      44                vreinterpret_u32_u64(vget_high_u64(q3)));  in aom_sad8x16_neon()
      74  return vget_lane_u32(vreinterpret_u32_u64(d3), 0);  in aom_sad4x4_neon()
      105  d5 = vadd_u32(vreinterpret_u32_u64(vget_low_u64(q3)),  in aom_sad16x8_neon()
      106                vreinterpret_u32_u64(vget_high_u64(q3)));  in aom_sad16x8_neon()
      119  const uint32x2_t c = vadd_u32(vreinterpret_u32_u64(vget_low_u64(b)),  in horizontal_long_add_16x8()
      120                                vreinterpret_u32_u64(vget_high_u64(b)));  in horizontal_long_add_16x8()
      126  const uint32x2_t c = vadd_u32(vreinterpret_u32_u64(vget_low_u64(b)),  in horizontal_add_16x8()
      127                                vreinterpret_u32_u64(vget_high_u64(b)));  in horizontal_add_16x8()
|
D | sum_neon.h |
      35  return vadd_u32(vreinterpret_u32_u64(vget_low_u64(c)),  in horizontal_add_u16x8()
      36                  vreinterpret_u32_u64(vget_high_u64(c)));  in horizontal_add_u16x8()
|
D | sad4d_neon.c |
      27  const uint32x2_t c = vadd_u32(vreinterpret_u32_u64(vget_low_u64(b)),  in horizontal_long_add_16x8()
      28                                vreinterpret_u32_u64(vget_high_u64(b)));  in horizontal_long_add_16x8()
|
D | intrapred_neon.c |
      337  const uint32x2_t r2 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 8));  in aom_d135_predictor_4x4_neon()
      338  const uint32x2_t r1 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 16));  in aom_d135_predictor_4x4_neon()
      339  const uint32x2_t r0 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 24));  in aom_d135_predictor_4x4_neon()
|
/external/libhevc/encoder/arm/ |
D | ihevce_sad_compute_neon.c |
      69  vadd_u32(vreinterpret_u32_u64(vget_low_u64(c)), vreinterpret_u32_u64(vget_high_u64(c))), 0);  in ihevce_4x4_sad_computer_neon()
      95  vreinterpret_u32_u64(vget_low_u64(tmp_b)), vreinterpret_u32_u64(vget_high_u64(tmp_b)));  in ihevce_8xn_sad_computer_neon()
      125  vreinterpret_u32_u64(vget_low_u64(tmp_b)), vreinterpret_u32_u64(vget_high_u64(tmp_b)));  in ihevce_16xn_sad_computer_neon()
      159  vreinterpret_u32_u64(vget_low_u64(tmp_b)), vreinterpret_u32_u64(vget_high_u64(tmp_b)));  in ihevce_32xn_sad_computer_neon()
      201  vreinterpret_u32_u64(vget_low_u64(tmp_b)), vreinterpret_u32_u64(vget_high_u64(tmp_b)));  in ihevce_64xn_sad_computer_neon()
|
D | ihevce_ssd_and_sad_calculator_neon.c |
      82  vadd_u32(vreinterpret_u32_u64(vget_low_u64(c)), vreinterpret_u32_u64(vget_high_u64(c)));  in ihevce_ssd_and_sad_calculator_neon()
      117  vreinterpret_u32_u64(vget_low_u64(tmp_b)), vreinterpret_u32_u64(vget_high_u64(tmp_b)));  in ihevce_ssd_and_sad_calculator_neon()
      162  vreinterpret_u32_u64(vget_low_u64(tmp_b)), vreinterpret_u32_u64(vget_high_u64(tmp_b)));  in ihevce_ssd_and_sad_calculator_neon()
      221  vreinterpret_u32_u64(vget_low_u64(tmp_b)), vreinterpret_u32_u64(vget_high_u64(tmp_b)));  in ihevce_ssd_and_sad_calculator_neon()
      308  vreinterpret_u32_u64(vget_low_u64(tmp_b)), vreinterpret_u32_u64(vget_high_u64(tmp_b)));  in ihevce_ssd_and_sad_calculator_neon()
|
D | ihevce_coarse_layer_sad_neon.c |
      240  vreinterpret_u32_u64(vget_low_u64(c)),  in hme_store_4x4_sads_high_speed_neon()
      241  vreinterpret_u32_u64(vget_high_u64(c))),  in hme_store_4x4_sads_high_speed_neon()
      407  vreinterpret_u32_u64(vget_low_u64(c)),  in hme_store_4x4_sads_high_quality_neon()
      408  vreinterpret_u32_u64(vget_high_u64(c))),  in hme_store_4x4_sads_high_quality_neon()
      418  vreinterpret_u32_u64(vget_low_u64(c)),  in hme_store_4x4_sads_high_quality_neon()
      419  vreinterpret_u32_u64(vget_high_u64(c))),  in hme_store_4x4_sads_high_quality_neon()
|
/external/webp/src/dsp/ |
D | quant.h |
      26  return vadd_u32(vreinterpret_u32_u64(vget_low_u64(b)),  in horizontal_add_uint32x4()
      27                  vreinterpret_u32_u64(vget_high_u64(b)));  in horizontal_add_uint32x4()
|
D | dec_neon.c |
      1376  const uint32x2_t r2 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 8));  in RD4_NEON()
      1377  const uint32x2_t r1 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 16));  in RD4_NEON()
      1378  const uint32x2_t r0 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 24));  in RD4_NEON()
      1395  const uint32x2_t r1 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 8));  in LD4_NEON()
      1396  const uint32x2_t r2 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 16));  in LD4_NEON()
      1397  const uint32x2_t r3 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 24));  in LD4_NEON()
|
/external/libgav1/libgav1/src/dsp/arm/ |
D | intrapred_cfl_neon.cc |
      46  return vget_lane_u32(vreinterpret_u32_u64(b), 0);  in SumVector()
      56  return vget_lane_u32(vreinterpret_u32_u64(c), 0);  in SumVector()
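(A sketch of the shorter variant SumVector uses above: once pairwise adds have collapsed the sum into a single 64-bit lane, only a reinterpret and a lane read remain; the signature here is illustrative and assumes the sum fits in 32 bits:)

    #include <arm_neon.h>

    /* Pairwise-add the two 32-bit lanes into one 64-bit lane, then read its
     * low 32 bits through a reinterpret; no final vadd_u32 is needed. */
    static uint32_t sum_vector_sketch(const uint32x2_t a) {
      const uint64x1_t b = vpaddl_u32(a); /* 2 x u32 -> 1 x u64 */
      return vget_lane_u32(vreinterpret_u32_u64(b), 0);
    }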
|
/external/libvpx/libvpx/vp8/common/arm/neon/ |
D | sixtappredict_neon.c |
      255  s01_f3_q = vzip_u32(vreinterpret_u32_u64(vget_low_u64(s01_f3)),  in vp8_sixtap_predict4x4_neon()
      256                      vreinterpret_u32_u64(vget_high_u64(s01_f3)));  in vp8_sixtap_predict4x4_neon()
      257  s23_f3_q = vzip_u32(vreinterpret_u32_u64(vget_low_u64(s23_f3)),  in vp8_sixtap_predict4x4_neon()
      258                      vreinterpret_u32_u64(vget_high_u64(s23_f3)));  in vp8_sixtap_predict4x4_neon()
      333  s01_f3_q = vzip_u32(vreinterpret_u32_u64(vget_low_u64(s01_f3)),  in vp8_sixtap_predict4x4_neon()
      334                      vreinterpret_u32_u64(vget_high_u64(s01_f3)));  in vp8_sixtap_predict4x4_neon()
      335  s23_f3_q = vzip_u32(vreinterpret_u32_u64(vget_low_u64(s23_f3)),  in vp8_sixtap_predict4x4_neon()
      336                      vreinterpret_u32_u64(vget_high_u64(s23_f3)));  in vp8_sixtap_predict4x4_neon()
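(The vzip_u32 lines above are a cross-half shuffle; the sketch below shows just that step in isolation, with an illustrative name and without the rest of the six-tap filter:)

    #include <arm_neon.h>

    /* View each 64-bit half of the 128-bit value as two 32-bit lanes and
     * interleave them, so 4-byte groups from the low and high halves
     * alternate across the two output vectors. */
    static uint32x2x2_t zip_halves_sketch(const uint64x2_t s01_f3) {
      return vzip_u32(vreinterpret_u32_u64(vget_low_u64(s01_f3)),
                      vreinterpret_u32_u64(vget_high_u64(s01_f3)));
    }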
|
/external/libhevc/common/arm/ |
D | ihevc_resi_trans_neon.c |
      103  sad = vget_lane_u32(vadd_u32(vreinterpret_u32_u64(vget_low_u64(c)),  in ihevc_resi_trans_4x4_neon()
      104                               vreinterpret_u32_u64(vget_high_u64(c))),  in ihevc_resi_trans_4x4_neon()
      250  sad = vget_lane_u32(vadd_u32(vreinterpret_u32_u64(vget_low_u64(c)),  in ihevc_resi_trans_4x4_ttype1_neon()
      251                               vreinterpret_u32_u64(vget_high_u64(c))),  in ihevc_resi_trans_4x4_ttype1_neon()
|
D | ihevc_intra_pred_filters_neon_intr.c |
      960  dc_val = (vget_lane_u32(vreinterpret_u32_u64(acc_dc), 0) + nt) >> (log2nt_plus1);  in ihevc_intra_pred_luma_dc_neonintr()
      1759  vst1_lane_u32((uint32_t *)pu1_dst_tmp, vreinterpret_u32_u64(shift_res), 0);  in ihevc_intra_pred_luma_mode2_neonintr()
      1763  vst1_lane_u32((uint32_t *)pu1_dst_tmp, vreinterpret_u32_u64(shift_res), 0);  in ihevc_intra_pred_luma_mode2_neonintr()
      1767  vst1_lane_u32((uint32_t *)pu1_dst_tmp, vreinterpret_u32_u64(shift_res), 0);  in ihevc_intra_pred_luma_mode2_neonintr()
|
/external/libaom/libaom/aom_dsp/simd/ |
D | v64_intrinsics_arm.h | 199 vreinterpret_u32_u64(vadd_u64(vget_high_u64(t), vget_low_u64(t))), 0); in v64_ssd_u8_sum()
|
/external/clang/test/CodeGen/ |
D | aarch64-neon-intrinsics.c | 19862 return vreinterpret_u32_u64(a); in test_vreinterpret_u32_u64()
|
D | arm_neon_intrinsics.c | 14167 return vreinterpret_u32_u64(a); in test_vreinterpret_u32_u64()
|
/external/neon_2_sse/ |
D | NEON_2_SSE.h |
      16628  _NEON2SSESTORAGE uint32x2_t vreinterpret_u32_u64 (uint64x1_t t);
      16629  #define vreinterpret_u32_u64  macro
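(vreinterpret_u32_u64 never touches the data: it only changes how the 64 bits are viewed as lanes, which is why a translation layer such as NEON_2_SSE can define it as a plain macro over the same storage. A small illustrative check, assuming a little-endian target:)

    #include <arm_neon.h>

    /* The bits are unchanged; only the lane view differs. */
    static uint32_t low_word_of_u64(void) {
      const uint64x1_t x = vdup_n_u64(0x1122334455667788ULL);
      const uint32x2_t y = vreinterpret_u32_u64(x);
      /* lane 0 == 0x55667788, lane 1 == 0x11223344 on little-endian */
      return vget_lane_u32(y, 0);
    }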
|