
Searched refs:vreinterpret_u32_u64 (Results 1 – 21 of 21) sorted by relevance
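
For context, vreinterpret_u32_u64 is a zero-cost bit cast: it returns the 64 bits of a uint64x1_t unchanged, retyped as a uint32x2_t, and emits no instruction. A minimal standalone sketch, not taken from any of the hits below (lane order shown is for a little-endian target):

    #include <arm_neon.h>
    #include <stdio.h>

    int main(void) {
      const uint64x1_t x = vdup_n_u64(0x1122334455667788ULL);
      const uint32x2_t y = vreinterpret_u32_u64(x);
      /* Little-endian: lane 0 is the low half, lane 1 the high half. */
      printf("%08x %08x\n", vget_lane_u32(y, 0), vget_lane_u32(y, 1));
      /* Expected output: 55667788 11223344 */
      return 0;
    }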

/external/libvpx/libvpx/vpx_dsp/arm/
sum_neon.h:29 return vadd_u32(vreinterpret_u32_u64(vget_low_u64(c)), in horizontal_add_uint16x8()
30 vreinterpret_u32_u64(vget_high_u64(c))); in horizontal_add_uint16x8()
35 return vadd_u32(vreinterpret_u32_u64(vget_low_u64(b)), in horizontal_add_uint32x4()
36 vreinterpret_u32_u64(vget_high_u64(b))); in horizontal_add_uint32x4()
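
Both sum_neon.h hits are the standard AArch32 horizontal-add idiom: vpaddl widens and pairwise-adds until only 64-bit lanes remain, then the u32 reinterpret lets the two halves be combined with vadd_u32. A sketch of horizontal_add_uint16x8 reconstructed around the matched lines (the vpaddl steps are assumed, not shown in the hit):

    #include <arm_neon.h>

    static inline uint32x2_t horizontal_add_uint16x8(const uint16x8_t a) {
      const uint32x4_t b = vpaddlq_u16(a);  /* 8 x u16 -> 4 x u32 */
      const uint64x2_t c = vpaddlq_u32(b);  /* 4 x u32 -> 2 x u64 */
      /* Retype each u64 half as u32x2 and add them; lane 0 holds the
       * full sum (the high u32 of each half is zero for in-range sums). */
      return vadd_u32(vreinterpret_u32_u64(vget_low_u64(c)),
                      vreinterpret_u32_u64(vget_high_u64(c)));
    }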
intrapred_neon.c:274 const uint32x2_t r1 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 8)); in vpx_d45_predictor_4x4_neon()
275 const uint32x2_t r2 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 16)); in vpx_d45_predictor_4x4_neon()
276 const uint32x2_t r3 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 24)); in vpx_d45_predictor_4x4_neon()
399 const uint32x2_t r2 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 8)); in vpx_d135_predictor_4x4_neon()
400 const uint32x2_t r1 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 16)); in vpx_d135_predictor_4x4_neon()
401 const uint32x2_t r0 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 24)); in vpx_d135_predictor_4x4_neon()
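
The d45/d135 hits use a byte-shift trick: a row of smoothed pixels sits packed in a 64-bit lane, so shifting the u64 right by 8, 16, and 24 bits slides a 4-pixel window along the diagonal, and retyping to u32 lets each row go out as one 32-bit lane store. A sketch of the assumed surrounding code (store_shifted_rows is a hypothetical name; only the r1..r3 lines appear in the hits):

    #include <arm_neon.h>

    static void store_shifted_rows(uint8_t *dst, ptrdiff_t stride,
                                   uint8x8_t avg2) {
      const uint64x1_t avg2_u64 = vreinterpret_u64_u8(avg2);
      const uint32x2_t r0 = vreinterpret_u32_u8(avg2);
      const uint32x2_t r1 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 8));
      const uint32x2_t r2 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 16));
      const uint32x2_t r3 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 24));
      /* Each shifted window's low 4 bytes become one 4-pixel row. */
      vst1_lane_u32((uint32_t *)(dst + 0 * stride), r0, 0);
      vst1_lane_u32((uint32_t *)(dst + 1 * stride), r1, 0);
      vst1_lane_u32((uint32_t *)(dst + 2 * stride), r2, 0);
      vst1_lane_u32((uint32_t *)(dst + 3 * stride), r3, 0);
    }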
highbd_loopfilter_neon.c:38 const uint64x1_t t1 = vpaddl_u32(vreinterpret_u32_u64(t0)); in calc_flat_status()
39 return vget_lane_u32(vreinterpret_u32_u64(t1), 0); in calc_flat_status()
loopfilter_neon.c:51 vreinterpret_u32_u64(vpaddl_u32(vreinterpret_u32_u8(flat))), 0); in calc_flat_status_8()
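
The loop-filter hits collapse a flatness mask to one scalar so the C code can branch on it. A sketch of calc_flat_status under that assumption (the computation of t0 is not shown in the hit and is guessed here):

    #include <arm_neon.h>

    static uint32_t calc_flat_status(uint16x8_t flat) {
      /* Fold the two u64 halves of the mask together (assumed step). */
      const uint64x1_t t0 =
          vadd_u64(vget_low_u64(vreinterpretq_u64_u16(flat)),
                   vget_high_u64(vreinterpretq_u64_u16(flat)));
      /* Pairwise-add the two u32 halves of t0, then read the result:
       * nonzero means at least one lane of the mask was set. */
      const uint64x1_t t1 = vpaddl_u32(vreinterpret_u32_u64(t0));
      return vget_lane_u32(vreinterpret_u32_u64(t1), 0);
    }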
/external/libaom/libaom/aom_dsp/arm/
sad_neon.c:43 d5 = vadd_u32(vreinterpret_u32_u64(vget_low_u64(q3)), in aom_sad8x16_neon()
44 vreinterpret_u32_u64(vget_high_u64(q3))); in aom_sad8x16_neon()
74 return vget_lane_u32(vreinterpret_u32_u64(d3), 0); in aom_sad4x4_neon()
105 d5 = vadd_u32(vreinterpret_u32_u64(vget_low_u64(q3)), in aom_sad16x8_neon()
106 vreinterpret_u32_u64(vget_high_u64(q3))); in aom_sad16x8_neon()
119 const uint32x2_t c = vadd_u32(vreinterpret_u32_u64(vget_low_u64(b)), in horizontal_long_add_16x8()
120 vreinterpret_u32_u64(vget_high_u64(b))); in horizontal_long_add_16x8()
126 const uint32x2_t c = vadd_u32(vreinterpret_u32_u64(vget_low_u64(b)), in horizontal_add_16x8()
127 vreinterpret_u32_u64(vget_high_u64(b))); in horizontal_add_16x8()
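
The aom sad_neon.c hits all end the same way: absolute differences are widened and folded until a single 64-bit lane holds the SAD, and the result is read back through a u32 reinterpret. A hypothetical standalone 4x4 SAD in that style (names and loads are mine; only the reduction tail matches the hits):

    #include <arm_neon.h>

    static uint32_t sad4x4(const uint8_t *src, int src_stride,
                           const uint8_t *ref, int ref_stride) {
      uint32x2_t s = vdup_n_u32(0), r = vdup_n_u32(0);
      uint16x8_t abs = vdupq_n_u16(0);
      int i;
      for (i = 0; i < 4; i += 2) {
        /* Load two 4-byte rows per vector as 32-bit lanes. */
        s = vld1_lane_u32((const uint32_t *)(src + (i + 0) * src_stride), s, 0);
        s = vld1_lane_u32((const uint32_t *)(src + (i + 1) * src_stride), s, 1);
        r = vld1_lane_u32((const uint32_t *)(ref + (i + 0) * ref_stride), r, 0);
        r = vld1_lane_u32((const uint32_t *)(ref + (i + 1) * ref_stride), r, 1);
        /* Accumulate widened absolute byte differences. */
        abs = vabal_u8(abs, vreinterpret_u8_u32(s), vreinterpret_u8_u32(r));
      }
      {
        const uint32x4_t a = vpaddlq_u16(abs);
        const uint64x2_t b = vpaddlq_u32(a);
        const uint64x1_t c = vadd_u64(vget_low_u64(b), vget_high_u64(b));
        /* The SAD fits in 32 bits; read the low word of the u64 lane. */
        return vget_lane_u32(vreinterpret_u32_u64(c), 0);
      }
    }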
sum_neon.h:35 return vadd_u32(vreinterpret_u32_u64(vget_low_u64(c)), in horizontal_add_u16x8()
36 vreinterpret_u32_u64(vget_high_u64(c))); in horizontal_add_u16x8()
sad4d_neon.c:27 const uint32x2_t c = vadd_u32(vreinterpret_u32_u64(vget_low_u64(b)), in horizontal_long_add_16x8()
28 vreinterpret_u32_u64(vget_high_u64(b))); in horizontal_long_add_16x8()
intrapred_neon.c:337 const uint32x2_t r2 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 8)); in aom_d135_predictor_4x4_neon()
338 const uint32x2_t r1 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 16)); in aom_d135_predictor_4x4_neon()
339 const uint32x2_t r0 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 24)); in aom_d135_predictor_4x4_neon()
/external/libhevc/encoder/arm/
ihevce_sad_compute_neon.c:69 vadd_u32(vreinterpret_u32_u64(vget_low_u64(c)), vreinterpret_u32_u64(vget_high_u64(c))), 0); in ihevce_4x4_sad_computer_neon()
95 vreinterpret_u32_u64(vget_low_u64(tmp_b)), vreinterpret_u32_u64(vget_high_u64(tmp_b))); in ihevce_8xn_sad_computer_neon()
125 vreinterpret_u32_u64(vget_low_u64(tmp_b)), vreinterpret_u32_u64(vget_high_u64(tmp_b))); in ihevce_16xn_sad_computer_neon()
159 vreinterpret_u32_u64(vget_low_u64(tmp_b)), vreinterpret_u32_u64(vget_high_u64(tmp_b))); in ihevce_32xn_sad_computer_neon()
201 vreinterpret_u32_u64(vget_low_u64(tmp_b)), vreinterpret_u32_u64(vget_high_u64(tmp_b))); in ihevce_64xn_sad_computer_neon()
ihevce_ssd_and_sad_calculator_neon.c:82 vadd_u32(vreinterpret_u32_u64(vget_low_u64(c)), vreinterpret_u32_u64(vget_high_u64(c))); in ihevce_ssd_and_sad_calculator_neon()
117 vreinterpret_u32_u64(vget_low_u64(tmp_b)), vreinterpret_u32_u64(vget_high_u64(tmp_b))); in ihevce_ssd_and_sad_calculator_neon()
162 vreinterpret_u32_u64(vget_low_u64(tmp_b)), vreinterpret_u32_u64(vget_high_u64(tmp_b))); in ihevce_ssd_and_sad_calculator_neon()
221 vreinterpret_u32_u64(vget_low_u64(tmp_b)), vreinterpret_u32_u64(vget_high_u64(tmp_b))); in ihevce_ssd_and_sad_calculator_neon()
308 vreinterpret_u32_u64(vget_low_u64(tmp_b)), vreinterpret_u32_u64(vget_high_u64(tmp_b))); in ihevce_ssd_and_sad_calculator_neon()
ihevce_coarse_layer_sad_neon.c:240 vreinterpret_u32_u64(vget_low_u64(c)), in hme_store_4x4_sads_high_speed_neon()
241 vreinterpret_u32_u64(vget_high_u64(c))), in hme_store_4x4_sads_high_speed_neon()
407 vreinterpret_u32_u64(vget_low_u64(c)), in hme_store_4x4_sads_high_quality_neon()
408 vreinterpret_u32_u64(vget_high_u64(c))), in hme_store_4x4_sads_high_quality_neon()
418 vreinterpret_u32_u64(vget_low_u64(c)), in hme_store_4x4_sads_high_quality_neon()
419 vreinterpret_u32_u64(vget_high_u64(c))), in hme_store_4x4_sads_high_quality_neon()
/external/webp/src/dsp/
quant.h:26 return vadd_u32(vreinterpret_u32_u64(vget_low_u64(b)), in horizontal_add_uint32x4()
27 vreinterpret_u32_u64(vget_high_u64(b))); in horizontal_add_uint32x4()
dec_neon.c:1376 const uint32x2_t r2 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 8)); in RD4_NEON()
1377 const uint32x2_t r1 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 16)); in RD4_NEON()
1378 const uint32x2_t r0 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 24)); in RD4_NEON()
1395 const uint32x2_t r1 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 8)); in LD4_NEON()
1396 const uint32x2_t r2 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 16)); in LD4_NEON()
1397 const uint32x2_t r3 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 24)); in LD4_NEON()
/external/libgav1/libgav1/src/dsp/arm/
intrapred_cfl_neon.cc:46 return vget_lane_u32(vreinterpret_u32_u64(b), 0); in SumVector()
56 return vget_lane_u32(vreinterpret_u32_u64(c), 0); in SumVector()
/external/libvpx/libvpx/vp8/common/arm/neon/
sixtappredict_neon.c:255 s01_f3_q = vzip_u32(vreinterpret_u32_u64(vget_low_u64(s01_f3)), in vp8_sixtap_predict4x4_neon()
256 vreinterpret_u32_u64(vget_high_u64(s01_f3))); in vp8_sixtap_predict4x4_neon()
257 s23_f3_q = vzip_u32(vreinterpret_u32_u64(vget_low_u64(s23_f3)), in vp8_sixtap_predict4x4_neon()
258 vreinterpret_u32_u64(vget_high_u64(s23_f3))); in vp8_sixtap_predict4x4_neon()
333 s01_f3_q = vzip_u32(vreinterpret_u32_u64(vget_low_u64(s01_f3)), in vp8_sixtap_predict4x4_neon()
334 vreinterpret_u32_u64(vget_high_u64(s01_f3))); in vp8_sixtap_predict4x4_neon()
335 s23_f3_q = vzip_u32(vreinterpret_u32_u64(vget_low_u64(s23_f3)), in vp8_sixtap_predict4x4_neon()
336 vreinterpret_u32_u64(vget_high_u64(s23_f3))); in vp8_sixtap_predict4x4_neon()
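
The vp8 six-tap hits are a data-rearrangement use rather than a reduction: each 64-bit half of the source vector holds 32-bit fragments of two rows, and zipping the reinterpreted halves interleaves those fragments for the following filter multiplies. A minimal sketch of just that step (interleave_halves is a hypothetical name, and the row layout is my reading of the hit):

    #include <arm_neon.h>

    static uint32x2x2_t interleave_halves(uint64x2_t s01_f3) {
      /* Retype each 64-bit half as two u32 fragments, then zip so the
       * fragments from the low and high halves alternate lane by lane. */
      return vzip_u32(vreinterpret_u32_u64(vget_low_u64(s01_f3)),
                      vreinterpret_u32_u64(vget_high_u64(s01_f3)));
    }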
/external/libhevc/common/arm/
ihevc_resi_trans_neon.c:103 sad = vget_lane_u32(vadd_u32(vreinterpret_u32_u64(vget_low_u64(c)), in ihevc_resi_trans_4x4_neon()
104 vreinterpret_u32_u64(vget_high_u64(c))), in ihevc_resi_trans_4x4_neon()
250 sad = vget_lane_u32(vadd_u32(vreinterpret_u32_u64(vget_low_u64(c)), in ihevc_resi_trans_4x4_ttype1_neon()
251 vreinterpret_u32_u64(vget_high_u64(c))), in ihevc_resi_trans_4x4_ttype1_neon()
ihevc_intra_pred_filters_neon_intr.c:960 dc_val = (vget_lane_u32(vreinterpret_u32_u64(acc_dc), 0) + nt) >> (log2nt_plus1); in ihevc_intra_pred_luma_dc_neonintr()
1759 vst1_lane_u32((uint32_t *)pu1_dst_tmp, vreinterpret_u32_u64(shift_res), 0); in ihevc_intra_pred_luma_mode2_neonintr()
1763 vst1_lane_u32((uint32_t *)pu1_dst_tmp, vreinterpret_u32_u64(shift_res), 0); in ihevc_intra_pred_luma_mode2_neonintr()
1767 vst1_lane_u32((uint32_t *)pu1_dst_tmp, vreinterpret_u32_u64(shift_res), 0); in ihevc_intra_pred_luma_mode2_neonintr()
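
The ihevc hits show two scalar hand-offs: the DC accumulator's low word is pulled out through a u32 reinterpret and finished with a rounding shift, while the mode-2 rows are stored four pixels at a time via a u32 lane store. A sketch of the DC case, keeping the ihevc names from the hit (the function wrapper is mine):

    #include <arm_neon.h>

    static uint32_t dc_value(uint64x1_t acc_dc, uint32_t nt,
                             uint32_t log2nt_plus1) {
      /* acc_dc: 64-bit sum of the neighboring reference pixels.
       * Rounding add of nt, then shift, finishes the average in
       * scalar code; only the low 32 bits of the sum are needed. */
      return (vget_lane_u32(vreinterpret_u32_u64(acc_dc), 0) + nt)
             >> log2nt_plus1;
    }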
/external/libaom/libaom/aom_dsp/simd/
v64_intrinsics_arm.h:199 vreinterpret_u32_u64(vadd_u64(vget_high_u64(t), vget_low_u64(t))), 0); in v64_ssd_u8_sum()
/external/clang/test/CodeGen/
aarch64-neon-intrinsics.c:19862 return vreinterpret_u32_u64(a); in test_vreinterpret_u32_u64()
arm_neon_intrinsics.c:14167 return vreinterpret_u32_u64(a); in test_vreinterpret_u32_u64()
/external/neon_2_sse/
NEON_2_SSE.h:16628 _NEON2SSESTORAGE uint32x2_t vreinterpret_u32_u64 (uint64x1_t t);
16629 #define vreinterpret_u32_u64 macro