Searched refs:vget_low_u64 (Results 1 – 21 of 21) sorted by relevance

/external/webp/src/dsp/
neon.h 61 const uint64x1_t row2l = vget_low_u64(row23.val[0]); in Transpose4x4_NEON()
63 const uint64x1_t row3l = vget_low_u64(row23.val[1]); in Transpose4x4_NEON()
64 row01.val[0] = vcombine_u64(vget_low_u64(row01.val[0]), row2l); in Transpose4x4_NEON()
66 row01.val[1] = vcombine_u64(vget_low_u64(row01.val[1]), row3l); in Transpose4x4_NEON()
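
The Transpose4x4_NEON hits above use vget_low_u64/vget_high_u64 together with vcombine_u64 to redistribute 64-bit halves between row pairs after the 32-bit interleave. A minimal sketch of that idiom, with illustrative names rather than the library's own:

    #include <arm_neon.h>

    /* 4x4 transpose of 32-bit lanes: interleave row pairs, then swap 64-bit
     * halves between the pairs with vget_low_u64/vget_high_u64 + vcombine_u64. */
    static void transpose_4x4_u32(uint32x4_t rows[4]) {
      const uint32x4x2_t r01 = vtrnq_u32(rows[0], rows[1]);  /* {a0 b0 a2 b2}, {a1 b1 a3 b3} */
      const uint32x4x2_t r23 = vtrnq_u32(rows[2], rows[3]);  /* {c0 d0 c2 d2}, {c1 d1 c3 d3} */
      const uint64x2_t a = vreinterpretq_u64_u32(r01.val[0]);
      const uint64x2_t b = vreinterpretq_u64_u32(r01.val[1]);
      const uint64x2_t c = vreinterpretq_u64_u32(r23.val[0]);
      const uint64x2_t d = vreinterpretq_u64_u32(r23.val[1]);
      rows[0] = vreinterpretq_u32_u64(vcombine_u64(vget_low_u64(a), vget_low_u64(c)));   /* a0 b0 c0 d0 */
      rows[1] = vreinterpretq_u32_u64(vcombine_u64(vget_low_u64(b), vget_low_u64(d)));   /* a1 b1 c1 d1 */
      rows[2] = vreinterpretq_u32_u64(vcombine_u64(vget_high_u64(a), vget_high_u64(c))); /* a2 b2 c2 d2 */
      rows[3] = vreinterpretq_u32_u64(vcombine_u64(vget_high_u64(b), vget_high_u64(d))); /* a3 b3 c3 d3 */
    }
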
quant.h 26 return vadd_u32(vreinterpret_u32_u64(vget_low_u64(b)), in horizontal_add_uint32x4()
/external/libvpx/libvpx/vpx_dsp/arm/
sum_neon.h 29 return vadd_u32(vreinterpret_u32_u64(vget_low_u64(c)), in horizontal_add_uint16x8()
35 return vadd_u32(vreinterpret_u32_u64(vget_low_u64(b)), in horizontal_add_uint32x4()
sum_squares_neon.c 81 s2 = vadd_u64(vget_low_u64(s1), vget_high_u64(s1)); in vpx_sum_squares_2d_i16_neon()
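
The horizontal_add_uint16x8/horizontal_add_uint32x4 helpers hit above (quant.h and sum_neon.h) all follow the same reduction: pairwise-widen, split the 64-bit lanes with vget_low_u64/vget_high_u64, reinterpret back to 32 bits, and add. A hedged sketch of that pattern for a uint32x4_t input; the function name is illustrative:

    #include <arm_neon.h>

    static uint32_t horizontal_add_u32x4_sketch(const uint32x4_t a) {
      const uint64x2_t b = vpaddlq_u32(a);                 /* {a0+a1, a2+a3} as 64-bit lanes */
      const uint32x2_t c = vadd_u32(vreinterpret_u32_u64(vget_low_u64(b)),
                                    vreinterpret_u32_u64(vget_high_u64(b)));
      return vget_lane_u32(c, 0);                          /* low 32 bits of the total sum */
    }
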
/external/libhevc/encoder/arm/
ihevce_sad_compute_neon.c 69 vadd_u32(vreinterpret_u32_u64(vget_low_u64(c)), vreinterpret_u32_u64(vget_high_u64(c))), 0); in ihevce_4x4_sad_computer_neon()
95 vreinterpret_u32_u64(vget_low_u64(tmp_b)), vreinterpret_u32_u64(vget_high_u64(tmp_b))); in ihevce_8xn_sad_computer_neon()
125 vreinterpret_u32_u64(vget_low_u64(tmp_b)), vreinterpret_u32_u64(vget_high_u64(tmp_b))); in ihevce_16xn_sad_computer_neon()
159 vreinterpret_u32_u64(vget_low_u64(tmp_b)), vreinterpret_u32_u64(vget_high_u64(tmp_b))); in ihevce_32xn_sad_computer_neon()
201 vreinterpret_u32_u64(vget_low_u64(tmp_b)), vreinterpret_u32_u64(vget_high_u64(tmp_b))); in ihevce_64xn_sad_computer_neon()
ihevce_ssd_and_sad_calculator_neon.c 82 vadd_u32(vreinterpret_u32_u64(vget_low_u64(c)), vreinterpret_u32_u64(vget_high_u64(c))); in ihevce_ssd_and_sad_calculator_neon()
117 vreinterpret_u32_u64(vget_low_u64(tmp_b)), vreinterpret_u32_u64(vget_high_u64(tmp_b))); in ihevce_ssd_and_sad_calculator_neon()
162 vreinterpret_u32_u64(vget_low_u64(tmp_b)), vreinterpret_u32_u64(vget_high_u64(tmp_b))); in ihevce_ssd_and_sad_calculator_neon()
221 vreinterpret_u32_u64(vget_low_u64(tmp_b)), vreinterpret_u32_u64(vget_high_u64(tmp_b))); in ihevce_ssd_and_sad_calculator_neon()
308 vreinterpret_u32_u64(vget_low_u64(tmp_b)), vreinterpret_u32_u64(vget_high_u64(tmp_b))); in ihevce_ssd_and_sad_calculator_neon()
ihevce_coarse_layer_sad_neon.c 240 vreinterpret_u32_u64(vget_low_u64(c)), in hme_store_4x4_sads_high_speed_neon()
407 vreinterpret_u32_u64(vget_low_u64(c)), in hme_store_4x4_sads_high_quality_neon()
418 vreinterpret_u32_u64(vget_low_u64(c)), in hme_store_4x4_sads_high_quality_neon()
ihevce_had_compute_neon.c 607 satd = vget_lane_u64(vadd_u64(vget_low_u64(c), vget_high_u64(c)), 0); in ihevce_HAD_16x16_8bit_neon()
749 satd = vget_lane_u64(vadd_u64(vget_low_u64(c), vget_high_u64(c)), 0); in ihevce_HAD_32x32_8bit_neon()
971 satd = vget_lane_u64(vadd_u64(vget_low_u64(c), vget_high_u64(c)), 0); in ihevce_compute_16x16HAD_using_8x8_neon()
1182 return vget_lane_u64(vadd_u64(vget_low_u64(c), vget_high_u64(c)), 0); in ihevce_compute_32x32HAD_using_16x16_neon()
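
The SATD lines from ihevce_had_compute_neon.c show the other common reduction: a uint64x2_t accumulator collapsed to a scalar with vadd_u64 on its two halves followed by a lane extract. A minimal sketch, with names not taken from the encoder:

    #include <arm_neon.h>

    /* Sum the two 64-bit lanes of an accumulator and return the scalar result. */
    static uint64_t reduce_u64x2_sketch(const uint64x2_t acc) {
      const uint64x1_t sum = vadd_u64(vget_low_u64(acc), vget_high_u64(acc));
      return vget_lane_u64(sum, 0);
    }
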
/external/libaom/libaom/aom_dsp/arm/
sad_neon.c 43 d5 = vadd_u32(vreinterpret_u32_u64(vget_low_u64(q3)), in aom_sad8x16_neon()
105 d5 = vadd_u32(vreinterpret_u32_u64(vget_low_u64(q3)), in aom_sad16x8_neon()
119 const uint32x2_t c = vadd_u32(vreinterpret_u32_u64(vget_low_u64(b)), in horizontal_long_add_16x8()
126 const uint32x2_t c = vadd_u32(vreinterpret_u32_u64(vget_low_u64(b)), in horizontal_add_16x8()
sum_neon.h 35 return vadd_u32(vreinterpret_u32_u64(vget_low_u64(c)), in horizontal_add_u16x8()
sad4d_neon.c 27 const uint32x2_t c = vadd_u32(vreinterpret_u32_u64(vget_low_u64(b)), in horizontal_long_add_16x8()
/external/clang/test/CodeGen/
arm-neon-vget.c 50 return vget_low_u64(a); in low_u64()
aarch64-neon-vget-hilo.c 159 return vget_low_u64(a); in test_vget_low_u64()
arm_neon_intrinsics.c 3838 return vget_low_u64(a); in test_vget_low_u64()
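
The CodeGen tests above exercise the intrinsic in isolation; semantically, vget_low_u64 returns lane 0 of a uint64x2_t as a uint64x1_t, vget_high_u64 returns lane 1, and vcombine_u64 is the exact inverse of the pair. A small sanity sketch (an assumed example, not taken from the tests):

    #include <arm_neon.h>

    /* Splitting a q-register into its two d-register halves and recombining
     * them reproduces the original value. */
    uint64x2_t split_and_recombine(uint64x2_t a) {
      const uint64x1_t lo = vget_low_u64(a);   /* lane 0 */
      const uint64x1_t hi = vget_high_u64(a);  /* lane 1 */
      return vcombine_u64(lo, hi);             /* identical to a */
    }
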
/external/libaom/libaom/aom_dsp/simd/
v128_intrinsics_arm.h 125 vreinterpret_s32_u64(vadd_u64(vget_high_u64(t), vget_low_u64(t))), 0); in v128_hadd_u8()
162 return (uint32_t)(uint64_t)(vget_high_u64(t) + vget_low_u64(t)); in v128_sad_u8_sum()
953 (uint32_t)(uint64_t)vget_low_u64(t); in v128_sad_u16_sum()
v64_intrinsics_arm.h 178 return (uint32_t)(uint64_t)(vget_high_u64(r) + vget_low_u64(r)); in v64_sad_u8_sum()
199 vreinterpret_u32_u64(vadd_u64(vget_high_u64(t), vget_low_u64(t))), 0); in v64_ssd_u8_sum()
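
The v64/v128 helpers hit above add and cast d-register values directly (vget_high_u64(t) + vget_low_u64(t)), which compiles under GCC/Clang because the NEON types are vector-extension types there. An equivalent reduction written only with intrinsics, as an illustrative sketch:

    #include <arm_neon.h>

    /* Intrinsic-only form of the sum used by v64_sad_u8_sum / v128_sad_u8_sum;
     * the function name is illustrative. */
    static uint32_t sad_sum_sketch(const uint64x2_t t) {
      const uint64x1_t s = vadd_u64(vget_low_u64(t), vget_high_u64(t));
      return (uint32_t)vget_lane_u64(s, 0);    /* truncated to 32 bits, as the wrappers above do */
    }
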
/external/libgav1/libgav1/src/dsp/arm/
intrapred_cfl_neon.cc 55 const uint64x1_t c = vadd_u64(vget_low_u64(b), vget_high_u64(b)); in SumVector()
common_neon.h 395 const uint64x1_t c = vadd_u64(vget_low_u64(b), vget_high_u64(b)); in SumVector()
/external/libvpx/libvpx/vp8/common/arm/neon/
sixtappredict_neon.c 255 s01_f3_q = vzip_u32(vreinterpret_u32_u64(vget_low_u64(s01_f3)), in vp8_sixtap_predict4x4_neon()
257 s23_f3_q = vzip_u32(vreinterpret_u32_u64(vget_low_u64(s23_f3)), in vp8_sixtap_predict4x4_neon()
333 s01_f3_q = vzip_u32(vreinterpret_u32_u64(vget_low_u64(s01_f3)), in vp8_sixtap_predict4x4_neon()
335 s23_f3_q = vzip_u32(vreinterpret_u32_u64(vget_low_u64(s23_f3)), in vp8_sixtap_predict4x4_neon()
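
In the vp8_sixtap_predict4x4_neon hits, the low 64-bit half of each filtered pair is reinterpreted as two 32-bit lanes and interleaved with the other pair's low half via vzip_u32. A sketch of that step with illustrative names:

    #include <arm_neon.h>

    /* Interleave the low halves of two 64-bit vectors as 32-bit lanes. */
    static uint32x2x2_t zip_low_halves(const uint64x2_t s01, const uint64x2_t s23) {
      const uint32x2_t a = vreinterpret_u32_u64(vget_low_u64(s01));
      const uint32x2_t b = vreinterpret_u32_u64(vget_low_u64(s23));
      return vzip_u32(a, b);   /* val[0] = {a0, b0}, val[1] = {a1, b1} */
    }
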
/external/libhevc/common/arm/
ihevc_resi_trans_neon.c 103 sad = vget_lane_u32(vadd_u32(vreinterpret_u32_u64(vget_low_u64(c)), in ihevc_resi_trans_4x4_neon()
250 sad = vget_lane_u32(vadd_u32(vreinterpret_u32_u64(vget_low_u64(c)), in ihevc_resi_trans_4x4_ttype1_neon()
/external/neon_2_sse/
NEON_2_SSE.h 1766 _NEON2SSESTORAGE uint64x1_t vget_low_u64(uint64x2_t a); // VMOV d0,d0
12676 _NEON2SSESTORAGE uint64x1_t vget_low_u64(uint64x2_t a); // VMOV d0,d0
12677 #define vget_low_u64 vget_low_s64 macro
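
The NEON_2_SSE.h hits declare vget_low_u64 and then #define it to vget_low_s64: both simply take the low 64 bits, so the signed emulation can serve the unsigned type. A hedged sketch of how such an emulation header is typically consumed on non-Arm builds; the include arrangement is an assumption, not taken from the results above:

    #if defined(__ARM_NEON) || defined(__ARM_NEON__)
    #include <arm_neon.h>     /* real NEON intrinsics on Arm targets */
    #else
    #include "NEON_2_SSE.h"   /* SSE-backed emulation of the same API on x86 (assumed include path) */
    #endif

    /* Works the same on either path: returns the low 64-bit half of v. */
    uint64x1_t low_half(uint64x2_t v) {
      return vget_low_u64(v);
    }
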