/external/webp/src/dsp/ |
D | neon.h |
      61 const uint64x1_t row2l = vget_low_u64(row23.val[0]); in Transpose4x4_NEON()
      63 const uint64x1_t row3l = vget_low_u64(row23.val[1]); in Transpose4x4_NEON()
      64 row01.val[0] = vcombine_u64(vget_low_u64(row01.val[0]), row2l); in Transpose4x4_NEON()
      66 row01.val[1] = vcombine_u64(vget_low_u64(row01.val[1]), row3l); in Transpose4x4_NEON()
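The four hits above are the half-swap step of a 4x4 transpose: ARMv7 NEON has no vtrnq_u64, so 64-bit halves are pulled out with vget_low_u64()/vget_high_u64() and reassembled with vcombine_u64(). A self-contained sketch of the pattern on uint32 lanes (illustrative code, not the libwebp source):

    #include <arm_neon.h>

    /* Transpose a 4x4 matrix held as four uint32x4_t rows. */
    static void transpose_4x4_sketch(uint32x4_t r[4]) {
      /* Interleave 32-bit lanes within row pairs. */
      const uint32x4x2_t p01 = vtrnq_u32(r[0], r[1]);
      const uint32x4x2_t p23 = vtrnq_u32(r[2], r[3]);
      const uint64x2_t q0 = vreinterpretq_u64_u32(p01.val[0]);
      const uint64x2_t q1 = vreinterpretq_u64_u32(p01.val[1]);
      const uint64x2_t q2 = vreinterpretq_u64_u32(p23.val[0]);
      const uint64x2_t q3 = vreinterpretq_u64_u32(p23.val[1]);
      /* Swap 64-bit halves across register pairs, as in the hits above. */
      r[0] = vreinterpretq_u32_u64(vcombine_u64(vget_low_u64(q0), vget_low_u64(q2)));
      r[1] = vreinterpretq_u32_u64(vcombine_u64(vget_low_u64(q1), vget_low_u64(q3)));
      r[2] = vreinterpretq_u32_u64(vcombine_u64(vget_high_u64(q0), vget_high_u64(q2)));
      r[3] = vreinterpretq_u32_u64(vcombine_u64(vget_high_u64(q1), vget_high_u64(q3)));
    }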
|
D | quant.h | 26 return vadd_u32(vreinterpret_u32_u64(vget_low_u64(b)), in horizontal_add_uint32x4()
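This hit is the middle of a horizontal sum; the snippet is truncated where the source line wraps. A sketch of the whole reduction in this style, assuming pre-AArch64 NEON (no vaddvq_u32):

    #include <arm_neon.h>
    #include <stdint.h>

    static uint32_t horizontal_add_u32x4_sketch(const uint32x4_t v) {
      /* Pairwise widening add: 4x32 -> 2x64, so intermediate sums cannot wrap. */
      const uint64x2_t b = vpaddlq_u32(v);
      /* Add the two 64-bit halves as 32-bit pairs; lane 0 holds the sum (mod 2^32). */
      const uint32x2_t c = vadd_u32(vreinterpret_u32_u64(vget_low_u64(b)),
                                    vreinterpret_u32_u64(vget_high_u64(b)));
      return vget_lane_u32(c, 0);
    }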
|
/external/libvpx/libvpx/vpx_dsp/arm/ |
D | sum_neon.h |
      29 return vadd_u32(vreinterpret_u32_u64(vget_low_u64(c)), in horizontal_add_uint16x8()
      35 return vadd_u32(vreinterpret_u32_u64(vget_low_u64(b)), in horizontal_add_uint32x4()
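The uint16x8 variant widens twice before the same final fold. A sketch consistent with the truncated snippet at line 29 (reconstructed rather than quoted):

    #include <arm_neon.h>
    #include <stdint.h>

    static uint32_t horizontal_add_u16x8_sketch(const uint16x8_t v) {
      const uint32x4_t b = vpaddlq_u16(v); /* 8x16 -> 4x32 */
      const uint64x2_t c = vpaddlq_u32(b); /* 4x32 -> 2x64 */
      const uint32x2_t d = vadd_u32(vreinterpret_u32_u64(vget_low_u64(c)),
                                    vreinterpret_u32_u64(vget_high_u64(c)));
      return vget_lane_u32(d, 0);
    }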
|
D | sum_squares_neon.c | 81 s2 = vadd_u64(vget_low_u64(s1), vget_high_u64(s1)); in vpx_sum_squares_2d_i16_neon()
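A sum of squares can exceed 32 bits, so here the fold stays in 64-bit lanes and only the final scalar is extracted. Sketch of the step around the matched line (surrounding names assumed):

    #include <arm_neon.h>
    #include <stdint.h>

    static uint64_t horizontal_add_u64x2_sketch(const uint64x2_t s1) {
      /* vadd_u64 operates on the 64-bit halves directly; no reinterpret needed. */
      const uint64x1_t s2 = vadd_u64(vget_low_u64(s1), vget_high_u64(s1));
      return vget_lane_u64(s2, 0);
    }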
|
/external/libhevc/encoder/arm/ |
D | ihevce_sad_compute_neon.c |
      69 vadd_u32(vreinterpret_u32_u64(vget_low_u64(c)), vreinterpret_u32_u64(vget_high_u64(c))), 0); in ihevce_4x4_sad_computer_neon()
      95 vreinterpret_u32_u64(vget_low_u64(tmp_b)), vreinterpret_u32_u64(vget_high_u64(tmp_b))); in ihevce_8xn_sad_computer_neon()
      125 vreinterpret_u32_u64(vget_low_u64(tmp_b)), vreinterpret_u32_u64(vget_high_u64(tmp_b))); in ihevce_16xn_sad_computer_neon()
      159 vreinterpret_u32_u64(vget_low_u64(tmp_b)), vreinterpret_u32_u64(vget_high_u64(tmp_b))); in ihevce_32xn_sad_computer_neon()
      201 vreinterpret_u32_u64(vget_low_u64(tmp_b)), vreinterpret_u32_u64(vget_high_u64(tmp_b))); in ihevce_64xn_sad_computer_neon()
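All five block sizes end in this fold; only the accumulation differs. A self-contained 8x8 SAD in the same style (a hypothetical helper, not the ihevce source):

    #include <arm_neon.h>
    #include <stdint.h>

    static uint32_t sad_8x8_sketch(const uint8_t *src, int src_stride,
                                   const uint8_t *ref, int ref_stride) {
      uint16x8_t acc = vdupq_n_u16(0);
      for (int i = 0; i < 8; ++i) {
        const uint8x8_t s = vld1_u8(src + i * src_stride);
        const uint8x8_t r = vld1_u8(ref + i * ref_stride);
        acc = vabal_u8(acc, s, r); /* acc += |s - r|, widened to 16 bits */
      }
      const uint64x2_t b = vpaddlq_u32(vpaddlq_u16(acc));
      const uint32x2_t c = vadd_u32(vreinterpret_u32_u64(vget_low_u64(b)),
                                    vreinterpret_u32_u64(vget_high_u64(b)));
      return vget_lane_u32(c, 0);
    }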
|
D | ihevce_ssd_and_sad_calculator_neon.c |
      82 vadd_u32(vreinterpret_u32_u64(vget_low_u64(c)), vreinterpret_u32_u64(vget_high_u64(c))); in ihevce_ssd_and_sad_calculator_neon()
      117 vreinterpret_u32_u64(vget_low_u64(tmp_b)), vreinterpret_u32_u64(vget_high_u64(tmp_b))); in ihevce_ssd_and_sad_calculator_neon()
      162 vreinterpret_u32_u64(vget_low_u64(tmp_b)), vreinterpret_u32_u64(vget_high_u64(tmp_b))); in ihevce_ssd_and_sad_calculator_neon()
      221 vreinterpret_u32_u64(vget_low_u64(tmp_b)), vreinterpret_u32_u64(vget_high_u64(tmp_b))); in ihevce_ssd_and_sad_calculator_neon()
      308 vreinterpret_u32_u64(vget_low_u64(tmp_b)), vreinterpret_u32_u64(vget_high_u64(tmp_b))); in ihevce_ssd_and_sad_calculator_neon()
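The SSD side squares the absolute differences before reducing; the fold at each matched line is the same. A sketch of one 8-pixel SSD step (illustrative):

    #include <arm_neon.h>
    #include <stdint.h>

    static uint32_t ssd_row8_sketch(const uint8x8_t s, const uint8x8_t r) {
      const uint8x8_t d = vabd_u8(s, r);    /* |s - r| per lane */
      const uint16x8_t sq = vmull_u8(d, d); /* squared, widened to 16 bits */
      const uint64x2_t b = vpaddlq_u32(vpaddlq_u16(sq));
      const uint32x2_t c = vadd_u32(vreinterpret_u32_u64(vget_low_u64(b)),
                                    vreinterpret_u32_u64(vget_high_u64(b)));
      return vget_lane_u32(c, 0);
    }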
|
D | ihevce_coarse_layer_sad_neon.c |
      240 vreinterpret_u32_u64(vget_low_u64(c)), in hme_store_4x4_sads_high_speed_neon()
      407 vreinterpret_u32_u64(vget_low_u64(c)), in hme_store_4x4_sads_high_quality_neon()
      418 vreinterpret_u32_u64(vget_low_u64(c)), in hme_store_4x4_sads_high_quality_neon()
|
D | ihevce_had_compute_neon.c |
      607 satd = vget_lane_u64(vadd_u64(vget_low_u64(c), vget_high_u64(c)), 0); in ihevce_HAD_16x16_8bit_neon()
      749 satd = vget_lane_u64(vadd_u64(vget_low_u64(c), vget_high_u64(c)), 0); in ihevce_HAD_32x32_8bit_neon()
      971 satd = vget_lane_u64(vadd_u64(vget_low_u64(c), vget_high_u64(c)), 0); in ihevce_compute_16x16HAD_using_8x8_neon()
      1182 return vget_lane_u64(vadd_u64(vget_low_u64(c), vget_high_u64(c)), 0); in ihevce_compute_32x32HAD_using_16x16_neon()
|
/external/libaom/libaom/aom_dsp/arm/ |
D | sad_neon.c |
      43 d5 = vadd_u32(vreinterpret_u32_u64(vget_low_u64(q3)), in aom_sad8x16_neon()
      105 d5 = vadd_u32(vreinterpret_u32_u64(vget_low_u64(q3)), in aom_sad16x8_neon()
      119 const uint32x2_t c = vadd_u32(vreinterpret_u32_u64(vget_low_u64(b)), in horizontal_long_add_16x8()
      126 const uint32x2_t c = vadd_u32(vreinterpret_u32_u64(vget_low_u64(b)), in horizontal_add_16x8()
|
D | sum_neon.h | 35 return vadd_u32(vreinterpret_u32_u64(vget_low_u64(c)), in horizontal_add_u16x8()
|
D | sad4d_neon.c | 27 const uint32x2_t c = vadd_u32(vreinterpret_u32_u64(vget_low_u64(b)), in horizontal_long_add_16x8()
|
/external/clang/test/CodeGen/ |
D | arm-neon-vget.c | 50 return vget_low_u64(a); in low_u64()
|
D | aarch64-neon-vget-hilo.c | 159 return vget_low_u64(a); in test_vget_low_u64()
|
D | arm_neon_intrinsics.c | 3838 return vget_low_u64(a); in test_vget_low_u64()
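These three CodeGen tests share one shape: a trivial wrapper whose emitted IR is checked for the expected 64-bit extraction. The function body, as in the hits (FileCheck directives omitted):

    #include <arm_neon.h>

    /* Lowers to a single shufflevector / d-register move,
       which is what the test's CHECK lines assert. */
    uint64x1_t test_vget_low_u64(uint64x2_t a) {
      return vget_low_u64(a);
    }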
|
/external/libaom/libaom/aom_dsp/simd/ |
D | v128_intrinsics_arm.h |
      125 vreinterpret_s32_u64(vadd_u64(vget_high_u64(t), vget_low_u64(t))), 0); in v128_hadd_u8()
      162 return (uint32_t)(uint64_t)(vget_high_u64(t) + vget_low_u64(t)); in v128_sad_u8_sum()
      953 (uint32_t)(uint64_t)vget_low_u64(t); in v128_sad_u16_sum()
|
D | v64_intrinsics_arm.h |
      178 return (uint32_t)(uint64_t)(vget_high_u64(r) + vget_low_u64(r)); in v64_sad_u8_sum()
      199 vreinterpret_u32_u64(vadd_u64(vget_high_u64(t), vget_low_u64(t))), 0); in v64_ssd_u8_sum()
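Note the idiom at lines 162, 953, and 178: `+` and scalar casts applied directly to uint64x1_t values. That compiles under GCC/Clang, which expose NEON types as vector extensions, but it is not portable ACLE. A portable spelling of the same reduction (a sketch, not the libaom code):

    #include <arm_neon.h>
    #include <stdint.h>

    static uint32_t sad_sum_portable_sketch(const uint64x2_t t) {
      /* Add the halves with an intrinsic and extract lane 0 explicitly,
         instead of casting a vector type to a scalar. */
      return (uint32_t)vget_lane_u64(
          vadd_u64(vget_high_u64(t), vget_low_u64(t)), 0);
    }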
|
/external/libgav1/libgav1/src/dsp/arm/ |
D | intrapred_cfl_neon.cc | 55 const uint64x1_t c = vadd_u64(vget_low_u64(b), vget_high_u64(b)); in SumVector()
|
D | common_neon.h | 395 const uint64x1_t c = vadd_u64(vget_low_u64(b), vget_high_u64(b)); in SumVector()
|
/external/libvpx/libvpx/vp8/common/arm/neon/ |
D | sixtappredict_neon.c |
      255 s01_f3_q = vzip_u32(vreinterpret_u32_u64(vget_low_u64(s01_f3)), in vp8_sixtap_predict4x4_neon()
      257 s23_f3_q = vzip_u32(vreinterpret_u32_u64(vget_low_u64(s23_f3)), in vp8_sixtap_predict4x4_neon()
      333 s01_f3_q = vzip_u32(vreinterpret_u32_u64(vget_low_u64(s01_f3)), in vp8_sixtap_predict4x4_neon()
      335 s23_f3_q = vzip_u32(vreinterpret_u32_u64(vget_low_u64(s23_f3)), in vp8_sixtap_predict4x4_neon()
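Here the low/high split feeds a shuffle rather than a reduction: the quadword is cut in half so vzip_u32 can interleave 32-bit pairs across the halves. A minimal sketch (name is illustrative):

    #include <arm_neon.h>

    /* For 32-bit lanes {a, b | c, d}, returns val[0] = {a, c}, val[1] = {b, d}. */
    static uint32x2x2_t zip_halves_sketch(const uint64x2_t v) {
      return vzip_u32(vreinterpret_u32_u64(vget_low_u64(v)),
                      vreinterpret_u32_u64(vget_high_u64(v)));
    }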
|
/external/libhevc/common/arm/ |
D | ihevc_resi_trans_neon.c |
      103 sad = vget_lane_u32(vadd_u32(vreinterpret_u32_u64(vget_low_u64(c)), in ihevc_resi_trans_4x4_neon()
      250 sad = vget_lane_u32(vadd_u32(vreinterpret_u32_u64(vget_low_u64(c)), in ihevc_resi_trans_4x4_ttype1_neon()
|
/external/neon_2_sse/ |
D | NEON_2_SSE.h |
      1766 _NEON2SSESTORAGE uint64x1_t vget_low_u64(uint64x2_t a); // VMOV d0,d0
      12676 _NEON2SSESTORAGE uint64x1_t vget_low_u64(uint64x2_t a); // VMOV d0,d0
      12677 #define vget_low_u64 vget_low_s64 macro
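The header declares vget_low_u64 and then aliases it to vget_low_s64: extracting the low 64 bits is signedness-agnostic, so one SSE implementation serves both types. On x86-64 the equivalent scalar move looks roughly like this (a hypothetical SSE2 sketch, not the header's actual code):

    #include <emmintrin.h>
    #include <stdint.h>

    /* Moves the low 64 bits of an XMM register out as a scalar; the bit
       pattern is the same for signed and unsigned lanes. Requires x86-64. */
    static uint64_t get_low_u64_sse2_sketch(const __m128i a) {
      return (uint64_t)_mm_cvtsi128_si64(a);
    }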
|