/external/clang/test/CodeGen/

arm64_crypto.c
    30  uint32x4_t test_sha1c(uint32x4_t hash_abcd, uint32_t hash_e, uint32x4_t wk) {  in test_sha1c()
    37  uint32x4_t test_sha1p(uint32x4_t hash_abcd, uint32_t hash_e, uint32x4_t wk) {  in test_sha1p()
    44  uint32x4_t test_sha1m(uint32x4_t hash_abcd, uint32_t hash_e, uint32x4_t wk) {  in test_sha1m()
    59  uint32x4_t test_sha1su0(uint32x4_t wk0_3, uint32x4_t wk4_7, uint32x4_t wk8_11) {  in test_sha1su0()
    65  uint32x4_t test_sha1su1(uint32x4_t wk0_3, uint32x4_t wk12_15) {  in test_sha1su1()
    71  uint32x4_t test_sha256h(uint32x4_t hash_abcd, uint32x4_t hash_efgh, uint32x4_t wk) {  in test_sha256h()
    77  uint32x4_t test_sha256h2(uint32x4_t hash_efgh, uint32x4_t hash_abcd, uint32x4_t wk) {  in test_sha256h2()
    83  uint32x4_t test_sha256su0(uint32x4_t w0_3, uint32x4_t w4_7) {  in test_sha256su0()
    89  uint32x4_t test_sha256su1(uint32x4_t w0_3, uint32x4_t w8_11, uint32x4_t w12_15) {  in test_sha256su1()

neon-crypto.c
    44  uint32x4_t test_vsha1su1q_u32(uint32x4_t w0_3, uint32x4_t w12_15) {  in test_vsha1su1q_u32()
    50  uint32x4_t test_vsha256su0q_u32(uint32x4_t w0_3, uint32x4_t w4_7) {  in test_vsha256su0q_u32()
    56  uint32x4_t test_vsha1cq_u32(uint32x4_t hash_abcd, uint32_t hash_e, uint32x4_t wk) {  in test_vsha1cq_u32()
    62  uint32x4_t test_vsha1pq_u32(uint32x4_t hash_abcd, uint32_t hash_e, uint32x4_t wk) {  in test_vsha1pq_u32()
    68  uint32x4_t test_vsha1mq_u32(uint32x4_t hash_abcd, uint32_t hash_e, uint32x4_t wk) {  in test_vsha1mq_u32()
    74  uint32x4_t test_vsha1su0q_u32(uint32x4_t w0_3, uint32x4_t w4_7, uint32x4_t w8_11) {  in test_vsha1su0q_u32()
    80  uint32x4_t test_vsha256hq_u32(uint32x4_t hash_abcd, uint32x4_t hash_efgh, uint32x4_t wk) {  in test_vsha256hq_u32()
    86  uint32x4_t test_vsha256h2q_u32(uint32x4_t hash_efgh, uint32x4_t hash_abcd, uint32x4_t wk) {  in test_vsha256h2q_u32()
    92  uint32x4_t test_vsha256su1q_u32(uint32x4_t w0_3, uint32x4_t w8_11, uint32x4_t w12_15) {  in test_vsha256su1q_u32()

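Both crypto test files exercise the same ACLE SHA intrinsics. As a hedged illustration of how they compose — a minimal sketch assuming an AArch64 toolchain with the crypto extension (e.g. -march=armv8-a+crypto); the helper names are mine, not Clang's:

```c
#include <arm_neon.h>

/* Four SHA-256 rounds: hash state in abcd/efgh, wk = W[i..i+3] + K[i..i+3]. */
static void sha256_rounds_x4(uint32x4_t *abcd, uint32x4_t *efgh,
                             uint32x4_t w, uint32x4_t k) {
  const uint32x4_t wk = vaddq_u32(w, k);
  const uint32x4_t abcd_prev = *abcd;
  *abcd = vsha256hq_u32(*abcd, *efgh, wk);       /* SHA256H  */
  *efgh = vsha256h2q_u32(*efgh, abcd_prev, wk);  /* SHA256H2 */
}

/* Message-schedule update: derives W[16..19] from W[0..15]. */
static uint32x4_t sha256_schedule_x4(uint32x4_t w0_3, uint32x4_t w4_7,
                                     uint32x4_t w8_11, uint32x4_t w12_15) {
  return vsha256su1q_u32(vsha256su0q_u32(w0_3, w4_7), w8_11, w12_15);
}
```
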
aarch64-neon-2velem.c
    211   uint32x4_t test_vmulq_lane_u32(uint32x4_t a, uint32x2_t v) {  in test_vmulq_lane_u32()
    267   uint32x2_t test_vmul_laneq_u32(uint32x2_t a, uint32x4_t v) {  in test_vmul_laneq_u32()
    275   uint32x4_t test_vmulq_laneq_u32(uint32x4_t a, uint32x4_t v) {  in test_vmulq_laneq_u32()
    960   uint32x4_t test_vmull_lane_u16(uint16x4_t a, uint16x4_t v) {  in test_vmull_lane_u16()
    1011  uint32x4_t test_vmull_high_lane_u16(uint16x8_t a, uint16x4_t v) {  in test_vmull_high_lane_u16()
    1024  uint64x2_t test_vmull_high_lane_u32(uint32x4_t a, uint32x2_t v) {  in test_vmull_high_lane_u32()
    1060  uint32x4_t test_vmull_laneq_u16(uint16x4_t a, uint16x8_t v) {  in test_vmull_laneq_u16()
    1072  uint64x2_t test_vmull_laneq_u32(uint32x2_t a, uint32x4_t v) {  in test_vmull_laneq_u32()
    1111  uint32x4_t test_vmull_high_laneq_u16(uint16x8_t a, uint16x8_t v) {  in test_vmull_high_laneq_u16()
    1124  uint64x2_t test_vmull_high_laneq_u32(uint32x4_t a, uint32x4_t v) {  in test_vmull_high_laneq_u32()
    [all …]

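These tests cover the by-lane multiply family: `lane` indexes a 64-bit vector, `laneq` a 128-bit one, and the `vmull` forms widen the result. A minimal sketch of the two shapes (function names are mine):

```c
#include <arm_neon.h>

/* Scale every u32 element of a by element 1 of v (lane must be a constant). */
static uint32x4_t mul_by_lane(uint32x4_t a, uint32x4_t v) {
  return vmulq_laneq_u32(a, v, 1);
}

/* Widening multiply: four u16 elements times lane 3 of v, yielding u32. */
static uint32x4_t mull_by_lane(uint16x4_t a, uint16x4_t v) {
  return vmull_lane_u16(a, v, 3);
}
```
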
aarch64-neon-misc.c
    91   uint32x4_t test_vceqzq_s32(int32x4_t a) {  in test_vceqzq_s32()
    157  uint32x4_t test_vceqzq_u32(uint32x4_t a) {  in test_vceqzq_u32()
    197  uint32x4_t test_vceqzq_f32(float32x4_t a) {  in test_vceqzq_f32()
    319  uint32x4_t test_vcgezq_s32(int32x4_t a) {  in test_vcgezq_s32()
    359  uint32x4_t test_vcgezq_f32(float32x4_t a) {  in test_vcgezq_f32()
    435  uint32x4_t test_vclezq_s32(int32x4_t a) {  in test_vclezq_s32()
    475  uint32x4_t test_vclezq_f32(float32x4_t a) {  in test_vclezq_f32()
    551  uint32x4_t test_vcgtzq_s32(int32x4_t a) {  in test_vcgtzq_s32()
    591  uint32x4_t test_vcgtzq_f32(float32x4_t a) {  in test_vcgtzq_f32()
    667  uint32x4_t test_vcltzq_s32(int32x4_t a) {  in test_vcltzq_s32()
    [all …]

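The vceqz/vcgez/vclez/vcgtz/vcltz family compares each lane against zero and returns an all-ones or all-zero mask per lane, which is why they all produce uint32x4_t. A minimal sketch of how such masks get used, assuming AArch64 (for vaddvq_u32):

```c
#include <arm_neon.h>

/* Count the strictly positive lanes of a, branch-free. */
static uint32_t count_positive(int32x4_t a) {
  const uint32x4_t gtz = vcgtzq_s32(a);     /* 0xFFFFFFFF where a[i] > 0 */
  return vaddvq_u32(vshrq_n_u32(gtz, 31));  /* turn masks into 0/1, sum  */
}
```
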
arm_neon_intrinsics.c
    149   uint32x4_t test_vabaq_u32(uint32x4_t a, uint32x4_t b, uint32x4_t c) {  in test_vabaq_u32()
    219   uint32x4_t test_vabal_u16(uint32x4_t a, uint16x4_t b, uint16x4_t c) {  in test_vabal_u16()
    382   uint32x4_t test_vabdq_u32(uint32x4_t a, uint32x4_t b) {  in test_vabdq_u32()
    460   uint32x4_t test_vabdl_u16(uint16x4_t a, uint16x4_t b) {  in test_vabdl_u16()
    665   uint32x4_t test_vaddq_u32(uint32x4_t a, uint32x4_t b) {  in test_vaddq_u32()
    738   uint16x4_t test_vaddhn_u32(uint32x4_t a, uint32x4_t b) {  in test_vaddhn_u32()
    809   uint32x4_t test_vaddl_u16(uint16x4_t a, uint16x4_t b) {  in test_vaddl_u16()
    869   uint32x4_t test_vaddw_u16(uint32x4_t a, uint16x4_t b) {  in test_vaddw_u16()
    985   uint32x4_t test_vandq_u32(uint32x4_t a, uint32x4_t b) {  in test_vandq_u32()
    1113  uint32x4_t test_vbicq_u32(uint32x4_t a, uint32x4_t b) {  in test_vbicq_u32()
    [all …]

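Several of the matches above belong to the widening absolute-difference family (vabd/vabdl/vaba/vabal), the building block of SAD kernels. A minimal sketch of the accumulate form:

```c
#include <arm_neon.h>

/* acc[i] += |b[i] - c[i]|, widened from u16 lanes into u32 lanes. */
static uint32x4_t sad_accumulate(uint32x4_t acc, uint16x4_t b, uint16x4_t c) {
  return vabal_u16(acc, b, c);
}
```
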
aarch64-neon-3v.c
    101  uint32x4_t test_vandq_u32(uint32x4_t a, uint32x4_t b) {  in test_vandq_u32()
    213  uint32x4_t test_vorrq_u32(uint32x4_t a, uint32x4_t b) {  in test_vorrq_u32()
    325  uint32x4_t test_veorq_u32(uint32x4_t a, uint32x4_t b) {  in test_veorq_u32()
    451  uint32x4_t test_vbicq_u32(uint32x4_t a, uint32x4_t b) {  in test_vbicq_u32()
    579  uint32x4_t test_vornq_u32(uint32x4_t a, uint32x4_t b) {  in test_vornq_u32()

aarch64-neon-intrinsics.c
    132   uint32x4_t test_vaddq_u32(uint32x4_t v1, uint32x4_t v2) {  in test_vaddq_u32()
    263   uint32x4_t test_vsubq_u32(uint32x4_t v1, uint32x4_t v2) {  in test_vsubq_u32()
    362   uint32x4_t test_vmulq_u32(uint32x4_t v1, uint32x4_t v2) {  in test_vmulq_u32()
    508   uint32x4_t test_vmlaq_u32(uint32x4_t v1, uint32x4_t v2, uint32x4_t v3) {  in test_vmlaq_u32()
    627   uint32x4_t test_vmlsq_u32(uint32x4_t v1, uint32x4_t v2, uint32x4_t v3) {  in test_vmlsq_u32()
    863   uint32x4_t test_vabaq_u32(uint32x4_t v1, uint32x4_t v2, uint32x4_t v3) {  in test_vabaq_u32()
    990   uint32x4_t test_vabdq_u32(uint32x4_t v1, uint32x4_t v2) {  in test_vabdq_u32()
    1233  int32x4_t test_vbslq_s32(uint32x4_t v1, int32x4_t v2, int32x4_t v3) {  in test_vbslq_s32()
    1291  int32x4_t test_vbslq_u32(uint32x4_t v1, int32x4_t v2, int32x4_t v3) {  in test_vbslq_u32()
    1324  float32x4_t test_vbslq_f32(uint32x4_t v1, float32x4_t v2, float32x4_t v3) {  in test_vbslq_f32()
    [all …]

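The vbslq tests take a uint32x4_t mask as the first operand: result bits come from the second operand where the mask bit is set, else from the third. A minimal sketch pairing it with a compare (a hand-rolled per-lane max, purely illustrative):

```c
#include <arm_neon.h>

/* Per-lane max of a and b via compare + bitwise select. */
static float32x4_t select_max(float32x4_t a, float32x4_t b) {
  const uint32x4_t a_gt_b = vcgtq_f32(a, b);  /* all-ones where a > b */
  return vbslq_f32(a_gt_b, a, b);
}
```
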
aarch64-neon-perm.c
    94    uint32x4_t test_vuzp1q_u32(uint32x4_t a, uint32x4_t b) {  in test_vuzp1q_u32()
    241   uint32x4_t test_vuzp2q_u32(uint32x4_t a, uint32x4_t b) {  in test_vuzp2q_u32()
    388   uint32x4_t test_vzip1q_u32(uint32x4_t a, uint32x4_t b) {  in test_vzip1q_u32()
    535   uint32x4_t test_vzip2q_u32(uint32x4_t a, uint32x4_t b) {  in test_vzip2q_u32()
    682   uint32x4_t test_vtrn1q_u32(uint32x4_t a, uint32x4_t b) {  in test_vtrn1q_u32()
    829   uint32x4_t test_vtrn2q_u32(uint32x4_t a, uint32x4_t b) {  in test_vtrn2q_u32()
    1272  uint32x4x2_t test_vuzpq_u32(uint32x4_t a, uint32x4_t b) {  in test_vuzpq_u32()
    1736  uint32x4x2_t test_vzipq_u32(uint32x4_t a, uint32x4_t b) {  in test_vzipq_u32()
    2200  uint32x4x2_t test_vtrnq_u32(uint32x4_t a, uint32x4_t b) {  in test_vtrnq_u32()

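AArch64 splits the old paired vzip/vuzp/vtrn results (the uint32x4x2_t forms at the bottom) into explicit 1/2 halves, which is what these tests check. A minimal sketch of the interleave pair:

```c
#include <arm_neon.h>

/* Full interleave of a and b: *lo = {a0,b0,a1,b1}, *hi = {a2,b2,a3,b3}. */
static void interleave_u32(uint32x4_t a, uint32x4_t b,
                           uint32x4_t *lo, uint32x4_t *hi) {
  *lo = vzip1q_u32(a, b);
  *hi = vzip2q_u32(a, b);
}
```
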
arm-neon-vcvtX.c
    37   uint32x4_t test_vcvtaq_u32_f32(float32x4_t a) {  in test_vcvtaq_u32_f32()
    73   uint32x4_t test_vcvtnq_u32_f32(float32x4_t a) {  in test_vcvtnq_u32_f32()
    109  uint32x4_t test_vcvtpq_u32_f32(float32x4_t a) {  in test_vcvtpq_u32_f32()
    145  uint32x4_t test_vcvtmq_u32_f32(float32x4_t a) {  in test_vcvtmq_u32_f32()

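These four ARMv8 float-to-int conversions differ only in rounding mode: vcvta rounds to nearest with ties away from zero, vcvtn to nearest even, vcvtp toward +∞, and vcvtm toward −∞. For instance:

```c
#include <arm_neon.h>

/* Round-to-nearest-even conversion: {2.5, 3.5, 0.4, 7.9} -> {2, 4, 0, 8}. */
static uint32x4_t to_u32_nearest_even(float32x4_t a) {
  return vcvtnq_u32_f32(a);
}
```
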
arm64_vcopy.c
    70  uint32x4_t test_vcopyq_laneq_u32(uint32x4_t a1, uint32x4_t a2) {  in test_vcopyq_laneq_u32()

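vcopyq_laneq_u32 maps to the AArch64 INS instruction: it inserts one lane of the second vector into one lane of the first, with both lane indices compile-time constants. A minimal sketch:

```c
#include <arm_neon.h>

/* Returns a1 with its lane 0 replaced by lane 3 of a2. */
static uint32x4_t copy_lane(uint32x4_t a1, uint32x4_t a2) {
  return vcopyq_laneq_u32(a1, 0, a2, 3);
}
```
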
/external/scrypt/lib/crypto/

crypto_scrypt-neon-salsa208.h
    15  const uint32x4_t abab = {-1,0,-1,0};  in salsa20_8_intrinsic()
    26  uint32x4_t x0x5x10x15;  in salsa20_8_intrinsic()
    27  uint32x4_t x12x1x6x11;  in salsa20_8_intrinsic()
    28  uint32x4_t x8x13x2x7;  in salsa20_8_intrinsic()
    29  uint32x4_t x4x9x14x3;  in salsa20_8_intrinsic()
    31  uint32x4_t x0x1x10x11;  in salsa20_8_intrinsic()
    32  uint32x4_t x12x13x6x7;  in salsa20_8_intrinsic()
    33  uint32x4_t x8x9x2x3;  in salsa20_8_intrinsic()
    34  uint32x4_t x4x5x14x15;  in salsa20_8_intrinsic()
    36  uint32x4_t x0x1x2x3;  in salsa20_8_intrinsic()
    [all …]

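scrypt's salsa20/8 core keeps the 4x4 state in four uint32x4_t registers; the shuffled lane orders give the variables their x0x5x10x15-style names. NEON has no rotate instruction, so each R(a + b, k) term in the quarter-round is built from two shifts and an OR. A hedged sketch of that pattern — the macro and helper are mine, not the header's:

```c
#include <arm_neon.h>

/* 32-bit rotate-left of each lane, from a left and a right shift. */
#define ROTL32X4(v, n) \
  vorrq_u32(vshlq_n_u32((v), (n)), vshrq_n_u32((v), 32 - (n)))

/* One salsa20 update term, x ^= R(a + b, 7), across all four lanes. */
static uint32x4_t salsa_xor_rot7(uint32x4_t x, uint32x4_t a, uint32x4_t b) {
  return veorq_u32(x, ROTL32X4(vaddq_u32(a, b), 7));
}
```
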
/external/webp/src/dsp/

rescaler_neon.c
    27   #define LOAD_32x4(SRC, DST) const uint32x4_t DST = vld1q_u32((SRC))
    48   static uint32x4_t Interpolate_NEON(const rescaler_t* const frow,  in Interpolate_NEON()
    57   const uint32x4_t E = vcombine_u32(  in Interpolate_NEON()
    80   const uint32x4_t B0 = MULT_FIX(A0, fy_scale_half);  in RescalerExportRowExpand_NEON()
    81   const uint32x4_t B1 = MULT_FIX(A1, fy_scale_half);  in RescalerExportRowExpand_NEON()
    96   const uint32x4_t C0 =  in RescalerExportRowExpand_NEON()
    98   const uint32x4_t C1 =  in RescalerExportRowExpand_NEON()
    100  const uint32x4_t D0 = MULT_FIX(C0, fy_scale_half);  in RescalerExportRowExpand_NEON()
    101  const uint32x4_t D1 = MULT_FIX(C1, fy_scale_half);  in RescalerExportRowExpand_NEON()
    126  const uint32x4_t zero = vdupq_n_u32(0);  in RescalerExportRowShrink_NEON()
    [all …]

enc_neon.c
    259  uint32x4_t out = vdupq_n_u32(0);  in Load4x4_NEON()
    751  uint32x4_t* const sum) {  in AccumulateSSE16_NEON()
    760  const uint32x4_t sum1 = vpaddlq_u16(prod1);  in AccumulateSSE16_NEON()
    761  const uint32x4_t sum2 = vpaddlq_u16(prod2);  in AccumulateSSE16_NEON()
    766  static int SumToInt_NEON(uint32x4_t sum) {  in SumToInt_NEON()
    773  uint32x4_t sum = vdupq_n_u32(0);  in SSE16x16_NEON()
    782  uint32x4_t sum = vdupq_n_u32(0);  in SSE16x8_NEON()
    791  uint32x4_t sum = vdupq_n_u32(0);  in SSE8x8_NEON()
    812  const uint32x4_t sum1 = vpaddlq_u16(prod1);  in SSE4x4_NEON()
    813  const uint32x4_t sum2 = vpaddlq_u16(prod2);  in SSE4x4_NEON()
    [all …]

/external/libvpx/libvpx/vpx_dsp/ppc/

types_vsx.h
    21  typedef vector unsigned int uint32x4_t;  typedef
    72  const uint32x4_t a0 = (uint32x4_t)vec_vsx_ld(0, a);  in read4x2()
    73  const uint32x4_t a1 = (uint32x4_t)vec_vsx_ld(0, a + stride);  in read4x2()
    88  static const uint32x4_t vec_ones_u32 = { 1, 1, 1, 1 };
    90  static const uint32x4_t vec_zeros_u32 = { 0, 0, 0, 0 };
    92  static const uint32x4_t vec_shift_sign_s32 = { 31, 31, 31, 31 };

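Note the name collision: on POWER, types_vsx.h typedefs uint32x4_t to the Altivec/VSX `vector unsigned int`, so every uint32x4_t in the ppc/ files below is a VSX vector, not a NEON one. A sketch of the idiom, assuming a VSX-enabled toolchain; I splat before vec_ste because vec_ste derives the stored lane from the destination address:

```c
#include <altivec.h>

typedef vector unsigned int uint32x4_t;  /* as in types_vsx.h */

/* Add two vectors and return lane 0 of the result as a scalar. */
static unsigned int add_and_extract(uint32x4_t a, uint32x4_t b) {
  unsigned int out;
  const uint32x4_t s = vec_add(a, b);
  vec_ste(vec_splat(s, 0), 0, &out);  /* all lanes equal, so any lane works */
  return out;
}
```
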
variance_vsx.c
    49   vec_ste((uint32x4_t)s, 0, &sum);  in vpx_get_mb_ss_vsx()
    86   const uint32x4_t r0 = (uint32x4_t)vec_vsx_ld(0, ref);  in vpx_comp_avg_pred_vsx()
    87   const uint32x4_t r1 = (uint32x4_t)vec_vsx_ld(0, ref + ref_stride);  in vpx_comp_avg_pred_vsx()
    88   const uint32x4_t r2 = (uint32x4_t)vec_vsx_ld(0, ref + ref_stride * 2);  in vpx_comp_avg_pred_vsx()
    89   const uint32x4_t r3 = (uint32x4_t)vec_vsx_ld(0, ref + ref_stride * 3);  in vpx_comp_avg_pred_vsx()
    213  vec_ste((uint32x4_t)ss, 0, sse);  in variance()

intrapred_vsx.c
    38   static const uint32x4_t mask4 = { 0, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
    500  const uint32x4_t sum = (uint32x4_t)vec_sums(sum4s, vec_splat_s32(8));  in avg16()
    501  const uint32x4_t avg = (uint32x4_t)vec_sr(sum, vec_splat_u32(4));  in avg16()
    528  const uint32x4_t sum = (uint32x4_t)vec_sums(sum4s, v16);  in avg32()
    529  const uint32x4_t avg = (uint32x4_t)vec_sr(sum, vec_splat_u32(5));  in avg32()
    556  const uint32x4_t sum = (uint32x4_t)vec_sums(sum4s8, vec_splat_s32(8));  in dc_avg8()
    557  const uint32x4_t avg = (uint32x4_t)vec_sr(sum, vec_splat_u32(4));  in dc_avg8()
    569  const uint32x4_t sum = (uint32x4_t)vec_sums(sum4s, v16);  in dc_avg16()
    570  const uint32x4_t avg = (uint32x4_t)vec_sr(sum, vec_splat_u32(5));  in dc_avg16()
    592  const uint32x4_t a_sum = vec_sum4s(a0, vec_sum4s(a1, vec_splat_u32(0)));  in dc_avg32()
    [all …]

sad_vsx.c
    31   uint32x4_t v_sad = vec_zeros_u32; \
    49   uint32x4_t v_sad = vec_zeros_u32; \
    67   uint32x4_t v_sad = vec_zeros_u32; \
    86   uint32x4_t v_sad = vec_zeros_u32; \
    189  vec_vsx_st((uint32x4_t)v_sad, 0, sad); \
    216  vec_vsx_st((uint32x4_t)v_sad, 0, sad); \
    248  vec_vsx_st((uint32x4_t)v_sad, 0, sad); \

/external/libvpx/libvpx/vpx_dsp/arm/

sad4d_neon.c
    38   uint32x4_t r;  in sad4x_4d()
    83   const uint32x4_t r = vpaddlq_u16(vcombine_u16(b0, b1));  in sad_512_pel_final_neon()
    94   const uint32x4_t b0 = vpaddlq_u16(vcombine_u16(a0, a1));  in sad_1024_pel_final_neon()
    95   const uint32x4_t b1 = vpaddlq_u16(vcombine_u16(a2, a3));  in sad_1024_pel_final_neon()
    104  const uint32x4_t a0 = vpaddlq_u16(sum[0]);  in sad_2048_pel_final_neon()
    105  const uint32x4_t a1 = vpaddlq_u16(sum[1]);  in sad_2048_pel_final_neon()
    106  const uint32x4_t a2 = vpaddlq_u16(sum[2]);  in sad_2048_pel_final_neon()
    107  const uint32x4_t a3 = vpaddlq_u16(sum[3]);  in sad_2048_pel_final_neon()
    120  const uint32x4_t a0 = vpaddlq_u16(sum[0]);  in sad_4096_pel_final_neon()
    121  const uint32x4_t a1 = vpaddlq_u16(sum[1]);  in sad_4096_pel_final_neon()
    [all …]

/external/scrypt/patches/

arm_neon.patch
    21  + const uint32x4_t abab = {-1,0,-1,0};
    32  + uint32x4_t x0x5x10x15;
    33  + uint32x4_t x12x1x6x11;
    34  + uint32x4_t x8x13x2x7;
    35  + uint32x4_t x4x9x14x3;
    37  + uint32x4_t x0x1x10x11;
    38  + uint32x4_t x12x13x6x7;
    39  + uint32x4_t x8x9x2x3;
    40  + uint32x4_t x4x5x14x15;
    42  + uint32x4_t x0x1x2x3;
    [all …]

/external/libhevc/encoder/arm/

ihevce_ssd_and_sad_calculator_neon.c
    74   uint32x4_t b, d;  in ihevce_ssd_and_sad_calculator_neon()
    94   uint32x4_t sqabs_sum = vdupq_n_u32(0);  in ihevce_ssd_and_sad_calculator_neon()
    96   uint32x4_t tmp_a;  in ihevce_ssd_and_sad_calculator_neon()
    127  uint32x4_t sqabs_sum_l = vdupq_n_u32(0);  in ihevce_ssd_and_sad_calculator_neon()
    128  uint32x4_t sqabs_sum_h = vdupq_n_u32(0);  in ihevce_ssd_and_sad_calculator_neon()
    131  uint32x4_t tmp_a, tmp_c;  in ihevce_ssd_and_sad_calculator_neon()
    174  uint32x4_t sqabs_sum_l = vdupq_n_u32(0);  in ihevce_ssd_and_sad_calculator_neon()
    175  uint32x4_t sqabs_sum_h = vdupq_n_u32(0);  in ihevce_ssd_and_sad_calculator_neon()
    178  uint32x4_t tmp_a, tmp_c;  in ihevce_ssd_and_sad_calculator_neon()
    231  uint32x4_t abs_sum = vdupq_n_u32(0);  in ihevce_ssd_and_sad_calculator_neon()
    [all …]

ihevce_ssd_calculator_neon.c
    56   static INLINE uint32x4_t ihevce_4x4_ssd_computer_neon(  in ihevce_4x4_ssd_computer_neon()
    59   uint32x4_t ssd_low, ssd_high;  in ihevce_4x4_ssd_computer_neon()
    82   static INLINE uint32x4_t
    85   uint32x4_t ssd_val;  in ihevce_1x8_ssd_computer_neon()
    106  static INLINE uint32x4_t
    109  uint32x4_t ssd_low, ssd_high;  in ihevce_1x16_ssd_computer_neon()
    132  static INLINE uint32x4_t
    135  uint32x4_t ssd_0, ssd_1, ssd_2, ssd_3;  in ihevce_1x32_ssd_computer_neon()
    169  static INLINE uint32x4_t
    172  uint32x4_t ssd_0, ssd_1, ssd_2, ssd_3;  in ihevce_1x64_ssd_computer_neon()
    [all …]

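Both libhevc files follow the same per-row recipe: absolute-difference the u8 pixels, square by a widening multiply, then pairwise-accumulate into u32 lanes (the ssd_low/ssd_high and sqabs_sum variables above). A minimal sketch of one 8-pixel step:

```c
#include <arm_neon.h>

/* acc += (src - pred)^2 for eight u8 pixels, without overflow. */
static uint32x4_t ssd_step_8px(uint32x4_t acc, uint8x8_t src, uint8x8_t pred) {
  const uint8x8_t  ad = vabd_u8(src, pred);  /* |src - pred|, fits in u8  */
  const uint16x8_t sq = vmull_u8(ad, ad);    /* square, widened to u16    */
  return vpadalq_u16(acc, sq);               /* pairwise add into u32 acc */
}
```
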
/external/neon_2_sse/

NEON_2_SSE.h
    169  typedef __m128i uint32x4_t;  typedef
    461  _NEON2SSESTORAGE uint32x4_t vaddq_u32(uint32x4_t a, uint32x4_t b); // VADD.I32 q0,q0,q0
    468  _NEON2SSESTORAGE uint32x4_t vaddl_u16(uint16x4_t a, uint16x4_t b); // VADDL.U16 q0,d0,d0
    475  _NEON2SSESTORAGE uint32x4_t vaddw_u16(uint32x4_t a, uint16x4_t b); // VADDW.U16 q0,q0,d0
    489  _NEON2SSESTORAGE uint32x4_t vhaddq_u32(uint32x4_t a, uint32x4_t b); // VHADD.U32 q0,q0,q0
    502  _NEON2SSESTORAGE uint32x4_t vrhaddq_u32(uint32x4_t a, uint32x4_t b); // VRHADD.U32 q0,q0,q0
    518  _NEON2SSESTORAGE uint32x4_t vqaddq_u32(uint32x4_t a, uint32x4_t b); // VQADD.U32 q0,q0,q0
    525  _NEON2SSESTORAGE uint16x4_t vaddhn_u32(uint32x4_t a, uint32x4_t b); // VADDHN.I32 d0,q0,q0
    532  _NEON2SSESTORAGE uint16x4_t vraddhn_u32(uint32x4_t a, uint32x4_t b); // VRADDHN.I32 d0,q0,q0
    550  _NEON2SSESTORAGE uint32x4_t vmulq_u32(uint32x4_t a, uint32x4_t b); // VMUL.I32 q0,q0,q0
    [all …]

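NEON_2_SSE.h ports NEON sources to x86 by typedef-ing the NEON vector types onto SSE registers and reimplementing each intrinsic, which is why uint32x4_t appears here as an __m128i. A hedged sketch of the idea; the real header wraps its declarations in the _NEON2SSESTORAGE machinery shown above:

```c
#include <emmintrin.h>

typedef __m128i uint32x4_t;  /* as the header's typedef does */

/* vaddq_u32 becomes a wrapping 32-bit SSE2 add — the VADD.I32 of the comments. */
static inline uint32x4_t vaddq_u32(uint32x4_t a, uint32x4_t b) {
  return _mm_add_epi32(a, b);
}
```
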
/external/libaom/libaom/av1/common/arm/

selfguided_neon.c
    34   uint32x4_t s0, uint32x4_t s1, uint32x4_t s2, uint32x4_t s3, uint32x4_t s4,  in calc_ab_fast_internal_common()
    35   uint32x4_t s5, uint32x4_t s6, uint32x4_t s7, int32x4_t sr4, int32x4_t sr5,  in calc_ab_fast_internal_common()
    36   int32x4_t sr6, int32x4_t sr7, uint32x4_t const_n_val, uint32x4_t s_vec,  in calc_ab_fast_internal_common()
    37   uint32x4_t const_val, uint32x4_t one_by_n_minus_1_vec,  in calc_ab_fast_internal_common()
    40   uint32x4_t q0, q1, q2, q3;  in calc_ab_fast_internal_common()
    41   uint32x4_t p0, p1, p2, p3;  in calc_ab_fast_internal_common()
    119  uint32x4_t s0, uint32x4_t s1, uint32x4_t s2, uint32x4_t s3, uint32x4_t s4,  in calc_ab_internal_common()
    120  uint32x4_t s5, uint32x4_t s6, uint32x4_t s7, uint16x8_t s16_0,  in calc_ab_internal_common()
    123  uint32x4_t const_n_val, uint32x4_t s_vec, uint32x4_t const_val,  in calc_ab_internal_common()
    127  uint32x4_t q0, q1, q2, q3, q4, q5, q6, q7;  in calc_ab_internal_common()
    [all …]

/external/webrtc/webrtc/modules/audio_processing/aec/

aec_core_neon.c
    109  const uint32x4_t vec_p_inf = vdupq_n_u32(0x7F800000);  in vsqrtq_f32()
    111  const uint32x4_t div_by_zero = vceqq_u32(vec_p_inf, vreinterpretq_u32_f32(x));  in vsqrtq_f32()
    152  const uint32x4_t bigger = vcgtq_f32(absEf, kThresh);  in ScaleErrorSignalNEON()
    155  uint32x4_t ef_re_if = vreinterpretq_u32_f32(vmulq_f32(ef_re, absEfInv));  in ScaleErrorSignalNEON()
    156  uint32x4_t ef_im_if = vreinterpretq_u32_f32(vmulq_f32(ef_im, absEfInv));  in ScaleErrorSignalNEON()
    157  uint32x4_t ef_re_u32 = vandq_u32(vmvnq_u32(bigger),  in ScaleErrorSignalNEON()
    159  uint32x4_t ef_im_u32 = vandq_u32(vmvnq_u32(bigger),  in ScaleErrorSignalNEON()
    292  const uint32x4_t vec_float_exponent_mask = vdupq_n_u32(0x7F800000);  in vpowq_f32()
    293  const uint32x4_t vec_eight_biased_exponent = vdupq_n_u32(0x43800000);  in vpowq_f32()
    294  const uint32x4_t vec_implicit_leading_one = vdupq_n_u32(0x43BF8000);  in vpowq_f32()
    [all …]

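ARMv7 NEON has no vector square root, so aec_core_neon.c synthesizes vsqrtq_f32 from the reciprocal-square-root estimate plus Newton-Raphson refinement, then patches up the zero and infinity cases (the vec_p_inf and div_by_zero lines above). A minimal sketch without those fix-ups:

```c
#include <arm_neon.h>

/* sqrt(x) = x * rsqrt(x); two vrsqrts steps refine the initial estimate. */
static float32x4_t sqrt_approx(float32x4_t x) {
  float32x4_t e = vrsqrteq_f32(x);                     /* ~1/sqrt(x)  */
  e = vmulq_f32(e, vrsqrtsq_f32(vmulq_f32(x, e), e));  /* Newton step */
  e = vmulq_f32(e, vrsqrtsq_f32(vmulq_f32(x, e), e));  /* Newton step */
  return vmulq_f32(x, e);
}
```
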
/external/libaom/libaom/aom_dsp/arm/

sad_neon.c
    22   uint32x4_t q1;  in aom_sad8x16_neon()
    81   uint32x4_t q1;  in aom_sad16x8_neon()
    113  const uint32x4_t vec_l_lo =  in horizontal_long_add_16x8()
    115  const uint32x4_t vec_l_hi =  in horizontal_long_add_16x8()
    117  const uint32x4_t a = vaddq_u32(vec_l_lo, vec_l_hi);  in horizontal_long_add_16x8()
    124  const uint32x4_t a = vpaddlq_u16(vec_16x8);  in horizontal_add_16x8()

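The horizontal_add helpers here (and the vpaddlq_u16 reductions in sad4d_neon.c and enc_neon.c above) all reduce a vector of u16 partial sums to one scalar: widen pairwise to u32, then fold. A minimal sketch of the portable shape; AArch64 gets the single-instruction vaddvq_u32 fold:

```c
#include <arm_neon.h>

static uint32_t horizontal_add_u16x8(uint16x8_t v) {
  const uint32x4_t a = vpaddlq_u16(v);  /* 8 x u16 -> 4 x u32, no overflow */
#if defined(__aarch64__)
  return vaddvq_u32(a);                 /* add across all four lanes */
#else
  const uint64x2_t b = vpaddlq_u32(a);
  return (uint32_t)(vgetq_lane_u64(b, 0) + vgetq_lane_u64(b, 1));
#endif
}
```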