Searched refs:uint32x4_t (Results 1 – 25 of 54) sorted by relevance

/external/clang/test/CodeGen/
neon-crypto.c:44 uint32x4_t test_vsha1su1q_u32(uint32x4_t w0_3, uint32x4_t w12_15) { in test_vsha1su1q_u32()
50 uint32x4_t test_vsha256su0q_u32(uint32x4_t w0_3, uint32x4_t w4_7) { in test_vsha256su0q_u32()
56 uint32x4_t test_vsha1cq_u32(uint32x4_t hash_abcd, uint32_t hash_e, uint32x4_t wk) { in test_vsha1cq_u32()
62 uint32x4_t test_vsha1pq_u32(uint32x4_t hash_abcd, uint32_t hash_e, uint32x4_t wk) { in test_vsha1pq_u32()
68 uint32x4_t test_vsha1mq_u32(uint32x4_t hash_abcd, uint32_t hash_e, uint32x4_t wk) { in test_vsha1mq_u32()
74 uint32x4_t test_vsha1su0q_u32(uint32x4_t w0_3, uint32x4_t w4_7, uint32x4_t w8_11) { in test_vsha1su0q_u32()
80 uint32x4_t test_vsha256hq_u32(uint32x4_t hash_abcd, uint32x4_t hash_efgh, uint32x4_t wk) { in test_vsha256hq_u32()
86 uint32x4_t test_vsha256h2q_u32(uint32x4_t hash_efgh, uint32x4_t hash_abcd, uint32x4_t wk) { in test_vsha256h2q_u32()
92 uint32x4_t test_vsha256su1q_u32(uint32x4_t w0_3, uint32x4_t w8_11, uint32x4_t w12_15) { in test_vsha256su1q_u32()
arm64_crypto.c:30 uint32x4_t test_sha1c(uint32x4_t hash_abcd, uint32_t hash_e, uint32x4_t wk) { in test_sha1c()
37 uint32x4_t test_sha1p(uint32x4_t hash_abcd, uint32_t hash_e, uint32x4_t wk) { in test_sha1p()
44 uint32x4_t test_sha1m(uint32x4_t hash_abcd, uint32_t hash_e, uint32x4_t wk) { in test_sha1m()
59 uint32x4_t test_sha1su0(uint32x4_t wk0_3, uint32x4_t wk4_7, uint32x4_t wk8_11) { in test_sha1su0()
65 uint32x4_t test_sha1su1(uint32x4_t wk0_3, uint32x4_t wk12_15) { in test_sha1su1()
71 uint32x4_t test_sha256h(uint32x4_t hash_abcd, uint32x4_t hash_efgh, uint32x4_t wk) { in test_sha256h()
77 uint32x4_t test_sha256h2(uint32x4_t hash_efgh, uint32x4_t hash_abcd, uint32x4_t wk) { in test_sha256h2()
83 uint32x4_t test_sha256su0(uint32x4_t w0_3, uint32x4_t w4_7) { in test_sha256su0()
89 uint32x4_t test_sha256su1(uint32x4_t w0_3, uint32x4_t w8_11, uint32x4_t w12_15) { in test_sha256su1()
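
As a rough sketch of how these Crypto-extension intrinsics compose (assuming an ARMv8 target built with -march=armv8-a+crypto; all function and variable names below are illustrative, not taken from the tests), a four-round SHA-256 step plus one message-schedule update might look like:

    #include <arm_neon.h>

    /* Sketch only: one four-round SHA-256 step and one schedule extension. */
    static void sha256_step_sketch(uint32x4_t *abcd, uint32x4_t *efgh,
                                   uint32x4_t *w0_3, uint32x4_t w4_7,
                                   uint32x4_t w8_11, uint32x4_t w12_15,
                                   uint32x4_t k0_3) {
      const uint32x4_t wk = vaddq_u32(*w0_3, k0_3);  /* w[i..i+3] + K[i..i+3] */
      const uint32x4_t abcd_in = *abcd;
      *abcd = vsha256hq_u32(*abcd, *efgh, wk);       /* SHA256H:  new ABCD    */
      *efgh = vsha256h2q_u32(*efgh, abcd_in, wk);    /* SHA256H2: new EFGH    */
      /* Schedule extension: w[i+16..i+19] from w[i..i+15]. */
      *w0_3 = vsha256su1q_u32(vsha256su0q_u32(*w0_3, w4_7), w8_11, w12_15);
    }
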
arm64_neon_high_half.c:26 uint32x4_t test_vaddw_high_u16(uint32x4_t lhs, uint16x8_t rhs) { in test_vaddw_high_u16()
31 uint64x2_t test_vaddw_high_u32(uint64x2_t lhs, uint32x4_t rhs) { in test_vaddw_high_u32()
56 uint32x4_t test_vsubw_high_u16(uint32x4_t lhs, uint16x8_t rhs) { in test_vsubw_high_u16()
61 uint64x2_t test_vsubw_high_u32(uint64x2_t lhs, uint32x4_t rhs) { in test_vsubw_high_u32()
86 uint32x4_t test_vabdl_high_u16(uint16x8_t lhs, uint16x8_t rhs) { in test_vabdl_high_u16()
91 uint64x2_t test_vabdl_high_u32(uint32x4_t lhs, uint32x4_t rhs) { in test_vabdl_high_u32()
116 uint32x4_t test_vabal_high_u16(uint32x4_t accum, uint16x8_t lhs, uint16x8_t rhs) { in test_vabal_high_u16()
121 uint64x2_t test_vabal_high_u32(uint64x2_t accum, uint32x4_t lhs, uint32x4_t rhs) { in test_vabal_high_u32()
236 uint32x4_t test_vsubl_high_u16(uint16x8_t lhs, uint16x8_t rhs) { in test_vsubl_high_u16()
241 uint64x2_t test_vsubl_high_u32(uint32x4_t lhs, uint32x4_t rhs) { in test_vsubl_high_u32()
[all …]
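
arm64_neon_high_half.c covers the AArch64-only "_high" widening forms, which read the upper 64 bits of a 128-bit source. A minimal sketch of widening uint16x8_t data into a uint32x4_t accumulator (names are mine):

    #include <arm_neon.h>

    /* Sketch: widen the high halves of uint16x8_t inputs into 32-bit lanes.
     * AArch64 only; the non-"_high" forms take uint16x4_t instead. */
    static uint32x4_t widen_high_sketch(uint32x4_t acc, uint16x8_t a, uint16x8_t b) {
      uint32x4_t sum = vaddl_high_u16(a, b);     /* widen(a.hi) + widen(b.hi) */
      acc = vaddw_high_u16(acc, a);              /* acc += widen(a.hi)        */
      acc = vabal_high_u16(acc, a, b);           /* acc += |a.hi - b.hi|      */
      return vaddq_u32(acc, sum);
    }
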
aarch64-neon-2velem.c:146 uint32x4_t test_vmulq_lane_u32(uint32x4_t a, uint32x2_t v) { in test_vmulq_lane_u32()
188 uint32x2_t test_vmul_laneq_u32(uint32x2_t a, uint32x4_t v) { in test_vmul_laneq_u32()
194 uint32x4_t test_vmulq_laneq_u32(uint32x4_t a, uint32x4_t v) { in test_vmulq_laneq_u32()
500 uint32x4_t test_vmull_lane_u16(uint16x4_t a, uint16x4_t v) { in test_vmull_lane_u16()
524 uint32x4_t test_vmull_high_lane_u16(uint16x8_t a, uint16x4_t v) { in test_vmull_high_lane_u16()
530 uint64x2_t test_vmull_high_lane_u32(uint32x4_t a, uint32x2_t v) { in test_vmull_high_lane_u32()
548 uint32x4_t test_vmull_laneq_u16(uint16x4_t a, uint16x8_t v) { in test_vmull_laneq_u16()
554 uint64x2_t test_vmull_laneq_u32(uint32x2_t a, uint32x4_t v) { in test_vmull_laneq_u32()
572 uint32x4_t test_vmull_high_laneq_u16(uint16x8_t a, uint16x8_t v) { in test_vmull_high_laneq_u16()
578 uint64x2_t test_vmull_high_laneq_u32(uint32x4_t a, uint32x4_t v) { in test_vmull_high_laneq_u32()
[all …]
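
aarch64-neon-2velem.c exercises the by-lane ("2velem") forms, where the second operand is a single, compile-time-constant lane of another vector. A hedged sketch (names are mine; the laneq forms are AArch64-only):

    #include <arm_neon.h>

    /* Sketch: multiply a whole vector by one lane of another. */
    static uint32x4_t mul_by_lane_sketch(uint32x4_t a, uint32x4_t v,
                                         uint16x4_t s, uint16x8_t t) {
      uint32x4_t p = vmulq_laneq_u32(a, v, 3);   /* every lane of a times v[3]     */
      uint32x4_t q = vmull_laneq_u16(s, t, 5);   /* widening u16*u16 -> u32, by t[5] */
      return vaddq_u32(p, q);
    }
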
aarch64-neon-3v.c:86 uint32x4_t test_vandq_u32(uint32x4_t a, uint32x4_t b) { in test_vandq_u32()
182 uint32x4_t test_vorrq_u32(uint32x4_t a, uint32x4_t b) { in test_vorrq_u32()
278 uint32x4_t test_veorq_u32(uint32x4_t a, uint32x4_t b) { in test_veorq_u32()
374 uint32x4_t test_vbicq_u32(uint32x4_t a, uint32x4_t b) { in test_vbicq_u32()
470 uint32x4_t test_vornq_u32(uint32x4_t a, uint32x4_t b) { in test_vornq_u32()
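
aarch64-neon-3v.c covers the three-operand bitwise forms. For reference, a small sketch spelling out what each one computes (the wrapper is mine, not from the test):

    #include <arm_neon.h>

    /* Sketch: the five bitwise ops on uint32x4_t tested above. */
    static uint32x4_t bitwise_sketch(uint32x4_t a, uint32x4_t b) {
      uint32x4_t x_and = vandq_u32(a, b);   /* a & b              */
      uint32x4_t x_orr = vorrq_u32(a, b);   /* a | b              */
      uint32x4_t x_eor = veorq_u32(a, b);   /* a ^ b              */
      uint32x4_t x_bic = vbicq_u32(a, b);   /* a & ~b (bit clear) */
      uint32x4_t x_orn = vornq_u32(a, b);   /* a | ~b (or-not)    */
      return veorq_u32(veorq_u32(x_and, x_orr),
                       veorq_u32(x_eor, veorq_u32(x_bic, x_orn)));
    }
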
aarch64-neon-misc.c:59 uint32x4_t test_vceqzq_s32(int32x4_t a) { in test_vceqzq_s32()
101 uint32x4_t test_vceqzq_u32(uint32x4_t a) { in test_vceqzq_u32()
125 uint32x4_t test_vceqzq_f32(float32x4_t a) { in test_vceqzq_f32()
203 uint32x4_t test_vcgezq_s32(int32x4_t a) { in test_vcgezq_s32()
227 uint32x4_t test_vcgezq_f32(float32x4_t a) { in test_vcgezq_f32()
275 uint32x4_t test_vclezq_s32(int32x4_t a) { in test_vclezq_s32()
299 uint32x4_t test_vclezq_f32(float32x4_t a) { in test_vclezq_f32()
347 uint32x4_t test_vcgtzq_s32(int32x4_t a) { in test_vcgtzq_s32()
371 uint32x4_t test_vcgtzq_f32(float32x4_t a) { in test_vcgtzq_f32()
419 uint32x4_t test_vcltzq_s32(int32x4_t a) { in test_vcltzq_s32()
[all …]
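
aarch64-neon-misc.c tests the compare-against-zero forms, all of which return per-lane masks (all-ones or all-zero) as uint32x4_t. One common use is a branchless select; a sketch under that assumption (AArch64-only intrinsics, names are mine):

    #include <arm_neon.h>

    /* Sketch: use a zero-comparison mask to clamp negative lanes to zero. */
    static float32x4_t clamp_negative_sketch(float32x4_t x) {
      uint32x4_t nonneg = vcgezq_f32(x);               /* lane >= 0.0f ? ~0u : 0 */
      return vbslq_f32(nonneg, x, vdupq_n_f32(0.0f));  /* keep x where mask set  */
    }
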
arm_neon_intrinsics.c:80 uint32x4_t test_vabaq_u32(uint32x4_t a, uint32x4_t b, uint32x4_t c) { in test_vabaq_u32()
111 uint32x4_t test_vabal_u16(uint32x4_t a, uint16x4_t b, uint16x4_t c) { in test_vabal_u16()
196 uint32x4_t test_vabdq_u32(uint32x4_t a, uint32x4_t b) { in test_vabdq_u32()
233 uint32x4_t test_vabdl_u16(uint16x4_t a, uint16x4_t b) { in test_vabdl_u16()
391 uint32x4_t test_vaddq_u32(uint32x4_t a, uint32x4_t b) { in test_vaddq_u32()
428 uint16x4_t test_vaddhn_u32(uint32x4_t a, uint32x4_t b) { in test_vaddhn_u32()
465 uint32x4_t test_vaddl_u16(uint16x4_t a, uint16x4_t b) { in test_vaddl_u16()
502 uint32x4_t test_vaddw_u16(uint32x4_t a, uint16x4_t b) { in test_vaddw_u16()
599 uint32x4_t test_vandq_u32(uint32x4_t a, uint32x4_t b) { in test_vandq_u32()
696 uint32x4_t test_vbicq_u32(uint32x4_t a, uint32x4_t b) { in test_vbicq_u32()
[all …]
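
arm_neon_intrinsics.c is the generic (ARMv7-compatible) arm_neon.h coverage; its uint32x4_t hits are mostly widening (vaddl/vaddw/vabal) and narrowing (vaddhn) forms. A sketch of the widen-accumulate-narrow pattern (names are mine):

    #include <arm_neon.h>

    /* Sketch: widen u16 operands to u32, accumulate, then narrow back by
     * keeping the high 16 bits of each 32-bit lane. ARMv7 NEON and AArch64. */
    static uint16x4_t widen_narrow_sketch(uint16x4_t a, uint16x4_t b, uint16x4_t c) {
      uint32x4_t w0 = vaddl_u16(a, b);     /* widen: (u32)a + (u32)b       */
      uint32x4_t w1 = vabal_u16(w0, a, c); /* w1 = w0 + |a - c| (widened)  */
      return vaddhn_u32(w0, w1);           /* narrow: high half of w0 + w1 */
    }
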
aarch64-neon-intrinsics.c:111 uint32x4_t test_vaddq_u32(uint32x4_t v1, uint32x4_t v2) { in test_vaddq_u32()
223 uint32x4_t test_vsubq_u32(uint32x4_t v1, uint32x4_t v2) { in test_vsubq_u32()
308 uint32x4_t test_vmulq_u32(uint32x4_t v1, uint32x4_t v2) { in test_vmulq_u32()
417 uint32x4_t test_vmlaq_u32(uint32x4_t v1, uint32x4_t v2, uint32x4_t v3) { in test_vmlaq_u32()
505 uint32x4_t test_vmlsq_u32(uint32x4_t v1, uint32x4_t v2, uint32x4_t v3) { in test_vmlsq_u32()
635 uint32x4_t test_vabaq_u32(uint32x4_t v1, uint32x4_t v2, uint32x4_t v3) { in test_vabaq_u32()
713 uint32x4_t test_vabdq_u32(uint32x4_t v1, uint32x4_t v2) { in test_vabdq_u32()
816 int32x4_t test_vbslq_s32(uint32x4_t v1, int32x4_t v2, int32x4_t v3) { in test_vbslq_s32()
840 int32x4_t test_vbslq_u32(uint32x4_t v1, int32x4_t v2, int32x4_t v3) { in test_vbslq_u32()
852 float32x4_t test_vbslq_f32(uint32x4_t v1, float32x4_t v2, float32x4_t v3) { in test_vbslq_f32()
[all …]
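
aarch64-neon-intrinsics.c repeats the basic lane-wise arithmetic; the less obvious hits are the fused accumulate forms. A short sketch of their semantics (the wrapper is mine):

    #include <arm_neon.h>

    /* Sketch: multiply/absolute-difference accumulate forms on uint32x4_t. */
    static uint32x4_t accumulate_sketch(uint32x4_t acc, uint32x4_t a, uint32x4_t b) {
      acc = vmlaq_u32(acc, a, b);              /* acc += a * b (per lane, mod 2^32) */
      acc = vmlsq_u32(acc, a, b);              /* acc -= a * b                      */
      acc = vabaq_u32(acc, a, b);              /* acc += |a - b|                    */
      return vaddq_u32(acc, vabdq_u32(a, b));  /* plus a standalone |a - b|         */
    }
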
arm64_vqmov.c:32 uint16x8_t test_vqmovn_high_u32(uint16x4_t Vdlow, uint32x4_t Vn) in test_vqmovn_high_u32()
48 uint32x4_t test_vqmovn_high_u64(uint32x2_t Vdlow, uint64x2_t Vn) in test_vqmovn_high_u64()
64 uint16x8_t test_vqmovun_high_s32(uint16x4_t Vdlow, uint32x4_t Vn) in test_vqmovun_high_s32()
72 uint32x4_t test_vqmovun_high_s64(uint32x2_t Vdlow, uint64x2_t Vn) in test_vqmovun_high_s64()
aarch64-neon-perm.c:80 uint32x4_t test_vuzp1q_u32(uint32x4_t a, uint32x4_t b) { in test_vuzp1q_u32()
206 uint32x4_t test_vuzp2q_u32(uint32x4_t a, uint32x4_t b) { in test_vuzp2q_u32()
332 uint32x4_t test_vzip1q_u32(uint32x4_t a, uint32x4_t b) { in test_vzip1q_u32()
458 uint32x4_t test_vzip2q_u32(uint32x4_t a, uint32x4_t b) { in test_vzip2q_u32()
584 uint32x4_t test_vtrn1q_u32(uint32x4_t a, uint32x4_t b) { in test_vtrn1q_u32()
710 uint32x4_t test_vtrn2q_u32(uint32x4_t a, uint32x4_t b) { in test_vtrn2q_u32()
849 uint32x4x2_t test_vuzpq_u32(uint32x4_t a, uint32x4_t b) { in test_vuzpq_u32()
959 uint32x4x2_t test_vzipq_u32(uint32x4_t a, uint32x4_t b) { in test_vzipq_u32()
1069 uint32x4x2_t test_vtrnq_u32(uint32x4_t a, uint32x4_t b) { in test_vtrnq_u32()
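
aarch64-neon-perm.c covers the interleave family. The AArch64 vzip1q/vzip2q pair returns the two result halves separately, whereas the two-register vzipq form above returns a uint32x4x2_t. A sketch (names are mine):

    #include <arm_neon.h>

    /* Sketch: interleave {a0,a1,a2,a3} with {b0,b1,b2,b3}. */
    static void zip_sketch(uint32x4_t a, uint32x4_t b,
                           uint32x4_t *lo, uint32x4_t *hi) {
      *lo = vzip1q_u32(a, b);              /* {a0, b0, a1, b1} (AArch64) */
      *hi = vzip2q_u32(a, b);              /* {a2, b2, a3, b3} (AArch64) */
      /* Equivalent two-register form, also available on ARMv7: */
      uint32x4x2_t both = vzipq_u32(a, b);
      *lo = both.val[0];
      *hi = both.val[1];
    }
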
arm-neon-vcvtX.c:23 uint32x4_t test_vcvtaq_u32_f32(float32x4_t a) { in test_vcvtaq_u32_f32()
47 uint32x4_t test_vcvtnq_u32_f32(float32x4_t a) { in test_vcvtnq_u32_f32()
71 uint32x4_t test_vcvtpq_u32_f32(float32x4_t a) { in test_vcvtpq_u32_f32()
95 uint32x4_t test_vcvtmq_u32_f32(float32x4_t a) { in test_vcvtmq_u32_f32()
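
arm-neon-vcvtX.c tests the ARMv8 directed-rounding float-to-unsigned conversions; baseline ARMv7 NEON only has the truncating vcvtq_u32_f32. A sketch of the four rounding modes:

    #include <arm_neon.h>

    /* Sketch: ARMv8 directed-rounding conversions from float32x4_t to uint32x4_t. */
    static uint32x4_t rounding_modes_sketch(float32x4_t x) {
      uint32x4_t a = vcvtaq_u32_f32(x);   /* to nearest, ties away from zero */
      uint32x4_t n = vcvtnq_u32_f32(x);   /* to nearest, ties to even        */
      uint32x4_t p = vcvtpq_u32_f32(x);   /* toward +infinity (ceiling)      */
      uint32x4_t m = vcvtmq_u32_f32(x);   /* toward -infinity (floor)        */
      return vaddq_u32(vaddq_u32(a, n), vaddq_u32(p, m));
    }
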
arm64_vshift.c:89 uint32x4_t test_vqshlq_n_u32(uint32x4_t in) { in test_vqshlq_n_u32()
186 uint32x4_t test_vrshrq_n_u32(uint32x4_t in) { in test_vrshrq_n_u32()
345 uint32x4_t test_vrsraq_n_u32(uint32x4_t acc, uint32x4_t in) { in test_vrsraq_n_u32()
arm64_vcopy.c:40 uint32x4_t test_vcopyq_laneq_u32(uint32x4_t a1, uint32x4_t a2) { in test_vcopyq_laneq_u32()
aarch64-neon-across.c:63 uint64_t test_vaddlvq_u32(uint32x4_t a) { in test_vaddlvq_u32()
123 uint32_t test_vmaxvq_u32(uint32x4_t a) { in test_vmaxvq_u32()
183 uint32_t test_vminvq_u32(uint32x4_t a) { in test_vminvq_u32()
243 uint32_t test_vaddvq_u32(uint32x4_t a) { in test_vaddvq_u32()
arm64_vsli.c:121 uint32x4_t test_vsliq_n_u32(uint32x4_t a1, uint32x4_t a2) { in test_vsliq_n_u32()
arm64_vsri.c:122 uint32x4_t test_vsriq_n_u32(uint32x4_t a1, uint32x4_t a2) { in test_vsriq_n_u32()
arm64-vrsqrt.c:18 uint32x4_t test_vrsqrteq_u32(uint32x4_t in) { in test_vrsqrteq_u32()
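
The remaining small tests in this directory (arm64_vshift.c, arm64_vcopy.c, aarch64-neon-across.c, arm64_vsli.c, arm64_vsri.c, arm64-vrsqrt.c) cover immediate shifts, shift-and-insert, lane copies and across-vector reductions. A combined sketch, with illustrative names and constants:

    #include <arm_neon.h>

    /* Sketch: immediate shifts, shift-insert and across-vector reductions on
     * uint32x4_t (the lane-copy and reduction forms are AArch64-only). */
    static uint64_t shift_reduce_sketch(uint32x4_t acc, uint32x4_t in) {
      uint32x4_t a = vqshlq_n_u32(in, 3);       /* saturating shift left by 3 */
      uint32x4_t b = vrshrq_n_u32(in, 7);       /* rounding shift right by 7  */
      acc = vrsraq_n_u32(acc, in, 7);           /* acc += rounding(in >> 7)   */
      acc = vsliq_n_u32(acc, a, 4);             /* shift-left-and-insert      */
      acc = vsriq_n_u32(acc, b, 4);             /* shift-right-and-insert     */
      acc = vcopyq_laneq_u32(acc, 0, in, 3);    /* acc[0] = in[3]             */
      return vaddlvq_u32(acc) + vmaxvq_u32(in); /* widening sum plus max      */
    }
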
/external/scrypt/lib/crypto/
crypto_scrypt-neon-salsa208.h:15 const uint32x4_t abab = {-1,0,-1,0}; in salsa20_8_intrinsic()
26 uint32x4_t x0x5x10x15; in salsa20_8_intrinsic()
27 uint32x4_t x12x1x6x11; in salsa20_8_intrinsic()
28 uint32x4_t x8x13x2x7; in salsa20_8_intrinsic()
29 uint32x4_t x4x9x14x3; in salsa20_8_intrinsic()
31 uint32x4_t x0x1x10x11; in salsa20_8_intrinsic()
32 uint32x4_t x12x13x6x7; in salsa20_8_intrinsic()
33 uint32x4_t x8x9x2x3; in salsa20_8_intrinsic()
34 uint32x4_t x4x5x14x15; in salsa20_8_intrinsic()
36 uint32x4_t x0x1x2x3; in salsa20_8_intrinsic()
[all …]
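
The scrypt Salsa20/8 kernel above keeps the 4x4 state in four uint32x4_t registers arranged along diagonals. Salsa20 rotates by 7, 9, 13 and 18; on NEON a 32-bit rotate is typically built from a shift pair, roughly as follows (a hedged sketch, not the file's exact code; names are mine):

    #include <arm_neon.h>

    /* Sketch: one Salsa20-style add-rotate-xor step on vector registers.
     * rotl32(t, 7) is (t << 7) | (t >> 25), via vshlq_n_u32 + vsriq_n_u32. */
    static uint32x4_t salsa_arx_sketch(uint32x4_t target, uint32x4_t a, uint32x4_t b) {
      uint32x4_t t = vaddq_u32(a, b);                        /* a + b        */
      uint32x4_t r = vsriq_n_u32(vshlq_n_u32(t, 7), t, 25);  /* rotl32(t, 7) */
      return veorq_u32(target, r);                           /* target ^ r   */
    }
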
/external/scrypt/patches/
arm-neon.patch:21 + const uint32x4_t abab = {-1,0,-1,0};
32 + uint32x4_t x0x5x10x15;
33 + uint32x4_t x12x1x6x11;
34 + uint32x4_t x8x13x2x7;
35 + uint32x4_t x4x9x14x3;
37 + uint32x4_t x0x1x10x11;
38 + uint32x4_t x12x13x6x7;
39 + uint32x4_t x8x9x2x3;
40 + uint32x4_t x4x5x14x15;
42 + uint32x4_t x0x1x2x3;
[all …]
/external/chromium_org/third_party/boringssl/src/crypto/chacha/
chacha_vec.c:47 #define ROTV1(x) (vec) vextq_u32((uint32x4_t)x, (uint32x4_t)x, 1)
48 #define ROTV2(x) (vec) vextq_u32((uint32x4_t)x, (uint32x4_t)x, 2)
49 #define ROTV3(x) (vec) vextq_u32((uint32x4_t)x, (uint32x4_t)x, 3)
58 (vec) vsriq_n_u32(vshlq_n_u32((uint32x4_t)x, 7), (uint32x4_t)x, 25)
60 (vec) vsriq_n_u32(vshlq_n_u32((uint32x4_t)x, 8), (uint32x4_t)x, 24)
62 (vec) vsriq_n_u32(vshlq_n_u32((uint32x4_t)x, 12), (uint32x4_t)x, 20)
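
chacha_vec.c shows both rotation flavours explicitly: ROTW7/ROTW8/ROTW12 are shift-insert pairs implementing a per-lane rotate-left, and ROTV1..ROTV3 use vextq_u32 to rotate the lanes of a row. Stripped of the macro and cast plumbing, the two ideas reduce to (helper names are mine):

    #include <arm_neon.h>

    /* Sketch: per-lane bit rotation, (x << 12) | (x >> 20) in each lane. */
    static uint32x4_t rotl32_by12_sketch(uint32x4_t x) {
      return vsriq_n_u32(vshlq_n_u32(x, 12), x, 20);
    }

    /* Sketch: lane rotation across the register, {x1, x2, x3, x0}. */
    static uint32x4_t rotate_lanes_by1_sketch(uint32x4_t x) {
      return vextq_u32(x, x, 1);
    }
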
/external/chromium_org/third_party/webrtc/modules/audio_processing/aec/
aec_core_neon.c:103 const uint32x4_t vec_p_inf = vdupq_n_u32(0x7F800000); in vsqrtq_f32()
105 const uint32x4_t div_by_zero = vceqq_u32(vec_p_inf, vreinterpretq_u32_f32(x)); in vsqrtq_f32()
141 const uint32x4_t bigger = vcgtq_f32(absEf, kThresh); in ScaleErrorSignalNEON()
144 uint32x4_t ef_re_if = vreinterpretq_u32_f32(vmulq_f32(ef_re, absEfInv)); in ScaleErrorSignalNEON()
145 uint32x4_t ef_im_if = vreinterpretq_u32_f32(vmulq_f32(ef_im, absEfInv)); in ScaleErrorSignalNEON()
146 uint32x4_t ef_re_u32 = vandq_u32(vmvnq_u32(bigger), in ScaleErrorSignalNEON()
148 uint32x4_t ef_im_u32 = vandq_u32(vmvnq_u32(bigger), in ScaleErrorSignalNEON()
278 const uint32x4_t vec_float_exponent_mask = vdupq_n_u32(0x7F800000); in vpowq_f32()
279 const uint32x4_t vec_eight_biased_exponent = vdupq_n_u32(0x43800000); in vpowq_f32()
280 const uint32x4_t vec_implicit_leading_one = vdupq_n_u32(0x43BF8000); in vpowq_f32()
[all …]
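
aec_core_neon.c uses uint32x4_t as a bit-mask type over float data: comparisons such as vcgtq_f32 produce per-lane masks, and vandq_u32/vmvnq_u32 over vreinterpretq_u32_f32 values build a branchless select. A simplified sketch of that pattern (not the file's exact logic; names are mine):

    #include <arm_neon.h>

    /* Sketch: per-lane select of floats via an integer mask, mirroring the
     * vand/vmvn pattern in ScaleErrorSignalNEON. */
    static float32x4_t select_gt_sketch(float32x4_t x, float32x4_t y,
                                        float32x4_t thresh) {
      uint32x4_t bigger = vcgtq_f32(x, thresh);            /* ~0u where x > thresh */
      uint32x4_t xi = vreinterpretq_u32_f32(x);
      uint32x4_t yi = vreinterpretq_u32_f32(y);
      uint32x4_t r = vorrq_u32(vandq_u32(bigger, xi),              /* x if bigger */
                               vandq_u32(vmvnq_u32(bigger), yi));  /* else y      */
      return vreinterpretq_f32_u32(r);
    }
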
/external/chromium_org/third_party/libvpx/source/libvpx/vp9/encoder/arm/neon/
vp9_sad_neon.c:19 const uint32x4_t vec_l_lo = vaddl_u16(vget_low_u16(vec_lo), in horizontal_long_add_16x8()
21 const uint32x4_t vec_l_hi = vaddl_u16(vget_low_u16(vec_hi), in horizontal_long_add_16x8()
23 const uint32x4_t a = vaddq_u32(vec_l_lo, vec_l_hi); in horizontal_long_add_16x8()
30 const uint32x4_t a = vpaddlq_u16(vec_16x8); in horizontal_add_16x8()
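
vp9_sad_neon.c widens uint16x8_t SAD accumulators into uint32x4_t (vpaddlq_u16, or vaddl_u16 of the low/high halves) before the final horizontal reduction. A sketch of a full reduction to a scalar (names are mine; works on ARMv7 NEON and AArch64):

    #include <arm_neon.h>

    /* Sketch: reduce a uint16x8_t accumulator to a single uint32_t total. */
    static uint32_t horizontal_sum_u16x8_sketch(uint16x8_t v) {
      uint32x4_t a = vpaddlq_u16(v);   /* pairwise widen-and-add: 8 x u16 -> 4 x u32 */
      uint64x2_t b = vpaddlq_u32(a);   /* pairwise again:         4 x u32 -> 2 x u64 */
      return (uint32_t)(vgetq_lane_u64(b, 0) + vgetq_lane_u64(b, 1));
    }
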
/external/webp/src/dsp/
enc_neon.c:256 uint32x4_t out = { 0, 0, 0, 0 }; in Load4x4()
636 uint32x4_t d0d1 = { 0, 0, 0, 0 }; in Disto4x4()
637 uint32x4_t d2d3 = { 0, 0, 0, 0 }; in Disto4x4()
915 uint32x4_t* const sum) { in AccumulateSSE16()
925 static int SumToInt(uint32x4_t sum) { in SumToInt()
932 uint32x4_t sum = { 0, 0, 0, 0 }; in SSE16x16()
941 uint32x4_t sum = { 0, 0, 0, 0 }; in SSE16x8()
950 uint32x4_t sum = { 0, 0, 0, 0 }; in SSE8x8()
982 const uint32x4_t bias0 = vld1q_u32(&mtx->bias_[offset + 0]); in Quantize()
983 const uint32x4_t bias1 = vld1q_u32(&mtx->bias_[offset + 4]); in Quantize()
[all …]
/external/chromium_org/third_party/libwebp/dsp/
enc_neon.c:256 uint32x4_t out = { 0, 0, 0, 0 }; in Load4x4()
636 uint32x4_t d0d1 = { 0, 0, 0, 0 }; in Disto4x4()
637 uint32x4_t d2d3 = { 0, 0, 0, 0 }; in Disto4x4()
915 uint32x4_t* const sum) { in AccumulateSSE16()
925 static int SumToInt(uint32x4_t sum) { in SumToInt()
932 uint32x4_t sum = { 0, 0, 0, 0 }; in SSE16x16()
941 uint32x4_t sum = { 0, 0, 0, 0 }; in SSE16x8()
950 uint32x4_t sum = { 0, 0, 0, 0 }; in SSE8x8()
982 const uint32x4_t bias0 = vld1q_u32(&mtx->bias_[offset + 0]); in Quantize()
983 const uint32x4_t bias1 = vld1q_u32(&mtx->bias_[offset + 4]); in Quantize()
[all …]
/external/chromium_org/third_party/libvpx/source/libvpx/vp8/common/arm/neon/
sad_neon.c:20 uint32x4_t q1; in vp8_sad8x8_neon()
54 uint32x4_t q1; in vp8_sad8x16_neon()
119 uint32x4_t q1; in vp8_sad16x16_neon()
156 uint32x4_t q1; in vp8_sad16x8_neon()
