
Searched refs:vget_low_u32 (Results 1 – 25 of 44) sorted by relevance
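For orientation: vget_low_u32 returns the low 64-bit half (lanes 0 and 1) of a 128-bit uint32x4_t as a uint32x2_t, and vget_high_u32 returns the upper half. A minimal sketch with hypothetical demo values:

```c
#include <arm_neon.h>

/* vget_low_u32 takes the low two 32-bit lanes of a 128-bit vector;
 * vget_high_u32 takes the upper two. The demo values are hypothetical. */
static inline uint32x2_t low_half_example(void) {
  const uint32_t data[4] = {1u, 2u, 3u, 4u};
  const uint32x4_t v = vld1q_u32(data); /* v = {1, 2, 3, 4} */
  return vget_low_u32(v);               /* -> {1, 2} */
}
```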


/external/scrypt/lib/crypto/
crypto_scrypt-neon-salsa208.h 46 x0x1x10x11 = vcombine_u32(vget_low_u32(x0x1x2x3), vget_high_u32(x8x9x10x11)); in salsa20_8_intrinsic()
47 x4x5x14x15 = vcombine_u32(vget_low_u32(x4x5x6x7), vget_high_u32(x12x13x14x15)); in salsa20_8_intrinsic()
48 x8x9x2x3 = vcombine_u32(vget_low_u32(x8x9x10x11), vget_high_u32(x0x1x2x3)); in salsa20_8_intrinsic()
49 x12x13x6x7 = vcombine_u32(vget_low_u32(x12x13x14x15), vget_high_u32(x4x5x6x7)); in salsa20_8_intrinsic()
111 x0x1x2x3 = vcombine_u32(vget_low_u32(x0x1x10x11),vget_high_u32(x8x9x2x3)); in salsa20_8_intrinsic()
112 x4x5x6x7 = vcombine_u32(vget_low_u32(x4x5x14x15),vget_high_u32(x12x13x6x7)); in salsa20_8_intrinsic()
113 x8x9x10x11 = vcombine_u32(vget_low_u32(x8x9x2x3),vget_high_u32(x0x1x10x11)); in salsa20_8_intrinsic()
114 x12x13x14x15 = vcombine_u32(vget_low_u32(x12x13x6x7),vget_high_u32(x4x5x14x15)); in salsa20_8_intrinsic()
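These scrypt lines pair vget_low_u32/vget_high_u32 with vcombine_u32 to swap the 64-bit halves of two 128-bit registers before and after the Salsa20/8 rounds. A minimal sketch of that half-swap, with illustrative names rather than the file's own variables:

```c
#include <arm_neon.h>

/* Swap 64-bit halves between two 128-bit vectors:
 * out_a = { low(a), high(b) }, out_b = { low(b), high(a) }. */
static inline void swap_halves_u32(uint32x4_t a, uint32x4_t b,
                                   uint32x4_t *out_a, uint32x4_t *out_b) {
  *out_a = vcombine_u32(vget_low_u32(a), vget_high_u32(b));
  *out_b = vcombine_u32(vget_low_u32(b), vget_high_u32(a));
}
```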
/external/XNNPACK/src/qu8-requantization/
precise-neon.c 81 …const int64x2_t x01_adjusted_product = vaddw_s32(x01_product, vreinterpret_s32_u32(vget_low_u32(x_… in xnn_qu8_requantize_precise__neon()
83 …const int64x2_t y01_adjusted_product = vaddw_s32(y01_product, vreinterpret_s32_u32(vget_low_u32(y_… in xnn_qu8_requantize_precise__neon()
85 …const int64x2_t z01_adjusted_product = vaddw_s32(z01_product, vreinterpret_s32_u32(vget_low_u32(z_… in xnn_qu8_requantize_precise__neon()
87 …const int64x2_t w01_adjusted_product = vaddw_s32(w01_product, vreinterpret_s32_u32(vget_low_u32(w_… in xnn_qu8_requantize_precise__neon()
90 …const int64x2_t x01_adjusted_product = vaddw_s32(x01_product, vreinterpret_s32_u32(vget_low_u32(x_… in xnn_qu8_requantize_precise__neon()
92 …const int64x2_t y01_adjusted_product = vaddw_s32(y01_product, vreinterpret_s32_u32(vget_low_u32(y_… in xnn_qu8_requantize_precise__neon()
94 …const int64x2_t z01_adjusted_product = vaddw_s32(z01_product, vreinterpret_s32_u32(vget_low_u32(z_… in xnn_qu8_requantize_precise__neon()
96 …const int64x2_t w01_adjusted_product = vaddw_s32(w01_product, vreinterpret_s32_u32(vget_low_u32(w_… in xnn_qu8_requantize_precise__neon()
/external/XNNPACK/src/qs8-requantization/
precise-neon.c 81 …const int64x2_t x01_adjusted_product = vaddw_s32(x01_product, vreinterpret_s32_u32(vget_low_u32(x_… in xnn_qs8_requantize_precise__neon()
83 …const int64x2_t y01_adjusted_product = vaddw_s32(y01_product, vreinterpret_s32_u32(vget_low_u32(y_… in xnn_qs8_requantize_precise__neon()
85 …const int64x2_t z01_adjusted_product = vaddw_s32(z01_product, vreinterpret_s32_u32(vget_low_u32(z_… in xnn_qs8_requantize_precise__neon()
87 …const int64x2_t w01_adjusted_product = vaddw_s32(w01_product, vreinterpret_s32_u32(vget_low_u32(w_… in xnn_qs8_requantize_precise__neon()
90 …const int64x2_t x01_adjusted_product = vaddw_s32(x01_product, vreinterpret_s32_u32(vget_low_u32(x_… in xnn_qs8_requantize_precise__neon()
92 …const int64x2_t y01_adjusted_product = vaddw_s32(y01_product, vreinterpret_s32_u32(vget_low_u32(y_… in xnn_qs8_requantize_precise__neon()
94 …const int64x2_t z01_adjusted_product = vaddw_s32(z01_product, vreinterpret_s32_u32(vget_low_u32(z_… in xnn_qs8_requantize_precise__neon()
96 …const int64x2_t w01_adjusted_product = vaddw_s32(w01_product, vreinterpret_s32_u32(vget_low_u32(w_… in xnn_qs8_requantize_precise__neon()
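The qu8 and qs8 requantization hits (truncated above) show the same widening-accumulate step: the low two lanes of a rounding vector are reinterpreted as signed 32-bit values and widened onto 64-bit products with vaddw_s32. A hedged sketch of that step with illustrative names, not the XNNPACK source itself:

```c
#include <arm_neon.h>

/* Widen-accumulate: reinterpret the low two lanes of a uint32x4_t as signed
 * 32-bit values and add them onto two 64-bit lanes. Illustrative names. */
static inline int64x2_t add_low_rounding(int64x2_t product01,
                                         uint32x4_t rounding) {
  return vaddw_s32(product01, vreinterpret_s32_u32(vget_low_u32(rounding)));
}
```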
/external/libvpx/libvpx/vpx_dsp/arm/
sad4d_neon.c 96 const uint32x2_t c0 = vpadd_u32(vget_low_u32(b0), vget_high_u32(b0)); in sad_1024_pel_final_neon()
97 const uint32x2_t c1 = vpadd_u32(vget_low_u32(b1), vget_high_u32(b1)); in sad_1024_pel_final_neon()
108 const uint32x2_t b0 = vadd_u32(vget_low_u32(a0), vget_high_u32(a0)); in sad_2048_pel_final_neon()
109 const uint32x2_t b1 = vadd_u32(vget_low_u32(a1), vget_high_u32(a1)); in sad_2048_pel_final_neon()
110 const uint32x2_t b2 = vadd_u32(vget_low_u32(a2), vget_high_u32(a2)); in sad_2048_pel_final_neon()
111 const uint32x2_t b3 = vadd_u32(vget_low_u32(a3), vget_high_u32(a3)); in sad_2048_pel_final_neon()
132 const uint32x2_t c0 = vadd_u32(vget_low_u32(b0), vget_high_u32(b0)); in sad_4096_pel_final_neon()
133 const uint32x2_t c1 = vadd_u32(vget_low_u32(b1), vget_high_u32(b1)); in sad_4096_pel_final_neon()
134 const uint32x2_t c2 = vadd_u32(vget_low_u32(b2), vget_high_u32(b2)); in sad_4096_pel_final_neon()
135 const uint32x2_t c3 = vadd_u32(vget_low_u32(b3), vget_high_u32(b3)); in sad_4096_pel_final_neon()
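The SAD kernels reduce a 4-lane accumulator by adding its low and high halves and then pairwise-adding the result. A sketch of a full horizontal sum built on the same idea (the libvpx code stops at the uint32x2_t stage):

```c
#include <arm_neon.h>

/* Horizontal sum of four 32-bit lanes: low + high halves, then a pairwise
 * add, then extract lane 0. Illustrative; works on both ARMv7 and AArch64. */
static inline uint32_t horizontal_add_u32x4(uint32x4_t a) {
  const uint32x2_t b = vadd_u32(vget_low_u32(a), vget_high_u32(a));
  const uint32x2_t c = vpadd_u32(b, b);
  return vget_lane_u32(c, 0);
}
```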
sum_squares_neon.c 32 s1 = vpadd_u32(vget_low_u32(vreinterpretq_u32_s32(s0)), in vpx_sum_squares_2d_i16_neon()
75 s1 = vaddw_u32(s1, vget_low_u32(vreinterpretq_u32_s32(s0))); in vpx_sum_squares_2d_i16_neon()
transpose_neon.h 51 b0.val[0] = vcombine_u8(vreinterpret_u8_u32(vget_low_u32(a0)), in vpx_vtrnq_u64_to_u8()
52 vreinterpret_u8_u32(vget_low_u32(a1))); in vpx_vtrnq_u64_to_u8()
60 b0.val[0] = vcombine_u16(vreinterpret_u16_u32(vget_low_u32(a0)), in vpx_vtrnq_u64_to_u16()
61 vreinterpret_u16_u32(vget_low_u32(a1))); in vpx_vtrnq_u64_to_u16()
176 vcombine_u32(vget_low_u32(b0.val[0]), vget_low_u32(b0.val[1])); in transpose_u16_4x4q()
561 *a0 = vreinterpret_u8_u32(vget_low_u32(d0.val[0])); in transpose_u8_8x8()
563 *a2 = vreinterpret_u8_u32(vget_low_u32(d1.val[0])); in transpose_u8_8x8()
565 *a4 = vreinterpret_u8_u32(vget_low_u32(d0.val[1])); in transpose_u8_8x8()
567 *a6 = vreinterpret_u8_u32(vget_low_u32(d1.val[1])); in transpose_u8_8x8()
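transpose_neon.h builds the low half of a 64-bit transpose by combining the low 64 bits of two registers, since ARMv7 NEON has no 64-bit vtrn. A sketch of that step with illustrative names:

```c
#include <arm_neon.h>

/* Lower half of a 64-bit transpose step: pair the low 64 bits of a0 and a1
 * and view the result as bytes. Names are illustrative. */
static inline uint8x16_t trn_low_u64_to_u8(uint32x4_t a0, uint32x4_t a1) {
  return vcombine_u8(vreinterpret_u8_u32(vget_low_u32(a0)),
                     vreinterpret_u8_u32(vget_low_u32(a1)));
}
```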
/external/XNNPACK/src/x32-zip/
xm-neon.c 49 vst1_u32(output, vget_low_u32(vxy.val[0])); in xnn_x32_zip_xm_ukernel__neon()
50 vst1_u32(output + 2, vget_low_u32(vzw.val[0])); in xnn_x32_zip_xm_ukernel__neon()
57 vst1_u32(output, vget_low_u32(vxy.val[1])); in xnn_x32_zip_xm_ukernel__neon()
58 vst1_u32(output + 2, vget_low_u32(vzw.val[1])); in xnn_x32_zip_xm_ukernel__neon()
/external/XNNPACK/src/x32-pad/
neon.c 41 vst1_u32(output, vget_low_u32(vfill)); output += 2; in xnn_x32_pad_ukernel__neon()
58 uint32x2_t vtmp_lo = vget_low_u32(vtmp); in xnn_x32_pad_ukernel__neon()
75 vst1_u32(output, vget_low_u32(vfill)); output += 2; in xnn_x32_pad_ukernel__neon()
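The x32-zip and x32-pad kernels store only the low two lanes of a 128-bit value when two 32-bit elements remain. A hedged sketch of that remainder store (store_low_pair is an illustrative name, not the XNNPACK code):

```c
#include <arm_neon.h>

/* Store the low two 32-bit lanes of a 128-bit value and advance the output
 * pointer; a sketch of the two-element remainder path. */
static inline uint32_t *store_low_pair(uint32_t *output, uint32x4_t vfill) {
  vst1_u32(output, vget_low_u32(vfill));
  return output + 2;
}
```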
/external/libhevc/encoder/arm/
ihevce_ssd_and_sad_calculator_neon.c 87 ssd = vadd_u32(vget_low_u32(b), vget_high_u32(b)); in ihevce_ssd_and_sad_calculator_neon()
119 ssd = vadd_u32(vget_low_u32(sqabs_sum), vget_high_u32(sqabs_sum)); in ihevce_ssd_and_sad_calculator_neon()
166 ssd = vadd_u32(vget_low_u32(sqabs_sum_l), vget_high_u32(sqabs_sum_l)); in ihevce_ssd_and_sad_calculator_neon()
225 ssd = vadd_u32(vget_low_u32(sqabs_sum_l), vget_high_u32(sqabs_sum_l)); in ihevce_ssd_and_sad_calculator_neon()
312 ssd = vadd_u32(vget_low_u32(sqabs_sum_l), vget_high_u32(sqabs_sum_l)); in ihevce_ssd_and_sad_calculator_neon()
/external/scrypt/patches/
arm_neon.patch 52 + x0x1x10x11 = vcombine_u32(vget_low_u32(x0x1x2x3), vget_high_u32(x8x9x10x11));
53 + x4x5x14x15 = vcombine_u32(vget_low_u32(x4x5x6x7), vget_high_u32(x12x13x14x15));
54 + x8x9x2x3 = vcombine_u32(vget_low_u32(x8x9x10x11), vget_high_u32(x0x1x2x3));
55 + x12x13x6x7 = vcombine_u32(vget_low_u32(x12x13x14x15), vget_high_u32(x4x5x6x7));
117 + x0x1x2x3 = vcombine_u32(vget_low_u32(x0x1x10x11),vget_high_u32(x8x9x2x3));
118 + x4x5x6x7 = vcombine_u32(vget_low_u32(x4x5x14x15),vget_high_u32(x12x13x6x7));
119 + x8x9x10x11 = vcombine_u32(vget_low_u32(x8x9x2x3),vget_high_u32(x0x1x10x11));
120 + x12x13x14x15 = vcombine_u32(vget_low_u32(x12x13x6x7),vget_high_u32(x4x5x14x15));
/external/rust/crates/libz-sys/src/zlib-ng/arch/arm/
adler32_neon.c 59 adacc2 = vpadd_u32(vget_low_u32(adacc), vget_high_u32(adacc)); in NEON_accum32()
60 s2acc2 = vpadd_u32(vget_low_u32(s2acc), vget_high_u32(s2acc)); in NEON_accum32()
/external/XNNPACK/src/x32-unpool/
neon.c 32 vst1_u32(o, vget_low_u32(vfill)); o += 2; in xnn_x32_unpool_ukernel__neon()
/external/libgav1/libgav1/src/dsp/arm/
common_neon.h 460 b0.val[0] = vcombine_u16(vreinterpret_u16_u32(vget_low_u32(a0)), in VtrnqU64()
461 vreinterpret_u16_u32(vget_low_u32(a1))); in VtrnqU64()
628 a[0] = vreinterpret_u8_u32(vget_low_u32(d0.val[0])); in Transpose8x8()
630 a[2] = vreinterpret_u8_u32(vget_low_u32(d1.val[0])); in Transpose8x8()
632 a[4] = vreinterpret_u8_u32(vget_low_u32(d0.val[1])); in Transpose8x8()
634 a[6] = vreinterpret_u8_u32(vget_low_u32(d1.val[1])); in Transpose8x8()
intrapred_directional_neon.cc 375 StoreLo4(dst, vreinterpret_u8_u32(vget_low_u32(d0.val[0]))); in DirectionalZone3_WxH()
379 StoreLo4(dst, vreinterpret_u8_u32(vget_low_u32(d1.val[0]))); in DirectionalZone3_WxH()
384 StoreLo4(dst, vreinterpret_u8_u32(vget_low_u32(d0.val[1]))); in DirectionalZone3_WxH()
388 StoreLo4(dst, vreinterpret_u8_u32(vget_low_u32(d1.val[1]))); in DirectionalZone3_WxH()
392 vst1_u8(dst, vreinterpret_u8_u32(vget_low_u32(d0.val[0]))); in DirectionalZone3_WxH()
396 vst1_u8(dst, vreinterpret_u8_u32(vget_low_u32(d1.val[0]))); in DirectionalZone3_WxH()
401 vst1_u8(dst, vreinterpret_u8_u32(vget_low_u32(d0.val[1]))); in DirectionalZone3_WxH()
405 vst1_u8(dst, vreinterpret_u8_u32(vget_low_u32(d1.val[1]))); in DirectionalZone3_WxH()
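The libgav1 directional-prediction hits write just the low 4 bytes of a d-register via its StoreLo4 helper. A sketch of what such a 4-byte store can look like; this is not libgav1's actual implementation:

```c
#include <arm_neon.h>
#include <string.h>

/* Write the low 4 bytes of an 8-byte vector to dst; a sketch of a
 * StoreLo4-style helper, not libgav1's definition. */
static inline void store_lo4_sketch(uint8_t *dst, uint8x8_t val) {
  const uint32_t lo = vget_lane_u32(vreinterpret_u32_u8(val), 0);
  memcpy(dst, &lo, sizeof(lo));
}
```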
/external/libaom/libaom/aom_dsp/arm/
sse_neon.c 33 uint32x2_t d4 = vadd_u32(vget_low_u32(q8), vget_high_u32(q8)); in sse_W16x1_neon()
34 uint32x2_t d5 = vadd_u32(vget_low_u32(q9), vget_high_u32(q9)); in sse_W16x1_neon()
238 uint32x2_t d4 = vadd_u32(vget_low_u32(q6), vget_high_u32(q6)); in highbd_sse_W8x1_neon()
239 uint32x2_t d5 = vadd_u32(vget_low_u32(q7), vget_high_u32(q7)); in highbd_sse_W8x1_neon()
/external/XNNPACK/src/x32-fill/
neon.c 41 vst1_u32(output, vget_low_u32(vfill)); output += 2; in xnn_x32_fill_ukernel__neon()
/external/libaom/libaom/av1/common/arm/
transpose_neon.h 60 *a0 = vreinterpret_u8_u32(vget_low_u32(d0.val[0])); in transpose_u8_8x8()
62 *a2 = vreinterpret_u8_u32(vget_low_u32(d1.val[0])); in transpose_u8_8x8()
64 *a4 = vreinterpret_u8_u32(vget_low_u32(d0.val[1])); in transpose_u8_8x8()
66 *a6 = vreinterpret_u8_u32(vget_low_u32(d1.val[1])); in transpose_u8_8x8()
/external/zlib/
adler32_simd.c 309 uint32x2_t sum1 = vpadd_u32(vget_low_u32(v_s1), vget_high_u32(v_s1)); in adler32_simd_()
310 uint32x2_t sum2 = vpadd_u32(vget_low_u32(v_s2), vget_high_u32(v_s2)); in adler32_simd_()
/external/angle/third_party/zlib/
adler32_simd.c 309 uint32x2_t sum1 = vpadd_u32(vget_low_u32(v_s1), vget_high_u32(v_s1)); in adler32_simd_()
310 uint32x2_t sum2 = vpadd_u32(vget_low_u32(v_s2), vget_high_u32(v_s2)); in adler32_simd_()
/external/llvm-project/clang/test/CodeGen/
arm-neon-vget.c 40 return vget_low_u32(a); in low_u32()
aarch64-neon-vget-hilo.c 152 return vget_low_u32(a); in test_vget_low_u32()
/external/clang/test/CodeGen/
arm-neon-vget.c 40 return vget_low_u32(a); in low_u32()
aarch64-neon-vget-hilo.c 152 return vget_low_u32(a); in test_vget_low_u32()
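Both clang test suites exercise the intrinsic directly; omitting the test attributes and FileCheck lines, the tested function reduces to:

```c
#include <arm_neon.h>

/* Shape of the function exercised by the CodeGen tests: return the low
 * 64-bit half of a 128-bit vector. */
uint32x2_t low_u32(uint32x4_t a) {
  return vget_low_u32(a);
}
```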
/external/webp/src/dsp/
rescaler_neon.c 53 const uint64x2_t C0 = vmull_n_u32(vget_low_u32(A0), A); in Interpolate_NEON()
55 const uint64x2_t D0 = vmlal_n_u32(C0, vget_low_u32(B0), B); in Interpolate_NEON()
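The WebP rescaler widens the low halves with a multiply and a multiply-accumulate by scalar weights: vmull_n_u32 followed by vmlal_n_u32. A sketch of that interpolation step (function and parameter names are illustrative):

```c
#include <arm_neon.h>

/* Widening interpolation on the low halves: C0 = A0_lo * A, then
 * C0 + B0_lo * B, all in 64-bit lanes. Names are illustrative. */
static inline uint64x2_t interpolate_low(uint32x4_t A0, uint32x4_t B0,
                                         uint32_t A, uint32_t B) {
  const uint64x2_t C0 = vmull_n_u32(vget_low_u32(A0), A);
  return vmlal_n_u32(C0, vget_low_u32(B0), B);
}
```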
/external/libvpx/libvpx/vp8/encoder/arm/neon/
fastquantizeb_neon.c 78 eob_d32 = vmax_u32(vget_low_u32(eob_q32), vget_high_u32(eob_q32)); in vp8_fast_quantize_b_neon()
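The VP8 fast quantizer folds its end-of-block candidates by taking the per-lane maximum of the low and high halves. A minimal sketch:

```c
#include <arm_neon.h>

/* Per-lane maximum of the low and high halves, used to fold end-of-block
 * candidates down to two lanes. Illustrative sketch. */
static inline uint32x2_t max_low_high_u32(uint32x4_t eob_q32) {
  return vmax_u32(vget_low_u32(eob_q32), vget_high_u32(eob_q32));
}
```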
