/external/scrypt/lib/crypto/ |
D | crypto_scrypt-neon-salsa208.h |
  46   x0x1x10x11 = vcombine_u32(vget_low_u32(x0x1x2x3), vget_high_u32(x8x9x10x11));  in salsa20_8_intrinsic()
  47   x4x5x14x15 = vcombine_u32(vget_low_u32(x4x5x6x7), vget_high_u32(x12x13x14x15));  in salsa20_8_intrinsic()
  48   x8x9x2x3 = vcombine_u32(vget_low_u32(x8x9x10x11), vget_high_u32(x0x1x2x3));  in salsa20_8_intrinsic()
  49   x12x13x6x7 = vcombine_u32(vget_low_u32(x12x13x14x15), vget_high_u32(x4x5x6x7));  in salsa20_8_intrinsic()
  111  x0x1x2x3 = vcombine_u32(vget_low_u32(x0x1x10x11),vget_high_u32(x8x9x2x3));  in salsa20_8_intrinsic()
  112  x4x5x6x7 = vcombine_u32(vget_low_u32(x4x5x14x15),vget_high_u32(x12x13x6x7));  in salsa20_8_intrinsic()
  113  x8x9x10x11 = vcombine_u32(vget_low_u32(x8x9x2x3),vget_high_u32(x0x1x10x11));  in salsa20_8_intrinsic()
  114  x12x13x14x15 = vcombine_u32(vget_low_u32(x12x13x6x7),vget_high_u32(x4x5x14x15));  in salsa20_8_intrinsic()
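These are scrypt's Salsa20/8 rounds shuffling between the row and diagonal lane arrangements; lines 111-114 apply the inverse mixing to restore the original layout. A minimal standalone sketch of the idiom (illustrative names, not the scrypt code; compiles only for a NEON-capable ARM target): vcombine_u32(vget_low_u32(a), vget_high_u32(b)) yields lanes {a0, a1, b2, b3}.

    #include <arm_neon.h>
    #include <stdio.h>

    int main(void) {
        const uint32_t a_in[4] = { 0, 1, 2, 3 };    /* stands in for x0x1x2x3   */
        const uint32_t b_in[4] = { 8, 9, 10, 11 };  /* stands in for x8x9x10x11 */
        const uint32x4_t a = vld1q_u32(a_in);
        const uint32x4_t b = vld1q_u32(b_in);

        /* Low half of a, high half of b: {0, 1, 10, 11}, i.e. "x0x1x10x11". */
        const uint32x4_t mixed = vcombine_u32(vget_low_u32(a), vget_high_u32(b));

        uint32_t out[4];
        vst1q_u32(out, mixed);
        printf("%u %u %u %u\n", (unsigned)out[0], (unsigned)out[1],
               (unsigned)out[2], (unsigned)out[3]);  /* prints: 0 1 10 11 */
        return 0;
    }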
|
/external/XNNPACK/src/qu8-requantization/ |
D | precise-neon.c |
  81   …const int64x2_t x01_adjusted_product = vaddw_s32(x01_product, vreinterpret_s32_u32(vget_low_u32(x_…  in xnn_qu8_requantize_precise__neon()
  83   …const int64x2_t y01_adjusted_product = vaddw_s32(y01_product, vreinterpret_s32_u32(vget_low_u32(y_…  in xnn_qu8_requantize_precise__neon()
  85   …const int64x2_t z01_adjusted_product = vaddw_s32(z01_product, vreinterpret_s32_u32(vget_low_u32(z_…  in xnn_qu8_requantize_precise__neon()
  87   …const int64x2_t w01_adjusted_product = vaddw_s32(w01_product, vreinterpret_s32_u32(vget_low_u32(w_…  in xnn_qu8_requantize_precise__neon()
  90   …const int64x2_t x01_adjusted_product = vaddw_s32(x01_product, vreinterpret_s32_u32(vget_low_u32(x_…  in xnn_qu8_requantize_precise__neon()
  92   …const int64x2_t y01_adjusted_product = vaddw_s32(y01_product, vreinterpret_s32_u32(vget_low_u32(y_…  in xnn_qu8_requantize_precise__neon()
  94   …const int64x2_t z01_adjusted_product = vaddw_s32(z01_product, vreinterpret_s32_u32(vget_low_u32(z_…  in xnn_qu8_requantize_precise__neon()
  96   …const int64x2_t w01_adjusted_product = vaddw_s32(w01_product, vreinterpret_s32_u32(vget_low_u32(w_…  in xnn_qu8_requantize_precise__neon()
|
/external/XNNPACK/src/qs8-requantization/ |
D | precise-neon.c |
  81   …const int64x2_t x01_adjusted_product = vaddw_s32(x01_product, vreinterpret_s32_u32(vget_low_u32(x_…  in xnn_qs8_requantize_precise__neon()
  83   …const int64x2_t y01_adjusted_product = vaddw_s32(y01_product, vreinterpret_s32_u32(vget_low_u32(y_…  in xnn_qs8_requantize_precise__neon()
  85   …const int64x2_t z01_adjusted_product = vaddw_s32(z01_product, vreinterpret_s32_u32(vget_low_u32(z_…  in xnn_qs8_requantize_precise__neon()
  87   …const int64x2_t w01_adjusted_product = vaddw_s32(w01_product, vreinterpret_s32_u32(vget_low_u32(w_…  in xnn_qs8_requantize_precise__neon()
  90   …const int64x2_t x01_adjusted_product = vaddw_s32(x01_product, vreinterpret_s32_u32(vget_low_u32(x_…  in xnn_qs8_requantize_precise__neon()
  92   …const int64x2_t y01_adjusted_product = vaddw_s32(y01_product, vreinterpret_s32_u32(vget_low_u32(y_…  in xnn_qs8_requantize_precise__neon()
  94   …const int64x2_t z01_adjusted_product = vaddw_s32(z01_product, vreinterpret_s32_u32(vget_low_u32(z_…  in xnn_qs8_requantize_precise__neon()
  96   …const int64x2_t w01_adjusted_product = vaddw_s32(w01_product, vreinterpret_s32_u32(vget_low_u32(w_…  in xnn_qs8_requantize_precise__neon()
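The qu8 and qs8 kernels above share this adjustment step. The hit lines are truncated, so the mask operand's name is lost; a sketch under the assumption that the elided argument is a per-lane comparison mask (lanes 0 or 0xFFFFFFFF, i.e. -1 once reinterpreted as signed), which vaddw_s32 sign-extends and adds to the 64-bit products:

    #include <arm_neon.h>

    /* Sketch only: decrement the two low 64-bit products wherever the
     * (assumed) comparison mask is all-ones. vaddw_s32 widens the
     * int32x2_t lanes with sign extension before adding. */
    static int64x2_t adjust_products_low(int64x2_t products01, uint32x4_t mask) {
        return vaddw_s32(products01, vreinterpret_s32_u32(vget_low_u32(mask)));
    }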
|
/external/libvpx/libvpx/vpx_dsp/arm/ |
D | sad4d_neon.c |
  96   const uint32x2_t c0 = vpadd_u32(vget_low_u32(b0), vget_high_u32(b0));  in sad_1024_pel_final_neon()
  97   const uint32x2_t c1 = vpadd_u32(vget_low_u32(b1), vget_high_u32(b1));  in sad_1024_pel_final_neon()
  108  const uint32x2_t b0 = vadd_u32(vget_low_u32(a0), vget_high_u32(a0));  in sad_2048_pel_final_neon()
  109  const uint32x2_t b1 = vadd_u32(vget_low_u32(a1), vget_high_u32(a1));  in sad_2048_pel_final_neon()
  110  const uint32x2_t b2 = vadd_u32(vget_low_u32(a2), vget_high_u32(a2));  in sad_2048_pel_final_neon()
  111  const uint32x2_t b3 = vadd_u32(vget_low_u32(a3), vget_high_u32(a3));  in sad_2048_pel_final_neon()
  132  const uint32x2_t c0 = vadd_u32(vget_low_u32(b0), vget_high_u32(b0));  in sad_4096_pel_final_neon()
  133  const uint32x2_t c1 = vadd_u32(vget_low_u32(b1), vget_high_u32(b1));  in sad_4096_pel_final_neon()
  134  const uint32x2_t c2 = vadd_u32(vget_low_u32(b2), vget_high_u32(b2));  in sad_4096_pel_final_neon()
  135  const uint32x2_t c3 = vadd_u32(vget_low_u32(b3), vget_high_u32(b3));  in sad_4096_pel_final_neon()
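All three finalizers reduce uint32x4_t SAD accumulators toward scalars. A sketch of the complete idiom (the listed lines show the first fold; the pairwise add and lane read are the usual follow-up):

    #include <arm_neon.h>

    /* Sum all four u32 lanes: fold the high half onto the low half to get
     * two partial sums, pairwise-add them, then read out lane 0. */
    static uint32_t horizontal_sum_u32(uint32x4_t a) {
        const uint32x2_t b = vadd_u32(vget_low_u32(a), vget_high_u32(a));
        const uint32x2_t c = vpadd_u32(b, b);
        return vget_lane_u32(c, 0);
    }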
|
D | sum_squares_neon.c |
  32   s1 = vpadd_u32(vget_low_u32(vreinterpretq_u32_s32(s0)),  in vpx_sum_squares_2d_i16_neon()
  75   s1 = vaddw_u32(s1, vget_low_u32(vreinterpretq_u32_s32(s0)));  in vpx_sum_squares_2d_i16_neon()
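Line 75 is a widening accumulation: u32 partial sums are folded into a u64 accumulator so long running sums of squares cannot wrap at 32 bits. A sketch of just that step (names illustrative):

    #include <arm_neon.h>

    /* Add the low two u32 lanes of s0 into a pair of 64-bit accumulators. */
    static uint64x2_t accumulate_low_u32(uint64x2_t s1, uint32x4_t s0) {
        return vaddw_u32(s1, vget_low_u32(s0));
    }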
|
D | transpose_neon.h |
  51   b0.val[0] = vcombine_u8(vreinterpret_u8_u32(vget_low_u32(a0)),  in vpx_vtrnq_u64_to_u8()
  52   vreinterpret_u8_u32(vget_low_u32(a1)));  in vpx_vtrnq_u64_to_u8()
  60   b0.val[0] = vcombine_u16(vreinterpret_u16_u32(vget_low_u32(a0)),  in vpx_vtrnq_u64_to_u16()
  61   vreinterpret_u16_u32(vget_low_u32(a1)));  in vpx_vtrnq_u64_to_u16()
  176  vcombine_u32(vget_low_u32(b0.val[0]), vget_low_u32(b0.val[1]));  in transpose_u16_4x4q()
  561  *a0 = vreinterpret_u8_u32(vget_low_u32(d0.val[0]));  in transpose_u8_8x8()
  563  *a2 = vreinterpret_u8_u32(vget_low_u32(d1.val[0]));  in transpose_u8_8x8()
  565  *a4 = vreinterpret_u8_u32(vget_low_u32(d0.val[1]));  in transpose_u8_8x8()
  567  *a6 = vreinterpret_u8_u32(vget_low_u32(d1.val[1]));  in transpose_u8_8x8()
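Helpers like vpx_vtrnq_u64_to_u16 exist because 32-bit NEON has no 64-bit vtrn: the 64-bit transpose step is built by hand from vget_low_u32/vget_high_u32 plus vcombine. A sketch matching the shape of the listed lines (the val[1] half is inferred by symmetry, not shown in the hits):

    #include <arm_neon.h>

    static uint16x8x2_t vtrnq_u64_to_u16_sketch(uint32x4_t a0, uint32x4_t a1) {
        uint16x8x2_t b0;
        /* Pair the low 64 bits of a0 with the low 64 bits of a1... */
        b0.val[0] = vcombine_u16(vreinterpret_u16_u32(vget_low_u32(a0)),
                                 vreinterpret_u16_u32(vget_low_u32(a1)));
        /* ...and the high 64 bits with each other. */
        b0.val[1] = vcombine_u16(vreinterpret_u16_u32(vget_high_u32(a0)),
                                 vreinterpret_u16_u32(vget_high_u32(a1)));
        return b0;
    }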
|
/external/XNNPACK/src/x32-zip/ |
D | xm-neon.c |
  49   vst1_u32(output, vget_low_u32(vxy.val[0]));  in xnn_x32_zip_xm_ukernel__neon()
  50   vst1_u32(output + 2, vget_low_u32(vzw.val[0]));  in xnn_x32_zip_xm_ukernel__neon()
  57   vst1_u32(output, vget_low_u32(vxy.val[1]));  in xnn_x32_zip_xm_ukernel__neon()
  58   vst1_u32(output + 2, vget_low_u32(vzw.val[1]));  in xnn_x32_zip_xm_ukernel__neon()
|
/external/XNNPACK/src/x32-pad/ |
D | neon.c |
  41   vst1_u32(output, vget_low_u32(vfill)); output += 2;  in xnn_x32_pad_ukernel__neon()
  58   uint32x2_t vtmp_lo = vget_low_u32(vtmp);  in xnn_x32_pad_ukernel__neon()
  75   vst1_u32(output, vget_low_u32(vfill)); output += 2;  in xnn_x32_pad_ukernel__neon()
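Line 58 narrows a uint32x4_t for the sub-vector tail of the pad loop. A sketch of the general tail-store pattern (hypothetical helper, not the XNNPACK code): write the low half with vst1_u32 while two or more values remain, then a single lane with vst1_lane_u32.

    #include <arm_neon.h>
    #include <stddef.h>

    /* Store n (1..3) lanes of v to output; returns the advanced pointer. */
    static uint32_t *store_tail_u32(uint32_t *output, uint32x4_t v, size_t n) {
        uint32x2_t v_lo = vget_low_u32(v);
        if (n & 2) {
            vst1_u32(output, v_lo);
            output += 2;
            v_lo = vget_high_u32(v);
        }
        if (n & 1) {
            vst1_lane_u32(output, v_lo, 0);
            output += 1;
        }
        return output;
    }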
|
/external/libhevc/encoder/arm/ |
D | ihevce_ssd_and_sad_calculator_neon.c |
  87   ssd = vadd_u32(vget_low_u32(b), vget_high_u32(b));  in ihevce_ssd_and_sad_calculator_neon()
  119  ssd = vadd_u32(vget_low_u32(sqabs_sum), vget_high_u32(sqabs_sum));  in ihevce_ssd_and_sad_calculator_neon()
  166  ssd = vadd_u32(vget_low_u32(sqabs_sum_l), vget_high_u32(sqabs_sum_l));  in ihevce_ssd_and_sad_calculator_neon()
  225  ssd = vadd_u32(vget_low_u32(sqabs_sum_l), vget_high_u32(sqabs_sum_l));  in ihevce_ssd_and_sad_calculator_neon()
  312  ssd = vadd_u32(vget_low_u32(sqabs_sum_l), vget_high_u32(sqabs_sum_l));  in ihevce_ssd_and_sad_calculator_neon()
|
/external/scrypt/patches/ |
D | arm_neon.patch |
  52   + x0x1x10x11 = vcombine_u32(vget_low_u32(x0x1x2x3), vget_high_u32(x8x9x10x11));
  53   + x4x5x14x15 = vcombine_u32(vget_low_u32(x4x5x6x7), vget_high_u32(x12x13x14x15));
  54   + x8x9x2x3 = vcombine_u32(vget_low_u32(x8x9x10x11), vget_high_u32(x0x1x2x3));
  55   + x12x13x6x7 = vcombine_u32(vget_low_u32(x12x13x14x15), vget_high_u32(x4x5x6x7));
  117  + x0x1x2x3 = vcombine_u32(vget_low_u32(x0x1x10x11),vget_high_u32(x8x9x2x3));
  118  + x4x5x6x7 = vcombine_u32(vget_low_u32(x4x5x14x15),vget_high_u32(x12x13x6x7));
  119  + x8x9x10x11 = vcombine_u32(vget_low_u32(x8x9x2x3),vget_high_u32(x0x1x10x11));
  120  + x12x13x14x15 = vcombine_u32(vget_low_u32(x12x13x6x7),vget_high_u32(x4x5x14x15));
|
/external/rust/crates/libz-sys/src/zlib-ng/arch/arm/ |
D | adler32_neon.c |
  59   adacc2 = vpadd_u32(vget_low_u32(adacc), vget_high_u32(adacc));  in NEON_accum32()
  60   s2acc2 = vpadd_u32(vget_low_u32(s2acc), vget_high_u32(s2acc));  in NEON_accum32()
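The two Adler-32 accumulators are reduced with pairwise adds. A sketch extending the listed lines by one step (the final vpadd is an addition of mine, not shown in the hits) to land both sums in a single uint32x2_t:

    #include <arm_neon.h>

    /* Reduce the s1 and s2 accumulators: lane 0 = sum of adacc's four
     * lanes, lane 1 = sum of s2acc's four lanes. */
    static uint32x2_t reduce_adler_sums(uint32x4_t adacc, uint32x4_t s2acc) {
        const uint32x2_t adacc2 = vpadd_u32(vget_low_u32(adacc), vget_high_u32(adacc));
        const uint32x2_t s2acc2 = vpadd_u32(vget_low_u32(s2acc), vget_high_u32(s2acc));
        return vpadd_u32(adacc2, s2acc2);
    }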
|
/external/XNNPACK/src/x32-unpool/ |
D | neon.c |
  32   vst1_u32(o, vget_low_u32(vfill)); o += 2;  in xnn_x32_unpool_ukernel__neon()
|
/external/libgav1/libgav1/src/dsp/arm/ |
D | common_neon.h |
  460  b0.val[0] = vcombine_u16(vreinterpret_u16_u32(vget_low_u32(a0)),  in VtrnqU64()
  461  vreinterpret_u16_u32(vget_low_u32(a1)));  in VtrnqU64()
  628  a[0] = vreinterpret_u8_u32(vget_low_u32(d0.val[0]));  in Transpose8x8()
  630  a[2] = vreinterpret_u8_u32(vget_low_u32(d1.val[0]));  in Transpose8x8()
  632  a[4] = vreinterpret_u8_u32(vget_low_u32(d0.val[1]));  in Transpose8x8()
  634  a[6] = vreinterpret_u8_u32(vget_low_u32(d1.val[1]));  in Transpose8x8()
|
D | intrapred_directional_neon.cc |
  375  StoreLo4(dst, vreinterpret_u8_u32(vget_low_u32(d0.val[0])));  in DirectionalZone3_WxH()
  379  StoreLo4(dst, vreinterpret_u8_u32(vget_low_u32(d1.val[0])));  in DirectionalZone3_WxH()
  384  StoreLo4(dst, vreinterpret_u8_u32(vget_low_u32(d0.val[1])));  in DirectionalZone3_WxH()
  388  StoreLo4(dst, vreinterpret_u8_u32(vget_low_u32(d1.val[1])));  in DirectionalZone3_WxH()
  392  vst1_u8(dst, vreinterpret_u8_u32(vget_low_u32(d0.val[0])));  in DirectionalZone3_WxH()
  396  vst1_u8(dst, vreinterpret_u8_u32(vget_low_u32(d1.val[0])));  in DirectionalZone3_WxH()
  401  vst1_u8(dst, vreinterpret_u8_u32(vget_low_u32(d0.val[1])));  in DirectionalZone3_WxH()
  405  vst1_u8(dst, vreinterpret_u8_u32(vget_low_u32(d1.val[1])));  in DirectionalZone3_WxH()
|
/external/libaom/libaom/aom_dsp/arm/ |
D | sse_neon.c |
  33   uint32x2_t d4 = vadd_u32(vget_low_u32(q8), vget_high_u32(q8));  in sse_W16x1_neon()
  34   uint32x2_t d5 = vadd_u32(vget_low_u32(q9), vget_high_u32(q9));  in sse_W16x1_neon()
  238  uint32x2_t d4 = vadd_u32(vget_low_u32(q6), vget_high_u32(q6));  in highbd_sse_W8x1_neon()
  239  uint32x2_t d5 = vadd_u32(vget_low_u32(q7), vget_high_u32(q7));  in highbd_sse_W8x1_neon()
|
/external/XNNPACK/src/x32-fill/ |
D | neon.c |
  41   vst1_u32(output, vget_low_u32(vfill)); output += 2;  in xnn_x32_fill_ukernel__neon()
|
/external/libaom/libaom/av1/common/arm/ |
D | transpose_neon.h |
  60   *a0 = vreinterpret_u8_u32(vget_low_u32(d0.val[0]));  in transpose_u8_8x8()
  62   *a2 = vreinterpret_u8_u32(vget_low_u32(d1.val[0]));  in transpose_u8_8x8()
  64   *a4 = vreinterpret_u8_u32(vget_low_u32(d0.val[1]));  in transpose_u8_8x8()
  66   *a6 = vreinterpret_u8_u32(vget_low_u32(d1.val[1]));  in transpose_u8_8x8()
|
/external/zlib/ |
D | adler32_simd.c |
  309  uint32x2_t sum1 = vpadd_u32(vget_low_u32(v_s1), vget_high_u32(v_s1));  in adler32_simd_()
  310  uint32x2_t sum2 = vpadd_u32(vget_low_u32(v_s2), vget_high_u32(v_s2));  in adler32_simd_()
|
/external/angle/third_party/zlib/ |
D | adler32_simd.c |
  309  uint32x2_t sum1 = vpadd_u32(vget_low_u32(v_s1), vget_high_u32(v_s1));  in adler32_simd_()
  310  uint32x2_t sum2 = vpadd_u32(vget_low_u32(v_s2), vget_high_u32(v_s2));  in adler32_simd_()
|
/external/llvm-project/clang/test/CodeGen/ |
D | arm-neon-vget.c |
  40   return vget_low_u32(a);  in low_u32()
|
D | aarch64-neon-vget-hilo.c |
  152  return vget_low_u32(a);  in test_vget_low_u32()
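These tests (and their duplicated copies under /external/clang below) check code generation rather than behavior: vget_low_u32 reads the low 64 bits of a 128-bit register, so it should typically lower to a subregister access with no data movement. The function under test has this shape:

    #include <arm_neon.h>

    uint32x2_t low_u32(uint32x4_t a) {
        return vget_low_u32(a);
    }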
|
/external/clang/test/CodeGen/ |
D | arm-neon-vget.c |
  40   return vget_low_u32(a);  in low_u32()
|
D | aarch64-neon-vget-hilo.c |
  152  return vget_low_u32(a);  in test_vget_low_u32()
|
/external/webp/src/dsp/ |
D | rescaler_neon.c |
  53   const uint64x2_t C0 = vmull_n_u32(vget_low_u32(A0), A);  in Interpolate_NEON()
  55   const uint64x2_t D0 = vmlal_n_u32(C0, vget_low_u32(B0), B);  in Interpolate_NEON()
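The rescaler widens while multiplying: vmull_n_u32 produces u64 lanes from u32 inputs times a scalar weight, and vmlal_n_u32 fuses the second product into the accumulator, so the per-lane A0*A + B0*B cannot overflow 32 bits. A sketch of the step for the low halves:

    #include <arm_neon.h>

    static uint64x2_t weighted_sum_low(uint32x4_t A0, uint32x4_t B0,
                                       uint32_t A, uint32_t B) {
        const uint64x2_t C0 = vmull_n_u32(vget_low_u32(A0), A);  /* A0_lo * A */
        return vmlal_n_u32(C0, vget_low_u32(B0), B);             /* + B0_lo * B */
    }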
|
/external/libvpx/libvpx/vp8/encoder/arm/neon/ |
D | fastquantizeb_neon.c |
  78   eob_d32 = vmax_u32(vget_low_u32(eob_q32), vget_high_u32(eob_q32));  in vp8_fast_quantize_b_neon()
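The end-of-block search keeps per-lane maxima and reduces at the end. A sketch completing the reduction the listed line starts (the vpmax and lane-read steps are the usual follow-up, not shown in the hit):

    #include <arm_neon.h>

    /* Maximum of all four u32 lanes. */
    static uint32_t max_lane_u32(uint32x4_t eob_q32) {
        uint32x2_t eob_d32 = vmax_u32(vget_low_u32(eob_q32), vget_high_u32(eob_q32));
        eob_d32 = vpmax_u32(eob_d32, eob_d32);
        return vget_lane_u32(eob_d32, 0);
    }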
|