/external/libvpx/libvpx/vp8/encoder/arm/neon/ |
D | vp8_shortwalsh4x4_neon.c |
    94  q8s32 = vreinterpretq_s32_u32(q8u32);  in vp8_short_walsh4x4_neon()
    95  q9s32 = vreinterpretq_s32_u32(q9u32);  in vp8_short_walsh4x4_neon()
    96  q10s32 = vreinterpretq_s32_u32(q10u32);  in vp8_short_walsh4x4_neon()
    97  q11s32 = vreinterpretq_s32_u32(q11u32);  in vp8_short_walsh4x4_neon()
|
/external/webrtc/webrtc/modules/audio_processing/aecm/ |
D | aecm_core_neon.c |
    87  vst1q_s32(echo_est_p, vreinterpretq_s32_u32(echo_est_v_low));  in WebRtcAecm_CalcLinearEnergiesNeon()
    88  vst1q_s32(echo_est_p + 4, vreinterpretq_s32_u32(echo_est_v_high));  in WebRtcAecm_CalcLinearEnergiesNeon()
   163  vst1q_s32(echo_est_p, vreinterpretq_s32_u32(echo_est_v_low));  in WebRtcAecm_StoreAdaptiveChannelNeon()
   164  vst1q_s32(echo_est_p + 4, vreinterpretq_s32_u32(echo_est_v_high));  in WebRtcAecm_StoreAdaptiveChannelNeon()
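The AECM matches store unsigned 32-bit echo-estimate vectors through an int32_t pointer. A minimal sketch of that idiom, with illustrative names rather than the WebRTC code, assuming a NEON-enabled build with <arm_neon.h>:

```c
#include <arm_neon.h>
#include <stdint.h>

/* Minimal sketch, not the WebRTC code: multiply two vectors of unsigned
 * 16-bit values into unsigned 32-bit products and store them through an
 * int32_t buffer. vreinterpretq_s32_u32() is a zero-cost bit cast that lets
 * the uint32x4_t results be written with vst1q_s32(). */
static void store_unsigned_products(int32_t out[8], const uint16_t a[8],
                                    const uint16_t b[8]) {
  const uint16x8_t va = vld1q_u16(a);
  const uint16x8_t vb = vld1q_u16(b);
  const uint32x4_t lo = vmull_u16(vget_low_u16(va), vget_low_u16(vb));
  const uint32x4_t hi = vmull_u16(vget_high_u16(va), vget_high_u16(vb));
  vst1q_s32(out, vreinterpretq_s32_u32(lo));
  vst1q_s32(out + 4, vreinterpretq_s32_u32(hi));
}
```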
|
/external/gemmlowp/fixedpoint/ |
D | fixedpoint_neon.h |
   151  return vreinterpretq_s32_u32(vceqq_s32(a, b));
   181  return vreinterpretq_s32_u32(vtstq_s32(a, a));
   191  return vreinterpretq_s32_u32(vcgtq_s32(a, b));
   201  return vreinterpretq_s32_u32(vcgeq_s32(a, b));
   211  return vreinterpretq_s32_u32(vcltq_s32(a, b));
   221  return vreinterpretq_s32_u32(vcleq_s32(a, b));
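These gemmlowp matches are the bodies of its NEON mask helpers: every NEON comparison returns a uint32x4_t of all-ones/all-zeros lanes, so a bit cast is needed to hand the mask back as an int32x4_t. Skia's SkNx_neon.h entries further down use the same idiom. A minimal sketch of the pattern, with illustrative helper names rather than gemmlowp's own:

```c
#include <arm_neon.h>

/* Illustrative helpers (names are not gemmlowp's): comparisons yield
 * uint32x4_t lane masks, which are bit-cast to int32x4_t so they can be
 * mixed freely with signed bitwise operations. */
static inline int32x4_t mask_if_equal_s32(int32x4_t a, int32x4_t b) {
  return vreinterpretq_s32_u32(vceqq_s32(a, b));  /* ~0 where a == b, else 0 */
}

static inline int32x4_t mask_if_nonzero_s32(int32x4_t a) {
  return vreinterpretq_s32_u32(vtstq_s32(a, a));  /* ~0 where a != 0, else 0 */
}

/* Typical consumer of such a mask: lane-wise select. */
static inline int32x4_t select_s32(int32x4_t mask, int32x4_t if_set,
                                   int32x4_t if_clear) {
  return vbslq_s32(vreinterpretq_u32_s32(mask), if_set, if_clear);
}
```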
|
/external/XNNPACK/src/requantization/ |
D | precise-neon.c |
    81  …const int64x2_t x23_adjusted_product = vaddw_high_s32(x23_product, vreinterpretq_s32_u32(x_neg_mas…  in xnn_requantize_precise__neon()
    83  …const int64x2_t y23_adjusted_product = vaddw_high_s32(y23_product, vreinterpretq_s32_u32(y_neg_mas…  in xnn_requantize_precise__neon()
    85  …const int64x2_t z23_adjusted_product = vaddw_high_s32(z23_product, vreinterpretq_s32_u32(z_neg_mas…  in xnn_requantize_precise__neon()
    87  …const int64x2_t w23_adjusted_product = vaddw_high_s32(w23_product, vreinterpretq_s32_u32(w_neg_mas…  in xnn_requantize_precise__neon()
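The truncated XNNPACK lines appear to add a negative-input mask (all-ones, i.e. -1, where the multiplicand is negative) into the widened 64-bit products; the same mask construction, vcltq_s32 against zero, shows up again in the q8-avgpool and q8-gavgpool kernels further down. A hedged sketch of the visible structure, AArch64 only because vaddw_high_s32 is an A64 intrinsic, with assumed variable names since the matches are cut off:

```c
#include <arm_neon.h>

/* Hedged sketch, not the XNNPACK code (the matches above are truncated).
 * neg_mask lanes are all-ones (== -1) where x is negative; widening-adding
 * the high half of that mask into the 64-bit products subtracts 1 from the
 * products of negative inputs. Requires AArch64 for vaddw_high_s32. */
static int64x2_t adjust_high_products(int64x2_t products_hi, int32x4_t x) {
  const int32x4_t neg_mask =
      vreinterpretq_s32_u32(vcltq_s32(x, vmovq_n_s32(0)));
  return vaddw_high_s32(products_hi, neg_mask);
}
```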
|
D | q31-neon.c | 48 const int32x4_t vshift_eq_0_mask = vreinterpretq_s32_u32(vceqq_s32(vshift, vmovq_n_s32(0))); in xnn_requantize_q31__neon()
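The q31 requantizer builds a mask that is all-ones only when the shift amount is zero; the q8 kernels below (q8-dwconv, q8-vadd, q8-igemm, q8-gemm) build the same vzero_shift_mask. A minimal sketch of why the cast is there, using illustrative names:

```c
#include <arm_neon.h>

/* Minimal sketch with illustrative names: vceqq_s32 returns a uint32x4_t
 * mask, so it is bit-cast to int32x4_t before being combined with signed
 * data. vbicq_s32(x, mask) clears the lanes whose shift amount is zero. */
static int32x4_t clear_lanes_where_shift_is_zero(int32x4_t x, int32x4_t vshift) {
  const int32x4_t vshift_eq_0_mask =
      vreinterpretq_s32_u32(vceqq_s32(vshift, vmovq_n_s32(0)));
  return vbicq_s32(x, vshift_eq_0_mask);
}
```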
|
/external/XNNPACK/src/q8-avgpool/ |
D | up9-neon.c |
   104  const int32x4_t vneg_mask_lo = vreinterpretq_s32_u32(vcltq_s32(vacc_lo, vmovq_n_s32(0)));  in xnn_q8_avgpool_ukernel_up9__neon()
   105  const int32x4_t vneg_mask_hi = vreinterpretq_s32_u32(vcltq_s32(vacc_hi, vmovq_n_s32(0)));  in xnn_q8_avgpool_ukernel_up9__neon()
   177  const int32x4_t vneg_mask_lo = vreinterpretq_s32_u32(vcltq_s32(vacc_lo, vmovq_n_s32(0)));  in xnn_q8_avgpool_ukernel_up9__neon()
   178  const int32x4_t vneg_mask_hi = vreinterpretq_s32_u32(vcltq_s32(vacc_hi, vmovq_n_s32(0)));  in xnn_q8_avgpool_ukernel_up9__neon()
|
D | mp9p8q-neon.c |
   184  const int32x4_t vneg_mask_lo = vreinterpretq_s32_u32(vcltq_s32(vacc_lo, vmovq_n_s32(0)));  in xnn_q8_avgpool_ukernel_mp9p8q__neon()
   185  const int32x4_t vneg_mask_hi = vreinterpretq_s32_u32(vcltq_s32(vacc_hi, vmovq_n_s32(0)));  in xnn_q8_avgpool_ukernel_mp9p8q__neon()
   258  const int32x4_t vneg_mask_lo = vreinterpretq_s32_u32(vcltq_s32(vacc_lo, vmovq_n_s32(0)));  in xnn_q8_avgpool_ukernel_mp9p8q__neon()
   259  const int32x4_t vneg_mask_hi = vreinterpretq_s32_u32(vcltq_s32(vacc_hi, vmovq_n_s32(0)));  in xnn_q8_avgpool_ukernel_mp9p8q__neon()
|
/external/XNNPACK/src/q8-gavgpool/ |
D | up7-neon.c |
    86  const int32x4_t vneg_mask_lo = vreinterpretq_s32_u32(vcltq_s32(vacc_lo, vmovq_n_s32(0)));  in xnn_q8_gavgpool_ukernel_up7__neon()
    87  const int32x4_t vneg_mask_hi = vreinterpretq_s32_u32(vcltq_s32(vacc_hi, vmovq_n_s32(0)));  in xnn_q8_gavgpool_ukernel_up7__neon()
   156  const int32x4_t vneg_mask_lo = vreinterpretq_s32_u32(vcltq_s32(vacc_lo, vmovq_n_s32(0)));  in xnn_q8_gavgpool_ukernel_up7__neon()
   157  const int32x4_t vneg_mask_hi = vreinterpretq_s32_u32(vcltq_s32(vacc_hi, vmovq_n_s32(0)));  in xnn_q8_gavgpool_ukernel_up7__neon()
|
D | mp7p7q-neon.c |
   162  const int32x4_t vneg_mask_lo = vreinterpretq_s32_u32(vcltq_s32(vacc_lo, vmovq_n_s32(0)));  in xnn_q8_gavgpool_ukernel_mp7p7q__neon()
   163  const int32x4_t vneg_mask_hi = vreinterpretq_s32_u32(vcltq_s32(vacc_hi, vmovq_n_s32(0)));  in xnn_q8_gavgpool_ukernel_mp7p7q__neon()
   234  const int32x4_t vneg_mask_lo = vreinterpretq_s32_u32(vcltq_s32(vacc_lo, vmovq_n_s32(0)));  in xnn_q8_gavgpool_ukernel_mp7p7q__neon()
   235  const int32x4_t vneg_mask_hi = vreinterpretq_s32_u32(vcltq_s32(vacc_hi, vmovq_n_s32(0)));  in xnn_q8_gavgpool_ukernel_mp7p7q__neon()
|
/external/libaom/libaom/av1/common/arm/ |
D | selfguided_neon.c |
   114  store_s32_4x4(src2, buf_stride, vreinterpretq_s32_u32(p0),  in calc_ab_fast_internal_common()
   115  vreinterpretq_s32_u32(p1), vreinterpretq_s32_u32(p2),  in calc_ab_fast_internal_common()
   116  vreinterpretq_s32_u32(p3));  in calc_ab_fast_internal_common()
   255  store_s32_4x4(dst2, buf_stride, vreinterpretq_s32_u32(p0),  in calc_ab_internal_common()
   256  vreinterpretq_s32_u32(p1), vreinterpretq_s32_u32(p2),  in calc_ab_internal_common()
   257  vreinterpretq_s32_u32(p3));  in calc_ab_internal_common()
   258  store_s32_4x4(dst2 + 4, buf_stride, vreinterpretq_s32_u32(p4),  in calc_ab_internal_common()
   259  vreinterpretq_s32_u32(p5), vreinterpretq_s32_u32(p6),  in calc_ab_internal_common()
   260  vreinterpretq_s32_u32(p7));  in calc_ab_internal_common()
   954  *a0 = vreinterpretq_s32_u32(  in cross_sum_inp_u16()
   [all …]
|
D | jnt_convolve_neon.c |
    44  dst0 = vsubq_s32(vreinterpretq_s32_u32(sum0), vmovl_s16(sub_const_vec));  in compute_avg_4x1()
    89  dst0 = vsubq_s32(vreinterpretq_s32_u32(sum0), sub_const_vec);  in compute_avg_8x1()
    90  dst2 = vsubq_s32(vreinterpretq_s32_u32(sum2), sub_const_vec);  in compute_avg_8x1()
   149  dst0 = vsubq_s32(vreinterpretq_s32_u32(sum0), const_vec);  in compute_avg_4x4()
   150  dst1 = vsubq_s32(vreinterpretq_s32_u32(sum1), const_vec);  in compute_avg_4x4()
   151  dst2 = vsubq_s32(vreinterpretq_s32_u32(sum2), const_vec);  in compute_avg_4x4()
   152  dst3 = vsubq_s32(vreinterpretq_s32_u32(sum3), const_vec);  in compute_avg_4x4()
   245  dst0 = vsubq_s32(vreinterpretq_s32_u32(sum0), sub_const_vec);  in compute_avg_8x4()
   246  dst1 = vsubq_s32(vreinterpretq_s32_u32(sum1), sub_const_vec);  in compute_avg_8x4()
   247  dst2 = vsubq_s32(vreinterpretq_s32_u32(sum2), sub_const_vec);  in compute_avg_8x4()
   [all …]
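In the libaom compute_avg_* helpers, the unsigned widened sum of two predictions is bit-cast to int32x4_t so a signed offset can be subtracted (the result can legitimately go negative). A simplified sketch of that step, not the libaom code:

```c
#include <arm_neon.h>

/* Simplified sketch, not the libaom code: widen-add two unsigned 16-bit
 * prediction vectors, then bit-cast to int32x4_t so the compound-offset
 * constant can be subtracted with signed arithmetic. */
static int32x4_t blend_and_remove_offset(uint16x4_t pred0, uint16x4_t pred1,
                                         int32x4_t sub_const_vec) {
  const uint32x4_t sum0 = vaddl_u16(pred0, pred1);  /* widening unsigned add */
  return vsubq_s32(vreinterpretq_s32_u32(sum0), sub_const_vec);
}
```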
|
D | warp_plane_neon.c |
   641  int32x4_t tmp32_lo = vreinterpretq_s32_u32(vmovl_u16(tmp16_lo));  in av1_warp_affine_neon()
   672  int32x4_t tmp32_hi = vreinterpretq_s32_u32(vmovl_u16(tmp16_hi));  in av1_warp_affine_neon()
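The warp matches widen unsigned 16-bit intermediates with vmovl_u16 (which returns uint32x4_t) and immediately bit-cast to int32x4_t; gemmlowp's pack_neon.h and Skia's SkNx_neon.h below do the same. A one-line sketch:

```c
#include <arm_neon.h>

/* The values are known to fit in 16 unsigned bits, so the widened lanes are
 * non-negative and the bit cast to a signed vector is lossless. */
static int32x4_t widen_u16_to_s32(uint16x4_t v) {
  return vreinterpretq_s32_u32(vmovl_u16(v));
}
```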
|
/external/webp/src/dsp/ |
D | rescaler_neon.c |
    41  vreinterpretq_u32_s32(vqrdmulhq_s32(vreinterpretq_s32_u32((A)), (B)))
    43  vreinterpretq_u32_s32(vqdmulhq_s32(vreinterpretq_s32_u32((A)), (B)))
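The libwebp rescaler macros wrap an unsigned fixed-point value in a double cast because vqrdmulhq_s32/vqdmulhq_s32 only exist for signed lanes. A sketch of the idiom as a function (an illustrative wrapper, not the actual macro); this is only sound while the operands stay below 2^31 so the sign bit is never set:

```c
#include <arm_neon.h>

/* Illustrative wrapper, not libwebp's macro: rounding-doubling high multiply
 * of unsigned fixed-point lanes, done by bit-casting to int32x4_t (the only
 * element type vqrdmulhq supports) and casting the result back. */
static uint32x4_t mult_fix_u32(uint32x4_t a, int32x4_t scale) {
  return vreinterpretq_u32_s32(vqrdmulhq_s32(vreinterpretq_s32_u32(a), scale));
}
```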
|
/external/skia/include/private/ |
D | SkNx_neon.h |
   553  return vreinterpretq_s32_u32(vceqq_s32(fVec, o.fVec));
   556  return vreinterpretq_s32_u32(vcltq_s32(fVec, o.fVec));
   559  return vreinterpretq_s32_u32(vcgtq_s32(fVec, o.fVec));
   662  return vreinterpretq_s32_u32(SkNx_cast<uint32_t>(src).fVec);
   718  return vreinterpretq_s32_u32(vmovl_u16(src.fVec));
   726  return vreinterpretq_s32_u32(src.fVec);
|
/external/skqp/include/private/ |
D | SkNx_neon.h |
   553  return vreinterpretq_s32_u32(vceqq_s32(fVec, o.fVec));
   556  return vreinterpretq_s32_u32(vcltq_s32(fVec, o.fVec));
   559  return vreinterpretq_s32_u32(vcgtq_s32(fVec, o.fVec));
   662  return vreinterpretq_s32_u32(SkNx_cast<uint32_t>(src).fVec);
   718  return vreinterpretq_s32_u32(vmovl_u16(src.fVec));
   726  return vreinterpretq_s32_u32(src.fVec);
|
/external/gemmlowp/internal/ |
D | pack_neon.h |
   111  sums_of_4_cells[cell][i] = vreinterpretq_s32_u32(  in Pack()
   239  int32x4_t s = vreinterpretq_s32_u32(vmovl_u16(sums_of_16[cell]));  in Pack()
|
/external/XNNPACK/src/q8-dwconv/ |
D | up8x9-neon.c |
   186  … const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0)));  in xnn_q8_dwconv_ukernel_up8x9__neon()
   344  … const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0)));  in xnn_q8_dwconv_ukernel_up8x9__neon()
   491  … const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0)));  in xnn_q8_dwconv_ukernel_up8x9__neon()
   582  … const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0)));  in xnn_q8_dwconv_ukernel_up8x9__neon()
|
/external/libopus/silk/arm/ |
D | NSQ_del_dec_neon_intr.c |
   645  …Seed_s32x4 = vreinterpretq_s32_u32( vmlaq_u32( rand_increment_u32x4, vreinterpretq_u32_s32( Seed_s…  in silk_noise_shape_quantizer_del_dec_neon()
   694  sign_s32x4 = vreinterpretq_s32_u32( vcltq_s32( Seed_s32x4, vdupq_n_s32( 0 ) ) );  in silk_noise_shape_quantizer_del_dec_neon()
   830  tmp1_s32x4 = vaddq_s32( tmp1_s32x4, vreinterpretq_s32_u32( t_u32x4 ) );  in silk_noise_shape_quantizer_del_dec_neon()
   831  tmp2_s32x4 = vaddq_s32( tmp2_s32x4, vreinterpretq_s32_u32( t_u32x4 ) );  in silk_noise_shape_quantizer_del_dec_neon()
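In the SILK noise-shape quantizer, four pseudo-random seeds are stepped in parallel; the multiply-accumulate is done on uint32x4_t so the overflow is well-defined modulo-2^32 wraparound, and the result is bit-cast back to the signed vector the rest of the code uses (line 694 then takes its sign as a mask). A hedged sketch of the visible structure; the multiplier operand is assumed because the match on line 645 is truncated:

```c
#include <arm_neon.h>

/* Hedged sketch of the seed update (the original match is truncated, so the
 * multiplier argument is assumed): seed = increment + seed * multiplier,
 * computed in the unsigned domain to get well-defined wraparound. */
static int32x4_t step_lcg4(int32x4_t seed, uint32x4_t increment,
                           uint32x4_t multiplier) {
  return vreinterpretq_s32_u32(
      vmlaq_u32(increment, vreinterpretq_u32_s32(seed), multiplier));
}

/* Line 694: all-ones/all-zeros signed mask marking the negative seeds. */
static int32x4_t seed_sign_mask(int32x4_t seed) {
  return vreinterpretq_s32_u32(vcltq_s32(seed, vdupq_n_s32(0)));
}
```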
|
/external/skqp/src/core/ |
D | SkBitmapProcState_matrixProcs.cpp |
   374  return vreinterpretq_s32_u32(tmp);  in repeat4()
   389  ret = vshrq_n_s32(vreinterpretq_s32_u32(tmp), 12);  in extract_low_bits_repeat_mirror4()
|
/external/libvpx/libvpx/vp9/encoder/arm/neon/ |
D | vp9_quantize_neon.c | 137 return vreinterpretq_s32_u32(vshrq_n_u32(vreinterpretq_u32_s32(a), 31)); in extract_sign_bit()
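extract_sign_bit() (the identical helper also appears in vpx_dsp/arm/quantize_neon.c below) wants 0 or 1 per lane, not the 0 or -1 an arithmetic shift of a signed lane would give, so it bit-casts to unsigned, does a logical shift right by 31, and casts back. Reproduced as a stand-alone sketch; the surrounding signature is assumed from the match:

```c
#include <arm_neon.h>

/* Leaves 1 in lanes that were negative and 0 elsewhere: the cast to
 * uint32x4_t makes the shift logical rather than arithmetic. */
static int32x4_t extract_sign_bit(int32x4_t a) {
  return vreinterpretq_s32_u32(vshrq_n_u32(vreinterpretq_u32_s32(a), 31));
}
```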
|
/external/libvpx/libvpx/vpx_dsp/arm/ |
D | quantize_neon.c | 164 return vreinterpretq_s32_u32(vshrq_n_u32(vreinterpretq_u32_s32(a), 31)); in extract_sign_bit()
|
D | fdct32x32_neon.c |
   653  const int32x4_t a_lo_sign_s32 = vreinterpretq_s32_u32(a_lo_sign_u32);  in add_round_shift_s32()
   658  const int32x4_t a_hi_sign_s32 = vreinterpretq_s32_u32(a_hi_sign_u32);  in add_round_shift_s32()
|
/external/XNNPACK/src/q8-vadd/ |
D | neon.c | 28 const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0))); in xnn_q8_vadd_ukernel__neon()
|
/external/XNNPACK/src/q8-igemm/ |
D | 4x8-neon.c | 331 … const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0))); in xnn_q8_igemm_ukernel_4x8__neon()
|
/external/XNNPACK/src/q8-gemm/ |
D | 4x8-neon.c | 294 … const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0))); in xnn_q8_gemm_ukernel_4x8__neon()
|