/external/libjpeg-turbo/simd/arm/ |
D | jquanti-neon.c |
    140  int32x4_t row0_l = vreinterpretq_s32_u32(vmull_u16(vget_low_u16(abs_row0),  in jsimd_quantize_neon()
    142  int32x4_t row0_h = vreinterpretq_s32_u32(vmull_u16(vget_high_u16(abs_row0),  in jsimd_quantize_neon()
    144  int32x4_t row1_l = vreinterpretq_s32_u32(vmull_u16(vget_low_u16(abs_row1),  in jsimd_quantize_neon()
    146  int32x4_t row1_h = vreinterpretq_s32_u32(vmull_u16(vget_high_u16(abs_row1),  in jsimd_quantize_neon()
    148  int32x4_t row2_l = vreinterpretq_s32_u32(vmull_u16(vget_low_u16(abs_row2),  in jsimd_quantize_neon()
    150  int32x4_t row2_h = vreinterpretq_s32_u32(vmull_u16(vget_high_u16(abs_row2),  in jsimd_quantize_neon()
    152  int32x4_t row3_l = vreinterpretq_s32_u32(vmull_u16(vget_low_u16(abs_row3),  in jsimd_quantize_neon()
    154  int32x4_t row3_h = vreinterpretq_s32_u32(vmull_u16(vget_high_u16(abs_row3),  in jsimd_quantize_neon()
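All of these libjpeg-turbo hits share one pattern: a widening unsigned multiply (vmull_u16) produces a uint32x4_t, which is immediately bit-cast to int32x4_t so the rest of the quantizer can run in signed arithmetic. A minimal sketch of that pattern, assuming an ARM NEON target (names are illustrative, not the library's):

    #include <arm_neon.h>

    /* Sketch: widen 16-bit magnitudes against 16-bit factors, then continue
     * in signed 32-bit arithmetic.  The reinterpret changes only the type,
     * never the bits. */
    static inline int32x4_t widen_mul_as_s32(uint16x8_t abs_coeffs,
                                             uint16x8_t recip_factors) {
      uint32x4_t product_lo = vmull_u16(vget_low_u16(abs_coeffs),
                                        vget_low_u16(recip_factors));
      return vreinterpretq_s32_u32(product_lo);
    }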
|
/external/libvpx/libvpx/vp8/encoder/arm/neon/ |
D | vp8_shortwalsh4x4_neon.c |
    94  q8s32 = vreinterpretq_s32_u32(q8u32);  in vp8_short_walsh4x4_neon()
    95  q9s32 = vreinterpretq_s32_u32(q9u32);  in vp8_short_walsh4x4_neon()
    96  q10s32 = vreinterpretq_s32_u32(q10u32);  in vp8_short_walsh4x4_neon()
    97  q11s32 = vreinterpretq_s32_u32(q11u32);  in vp8_short_walsh4x4_neon()
|
/external/webrtc/modules/audio_processing/aecm/ |
D | aecm_core_neon.cc |
    79  vst1q_s32(echo_est_p, vreinterpretq_s32_u32(echo_est_v_low));  in WebRtcAecm_CalcLinearEnergiesNeon()
    80  vst1q_s32(echo_est_p + 4, vreinterpretq_s32_u32(echo_est_v_high));  in WebRtcAecm_CalcLinearEnergiesNeon()
    155  vst1q_s32(echo_est_p, vreinterpretq_s32_u32(echo_est_v_low));  in WebRtcAecm_StoreAdaptiveChannelNeon()
    156  vst1q_s32(echo_est_p + 4, vreinterpretq_s32_u32(echo_est_v_high));  in WebRtcAecm_StoreAdaptiveChannelNeon()
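In the AECM kernels the reinterpret exists only to satisfy the store: the echo estimates live in unsigned registers, but the destination buffer is int32_t, so the vectors are bit-cast before vst1q_s32. A sketch of that store step, with placeholder names:

    #include <arm_neon.h>

    /* Sketch: write two unsigned 32-bit vectors into an int32_t buffer.
     * The bit pattern is stored unchanged; only the vector type differs. */
    static inline void store_u32x8_as_s32(int32_t *dst,
                                          uint32x4_t v_low, uint32x4_t v_high) {
      vst1q_s32(dst, vreinterpretq_s32_u32(v_low));
      vst1q_s32(dst + 4, vreinterpretq_s32_u32(v_high));
    }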
|
/external/libgav1/libgav1/src/dsp/arm/ |
D | distance_weighted_blend_neon.cc |
    205  const int32x4_t res_lo = vsubq_s32(vreinterpretq_s32_u32(blended_lo), offset);  in ComputeWeightedAverage8()
    206  const int32x4_t res_hi = vsubq_s32(vreinterpretq_s32_u32(blended_hi), offset);  in ComputeWeightedAverage8()
    225  const int32x4_t res0 = vsubq_s32(vreinterpretq_s32_u32(blended0), offset);  in ComputeWeightedAverage8()
    226  const int32x4_t res1 = vsubq_s32(vreinterpretq_s32_u32(blended1), offset);  in ComputeWeightedAverage8()
    231  const int32x4_t res2 = vsubq_s32(vreinterpretq_s32_u32(blended2), offset);  in ComputeWeightedAverage8()
    232  const int32x4_t res3 = vsubq_s32(vreinterpretq_s32_u32(blended3), offset);  in ComputeWeightedAverage8()
|
D | average_blend_neon.cc |
    153  vsubq_s32(vreinterpretq_s32_u32(pred_lo), compound_offset);  in AverageBlend8Row()
    155  vsubq_s32(vreinterpretq_s32_u32(pred_hi), compound_offset);  in AverageBlend8Row()
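Both libgav1 blend kernels accumulate the weighted predictions as uint32x4_t and then need to subtract a signed compound offset, so the sum is reinterpreted as int32x4_t first. A sketch of that single step (names invented; the sum is assumed to fit in 31 bits so the bit-cast cannot change its value):

    #include <arm_neon.h>

    /* Sketch: remove a signed offset from an unsigned accumulator. */
    static inline int32x4_t remove_offset(uint32x4_t blended_sum,
                                          int32x4_t compound_offset) {
      return vsubq_s32(vreinterpretq_s32_u32(blended_sum), compound_offset);
    }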
|
/external/gemmlowp/fixedpoint/ |
D | fixedpoint_neon.h |
    151  return vreinterpretq_s32_u32(vceqq_s32(a, b));
    181  return vreinterpretq_s32_u32(vtstq_s32(a, a));
    191  return vreinterpretq_s32_u32(vcgtq_s32(a, b));
    201  return vreinterpretq_s32_u32(vcgeq_s32(a, b));
    211  return vreinterpretq_s32_u32(vcltq_s32(a, b));
    221  return vreinterpretq_s32_u32(vcleq_s32(a, b));
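gemmlowp's fixed-point layer keeps comparison masks in int32x4_t, but the NEON comparison intrinsics (vceqq_s32, vcgtq_s32, vcltq_s32, ...) all return uint32x4_t lanes of all ones or all zeros, hence the reinterpret on every return. A minimal sketch of two such mask helpers, under that reading of the surrounding functions (the helper names below are illustrative, not gemmlowp's):

    #include <arm_neon.h>

    /* Sketch: comparison masks held in a signed register type.
     * Each lane is 0xFFFFFFFF (true) or 0x00000000 (false). */
    static inline int32x4_t mask_if_equal(int32x4_t a, int32x4_t b) {
      return vreinterpretq_s32_u32(vceqq_s32(a, b));
    }
    static inline int32x4_t mask_if_less_than(int32x4_t a, int32x4_t b) {
      return vreinterpretq_s32_u32(vcltq_s32(a, b));
    }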
|
/external/XNNPACK/src/qs8-gavgpool/gen/ |
D | 7x-minmax-neon-c32-acc2.c |
    133  const int32x4_t vsgnacc0123 = vreinterpretq_s32_u32(vcltq_s32(vacc0123, vmovq_n_s32(0)));  in xnn_qs8_gavgpool_minmax_ukernel_7x__neon_c32_acc2()
    134  const int32x4_t vsgnacc4567 = vreinterpretq_s32_u32(vcltq_s32(vacc4567, vmovq_n_s32(0)));  in xnn_qs8_gavgpool_minmax_ukernel_7x__neon_c32_acc2()
    135  const int32x4_t vsgnacc89AB = vreinterpretq_s32_u32(vcltq_s32(vacc89AB, vmovq_n_s32(0)));  in xnn_qs8_gavgpool_minmax_ukernel_7x__neon_c32_acc2()
    136  const int32x4_t vsgnaccCDEF = vreinterpretq_s32_u32(vcltq_s32(vaccCDEF, vmovq_n_s32(0)));  in xnn_qs8_gavgpool_minmax_ukernel_7x__neon_c32_acc2()
    137  const int32x4_t vsgnaccGHIJ = vreinterpretq_s32_u32(vcltq_s32(vaccGHIJ, vmovq_n_s32(0)));  in xnn_qs8_gavgpool_minmax_ukernel_7x__neon_c32_acc2()
    138  const int32x4_t vsgnaccKLMN = vreinterpretq_s32_u32(vcltq_s32(vaccKLMN, vmovq_n_s32(0)));  in xnn_qs8_gavgpool_minmax_ukernel_7x__neon_c32_acc2()
    139  const int32x4_t vsgnaccOPQR = vreinterpretq_s32_u32(vcltq_s32(vaccOPQR, vmovq_n_s32(0)));  in xnn_qs8_gavgpool_minmax_ukernel_7x__neon_c32_acc2()
    140  const int32x4_t vsgnaccSTUV = vreinterpretq_s32_u32(vcltq_s32(vaccSTUV, vmovq_n_s32(0)));  in xnn_qs8_gavgpool_minmax_ukernel_7x__neon_c32_acc2()
    299  const int32x4_t vsgnacc0123 = vreinterpretq_s32_u32(vcltq_s32(vacc0123, vmovq_n_s32(0)));  in xnn_qs8_gavgpool_minmax_ukernel_7x__neon_c32_acc2()
    300  const int32x4_t vsgnacc4567 = vreinterpretq_s32_u32(vcltq_s32(vacc4567, vmovq_n_s32(0)));  in xnn_qs8_gavgpool_minmax_ukernel_7x__neon_c32_acc2()
|
D | 7x-minmax-neon-c24-acc2.c |
    118  const int32x4_t vsgnacc0123 = vreinterpretq_s32_u32(vcltq_s32(vacc0123, vmovq_n_s32(0)));  in xnn_qs8_gavgpool_minmax_ukernel_7x__neon_c24_acc2()
    119  const int32x4_t vsgnacc4567 = vreinterpretq_s32_u32(vcltq_s32(vacc4567, vmovq_n_s32(0)));  in xnn_qs8_gavgpool_minmax_ukernel_7x__neon_c24_acc2()
    120  const int32x4_t vsgnacc89AB = vreinterpretq_s32_u32(vcltq_s32(vacc89AB, vmovq_n_s32(0)));  in xnn_qs8_gavgpool_minmax_ukernel_7x__neon_c24_acc2()
    121  const int32x4_t vsgnaccCDEF = vreinterpretq_s32_u32(vcltq_s32(vaccCDEF, vmovq_n_s32(0)));  in xnn_qs8_gavgpool_minmax_ukernel_7x__neon_c24_acc2()
    122  const int32x4_t vsgnaccGHIJ = vreinterpretq_s32_u32(vcltq_s32(vaccGHIJ, vmovq_n_s32(0)));  in xnn_qs8_gavgpool_minmax_ukernel_7x__neon_c24_acc2()
    123  const int32x4_t vsgnaccKLMN = vreinterpretq_s32_u32(vcltq_s32(vaccKLMN, vmovq_n_s32(0)));  in xnn_qs8_gavgpool_minmax_ukernel_7x__neon_c24_acc2()
    256  const int32x4_t vsgnacc0123 = vreinterpretq_s32_u32(vcltq_s32(vacc0123, vmovq_n_s32(0)));  in xnn_qs8_gavgpool_minmax_ukernel_7x__neon_c24_acc2()
    257  const int32x4_t vsgnacc4567 = vreinterpretq_s32_u32(vcltq_s32(vacc4567, vmovq_n_s32(0)));  in xnn_qs8_gavgpool_minmax_ukernel_7x__neon_c24_acc2()
|
D | 7x-minmax-neon-c16-acc2.c |
    103  const int32x4_t vsgnacc0123 = vreinterpretq_s32_u32(vcltq_s32(vacc0123, vmovq_n_s32(0)));  in xnn_qs8_gavgpool_minmax_ukernel_7x__neon_c16_acc2()
    104  const int32x4_t vsgnacc4567 = vreinterpretq_s32_u32(vcltq_s32(vacc4567, vmovq_n_s32(0)));  in xnn_qs8_gavgpool_minmax_ukernel_7x__neon_c16_acc2()
    105  const int32x4_t vsgnacc89AB = vreinterpretq_s32_u32(vcltq_s32(vacc89AB, vmovq_n_s32(0)));  in xnn_qs8_gavgpool_minmax_ukernel_7x__neon_c16_acc2()
    106  const int32x4_t vsgnaccCDEF = vreinterpretq_s32_u32(vcltq_s32(vaccCDEF, vmovq_n_s32(0)));  in xnn_qs8_gavgpool_minmax_ukernel_7x__neon_c16_acc2()
    208  const int32x4_t vsgnacc0123 = vreinterpretq_s32_u32(vcltq_s32(vacc0123, vmovq_n_s32(0)));  in xnn_qs8_gavgpool_minmax_ukernel_7x__neon_c16_acc2()
    209  const int32x4_t vsgnacc4567 = vreinterpretq_s32_u32(vcltq_s32(vacc4567, vmovq_n_s32(0)));  in xnn_qs8_gavgpool_minmax_ukernel_7x__neon_c16_acc2()
|
D | 7x-minmax-neon-c8-acc2.c |
    88  const int32x4_t vsgnacc0123 = vreinterpretq_s32_u32(vcltq_s32(vacc0123, vmovq_n_s32(0)));  in xnn_qs8_gavgpool_minmax_ukernel_7x__neon_c8_acc2()
    89  const int32x4_t vsgnacc4567 = vreinterpretq_s32_u32(vcltq_s32(vacc4567, vmovq_n_s32(0)));  in xnn_qs8_gavgpool_minmax_ukernel_7x__neon_c8_acc2()
    165  const int32x4_t vsgnacc0123 = vreinterpretq_s32_u32(vcltq_s32(vacc0123, vmovq_n_s32(0)));  in xnn_qs8_gavgpool_minmax_ukernel_7x__neon_c8_acc2()
    166  const int32x4_t vsgnacc4567 = vreinterpretq_s32_u32(vcltq_s32(vacc4567, vmovq_n_s32(0)));  in xnn_qs8_gavgpool_minmax_ukernel_7x__neon_c8_acc2()
|
D | 7p7x-minmax-neon-c16-acc2.c |
    223  const int32x4_t vsgnacc0123 = vreinterpretq_s32_u32(vcltq_s32(vacc0123, vmovq_n_s32(0)));  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c16_acc2()
    224  const int32x4_t vsgnacc4567 = vreinterpretq_s32_u32(vcltq_s32(vacc4567, vmovq_n_s32(0)));  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c16_acc2()
    225  const int32x4_t vsgnacc89AB = vreinterpretq_s32_u32(vcltq_s32(vacc89AB, vmovq_n_s32(0)));  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c16_acc2()
    226  const int32x4_t vsgnaccCDEF = vreinterpretq_s32_u32(vcltq_s32(vaccCDEF, vmovq_n_s32(0)));  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c16_acc2()
    331  const int32x4_t vsgnacc0123 = vreinterpretq_s32_u32(vcltq_s32(vacc0123, vmovq_n_s32(0)));  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c16_acc2()
    332  const int32x4_t vsgnacc4567 = vreinterpretq_s32_u32(vcltq_s32(vacc4567, vmovq_n_s32(0)));  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c16_acc2()
|
D | 7p7x-minmax-neon-c32-acc2.c |
    390  const int32x4_t vsgnacc0123 = vreinterpretq_s32_u32(vcltq_s32(vacc0123, vmovq_n_s32(0)));  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c32_acc2()
    391  const int32x4_t vsgnacc4567 = vreinterpretq_s32_u32(vcltq_s32(vacc4567, vmovq_n_s32(0)));  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c32_acc2()
    392  const int32x4_t vsgnacc89AB = vreinterpretq_s32_u32(vcltq_s32(vacc89AB, vmovq_n_s32(0)));  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c32_acc2()
    393  const int32x4_t vsgnaccCDEF = vreinterpretq_s32_u32(vcltq_s32(vaccCDEF, vmovq_n_s32(0)));  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c32_acc2()
    394  const int32x4_t vsgnaccGHIJ = vreinterpretq_s32_u32(vcltq_s32(vaccGHIJ, vmovq_n_s32(0)));  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c32_acc2()
    395  const int32x4_t vsgnaccKLMN = vreinterpretq_s32_u32(vcltq_s32(vaccKLMN, vmovq_n_s32(0)));  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c32_acc2()
    396  const int32x4_t vsgnaccOPQR = vreinterpretq_s32_u32(vcltq_s32(vaccOPQR, vmovq_n_s32(0)));  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c32_acc2()
    397  const int32x4_t vsgnaccSTUV = vreinterpretq_s32_u32(vcltq_s32(vaccSTUV, vmovq_n_s32(0)));  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c32_acc2()
    559  const int32x4_t vsgnacc0123 = vreinterpretq_s32_u32(vcltq_s32(vacc0123, vmovq_n_s32(0)));  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c32_acc2()
    560  const int32x4_t vsgnacc4567 = vreinterpretq_s32_u32(vcltq_s32(vacc4567, vmovq_n_s32(0)));  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c32_acc2()
|
D | 7p7x-minmax-neon-c24-acc2.c |
    337  const int32x4_t vsgnacc0123 = vreinterpretq_s32_u32(vcltq_s32(vacc0123, vmovq_n_s32(0)));  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c24_acc2()
    338  const int32x4_t vsgnacc4567 = vreinterpretq_s32_u32(vcltq_s32(vacc4567, vmovq_n_s32(0)));  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c24_acc2()
    339  const int32x4_t vsgnacc89AB = vreinterpretq_s32_u32(vcltq_s32(vacc89AB, vmovq_n_s32(0)));  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c24_acc2()
    340  const int32x4_t vsgnaccCDEF = vreinterpretq_s32_u32(vcltq_s32(vaccCDEF, vmovq_n_s32(0)));  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c24_acc2()
    341  const int32x4_t vsgnaccGHIJ = vreinterpretq_s32_u32(vcltq_s32(vaccGHIJ, vmovq_n_s32(0)));  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c24_acc2()
    342  const int32x4_t vsgnaccKLMN = vreinterpretq_s32_u32(vcltq_s32(vaccKLMN, vmovq_n_s32(0)));  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c24_acc2()
    478  const int32x4_t vsgnacc0123 = vreinterpretq_s32_u32(vcltq_s32(vacc0123, vmovq_n_s32(0)));  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c24_acc2()
    479  const int32x4_t vsgnacc4567 = vreinterpretq_s32_u32(vcltq_s32(vacc4567, vmovq_n_s32(0)));  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c24_acc2()
|
D | 7p7x-minmax-neon-c8-acc2.c |
    170  const int32x4_t vsgnacc0123 = vreinterpretq_s32_u32(vcltq_s32(vacc0123, vmovq_n_s32(0)));  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c8_acc2()
    171  const int32x4_t vsgnacc4567 = vreinterpretq_s32_u32(vcltq_s32(vacc4567, vmovq_n_s32(0)));  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c8_acc2()
    250  const int32x4_t vsgnacc0123 = vreinterpretq_s32_u32(vcltq_s32(vacc0123, vmovq_n_s32(0)));  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c8_acc2()
    251  const int32x4_t vsgnacc4567 = vreinterpretq_s32_u32(vcltq_s32(vacc4567, vmovq_n_s32(0)));  in xnn_qs8_gavgpool_minmax_ukernel_7p7x__neon_c8_acc2()
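Every one of these generated XNNPACK kernels builds a per-lane sign mask the same way: vcltq_s32(vacc, 0) sets a lane to all ones exactly when the accumulator is negative, and the reinterpret turns that into the signed value -1, which the kernels then fold into the widened products as a rounding correction during requantization. A sketch of the mask step alone, under that reading of the generated code:

    #include <arm_neon.h>

    /* Sketch: -1 in lanes where the accumulator is negative, 0 elsewhere. */
    static inline int32x4_t sign_mask_s32(int32x4_t vacc) {
      return vreinterpretq_s32_u32(vcltq_s32(vacc, vmovq_n_s32(0)));
    }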
|
/external/XNNPACK/src/qu8-requantization/ |
D | precise-neon.c |
    82  …const int64x2_t x23_adjusted_product = vaddw_high_s32(x23_product, vreinterpretq_s32_u32(x_neg_mas…  in xnn_qu8_requantize_precise__neon()
    84  …const int64x2_t y23_adjusted_product = vaddw_high_s32(y23_product, vreinterpretq_s32_u32(y_neg_mas…  in xnn_qu8_requantize_precise__neon()
    86  …const int64x2_t z23_adjusted_product = vaddw_high_s32(z23_product, vreinterpretq_s32_u32(z_neg_mas…  in xnn_qu8_requantize_precise__neon()
    88  …const int64x2_t w23_adjusted_product = vaddw_high_s32(w23_product, vreinterpretq_s32_u32(w_neg_mas…  in xnn_qu8_requantize_precise__neon()
|
/external/XNNPACK/src/qs8-requantization/ |
D | precise-neon.c |
    82  …const int64x2_t x23_adjusted_product = vaddw_high_s32(x23_product, vreinterpretq_s32_u32(x_neg_mas…  in xnn_qs8_requantize_precise__neon()
    84  …const int64x2_t y23_adjusted_product = vaddw_high_s32(y23_product, vreinterpretq_s32_u32(y_neg_mas…  in xnn_qs8_requantize_precise__neon()
    86  …const int64x2_t z23_adjusted_product = vaddw_high_s32(z23_product, vreinterpretq_s32_u32(z_neg_mas…  in xnn_qs8_requantize_precise__neon()
    88  …const int64x2_t w23_adjusted_product = vaddw_high_s32(w23_product, vreinterpretq_s32_u32(w_neg_mas…  in xnn_qs8_requantize_precise__neon()
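The two precise-requantization kernels (qu8 and qs8) use a 64-bit variant of the same sign-mask idea: the mask (all ones for negative inputs) is reinterpreted as int32x4_t, then its high half is sign-extended and added to the 64-bit products with vaddw_high_s32, so each product belonging to a negative input is decremented by one before the rounding shift. A sketch of that adjustment, assuming an AArch64 target (vaddw_high_s32 is A64-only) and illustrative names:

    #include <arm_neon.h>

    /* Sketch (AArch64): decrement the upper two 64-bit products by one for
     * lanes whose 32-bit input was negative, ahead of a rounding shift. */
    static inline int64x2_t adjust_products_hi(int64x2_t products_hi, int32x4_t x) {
      const uint32x4_t neg_mask = vcltq_s32(x, vmovq_n_s32(0)); /* all ones if x < 0 */
      return vaddw_high_s32(products_hi, vreinterpretq_s32_u32(neg_mask));
    }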
|
/external/XNNPACK/src/qu8-gavgpool/ |
D | 7x-minmax-neon-c8.c |
    86  const int32x4_t vneg_mask_lo = vreinterpretq_s32_u32(vcltq_s32(vacc_lo, vmovq_n_s32(0)));  in xnn_qu8_gavgpool_minmax_ukernel_7x__neon_c8()
    87  const int32x4_t vneg_mask_hi = vreinterpretq_s32_u32(vcltq_s32(vacc_hi, vmovq_n_s32(0)));  in xnn_qu8_gavgpool_minmax_ukernel_7x__neon_c8()
    156  const int32x4_t vneg_mask_lo = vreinterpretq_s32_u32(vcltq_s32(vacc_lo, vmovq_n_s32(0)));  in xnn_qu8_gavgpool_minmax_ukernel_7x__neon_c8()
    157  const int32x4_t vneg_mask_hi = vreinterpretq_s32_u32(vcltq_s32(vacc_hi, vmovq_n_s32(0)));  in xnn_qu8_gavgpool_minmax_ukernel_7x__neon_c8()
|
D | 7p7x-minmax-neon-c8.c |
    162  const int32x4_t vneg_mask_lo = vreinterpretq_s32_u32(vcltq_s32(vacc_lo, vmovq_n_s32(0)));  in xnn_qu8_gavgpool_minmax_ukernel_7p7x__neon_c8()
    163  const int32x4_t vneg_mask_hi = vreinterpretq_s32_u32(vcltq_s32(vacc_hi, vmovq_n_s32(0)));  in xnn_qu8_gavgpool_minmax_ukernel_7p7x__neon_c8()
    234  const int32x4_t vneg_mask_lo = vreinterpretq_s32_u32(vcltq_s32(vacc_lo, vmovq_n_s32(0)));  in xnn_qu8_gavgpool_minmax_ukernel_7p7x__neon_c8()
    235  const int32x4_t vneg_mask_hi = vreinterpretq_s32_u32(vcltq_s32(vacc_hi, vmovq_n_s32(0)));  in xnn_qu8_gavgpool_minmax_ukernel_7p7x__neon_c8()
|
/external/libaom/libaom/av1/common/arm/ |
D | selfguided_neon.c |
    114  store_s32_4x4(src2, buf_stride, vreinterpretq_s32_u32(p0),  in calc_ab_fast_internal_common()
    115  vreinterpretq_s32_u32(p1), vreinterpretq_s32_u32(p2),  in calc_ab_fast_internal_common()
    116  vreinterpretq_s32_u32(p3));  in calc_ab_fast_internal_common()
    255  store_s32_4x4(dst2, buf_stride, vreinterpretq_s32_u32(p0),  in calc_ab_internal_common()
    256  vreinterpretq_s32_u32(p1), vreinterpretq_s32_u32(p2),  in calc_ab_internal_common()
    257  vreinterpretq_s32_u32(p3));  in calc_ab_internal_common()
    258  store_s32_4x4(dst2 + 4, buf_stride, vreinterpretq_s32_u32(p4),  in calc_ab_internal_common()
    259  vreinterpretq_s32_u32(p5), vreinterpretq_s32_u32(p6),  in calc_ab_internal_common()
    260  vreinterpretq_s32_u32(p7));  in calc_ab_internal_common()
    954  *a0 = vreinterpretq_s32_u32(  in cross_sum_inp_u16()
    [all …]
|
D | jnt_convolve_neon.c |
    44  dst0 = vsubq_s32(vreinterpretq_s32_u32(sum0), vmovl_s16(sub_const_vec));  in compute_avg_4x1()
    89  dst0 = vsubq_s32(vreinterpretq_s32_u32(sum0), sub_const_vec);  in compute_avg_8x1()
    90  dst2 = vsubq_s32(vreinterpretq_s32_u32(sum2), sub_const_vec);  in compute_avg_8x1()
    149  dst0 = vsubq_s32(vreinterpretq_s32_u32(sum0), const_vec);  in compute_avg_4x4()
    150  dst1 = vsubq_s32(vreinterpretq_s32_u32(sum1), const_vec);  in compute_avg_4x4()
    151  dst2 = vsubq_s32(vreinterpretq_s32_u32(sum2), const_vec);  in compute_avg_4x4()
    152  dst3 = vsubq_s32(vreinterpretq_s32_u32(sum3), const_vec);  in compute_avg_4x4()
    245  dst0 = vsubq_s32(vreinterpretq_s32_u32(sum0), sub_const_vec);  in compute_avg_8x4()
    246  dst1 = vsubq_s32(vreinterpretq_s32_u32(sum1), sub_const_vec);  in compute_avg_8x4()
    247  dst2 = vsubq_s32(vreinterpretq_s32_u32(sum2), sub_const_vec);  in compute_avg_8x4()
    [all …]
|
/external/XNNPACK/src/qu8-avgpool/ |
D | 9x-minmax-neon-c8.c |
    141  const int32x4_t vneg_mask_lo = vreinterpretq_s32_u32(vcltq_s32(vacc_lo, vmovq_n_s32(0)));  in xnn_qu8_avgpool_minmax_ukernel_9x__neon_c8()
    142  const int32x4_t vneg_mask_hi = vreinterpretq_s32_u32(vcltq_s32(vacc_hi, vmovq_n_s32(0)));  in xnn_qu8_avgpool_minmax_ukernel_9x__neon_c8()
    214  const int32x4_t vneg_mask_lo = vreinterpretq_s32_u32(vcltq_s32(vacc_lo, vmovq_n_s32(0)));  in xnn_qu8_avgpool_minmax_ukernel_9x__neon_c8()
    215  const int32x4_t vneg_mask_hi = vreinterpretq_s32_u32(vcltq_s32(vacc_hi, vmovq_n_s32(0)));  in xnn_qu8_avgpool_minmax_ukernel_9x__neon_c8()
|
D | 9p8x-minmax-neon-c8.c |
    285  const int32x4_t vneg_mask_lo = vreinterpretq_s32_u32(vcltq_s32(vacc_lo, vmovq_n_s32(0)));  in xnn_qu8_avgpool_minmax_ukernel_9p8x__neon_c8()
    286  const int32x4_t vneg_mask_hi = vreinterpretq_s32_u32(vcltq_s32(vacc_hi, vmovq_n_s32(0)));  in xnn_qu8_avgpool_minmax_ukernel_9p8x__neon_c8()
    359  const int32x4_t vneg_mask_lo = vreinterpretq_s32_u32(vcltq_s32(vacc_lo, vmovq_n_s32(0)));  in xnn_qu8_avgpool_minmax_ukernel_9p8x__neon_c8()
    360  const int32x4_t vneg_mask_hi = vreinterpretq_s32_u32(vcltq_s32(vacc_hi, vmovq_n_s32(0)));  in xnn_qu8_avgpool_minmax_ukernel_9p8x__neon_c8()
|
/external/webp/src/dsp/ |
D | rescaler_neon.c |
    41  vreinterpretq_u32_s32(vqrdmulhq_s32(vreinterpretq_s32_u32((A)), (B)))
    43  vreinterpretq_u32_s32(vqdmulhq_s32(vreinterpretq_s32_u32((A)), (B)))
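In the libwebp rescaler the reinterprets bracket a saturating doubling multiply: values held in unsigned registers are bit-cast to int32x4_t so that vqrdmulhq_s32 (rounding) or vqdmulhq_s32 (non-rounding) can be applied, and the result is cast straight back. A sketch of the rounding variant as an inline function rather than a macro (names here are placeholders, not libwebp's):

    #include <arm_neon.h>

    /* Sketch: fixed-point multiply of an unsigned value by a signed
     * multiplier via the signed saturating-rounding-doubling intrinsic.
     * Both casts are pure bit reinterpretations. */
    static inline uint32x4_t mult_fix_rounding(uint32x4_t a, int32x4_t mult) {
      return vreinterpretq_u32_s32(vqrdmulhq_s32(vreinterpretq_s32_u32(a), mult));
    }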
|
/external/skia/include/private/ |
D | SkNx_neon.h |
    531  return vreinterpretq_s32_u32(vceqq_s32(fVec, o.fVec));
    534  return vreinterpretq_s32_u32(vcltq_s32(fVec, o.fVec));
    537  return vreinterpretq_s32_u32(vcgtq_s32(fVec, o.fVec));
    640  return vreinterpretq_s32_u32(SkNx_cast<uint32_t>(src).fVec);
    696  return vreinterpretq_s32_u32(vmovl_u16(src.fVec));
    704  return vreinterpretq_s32_u32(src.fVec);
|
/external/skqp/include/private/ |
D | SkNx_neon.h |
    553  return vreinterpretq_s32_u32(vceqq_s32(fVec, o.fVec));
    556  return vreinterpretq_s32_u32(vcltq_s32(fVec, o.fVec));
    559  return vreinterpretq_s32_u32(vcgtq_s32(fVec, o.fVec));
    662  return vreinterpretq_s32_u32(SkNx_cast<uint32_t>(src).fVec);
    718  return vreinterpretq_s32_u32(vmovl_u16(src.fVec));
    726  return vreinterpretq_s32_u32(src.fVec);
|