
Searched refs:vsubq_s32 (Results 1 – 25 of 39) sorted by relevance
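The matches below are all ARM NEON code. For context, vsubq_s32() is the NEON intrinsic for lane-wise subtraction of two int32x4_t vectors (four 32-bit lanes, wrapping on overflow; vqsubq_s32() is the saturating variant). A minimal standalone sketch, not taken from any of the files listed here:

#include <arm_neon.h>
#include <stdio.h>

int main(void) {
  const int32_t a_in[4] = { 10, 20, 30, 40 };
  const int32_t b_in[4] = { 1, 2, 3, 4 };
  int32_t out[4];

  const int32x4_t a = vld1q_s32(a_in);  /* load four lanes */
  const int32x4_t b = vld1q_s32(b_in);
  const int32x4_t d = vsubq_s32(a, b);  /* d[i] = a[i] - b[i] */

  vst1q_s32(out, d);                    /* store the result */
  printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]);  /* 9 18 27 36 */
  return 0;
}

Compile for a NEON-capable target (any AArch64 toolchain, or -mfpu=neon on 32-bit ARM).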

/external/libvpx/libvpx/vpx_dsp/arm/
highbd_idct16x16_add_neon.c
481 out[8].val[0] = vsubq_s32(step2[7].val[0], step2[8].val[0]); in highbd_idct16x16_add_stage7_dual()
482 out[8].val[1] = vsubq_s32(step2[7].val[1], step2[8].val[1]); in highbd_idct16x16_add_stage7_dual()
483 out[9].val[0] = vsubq_s32(step2[6].val[0], step2[9].val[0]); in highbd_idct16x16_add_stage7_dual()
484 out[9].val[1] = vsubq_s32(step2[6].val[1], step2[9].val[1]); in highbd_idct16x16_add_stage7_dual()
485 out[10].val[0] = vsubq_s32(step2[5].val[0], step2[10].val[0]); in highbd_idct16x16_add_stage7_dual()
486 out[10].val[1] = vsubq_s32(step2[5].val[1], step2[10].val[1]); in highbd_idct16x16_add_stage7_dual()
487 out[11].val[0] = vsubq_s32(step2[4].val[0], step2[11].val[0]); in highbd_idct16x16_add_stage7_dual()
488 out[11].val[1] = vsubq_s32(step2[4].val[1], step2[11].val[1]); in highbd_idct16x16_add_stage7_dual()
489 out[12].val[0] = vsubq_s32(step2[3].val[0], step2[12].val[0]); in highbd_idct16x16_add_stage7_dual()
490 out[12].val[1] = vsubq_s32(step2[3].val[1], step2[12].val[1]); in highbd_idct16x16_add_stage7_dual()
[all …]
highbd_idct_neon.h
44 b1 = vsubq_s32(a[0], a[2]); in idct4x4_16_kernel_bd10()
57 a[2] = vsubq_s32(b1, b2); in idct4x4_16_kernel_bd10()
58 a[3] = vsubq_s32(b0, b3); in idct4x4_16_kernel_bd10()
68 b1 = vsubq_s32(a[0], a[2]); in idct4x4_16_kernel_bd12()
95 a[2] = vsubq_s32(b1, b2); in idct4x4_16_kernel_bd12()
96 a[3] = vsubq_s32(b0, b3); in idct4x4_16_kernel_bd12()
207 step2[5] = vsubq_s32(step1[4], step1[5]); in idct8x8_64_half1d_bd10()
208 step2[6] = vsubq_s32(step1[7], step1[6]); in idct8x8_64_half1d_bd10()
214 step1[2] = vsubq_s32(step2[1], step2[2]); in idct8x8_64_half1d_bd10()
215 step1[3] = vsubq_s32(step2[0], step2[3]); in idct8x8_64_half1d_bd10()
[all …]
highbd_idct8x8_add_neon.c
99 step2[5] = vsubq_s32(step1[4], step1[5]); in idct8x8_12_half1d_bd10()
100 step2[6] = vsubq_s32(step1[7], step1[6]); in idct8x8_12_half1d_bd10()
106 step1[2] = vsubq_s32(step2[1], step2[2]); in idct8x8_12_half1d_bd10()
107 step1[3] = vsubq_s32(step2[1], step2[3]); in idct8x8_12_half1d_bd10()
120 *io4 = vsubq_s32(step1[3], step2[4]); in idct8x8_12_half1d_bd10()
121 *io5 = vsubq_s32(step1[2], step1[5]); in idct8x8_12_half1d_bd10()
122 *io6 = vsubq_s32(step1[1], step1[6]); in idct8x8_12_half1d_bd10()
123 *io7 = vsubq_s32(step1[0], step2[7]); in idct8x8_12_half1d_bd10()
188 step2[5] = vsubq_s32(step1[4], step1[5]); in idct8x8_12_half1d_bd12()
189 step2[6] = vsubq_s32(step1[7], step1[6]); in idct8x8_12_half1d_bd12()
[all …]
idct_neon.h
74 t.val[0] = vsubq_s32(s0.val[0], s1.val[0]); in highbd_idct_sub_dual()
75 t.val[1] = vsubq_s32(s0.val[1], s1.val[1]); in highbd_idct_sub_dual()
200 t[0] = vsubq_s32(a.val[0], b.val[0]); in sub_multiply_shift_and_narrow_s32_dual()
201 t[1] = vsubq_s32(a.val[1], b.val[1]); in sub_multiply_shift_and_narrow_s32_dual()
303 c[1] = vsubq_s32(c[0], c[2]); in idct4x4_16_kernel_bd8()
/external/libvpx/libvpx/vp9/common/arm/neon/
vp9_iht_neon.h
47 s[1] = vsubq_s32(s[1], s[4]); in iadst4()
48 s[1] = vsubq_s32(s[1], s[6]); in iadst4()
56 output[3] = vsubq_s32(output[3], s[3]); in iadst4()
71 t1[0] = vsubq_s32(x0_lo, x1_lo); in iadst_half_butterfly_neon()
72 t1[1] = vsubq_s32(x0_hi, x1_hi); in iadst_half_butterfly_neon()
89 t1[0] = vsubq_s32(x0_lo, x1_lo); in iadst_half_butterfly_neg_neon()
90 t1[1] = vsubq_s32(x0_hi, x1_hi); in iadst_half_butterfly_neg_neon()
107 t1[0] = vsubq_s32(x0_lo, x1_lo); in iadst_half_butterfly_pos_neon()
108 t1[1] = vsubq_s32(x0_hi, x1_hi); in iadst_half_butterfly_pos_neon()
190 sum[0] = vsubq_s32(in0[0], in1[0]); in sub_dct_const_round_shift_low_8()
[all …]
vp9_highbd_iht8x8_add_neon.c
24 const int32x4_t sub = vsubq_s32(x[0], x[1]); in highbd_iadst_half_butterfly_neon()
141 x[2] = vsubq_s32(t[0], t[2]); in highbd_iadst8()
142 x[3] = vsubq_s32(t[1], t[3]); in highbd_iadst8()
vp9_highbd_iht16x16_add_neon.c
99 out.val[0] = vsubq_s32(in0.val[0], in1.val[0]); in vsubq_s32_dual()
100 out.val[1] = vsubq_s32(in0.val[1], in1.val[1]); in vsubq_s32_dual()
vp9_highbd_iht4x4_add_neon.c
43 s7 = vsubq_s32(io[0], io[2]); in highbd_iadst4()
/external/libvpx/libvpx/vp8/encoder/arm/neon/
vp8_shortwalsh4x4_neon.c
86 q2s32 = vsubq_s32(q11s32, q10s32); in vp8_short_walsh4x4_neon()
87 q3s32 = vsubq_s32(q8s32, q9s32); in vp8_short_walsh4x4_neon()
99 q0s32 = vsubq_s32(q0s32, q8s32); in vp8_short_walsh4x4_neon()
100 q1s32 = vsubq_s32(q1s32, q9s32); in vp8_short_walsh4x4_neon()
101 q2s32 = vsubq_s32(q2s32, q10s32); in vp8_short_walsh4x4_neon()
102 q3s32 = vsubq_s32(q3s32, q11s32); in vp8_short_walsh4x4_neon()
/external/libxaac/decoder/armv7/
ixheaacd_fft32x32_ld.s
200 VSUB.S32 q7, q9, q6 @c_data6_r=vsubq_s32(b_data4_r,b_data6_i)@
203 VSUB.S32 q6, q4, q5 @c_data4_i=vsubq_s32(b_data4_i,b_data6_r)@
212 VSUB.S32 q5, q8, q1 @c_data2_r=vsubq_s32(b_data0_r,b_data2_r)@
218 VSUB.S32 q0, q0, q2 @c_data2_i=vsubq_s32(b_data0_i,b_data2_i)@
264 VSUB.S32 q2, q7, q15 @c_data3_r=vsubq_s32(b_data1_r,b_data3_r)@
266 VSUB.S32 q15, q14, q10 @c_data3_i=vsubq_s32(b_data1_i,b_data3_i)@
269 VSUB.S32 q10, q3, q12 @c_data5_i=vsubq_s32(b_data5_r,b_data5_i)@
271 VSUB.S32 q12, q13, q1 @c_data7_i=vsubq_s32(b_data7_r,b_data7_i)@
274 VSUB.S32 q13, q14, q12 @b_data7_i=vsubq_s32(c_data5_r,c_data7_i)@
275 VSUB.S32 q12, q3, q10 @b_data5_i=vsubq_s32(c_data7_r,c_data5_i)@
[all …]
/external/libhevc/common/arm/
ihevc_resi_trans_neon.c
143 o_03 = vsubq_s32(temp1, temp4); in ihevc_resi_trans_4x4_neon()
144 o_12 = vsubq_s32(temp3, temp2); in ihevc_resi_trans_4x4_neon()
147 e_0_s_e_1 = vsubq_s32(e_03, e_12); in ihevc_resi_trans_4x4_neon()
288 src2_4x32b = vsubq_s32(src2_4x32b, c3_4x32b); in ihevc_resi_trans_4x4_ttype1_neon()
294 src3_4x32b = vsubq_s32(src3_4x32b, c1_4x32b); in ihevc_resi_trans_4x4_ttype1_neon()
318 c2_4x32b = vsubq_s32(src0_4x32b, src1_4x32b); /* r0-r1 */ in ihevc_resi_trans_4x4_ttype1_neon()
322 src1_4x32b = vsubq_s32(src1_4x32b, src3_4x32b); /* r0+r1-r3 */ in ihevc_resi_trans_4x4_ttype1_neon()
326 src2_4x32b = vsubq_s32(src2_4x32b, c3_4x32b); in ihevc_resi_trans_4x4_ttype1_neon()
332 src3_4x32b = vsubq_s32(src3_4x32b, c1_4x32b); in ihevc_resi_trans_4x4_ttype1_neon()
592 o0_2 = vsubq_s32(a0.val[0], a3.val[1]); /*B0 - B7*/ in ihevc_resi_trans_8x8_neon()
[all …]
ihevc_resi_trans_neon_32x32.c
1163 vsubq_s32(temp_data[0], temp_data[7]); /*R2(9-16) - R2(24-17)*/ in ihevc_resi_trans_32x32_neon()
1165 vsubq_s32(temp_data[1], temp_data[6]); /*R2(1- 8) - R2(32-25)*/ in ihevc_resi_trans_32x32_neon()
1167 vsubq_s32(temp_data[2], temp_data[5]); /*R1(9-16) - R1(24-17)*/ in ihevc_resi_trans_32x32_neon()
1169 vsubq_s32(temp_data[3], temp_data[4]); /*R1(1- 8) - R1(32-25)*/ in ihevc_resi_trans_32x32_neon()
1187 const int32x4_t eo1 = vsubq_s32(e1, e2); /*E2(1- 8) - E2(16-9)*/ in ihevc_resi_trans_32x32_neon()
1188 const int32x4_t eo0 = vsubq_s32(e0, e3); /*E1(1- 8) - E1(16-9)*/ in ihevc_resi_trans_32x32_neon()
1191 const int32x4_t eeo = vsubq_s32(ee0, ee1); //Q5 in ihevc_resi_trans_32x32_neon()
/external/XNNPACK/src/requantization/
fp32-neon.c
95 …const int32x4_t x_biased = vsubq_s32(vreinterpretq_s32_f32(vaddq_f32(x_clamped, vfmagic)), vimagic… in xnn_requantize_fp32__neon()
96 …const int32x4_t y_biased = vsubq_s32(vreinterpretq_s32_f32(vaddq_f32(y_clamped, vfmagic)), vimagic… in xnn_requantize_fp32__neon()
97 …const int32x4_t z_biased = vsubq_s32(vreinterpretq_s32_f32(vaddq_f32(z_clamped, vfmagic)), vimagic… in xnn_requantize_fp32__neon()
98 …const int32x4_t w_biased = vsubq_s32(vreinterpretq_s32_f32(vaddq_f32(w_clamped, vfmagic)), vimagic… in xnn_requantize_fp32__neon()
/external/libopus/silk/arm/
NSQ_del_dec_neon_intr.c
656 tmp1_s32x4 = vsubq_s32( vld1q_s32( psDelDec->sAR2_Q14[ 1 ] ), tmp2_s32x4 ); in silk_noise_shape_quantizer_del_dec_neon()
665 tmp2_s32x4 = vsubq_s32( vld1q_s32( psDelDec->sAR2_Q14[ j + 0 ] ), tmp1_s32x4 ); in silk_noise_shape_quantizer_del_dec_neon()
670 tmp1_s32x4 = vsubq_s32( vld1q_s32( psDelDec->sAR2_Q14[ j + 1 ] ), tmp2_s32x4 ); in silk_noise_shape_quantizer_del_dec_neon()
689 tmp1_s32x4 = vsubq_s32( tmp2_s32x4, tmp1_s32x4 ); /* Q13 */ in silk_noise_shape_quantizer_del_dec_neon()
691 … tmp1_s32x4 = vsubq_s32( vdupq_n_s32( x_Q10[ i ] ), tmp1_s32x4 ); /* residual error Q10 */ in silk_noise_shape_quantizer_del_dec_neon()
696 tmp1_s32x4 = vsubq_s32( tmp1_s32x4, sign_s32x4 ); in silk_noise_shape_quantizer_del_dec_neon()
772 exc_Q14_s32x4 = vsubq_s32( exc_Q14_s32x4, sign_s32x4 ); in silk_noise_shape_quantizer_del_dec_neon()
779 tmp1_s32x4 = vsubq_s32( xq_Q14_s32x4, vshlq_n_s32( vdupq_n_s32( x_Q10[ i ] ), 4 ) ); in silk_noise_shape_quantizer_del_dec_neon()
781 sLF_AR_shp_Q14_s32x4 = vsubq_s32( tmp1_s32x4, n_AR_Q14_s32x4 ); in silk_noise_shape_quantizer_del_dec_neon()
782 … vst1q_s32( psSampleState[ 0 ].sLTP_shp_Q14, vsubq_s32( sLF_AR_shp_Q14_s32x4, n_LF_Q14_s32x4 ) ); in silk_noise_shape_quantizer_del_dec_neon()
[all …]
/external/libaom/libaom/av1/common/arm/
jnt_convolve_neon.c
44 dst0 = vsubq_s32(vreinterpretq_s32_u32(sum0), vmovl_s16(sub_const_vec)); in compute_avg_4x1()
89 dst0 = vsubq_s32(vreinterpretq_s32_u32(sum0), sub_const_vec); in compute_avg_8x1()
90 dst2 = vsubq_s32(vreinterpretq_s32_u32(sum2), sub_const_vec); in compute_avg_8x1()
149 dst0 = vsubq_s32(vreinterpretq_s32_u32(sum0), const_vec); in compute_avg_4x4()
150 dst1 = vsubq_s32(vreinterpretq_s32_u32(sum1), const_vec); in compute_avg_4x4()
151 dst2 = vsubq_s32(vreinterpretq_s32_u32(sum2), const_vec); in compute_avg_4x4()
152 dst3 = vsubq_s32(vreinterpretq_s32_u32(sum3), const_vec); in compute_avg_4x4()
245 dst0 = vsubq_s32(vreinterpretq_s32_u32(sum0), sub_const_vec); in compute_avg_8x4()
246 dst1 = vsubq_s32(vreinterpretq_s32_u32(sum1), sub_const_vec); in compute_avg_8x4()
247 dst2 = vsubq_s32(vreinterpretq_s32_u32(sum2), sub_const_vec); in compute_avg_8x4()
[all …]
convolve_neon.h
48 sum0 = vsubq_s32(sum0, round_vec); in wiener_convolve8_vert_4x8()
49 sum1 = vsubq_s32(sum1, round_vec); in wiener_convolve8_vert_4x8()
selfguided_neon.c
290 r789 = vsubq_s32(r6789, d6); in boxsum2_square_sum_calc()
921 res = vsubq_s32(vshlq_n_s32(vaddq_s32(fours, threes), 2), threes); in cross_sum_inp_s32()
1539 f00 = vsubq_s32(f00, u0); in av1_apply_selfguided_restoration_neon()
1540 f10 = vsubq_s32(f10, u4); in av1_apply_selfguided_restoration_neon()
1550 f00 = vsubq_s32(f00, u0); in av1_apply_selfguided_restoration_neon()
1551 f10 = vsubq_s32(f10, u4); in av1_apply_selfguided_restoration_neon()
/external/XNNPACK/src/math/
exp-neonfma-p5.c
63 veo = vsubq_s32(veo, ven); in xnn_math_f32_exp__neonfma_p5()
exp-neonfma-lut64-p2.c
81 veo = vsubq_s32(veo, ven); in xnn_math_f32_exp__neonfma_lut64_p2()
/external/libhevc/encoder/arm/
ihevce_hme_utils_neon.c
709 dst0_4x32b = vsubq_s32(src0_4x32b, off_4x32b); in hme_get_wt_inp_ctb_neon()
710 dst1_4x32b = vsubq_s32(src1_4x32b, off_4x32b); in hme_get_wt_inp_ctb_neon()
711 dst2_4x32b = vsubq_s32(src2_4x32b, off_4x32b); in hme_get_wt_inp_ctb_neon()
712 dst3_4x32b = vsubq_s32(src3_4x32b, off_4x32b); in hme_get_wt_inp_ctb_neon()
/external/webp/src/dsp/
enc_neon.c
520 tmp0.val[2] = vsubq_s32(a3, a2); in FTransformWHT_NEON()
521 tmp0.val[3] = vsubq_s32(a0, a1); in FTransformWHT_NEON()
531 const int32x4_t a2 = vsubq_s32(tmp1.val[1], tmp1.val[3]); in FTransformWHT_NEON()
532 const int32x4_t a3 = vsubq_s32(tmp1.val[0], tmp1.val[2]); in FTransformWHT_NEON()
dec_neon.c
1222 tmp.val[2] = vsubq_s32(a0, a1); in TransformWHT_NEON()
1223 tmp.val[3] = vsubq_s32(a3, a2); in TransformWHT_NEON()
1233 const int32x4_t a2 = vsubq_s32(tmp.val[1], tmp.val[2]); in TransformWHT_NEON()
1234 const int32x4_t a3 = vsubq_s32(dc, tmp.val[3]); in TransformWHT_NEON()
1238 tmp.val[2] = vsubq_s32(a0, a1); in TransformWHT_NEON()
1239 tmp.val[3] = vsubq_s32(a3, a2); in TransformWHT_NEON()
/external/webrtc/webrtc/modules/audio_coding/codecs/isac/fix/source/
transform_neon.c
300 int32x4_t outr0 = vsubq_s32(xr, yi); in TransformAndFindMaxNeon()
303 int32x4_t outi1 = vsubq_s32(yr, xi); in TransformAndFindMaxNeon()
/external/libopus/silk/fixed/arm/
warped_autocorrelation_FIX_neon_intr.c
55 int32x4_t t_s32x4 = vsubq_s32( state_QS0_s32x4, state_QS0_1_s32x4 ); in calc_state()
/external/gemmlowp/fixedpoint/
fixedpoint_neon.h
89 return vsubq_s32(a, b);
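Several of the matches above wrap the intrinsic so it operates on a register pair (int32x4x2_t), e.g. highbd_idct_sub_dual() in idct_neon.h and vsubq_s32_dual() in vp9_highbd_iht16x16_add_neon.c. A hedged sketch of that pattern, using an illustrative name rather than either library's actual API:

#include <arm_neon.h>

/* Illustrative "dual" subtract: apply vsubq_s32() to each half of an
 * int32x4x2_t pair, mirroring the helpers listed above. */
static inline int32x4x2_t sub_s32_dual(const int32x4x2_t a, const int32x4x2_t b) {
  int32x4x2_t out;
  out.val[0] = vsubq_s32(a.val[0], b.val[0]);  /* lanes 0..3 */
  out.val[1] = vsubq_s32(a.val[1], b.val[1]);  /* lanes 4..7 */
  return out;
}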
