
Searched refs:x3_16x4 (Results 1 – 5 of 5) sorted by relevance

/external/libavc/common/arm/svc/
isvc_resi_trans_quant_neon.c
71 int16x4_t x0_16x4, x1_16x4, x2_16x4, x3_16x4; in isvc_resi_trans_quant_4x4_neon() local
140 x3_16x4 = vreinterpret_s16_s32(x1_32x2x2.val[1]); in isvc_resi_trans_quant_4x4_neon()
142 xx0_16x4 = vadd_s16(x0_16x4, x3_16x4); in isvc_resi_trans_quant_4x4_neon()
145 xx3_16x4 = vsub_s16(x0_16x4, x3_16x4); in isvc_resi_trans_quant_4x4_neon()
153 x3_16x4 = vsub_s16(xx3_16x4, temp0_16x4); in isvc_resi_trans_quant_4x4_neon()
164 xx1_16x4x2 = vtrn_s16(x2_16x4, x3_16x4); in isvc_resi_trans_quant_4x4_neon()
173 x3_16x4 = vreinterpret_s16_s32(x1_32x2x2.val[1]); in isvc_resi_trans_quant_4x4_neon()
177 xx0_16x4 = vadd_s16(x0_16x4, x3_16x4); in isvc_resi_trans_quant_4x4_neon()
180 xx3_16x4 = vsub_s16(x0_16x4, x3_16x4); in isvc_resi_trans_quant_4x4_neon()
188 x3_16x4 = vsub_s16(xx3_16x4, temp0_16x4); in isvc_resi_trans_quant_4x4_neon()
[all …]
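
The first result is the encoder-side forward path: residual rows are combined with add/sub butterflies and the 4x4 block is transposed between passes with vtrn_s16 and vtrn_s32 plus vreinterpret, which is the pattern visible in the matched lines. The following is a minimal sketch of that idea, assuming the standard H.264 forward core transform; the helper and its names are illustrative, not the library's code, and the quantization step is omitted.

#include <arm_neon.h>

/* Illustrative only: one forward-butterfly pass over four int16x4_t rows,
 * followed by a 4x4 transpose built from vtrn_s16 / vtrn_s32. */
static inline void fwd_butterfly_and_transpose(int16x4_t r0, int16x4_t r1,
                                               int16x4_t r2, int16x4_t r3,
                                               int16x4_t out[4])
{
    /* Stage 1: sums and differences of outer and inner rows. */
    int16x4_t x0 = vadd_s16(r0, r3);                 /* r0 + r3 */
    int16x4_t x1 = vadd_s16(r1, r2);                 /* r1 + r2 */
    int16x4_t x2 = vsub_s16(r1, r2);                 /* r1 - r2 */
    int16x4_t x3 = vsub_s16(r0, r3);                 /* r0 - r3 */

    /* Stage 2: H.264 forward core transform rows. */
    int16x4_t y0 = vadd_s16(x0, x1);                 /* x0 + x1   */
    int16x4_t y1 = vadd_s16(vshl_n_s16(x3, 1), x2);  /* 2*x3 + x2 */
    int16x4_t y2 = vsub_s16(x0, x1);                 /* x0 - x1   */
    int16x4_t y3 = vsub_s16(x3, vshl_n_s16(x2, 1));  /* x3 - 2*x2 */

    /* 4x4 transpose: pair 16-bit lanes first, then 32-bit lanes. */
    int16x4x2_t t01 = vtrn_s16(y0, y1);
    int16x4x2_t t23 = vtrn_s16(y2, y3);
    int32x2x2_t u02 = vtrn_s32(vreinterpret_s32_s16(t01.val[0]),
                               vreinterpret_s32_s16(t23.val[0]));
    int32x2x2_t u13 = vtrn_s32(vreinterpret_s32_s16(t01.val[1]),
                               vreinterpret_s32_s16(t23.val[1]));

    out[0] = vreinterpret_s16_s32(u02.val[0]);
    out[1] = vreinterpret_s16_s32(u13.val[0]);
    out[2] = vreinterpret_s16_s32(u02.val[1]);
    out[3] = vreinterpret_s16_s32(u13.val[1]);
}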
isvc_iquant_itrans_recon_neon.c
66 int16x4_t x0_16x4, x1_16x4, x2_16x4, x3_16x4; in isvc_iquant_itrans_recon_4x4_neon() local
129 x3_16x4 = vadd_s16(q1_16x4, rq3_16x4); in isvc_iquant_itrans_recon_4x4_neon()
131 xx0_16x4 = vadd_s16(x0_16x4, x3_16x4); in isvc_iquant_itrans_recon_4x4_neon()
134 xx3_16x4 = vsub_s16(x0_16x4, x3_16x4); in isvc_iquant_itrans_recon_4x4_neon()
147 x3_16x4 = vreinterpret_s16_s32(x1_32x2x2.val[1]); in isvc_iquant_itrans_recon_4x4_neon()
153 vst1_s16(pi2_tmp + 12, x3_16x4); in isvc_iquant_itrans_recon_4x4_neon()
157 rq3_16x4 = vshr_n_s16(x3_16x4, 1); in isvc_iquant_itrans_recon_4x4_neon()
161 xx2_16x4 = vsub_s16(rq1_16x4, x3_16x4); in isvc_iquant_itrans_recon_4x4_neon()
167 x3_16x4 = vsub_s16(xx0_16x4, xx3_16x4); in isvc_iquant_itrans_recon_4x4_neon()
172 x3_16x4 = vrshr_n_s16(x3_16x4, 6); in isvc_iquant_itrans_recon_4x4_neon()
[all …]
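
The second result is the decoder-side inverse path. The matched lines show the inverse core transform butterfly (q0 ± q2, q1>>1 - q3, q1 + q3>>1), a transpose through the temporary buffer pi2_tmp, and a final rounding shift by 6 via vrshr_n_s16. Below is a minimal sketch of one such pass, under the assumption that the rounding shift belongs to the last (column) pass; the helper name is hypothetical.

#include <arm_neon.h>

/* Illustrative only: final inverse-butterfly pass over dequantized rows
 * q0..q3, ending with the (v + 32) >> 6 rounding shift seen above. */
static inline void inv_butterfly_pass(int16x4_t q0, int16x4_t q1,
                                      int16x4_t q2, int16x4_t q3,
                                      int16x4_t out[4])
{
    int16x4_t x0 = vadd_s16(q0, q2);                 /* q0 + q2        */
    int16x4_t x1 = vsub_s16(q0, q2);                 /* q0 - q2        */
    int16x4_t x2 = vsub_s16(vshr_n_s16(q1, 1), q3);  /* (q1 >> 1) - q3 */
    int16x4_t x3 = vadd_s16(q1, vshr_n_s16(q3, 1));  /* q1 + (q3 >> 1) */

    /* Recombine and descale with a rounding shift by 6. */
    out[0] = vrshr_n_s16(vadd_s16(x0, x3), 6);
    out[1] = vrshr_n_s16(vadd_s16(x1, x2), 6);
    out[2] = vrshr_n_s16(vsub_s16(x1, x2), 6);
    out[3] = vrshr_n_s16(vsub_s16(x0, x3), 6);
}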
/external/libavc/decoder/arm/svc/
Disvcd_iquant_itrans_neon.c273 int16x4_t x0_16x4, x1_16x4, x2_16x4, x3_16x4; in isvcd_iquant_itrans_chroma_4x4_neonintr() local
332 x3_16x4 = vadd_s16(q1_16x4, rq3_16x4); // x2 = q1 + q3>>1 in isvcd_iquant_itrans_chroma_4x4_neonintr()
334 xx0_16x4 = vadd_s16(x0_16x4, x3_16x4); // x0+x3 in isvcd_iquant_itrans_chroma_4x4_neonintr()
337 xx3_16x4 = vsub_s16(x0_16x4, x3_16x4); // x0-x3 in isvcd_iquant_itrans_chroma_4x4_neonintr()
355 x3_16x4 = vreinterpret_s16_s32(x3_32x2); in isvcd_iquant_itrans_chroma_4x4_neonintr()
359 rq3_16x4 = vshr_n_s16(x3_16x4, 1); // q3 >> 1 in isvcd_iquant_itrans_chroma_4x4_neonintr()
363 xx2_16x4 = vsub_s16(rq1_16x4, x3_16x4); // x2 = q1>>1 - q3 in isvcd_iquant_itrans_chroma_4x4_neonintr()
369 x3_16x4 = vsub_s16(xx0_16x4, xx3_16x4); // imacro = x0 - x3 in isvcd_iquant_itrans_chroma_4x4_neonintr()
374 x3_16x4 = vrshr_n_s16(x3_16x4, 6); in isvcd_iquant_itrans_chroma_4x4_neonintr()
382 x3_16x4 = vmin_s16(x3_16x4, dup_max); in isvcd_iquant_itrans_chroma_4x4_neonintr()
[all …]
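
The chroma variant ends by clamping the descaled row against a dup_max register with vmin_s16. Assuming a matching lower bound exists (a dup_min register, not shown in the excerpt), a range clamp would look roughly like the sketch below; the names are illustrative.

#include <arm_neon.h>
#include <stdint.h>

/* Illustrative only: clamp one descaled row to [min_val, max_val]. */
static inline int16x4_t clamp_row(int16x4_t row, int16_t min_val, int16_t max_val)
{
    int16x4_t dup_min = vdup_n_s16(min_val);
    int16x4_t dup_max = vdup_n_s16(max_val);
    row = vmax_s16(row, dup_min);   /* lower bound */
    row = vmin_s16(row, dup_max);   /* upper bound */
    return row;
}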
isvcd_iquant_itrans_residual_neon.c
104 int16x4_t x0_16x4, x1_16x4, x2_16x4, x3_16x4; in isvcd_iquant_itrans_residual_4x4_neonintr() local
161 x3_16x4 = vadd_s16(q1_16x4, rq3_16x4); // x2 = q1 + q3>>1 in isvcd_iquant_itrans_residual_4x4_neonintr()
163 xx0_16x4 = vadd_s16(x0_16x4, x3_16x4); // x0+x3 in isvcd_iquant_itrans_residual_4x4_neonintr()
166 xx3_16x4 = vsub_s16(x0_16x4, x3_16x4); // x0-x3 in isvcd_iquant_itrans_residual_4x4_neonintr()
184 x3_16x4 = vreinterpret_s16_s32(x3_32x2); in isvcd_iquant_itrans_residual_4x4_neonintr()
188 rq3_16x4 = vshr_n_s16(x3_16x4, 1); // q3 >> 1 in isvcd_iquant_itrans_residual_4x4_neonintr()
192 xx2_16x4 = vsub_s16(rq1_16x4, x3_16x4); // x2 = q1>>1 - q3 in isvcd_iquant_itrans_residual_4x4_neonintr()
198 x3_16x4 = vsub_s16(xx0_16x4, xx3_16x4); // imacro = x0 - x3 in isvcd_iquant_itrans_residual_4x4_neonintr()
203 x3_16x4 = vrshr_n_s16(x3_16x4, 6); in isvcd_iquant_itrans_residual_4x4_neonintr()
213 x3_16x4 = vadd_s16(pred3, x3_16x4); in isvcd_iquant_itrans_residual_4x4_neonintr()
[all …]
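
The residual variant adds a previously loaded 16-bit row (pred3 in the excerpt) to the descaled output instead of reconstructing pixels directly. A minimal sketch of that accumulate-and-store step, with hypothetical names:

#include <arm_neon.h>
#include <stdint.h>

/* Illustrative only: add an incoming 16-bit residual row to a descaled
 * transform row and store the result. */
static inline void add_residual_row(int16_t *dst, const int16_t *residual,
                                    int16x4_t descaled_row)
{
    int16x4_t res = vld1_s16(residual);             /* incoming residual row */
    vst1_s16(dst, vadd_s16(res, descaled_row));     /* accumulate and store  */
}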
isvcd_iquant_itrans_residual_recon_neon.c
320 int16x4_t x0_16x4, x1_16x4, x2_16x4, x3_16x4; in isvcd_iquant_itrans_residual_recon_4x4_neonintr() local
384 x3_16x4 = vadd_s16(q1_16x4, rq3_16x4); // x2 = q1 + q3>>1 in isvcd_iquant_itrans_residual_recon_4x4_neonintr()
386 xx0_16x4 = vadd_s16(x0_16x4, x3_16x4); // x0+x3 in isvcd_iquant_itrans_residual_recon_4x4_neonintr()
389 xx3_16x4 = vsub_s16(x0_16x4, x3_16x4); // x0-x3 in isvcd_iquant_itrans_residual_recon_4x4_neonintr()
407 x3_16x4 = vreinterpret_s16_s32(x3_32x2); in isvcd_iquant_itrans_residual_recon_4x4_neonintr()
411 rq3_16x4 = vshr_n_s16(x3_16x4, 1); // q3 >> 1 in isvcd_iquant_itrans_residual_recon_4x4_neonintr()
415 xx2_16x4 = vsub_s16(rq1_16x4, x3_16x4); // x2 = q1>>1 - q3 in isvcd_iquant_itrans_residual_recon_4x4_neonintr()
421 x3_16x4 = vsub_s16(xx0_16x4, xx3_16x4); // imacro = x0 - x3 in isvcd_iquant_itrans_residual_recon_4x4_neonintr()
431 x3_16x4 = vrshr_n_s16(x3_16x4, 6); in isvcd_iquant_itrans_residual_recon_4x4_neonintr()
436 resd3_in = vadd_s16(resd3_in, x3_16x4); in isvcd_iquant_itrans_residual_recon_4x4_neonintr()
[all …]
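
The last result accumulates the incoming residual (vadd_s16(resd3_in, x3_16x4)) and then reconstructs pixels. The excerpt does not show the final store, so the finishing step below (widen the 8-bit prediction, add, saturate with vqmovun_s16) is an assumption about how such a path typically ends, not the library's exact code; strides are ignored and two rows are packed so a single vqmovun_s16 covers both.

#include <arm_neon.h>
#include <stdint.h>

/* Illustrative only: reconstruct two 4-pixel rows from prediction pixels
 * plus the accumulated 16-bit rows, saturating to [0, 255]. */
static inline void recon_two_rows(uint8_t *out, const uint8_t *pred,
                                  int16x4_t row_a, int16x4_t row_b)
{
    uint8x8_t pred_u8 = vld1_u8(pred);             /* 2 x 4 prediction pixels */
    int16x8_t sum = vcombine_s16(row_a, row_b);    /* pack both rows          */
    sum = vaddq_s16(sum, vreinterpretq_s16_u16(vmovl_u8(pred_u8)));
    vst1_u8(out, vqmovun_s16(sum));                /* saturate and store      */
}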