/*
 *  Copyright (c) 2022 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>
#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/loongarch/vpx_convolve_lsx.h"

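/* 8-tap vertical filter on a 4-wide column: four rows are filtered per
 * iteration, rounded, and averaged with the existing dst pixels. */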
static void common_vt_8t_and_aver_dst_4w_lsx(const uint8_t *src,
                                             int32_t src_stride, uint8_t *dst,
                                             int32_t dst_stride,
                                             const int8_t *filter,
                                             int32_t height) {
  uint32_t loop_cnt = (height >> 2);
  uint8_t *dst_tmp = dst;
  __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
  __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5;
  __m128i reg0, reg1, reg2, reg3, reg4;
  __m128i filter0, filter1, filter2, filter3;
  __m128i out0, out1;
  int32_t src_stride2 = src_stride << 1;
  int32_t src_stride3 = src_stride + src_stride2;
  int32_t src_stride4 = src_stride2 << 1;
  uint8_t *src_tmp0 = (uint8_t *)src - src_stride3;

  DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4, filter, 6,
            filter0, filter1, filter2, filter3);
  src0 = __lsx_vld(src_tmp0, 0);
  DUP2_ARG2(__lsx_vldx, src_tmp0, src_stride, src_tmp0, src_stride2, src1,
            src2);
  src3 = __lsx_vldx(src_tmp0, src_stride3);
  src_tmp0 += src_stride4;
  src4 = __lsx_vld(src_tmp0, 0);
  DUP2_ARG2(__lsx_vldx, src_tmp0, src_stride, src_tmp0, src_stride2, src5,
            src6);
  src_tmp0 += src_stride3;
  DUP4_ARG2(__lsx_vilvl_b, src1, src0, src3, src2, src5, src4, src2, src1, tmp0,
            tmp1, tmp2, tmp3);
  DUP2_ARG2(__lsx_vilvl_b, src4, src3, src6, src5, tmp4, tmp5);
  DUP2_ARG2(__lsx_vilvl_d, tmp3, tmp0, tmp4, tmp1, reg0, reg1);
  reg2 = __lsx_vilvl_d(tmp5, tmp2);
  DUP2_ARG2(__lsx_vxori_b, reg0, 128, reg1, 128, reg0, reg1);
  reg2 = __lsx_vxori_b(reg2, 128);

  for (; loop_cnt--;) {
    src7 = __lsx_vld(src_tmp0, 0);
    DUP2_ARG2(__lsx_vldx, src_tmp0, src_stride, src_tmp0, src_stride2, src8,
              src9);
    src10 = __lsx_vldx(src_tmp0, src_stride3);
    src_tmp0 += src_stride4;
    src0 = __lsx_vldrepl_w(dst_tmp, 0);
    dst_tmp += dst_stride;
    src1 = __lsx_vldrepl_w(dst_tmp, 0);
    dst_tmp += dst_stride;
    src2 = __lsx_vldrepl_w(dst_tmp, 0);
    dst_tmp += dst_stride;
    src3 = __lsx_vldrepl_w(dst_tmp, 0);
    dst_tmp += dst_stride;
    DUP2_ARG2(__lsx_vilvl_w, src1, src0, src3, src2, src0, src1);
    src0 = __lsx_vilvl_d(src1, src0);
    DUP4_ARG2(__lsx_vilvl_b, src7, src6, src8, src7, src9, src8, src10, src9,
              tmp0, tmp1, tmp2, tmp3);
    DUP2_ARG2(__lsx_vilvl_d, tmp1, tmp0, tmp3, tmp2, reg3, reg4);
    DUP2_ARG2(__lsx_vxori_b, reg3, 128, reg4, 128, reg3, reg4);
    out0 = filt_8tap_dpadd_s_h(reg0, reg1, reg2, reg3, filter0, filter1,
                               filter2, filter3);
    out1 = filt_8tap_dpadd_s_h(reg1, reg2, reg3, reg4, filter0, filter1,
                               filter2, filter3);
    out0 = __lsx_vssrarni_b_h(out1, out0, 7);
    out0 = __lsx_vxori_b(out0, 128);
    out0 = __lsx_vavgr_bu(out0, src0);
    __lsx_vstelm_w(out0, dst, 0, 0);
    dst += dst_stride;
    __lsx_vstelm_w(out0, dst, 0, 1);
    dst += dst_stride;
    __lsx_vstelm_w(out0, dst, 0, 2);
    dst += dst_stride;
    __lsx_vstelm_w(out0, dst, 0, 3);
    dst += dst_stride;
    reg0 = reg2;
    reg1 = reg3;
    reg2 = reg4;
    src6 = src10;
  }
}

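/* 8-tap vertical filter on an 8-wide column with dst averaging, four rows
 * per iteration. */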
static void common_vt_8t_and_aver_dst_8w_lsx(const uint8_t *src,
                                             int32_t src_stride, uint8_t *dst,
                                             int32_t dst_stride,
                                             const int8_t *filter,
                                             int32_t height) {
  uint32_t loop_cnt = height >> 2;
  uint8_t *dst_tmp = dst;
  __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
  __m128i tmp0, tmp1, tmp2, tmp3;
  __m128i reg0, reg1, reg2, reg3, reg4, reg5;
  __m128i filter0, filter1, filter2, filter3;
  __m128i out0, out1, out2, out3;
  int32_t src_stride2 = src_stride << 1;
  int32_t src_stride3 = src_stride + src_stride2;
  int32_t src_stride4 = src_stride2 << 1;
  uint8_t *src_tmp0 = (uint8_t *)src - src_stride3;

  DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4, filter, 6,
            filter0, filter1, filter2, filter3);

  src0 = __lsx_vld(src_tmp0, 0);
  DUP2_ARG2(__lsx_vldx, src_tmp0, src_stride, src_tmp0, src_stride2, src1,
            src2);
  src3 = __lsx_vldx(src_tmp0, src_stride3);
  src_tmp0 += src_stride4;
  src4 = __lsx_vld(src_tmp0, 0);
  DUP2_ARG2(__lsx_vldx, src_tmp0, src_stride, src_tmp0, src_stride2, src5,
            src6);
  src_tmp0 += src_stride3;
  DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128, src0,
            src1, src2, src3);
  DUP2_ARG2(__lsx_vxori_b, src4, 128, src5, 128, src4, src5);
  src6 = __lsx_vxori_b(src6, 128);
  DUP4_ARG2(__lsx_vilvl_b, src1, src0, src3, src2, src5, src4, src2, src1, reg0,
            reg1, reg2, reg3);
  DUP2_ARG2(__lsx_vilvl_b, src4, src3, src6, src5, reg4, reg5);

  for (; loop_cnt--;) {
    src7 = __lsx_vld(src_tmp0, 0);
    DUP2_ARG2(__lsx_vldx, src_tmp0, src_stride, src_tmp0, src_stride2, src8,
              src9);
    src10 = __lsx_vldx(src_tmp0, src_stride3);
    src_tmp0 += src_stride4;
    src0 = __lsx_vldrepl_d(dst_tmp, 0);
    dst_tmp += dst_stride;
    src1 = __lsx_vldrepl_d(dst_tmp, 0);
    dst_tmp += dst_stride;
    src2 = __lsx_vldrepl_d(dst_tmp, 0);
    dst_tmp += dst_stride;
    src3 = __lsx_vldrepl_d(dst_tmp, 0);
    dst_tmp += dst_stride;
    DUP2_ARG2(__lsx_vilvl_d, src1, src0, src3, src2, src0, src1);
    DUP4_ARG2(__lsx_vxori_b, src7, 128, src8, 128, src9, 128, src10, 128, src7,
              src8, src9, src10);
    DUP4_ARG2(__lsx_vilvl_b, src7, src6, src8, src7, src9, src8, src10, src9,
              tmp0, tmp1, tmp2, tmp3);
    out0 = filt_8tap_dpadd_s_h(reg0, reg1, reg2, tmp0, filter0, filter1,
                               filter2, filter3);
    out1 = filt_8tap_dpadd_s_h(reg3, reg4, reg5, tmp1, filter0, filter1,
                               filter2, filter3);
    out2 = filt_8tap_dpadd_s_h(reg1, reg2, tmp0, tmp2, filter0, filter1,
                               filter2, filter3);
    out3 = filt_8tap_dpadd_s_h(reg4, reg5, tmp1, tmp3, filter0, filter1,
                               filter2, filter3);
    DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, 7, out3, out2, 7, out0, out1);
    DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
    DUP2_ARG2(__lsx_vavgr_bu, out0, src0, out1, src1, out0, out1);
    __lsx_vstelm_d(out0, dst, 0, 0);
    dst += dst_stride;
    __lsx_vstelm_d(out0, dst, 0, 1);
    dst += dst_stride;
    __lsx_vstelm_d(out1, dst, 0, 0);
    dst += dst_stride;
    __lsx_vstelm_d(out1, dst, 0, 1);
    dst += dst_stride;
    reg0 = reg2;
    reg1 = tmp0;
    reg2 = tmp2;
    reg3 = reg5;
    reg4 = tmp1;
    reg5 = tmp3;
    src6 = src10;
  }
}

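/* 8-tap vertical filter with dst averaging for widths that are multiples of
 * 16; the block is walked in 16-pixel-wide columns, four rows at a time. */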
static void common_vt_8t_and_aver_dst_16w_mult_lsx(
    const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride,
    const int8_t *filter, int32_t height, int32_t width) {
  uint8_t *src_tmp;
  uint32_t cnt = width >> 4;
  __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
  __m128i filter0, filter1, filter2, filter3;
  __m128i reg0, reg1, reg2, reg3, reg4, reg5;
  __m128i reg6, reg7, reg8, reg9, reg10, reg11;
  __m128i tmp0, tmp1, tmp2, tmp3;
  int32_t src_stride2 = src_stride << 1;
  int32_t src_stride3 = src_stride + src_stride2;
  int32_t src_stride4 = src_stride2 << 1;
  int32_t dst_stride2 = dst_stride << 1;
  int32_t dst_stride3 = dst_stride2 + dst_stride;
  int32_t dst_stride4 = dst_stride2 << 1;
  uint8_t *src_tmp0 = (uint8_t *)src - src_stride3;

  DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4, filter, 6,
            filter0, filter1, filter2, filter3);
  for (; cnt--;) {
    uint32_t loop_cnt = height >> 2;
    uint8_t *dst_reg = dst;

    src_tmp = src_tmp0;
    src0 = __lsx_vld(src_tmp, 0);
    DUP2_ARG2(__lsx_vldx, src_tmp, src_stride, src_tmp, src_stride2, src1,
              src2);
    src3 = __lsx_vldx(src_tmp, src_stride3);
    src_tmp += src_stride4;
    src4 = __lsx_vld(src_tmp, 0);
    DUP2_ARG2(__lsx_vldx, src_tmp, src_stride, src_tmp, src_stride2, src5,
              src6);
    src_tmp += src_stride3;
    DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128, src0,
              src1, src2, src3);
    DUP2_ARG2(__lsx_vxori_b, src4, 128, src5, 128, src4, src5);
    src6 = __lsx_vxori_b(src6, 128);
    DUP4_ARG2(__lsx_vilvl_b, src1, src0, src3, src2, src5, src4, src2, src1,
              reg0, reg1, reg2, reg3);
    DUP2_ARG2(__lsx_vilvl_b, src4, src3, src6, src5, reg4, reg5);
    DUP4_ARG2(__lsx_vilvh_b, src1, src0, src3, src2, src5, src4, src2, src1,
              reg6, reg7, reg8, reg9);
    DUP2_ARG2(__lsx_vilvh_b, src4, src3, src6, src5, reg10, reg11);
    for (; loop_cnt--;) {
      src7 = __lsx_vld(src_tmp, 0);
      DUP2_ARG2(__lsx_vldx, src_tmp, src_stride, src_tmp, src_stride2, src8,
                src9);
      src10 = __lsx_vldx(src_tmp, src_stride3);
      src_tmp += src_stride4;
      DUP4_ARG2(__lsx_vxori_b, src7, 128, src8, 128, src9, 128, src10, 128,
                src7, src8, src9, src10);
      DUP4_ARG2(__lsx_vilvl_b, src7, src6, src8, src7, src9, src8, src10, src9,
                src0, src1, src2, src3);
      DUP4_ARG2(__lsx_vilvh_b, src7, src6, src8, src7, src9, src8, src10, src9,
                src4, src5, src7, src8);
      tmp0 = filt_8tap_dpadd_s_h(reg0, reg1, reg2, src0, filter0, filter1,
                                 filter2, filter3);
      tmp1 = filt_8tap_dpadd_s_h(reg3, reg4, reg5, src1, filter0, filter1,
                                 filter2, filter3);
      tmp2 = filt_8tap_dpadd_s_h(reg6, reg7, reg8, src4, filter0, filter1,
                                 filter2, filter3);
      tmp3 = filt_8tap_dpadd_s_h(reg9, reg10, reg11, src5, filter0, filter1,
                                 filter2, filter3);
      DUP2_ARG3(__lsx_vssrarni_b_h, tmp2, tmp0, 7, tmp3, tmp1, 7, tmp0, tmp1);
      DUP2_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp0, tmp1);
      tmp2 = __lsx_vld(dst_reg, 0);
      tmp3 = __lsx_vldx(dst_reg, dst_stride);
      DUP2_ARG2(__lsx_vavgr_bu, tmp0, tmp2, tmp1, tmp3, tmp0, tmp1);
      __lsx_vst(tmp0, dst_reg, 0);
      __lsx_vstx(tmp1, dst_reg, dst_stride);
      tmp0 = filt_8tap_dpadd_s_h(reg1, reg2, src0, src2, filter0, filter1,
                                 filter2, filter3);
      tmp1 = filt_8tap_dpadd_s_h(reg4, reg5, src1, src3, filter0, filter1,
                                 filter2, filter3);
      tmp2 = filt_8tap_dpadd_s_h(reg7, reg8, src4, src7, filter0, filter1,
                                 filter2, filter3);
      tmp3 = filt_8tap_dpadd_s_h(reg10, reg11, src5, src8, filter0, filter1,
                                 filter2, filter3);
      DUP2_ARG3(__lsx_vssrarni_b_h, tmp2, tmp0, 7, tmp3, tmp1, 7, tmp0, tmp1);
      DUP2_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp0, tmp1);
      tmp2 = __lsx_vldx(dst_reg, dst_stride2);
      tmp3 = __lsx_vldx(dst_reg, dst_stride3);
      DUP2_ARG2(__lsx_vavgr_bu, tmp0, tmp2, tmp1, tmp3, tmp0, tmp1);
      __lsx_vstx(tmp0, dst_reg, dst_stride2);
      __lsx_vstx(tmp1, dst_reg, dst_stride3);
      dst_reg += dst_stride4;

      reg0 = reg2;
      reg1 = src0;
      reg2 = src2;
      reg3 = reg5;
      reg4 = src1;
      reg5 = src3;
      reg6 = reg8;
      reg7 = src4;
      reg8 = src7;
      reg9 = reg11;
      reg10 = src5;
      reg11 = src8;
      src6 = src10;
    }
    src_tmp0 += 16;
    dst += 16;
  }
}

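/* The 16-, 32- and 64-wide variants are thin wrappers around the
 * multiple-of-16 helper above. */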
static void common_vt_8t_and_aver_dst_16w_lsx(const uint8_t *src,
                                              int32_t src_stride, uint8_t *dst,
                                              int32_t dst_stride,
                                              const int8_t *filter,
                                              int32_t height) {
  common_vt_8t_and_aver_dst_16w_mult_lsx(src, src_stride, dst, dst_stride,
                                         filter, height, 16);
}

static void common_vt_8t_and_aver_dst_32w_lsx(const uint8_t *src,
                                              int32_t src_stride, uint8_t *dst,
                                              int32_t dst_stride,
                                              const int8_t *filter,
                                              int32_t height) {
  common_vt_8t_and_aver_dst_16w_mult_lsx(src, src_stride, dst, dst_stride,
                                         filter, height, 32);
}

static void common_vt_8t_and_aver_dst_64w_lsx(const uint8_t *src,
                                              int32_t src_stride, uint8_t *dst,
                                              int32_t dst_stride,
                                              const int8_t *filter,
                                              int32_t height) {
  common_vt_8t_and_aver_dst_16w_mult_lsx(src, src_stride, dst, dst_stride,
                                         filter, height, 64);
}

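/* 2-tap (bilinear) vertical filter on a 4x4 block, averaged with dst. */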
static void common_vt_2t_and_aver_dst_4x4_lsx(const uint8_t *src,
                                              int32_t src_stride, uint8_t *dst,
                                              int32_t dst_stride,
                                              int8_t *filter) {
  __m128i src0, src1, src2, src3, src4;
  __m128i dst0, dst1, dst2, dst3, out, filt0, src2110, src4332;
  __m128i src10_r, src32_r, src21_r, src43_r;
  __m128i tmp0, tmp1;
  uint8_t *dst_tmp = dst;
  int32_t src_stride2 = src_stride << 1;
  int32_t src_stride3 = src_stride2 + src_stride;
  int32_t src_stride4 = src_stride2 << 1;

  filt0 = __lsx_vldrepl_h(filter, 0);
  src0 = __lsx_vld(src, 0);
  DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride2, src1, src2);
  src3 = __lsx_vldx(src, src_stride3);
  src += src_stride4;
  src4 = __lsx_vld(src, 0);
  src += src_stride;

  dst0 = __lsx_vldrepl_w(dst_tmp, 0);
  dst_tmp += dst_stride;
  dst1 = __lsx_vldrepl_w(dst_tmp, 0);
  dst_tmp += dst_stride;
  dst2 = __lsx_vldrepl_w(dst_tmp, 0);
  dst_tmp += dst_stride;
  dst3 = __lsx_vldrepl_w(dst_tmp, 0);
  dst0 = __lsx_vilvl_w(dst1, dst0);
  dst1 = __lsx_vilvl_w(dst3, dst2);
  dst0 = __lsx_vilvl_d(dst1, dst0);
  DUP4_ARG2(__lsx_vilvl_b, src1, src0, src2, src1, src3, src2, src4, src3,
            src10_r, src21_r, src32_r, src43_r);
  DUP2_ARG2(__lsx_vilvl_d, src21_r, src10_r, src43_r, src32_r, src2110,
            src4332);
  DUP2_ARG2(__lsx_vdp2_h_bu, src2110, filt0, src4332, filt0, tmp0, tmp1);
  tmp0 = __lsx_vssrarni_bu_h(tmp1, tmp0, FILTER_BITS);
  out = __lsx_vavgr_bu(tmp0, dst0);
  __lsx_vstelm_w(out, dst, 0, 0);
  dst += dst_stride;
  __lsx_vstelm_w(out, dst, 0, 1);
  dst += dst_stride;
  __lsx_vstelm_w(out, dst, 0, 2);
  dst += dst_stride;
  __lsx_vstelm_w(out, dst, 0, 3);
  dst += dst_stride;
}

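/* 2-tap vertical filter on a 4x8 block, averaged with dst. */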
static void common_vt_2t_and_aver_dst_4x8_lsx(const uint8_t *src,
                                              int32_t src_stride, uint8_t *dst,
                                              int32_t dst_stride,
                                              int8_t *filter) {
  __m128i dst0, dst1, dst2, dst3, dst4;
  __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8, src87_r;
  __m128i src10_r, src32_r, src54_r, src76_r, src21_r, src43_r, src65_r;
  __m128i src2110, src4332, src6554, src8776, filt0;
  __m128i tmp0, tmp1, tmp2, tmp3;
  uint8_t *dst_tmp = dst;
  int32_t src_stride2 = src_stride << 1;
  int32_t src_stride3 = src_stride2 + src_stride;
  int32_t src_stride4 = src_stride2 << 1;

  filt0 = __lsx_vldrepl_h(filter, 0);
  src0 = __lsx_vld(src, 0);
  DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride2, src1, src2);
  src3 = __lsx_vldx(src, src_stride3);
  src += src_stride4;
  src4 = __lsx_vld(src, 0);
  DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride2, src5, src6);
  src7 = __lsx_vldx(src, src_stride3);
  src += src_stride4;
  src8 = __lsx_vld(src, 0);

  dst0 = __lsx_vldrepl_w(dst_tmp, 0);
  dst_tmp += dst_stride;
  dst1 = __lsx_vldrepl_w(dst_tmp, 0);
  dst_tmp += dst_stride;
  dst2 = __lsx_vldrepl_w(dst_tmp, 0);
  dst_tmp += dst_stride;
  dst3 = __lsx_vldrepl_w(dst_tmp, 0);
  dst_tmp += dst_stride;
  dst0 = __lsx_vilvl_w(dst1, dst0);
  dst1 = __lsx_vilvl_w(dst3, dst2);
  dst0 = __lsx_vilvl_d(dst1, dst0);

  dst1 = __lsx_vldrepl_w(dst_tmp, 0);
  dst_tmp += dst_stride;
  dst2 = __lsx_vldrepl_w(dst_tmp, 0);
  dst_tmp += dst_stride;
  dst3 = __lsx_vldrepl_w(dst_tmp, 0);
  dst_tmp += dst_stride;
  dst4 = __lsx_vldrepl_w(dst_tmp, 0);
  dst1 = __lsx_vilvl_w(dst2, dst1);
  dst2 = __lsx_vilvl_w(dst4, dst3);
  dst1 = __lsx_vilvl_d(dst2, dst1);

  DUP4_ARG2(__lsx_vilvl_b, src1, src0, src2, src1, src3, src2, src4, src3,
            src10_r, src21_r, src32_r, src43_r);
  DUP4_ARG2(__lsx_vilvl_b, src5, src4, src6, src5, src7, src6, src8, src7,
            src54_r, src65_r, src76_r, src87_r);
  DUP4_ARG2(__lsx_vilvl_d, src21_r, src10_r, src43_r, src32_r, src65_r, src54_r,
            src87_r, src76_r, src2110, src4332, src6554, src8776);
  DUP4_ARG2(__lsx_vdp2_h_bu, src2110, filt0, src4332, filt0, src6554, filt0,
            src8776, filt0, tmp0, tmp1, tmp2, tmp3);
  DUP2_ARG3(__lsx_vssrarni_bu_h, tmp1, tmp0, FILTER_BITS, tmp3, tmp2,
            FILTER_BITS, tmp0, tmp2);
  DUP2_ARG2(__lsx_vavgr_bu, tmp0, dst0, tmp2, dst1, tmp0, tmp2);
  __lsx_vstelm_w(tmp0, dst, 0, 0);
  dst += dst_stride;
  __lsx_vstelm_w(tmp0, dst, 0, 1);
  dst += dst_stride;
  __lsx_vstelm_w(tmp0, dst, 0, 2);
  dst += dst_stride;
  __lsx_vstelm_w(tmp0, dst, 0, 3);
  dst += dst_stride;

  __lsx_vstelm_w(tmp2, dst, 0, 0);
  dst += dst_stride;
  __lsx_vstelm_w(tmp2, dst, 0, 1);
  dst += dst_stride;
  __lsx_vstelm_w(tmp2, dst, 0, 2);
  dst += dst_stride;
  __lsx_vstelm_w(tmp2, dst, 0, 3);
}

static void common_vt_2t_and_aver_dst_4w_lsx(const uint8_t *src,
                                             int32_t src_stride, uint8_t *dst,
                                             int32_t dst_stride, int8_t *filter,
                                             int32_t height) {
  if (height == 4) {
    common_vt_2t_and_aver_dst_4x4_lsx(src, src_stride, dst, dst_stride, filter);
  } else if (height == 8) {
    common_vt_2t_and_aver_dst_4x8_lsx(src, src_stride, dst, dst_stride, filter);
  }
}

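/* 2-tap vertical filter on an 8x4 block, averaged with dst. */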
static void common_vt_2t_and_aver_dst_8x4_lsx(const uint8_t *src,
                                              int32_t src_stride, uint8_t *dst,
                                              int32_t dst_stride,
                                              int8_t *filter) {
  __m128i src0, src1, src2, src3, src4;
  __m128i dst0, dst1, dst2, dst3, vec0, vec1, vec2, vec3, filt0;
  __m128i tmp0, tmp1, tmp2, tmp3;
  uint8_t *dst_tmp = dst;
  int32_t src_stride2 = src_stride << 1;
  int32_t src_stride3 = src_stride2 + src_stride;
  int32_t src_stride4 = src_stride2 << 1;

  filt0 = __lsx_vldrepl_h(filter, 0);
  src0 = __lsx_vld(src, 0);
  DUP4_ARG2(__lsx_vldx, src, src_stride, src, src_stride2, src, src_stride3,
            src, src_stride4, src1, src2, src3, src4);
  dst0 = __lsx_vldrepl_d(dst_tmp, 0);
  dst_tmp += dst_stride;
  dst1 = __lsx_vldrepl_d(dst_tmp, 0);
  dst_tmp += dst_stride;
  dst2 = __lsx_vldrepl_d(dst_tmp, 0);
  dst_tmp += dst_stride;
  dst3 = __lsx_vldrepl_d(dst_tmp, 0);
  dst_tmp += dst_stride;
  DUP2_ARG2(__lsx_vilvl_d, dst1, dst0, dst3, dst2, dst0, dst1);
  DUP2_ARG2(__lsx_vilvl_b, src1, src0, src2, src1, vec0, vec1);
  DUP2_ARG2(__lsx_vilvl_b, src3, src2, src4, src3, vec2, vec3);
  DUP4_ARG2(__lsx_vdp2_h_bu, vec0, filt0, vec1, filt0, vec2, filt0, vec3, filt0,
            tmp0, tmp1, tmp2, tmp3);
  DUP2_ARG3(__lsx_vssrarni_bu_h, tmp1, tmp0, FILTER_BITS, tmp3, tmp2,
            FILTER_BITS, tmp0, tmp2);
  DUP2_ARG2(__lsx_vavgr_bu, tmp0, dst0, tmp2, dst1, tmp0, tmp2);
  __lsx_vstelm_d(tmp0, dst, 0, 0);
  dst += dst_stride;
  __lsx_vstelm_d(tmp0, dst, 0, 1);
  dst += dst_stride;
  __lsx_vstelm_d(tmp2, dst, 0, 0);
  dst += dst_stride;
  __lsx_vstelm_d(tmp2, dst, 0, 1);
}

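/* 2-tap vertical filter for 8-wide blocks whose height is a multiple of 8,
 * averaged with dst; eight rows are processed per iteration. */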
static void common_vt_2t_and_aver_dst_8x8mult_lsx(
    const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride,
    int8_t *filter, int32_t height) {
  uint32_t loop_cnt = (height >> 3);
  __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8;
  __m128i dst0, dst1, dst2, dst3, dst4, dst5;
  __m128i vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, filt0;
  __m128i tmp0, tmp1, tmp2, tmp3;
  uint8_t *dst_tmp = dst;
  int32_t src_stride2 = src_stride << 1;
  int32_t src_stride3 = src_stride2 + src_stride;
  int32_t src_stride4 = src_stride2 << 1;

  filt0 = __lsx_vldrepl_h(filter, 0);
  src0 = __lsx_vld(src, 0);
  src += src_stride;

  for (; loop_cnt--;) {
    src1 = __lsx_vld(src, 0);
    DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride2, src2, src3);
    src4 = __lsx_vldx(src, src_stride3);
    src += src_stride4;
    src5 = __lsx_vld(src, 0);
    DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride2, src6, src7);
    src8 = __lsx_vldx(src, src_stride3);
    src += src_stride4;

    dst0 = __lsx_vldrepl_d(dst_tmp, 0);
    dst_tmp += dst_stride;
    dst1 = __lsx_vldrepl_d(dst_tmp, 0);
    dst_tmp += dst_stride;
    dst2 = __lsx_vldrepl_d(dst_tmp, 0);
    dst_tmp += dst_stride;
    dst3 = __lsx_vldrepl_d(dst_tmp, 0);
    dst_tmp += dst_stride;
    DUP2_ARG2(__lsx_vilvl_d, dst1, dst0, dst3, dst2, dst0, dst1);

    dst2 = __lsx_vldrepl_d(dst_tmp, 0);
    dst_tmp += dst_stride;
    dst3 = __lsx_vldrepl_d(dst_tmp, 0);
    dst_tmp += dst_stride;
    dst4 = __lsx_vldrepl_d(dst_tmp, 0);
    dst_tmp += dst_stride;
    dst5 = __lsx_vldrepl_d(dst_tmp, 0);
    dst_tmp += dst_stride;
    DUP2_ARG2(__lsx_vilvl_d, dst3, dst2, dst5, dst4, dst2, dst3);

    DUP4_ARG2(__lsx_vilvl_b, src1, src0, src2, src1, src3, src2, src4, src3,
              vec0, vec1, vec2, vec3);
    DUP4_ARG2(__lsx_vilvl_b, src5, src4, src6, src5, src7, src6, src8, src7,
              vec4, vec5, vec6, vec7);
    DUP4_ARG2(__lsx_vdp2_h_bu, vec0, filt0, vec1, filt0, vec2, filt0, vec3,
              filt0, tmp0, tmp1, tmp2, tmp3);

    DUP2_ARG3(__lsx_vssrarni_bu_h, tmp1, tmp0, FILTER_BITS, tmp3, tmp2,
              FILTER_BITS, tmp0, tmp2);
    DUP2_ARG2(__lsx_vavgr_bu, tmp0, dst0, tmp2, dst1, tmp0, tmp2);
    __lsx_vstelm_d(tmp0, dst, 0, 0);
    dst += dst_stride;
    __lsx_vstelm_d(tmp0, dst, 0, 1);
    dst += dst_stride;
    __lsx_vstelm_d(tmp2, dst, 0, 0);
    dst += dst_stride;
    __lsx_vstelm_d(tmp2, dst, 0, 1);
    dst += dst_stride;

    DUP4_ARG2(__lsx_vdp2_h_bu, vec4, filt0, vec5, filt0, vec6, filt0, vec7,
              filt0, tmp0, tmp1, tmp2, tmp3);
    DUP2_ARG3(__lsx_vssrarni_bu_h, tmp1, tmp0, FILTER_BITS, tmp3, tmp2,
              FILTER_BITS, tmp0, tmp2);
    DUP2_ARG2(__lsx_vavgr_bu, tmp0, dst2, tmp2, dst3, tmp0, tmp2);
    __lsx_vstelm_d(tmp0, dst, 0, 0);
    dst += dst_stride;
    __lsx_vstelm_d(tmp0, dst, 0, 1);
    dst += dst_stride;
    __lsx_vstelm_d(tmp2, dst, 0, 0);
    dst += dst_stride;
    __lsx_vstelm_d(tmp2, dst, 0, 1);
    dst += dst_stride;

    src0 = src8;
  }
}

static void common_vt_2t_and_aver_dst_8w_lsx(const uint8_t *src,
                                             int32_t src_stride, uint8_t *dst,
                                             int32_t dst_stride, int8_t *filter,
                                             int32_t height) {
  if (height == 4) {
    common_vt_2t_and_aver_dst_8x4_lsx(src, src_stride, dst, dst_stride, filter);
  } else {
    common_vt_2t_and_aver_dst_8x8mult_lsx(src, src_stride, dst, dst_stride,
                                          filter, height);
  }
}

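/* 2-tap vertical filter for 16-wide blocks, averaged with dst, four rows per
 * iteration. */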
static void common_vt_2t_and_aver_dst_16w_lsx(const uint8_t *src,
                                              int32_t src_stride, uint8_t *dst,
                                              int32_t dst_stride,
                                              int8_t *filter, int32_t height) {
  uint32_t loop_cnt = (height >> 2);
  __m128i src0, src1, src2, src3, src4, dst0, dst1, dst2, dst3, filt0;
  __m128i vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
  __m128i tmp0, tmp1;
  int32_t src_stride2 = src_stride << 1;
  int32_t src_stride3 = src_stride2 + src_stride;
  int32_t src_stride4 = src_stride2 << 1;
  int32_t dst_stride2 = dst_stride << 1;
  int32_t dst_stride3 = dst_stride2 + dst_stride;

  filt0 = __lsx_vldrepl_h(filter, 0);
  src0 = __lsx_vld(src, 0);
  src += src_stride;

  for (; loop_cnt--;) {
    src1 = __lsx_vld(src, 0);
    DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride2, src2, src3);
    src4 = __lsx_vldx(src, src_stride3);
    src += src_stride4;

    dst0 = __lsx_vld(dst, 0);
    DUP2_ARG2(__lsx_vldx, dst, dst_stride, dst, dst_stride2, dst1, dst2);
    dst3 = __lsx_vldx(dst, dst_stride3);

    DUP2_ARG2(__lsx_vilvl_b, src1, src0, src2, src1, vec0, vec2);
    DUP2_ARG2(__lsx_vilvh_b, src1, src0, src2, src1, vec1, vec3);
    DUP2_ARG2(__lsx_vdp2_h_bu, vec0, filt0, vec1, filt0, tmp0, tmp1);
    tmp0 = __lsx_vssrarni_bu_h(tmp1, tmp0, FILTER_BITS);
    tmp0 = __lsx_vavgr_bu(tmp0, dst0);
    __lsx_vst(tmp0, dst, 0);
    dst += dst_stride;

    DUP2_ARG2(__lsx_vilvl_b, src3, src2, src4, src3, vec4, vec6);
    DUP2_ARG2(__lsx_vilvh_b, src3, src2, src4, src3, vec5, vec7);
    DUP2_ARG2(__lsx_vdp2_h_bu, vec2, filt0, vec3, filt0, tmp0, tmp1);
    tmp0 = __lsx_vssrarni_bu_h(tmp1, tmp0, FILTER_BITS);
    tmp0 = __lsx_vavgr_bu(tmp0, dst1);
    __lsx_vst(tmp0, dst, 0);
    dst += dst_stride;

    DUP2_ARG2(__lsx_vdp2_h_bu, vec4, filt0, vec5, filt0, tmp0, tmp1);
    tmp0 = __lsx_vssrarni_bu_h(tmp1, tmp0, FILTER_BITS);
    tmp0 = __lsx_vavgr_bu(tmp0, dst2);
    __lsx_vst(tmp0, dst, 0);
    dst += dst_stride;

    DUP2_ARG2(__lsx_vdp2_h_bu, vec6, filt0, vec7, filt0, tmp0, tmp1);
    tmp0 = __lsx_vssrarni_bu_h(tmp1, tmp0, FILTER_BITS);
    tmp0 = __lsx_vavgr_bu(tmp0, dst3);
    __lsx_vst(tmp0, dst, 0);
    dst += dst_stride;

    src0 = src4;
  }
}

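/* 2-tap vertical filter for 32-wide blocks, averaged with dst; each row is
 * handled as two 16-byte halves. */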
static void common_vt_2t_and_aver_dst_32w_lsx(const uint8_t *src,
                                              int32_t src_stride, uint8_t *dst,
                                              int32_t dst_stride,
                                              int8_t *filter, int32_t height) {
  uint32_t loop_cnt = (height >> 2);
  uint8_t *src_tmp1;
  uint8_t *dst_tmp1;
  __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8, src9;
  __m128i dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
  __m128i vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, filt0;
  __m128i tmp0, tmp1;
  int32_t src_stride2 = src_stride << 1;
  int32_t src_stride3 = src_stride2 + src_stride;
  int32_t src_stride4 = src_stride2 << 1;
  int32_t dst_stride2 = dst_stride << 1;
  int32_t dst_stride3 = dst_stride2 + dst_stride;

  filt0 = __lsx_vldrepl_h(filter, 0);
  DUP2_ARG2(__lsx_vld, src, 0, src, 16, src0, src5);
  src += src_stride;

  for (; loop_cnt--;) {
    src1 = __lsx_vld(src, 0);
    DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride2, src2, src3);
    src4 = __lsx_vldx(src, src_stride3);

    dst0 = __lsx_vld(dst, 0);
    DUP2_ARG2(__lsx_vldx, dst, dst_stride, dst, dst_stride2, dst1, dst2);
    dst3 = __lsx_vldx(dst, dst_stride3);

    DUP2_ARG2(__lsx_vilvl_b, src1, src0, src2, src1, vec0, vec2);
    DUP2_ARG2(__lsx_vilvh_b, src1, src0, src2, src1, vec1, vec3);

    src_tmp1 = src + 16;
    src6 = __lsx_vld(src_tmp1, 0);
    DUP2_ARG2(__lsx_vldx, src_tmp1, src_stride, src_tmp1, src_stride2, src7,
              src8);
    src9 = __lsx_vldx(src_tmp1, src_stride3);

    dst_tmp1 = dst + 16;
    dst4 = __lsx_vld(dst_tmp1, 0);
    DUP2_ARG2(__lsx_vldx, dst_tmp1, dst_stride, dst_tmp1, dst_stride2, dst5,
              dst6);
    dst7 = __lsx_vldx(dst_tmp1, dst_stride3);
    src += src_stride4;

    DUP2_ARG2(__lsx_vdp2_h_bu, vec0, filt0, vec1, filt0, tmp0, tmp1);
    tmp0 = __lsx_vssrarni_bu_h(tmp1, tmp0, FILTER_BITS);
    tmp0 = __lsx_vavgr_bu(tmp0, dst0);
    __lsx_vst(tmp0, dst, 0);

    DUP2_ARG2(__lsx_vdp2_h_bu, vec2, filt0, vec3, filt0, tmp0, tmp1);
    tmp0 = __lsx_vssrarni_bu_h(tmp1, tmp0, FILTER_BITS);
    tmp0 = __lsx_vavgr_bu(tmp0, dst1);
    __lsx_vstx(tmp0, dst, dst_stride);

    DUP2_ARG2(__lsx_vilvl_b, src3, src2, src4, src3, vec4, vec6);
    DUP2_ARG2(__lsx_vilvh_b, src3, src2, src4, src3, vec5, vec7);
    DUP2_ARG2(__lsx_vdp2_h_bu, vec4, filt0, vec5, filt0, tmp0, tmp1);
    tmp0 = __lsx_vssrarni_bu_h(tmp1, tmp0, FILTER_BITS);
    tmp0 = __lsx_vavgr_bu(tmp0, dst2);
    __lsx_vstx(tmp0, dst, dst_stride2);

    DUP2_ARG2(__lsx_vdp2_h_bu, vec6, filt0, vec7, filt0, tmp0, tmp1);
    tmp0 = __lsx_vssrarni_bu_h(tmp1, tmp0, FILTER_BITS);
    tmp0 = __lsx_vavgr_bu(tmp0, dst3);
    __lsx_vstx(tmp0, dst, dst_stride3);

    DUP2_ARG2(__lsx_vilvl_b, src6, src5, src7, src6, vec0, vec2);
    DUP2_ARG2(__lsx_vilvh_b, src6, src5, src7, src6, vec1, vec3);
    DUP2_ARG2(__lsx_vdp2_h_bu, vec0, filt0, vec1, filt0, tmp0, tmp1);
    tmp0 = __lsx_vssrarni_bu_h(tmp1, tmp0, FILTER_BITS);
    tmp0 = __lsx_vavgr_bu(tmp0, dst4);
    __lsx_vst(tmp0, dst, 16);

    DUP2_ARG2(__lsx_vdp2_h_bu, vec2, filt0, vec3, filt0, tmp0, tmp1);
    tmp0 = __lsx_vssrarni_bu_h(tmp1, tmp0, FILTER_BITS);
    tmp0 = __lsx_vavgr_bu(tmp0, dst5);
    dst += dst_stride;
    __lsx_vst(tmp0, dst, 16);

    DUP2_ARG2(__lsx_vilvl_b, src8, src7, src9, src8, vec4, vec6);
    DUP2_ARG2(__lsx_vilvh_b, src8, src7, src9, src8, vec5, vec7);
    DUP2_ARG2(__lsx_vdp2_h_bu, vec4, filt0, vec5, filt0, tmp0, tmp1);
    tmp0 = __lsx_vssrarni_bu_h(tmp1, tmp0, FILTER_BITS);
    tmp0 = __lsx_vavgr_bu(tmp0, dst6);
    dst += dst_stride;
    __lsx_vst(tmp0, dst, 16);

    DUP2_ARG2(__lsx_vdp2_h_bu, vec6, filt0, vec7, filt0, tmp0, tmp1);
    tmp0 = __lsx_vssrarni_bu_h(tmp1, tmp0, FILTER_BITS);
    tmp0 = __lsx_vavgr_bu(tmp0, dst7);
    dst += dst_stride;
    __lsx_vst(tmp0, dst, 16);
    dst += dst_stride;

    src0 = src4;
    src5 = src9;
  }
}

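/* 2-tap vertical filter for 64-wide blocks, averaged with dst; each row is
 * handled as four 16-byte vectors, two rows per iteration. */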
static void common_vt_2t_and_aver_dst_64w_lsx(const uint8_t *src,
                                              int32_t src_stride, uint8_t *dst,
                                              int32_t dst_stride,
                                              int8_t *filter, int32_t height) {
  uint32_t loop_cnt = (height >> 1);
  int32_t src_stride2 = src_stride << 1;
  int32_t dst_stride2 = dst_stride << 1;
  uint8_t *src_tmp1;
  uint8_t *dst_tmp1;
  __m128i src0, src1, src2, src3, src4, src5;
  __m128i src6, src7, src8, src9, src10, src11, filt0;
  __m128i dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
  __m128i vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
  __m128i tmp0, tmp1;

  filt0 = __lsx_vldrepl_h(filter, 0);
  DUP4_ARG2(__lsx_vld, src, 0, src, 16, src, 32, src, 48, src0, src3, src6,
            src9);
  src += src_stride;

  for (; loop_cnt--;) {
    src2 = __lsx_vldx(src, src_stride);
    dst1 = __lsx_vldx(dst, dst_stride);
    DUP4_ARG2(__lsx_vld, src, 0, src, 16, src, 32, src, 48, src1, src4, src7,
              src10);
    DUP4_ARG2(__lsx_vld, dst, 0, dst, 16, dst, 32, dst, 48, dst0, dst2, dst4,
              dst6);
    src_tmp1 = (uint8_t *)src + 16;
    src5 = __lsx_vldx(src_tmp1, src_stride);
    src_tmp1 = src_tmp1 + 16;
    src8 = __lsx_vldx(src_tmp1, src_stride);
    src_tmp1 = src_tmp1 + 16;
    src11 = __lsx_vldx(src_tmp1, src_stride);

    dst_tmp1 = dst + 16;
    dst3 = __lsx_vldx(dst_tmp1, dst_stride);
    dst_tmp1 = dst + 32;
    dst5 = __lsx_vldx(dst_tmp1, dst_stride);
    dst_tmp1 = dst + 48;
    dst7 = __lsx_vldx(dst_tmp1, dst_stride);
    src += src_stride2;

    DUP2_ARG2(__lsx_vilvl_b, src1, src0, src2, src1, vec0, vec2);
    DUP2_ARG2(__lsx_vilvh_b, src1, src0, src2, src1, vec1, vec3);
    DUP2_ARG2(__lsx_vdp2_h_bu, vec0, filt0, vec1, filt0, tmp0, tmp1);
    tmp0 = __lsx_vssrarni_bu_h(tmp1, tmp0, FILTER_BITS);
    tmp0 = __lsx_vavgr_bu(tmp0, dst0);
    __lsx_vst(tmp0, dst, 0);

    DUP2_ARG2(__lsx_vdp2_h_bu, vec2, filt0, vec3, filt0, tmp0, tmp1);
    tmp0 = __lsx_vssrarni_bu_h(tmp1, tmp0, FILTER_BITS);
    tmp0 = __lsx_vavgr_bu(tmp0, dst1);
    __lsx_vstx(tmp0, dst, dst_stride);

    DUP2_ARG2(__lsx_vilvl_b, src4, src3, src5, src4, vec4, vec6);
    DUP2_ARG2(__lsx_vilvh_b, src4, src3, src5, src4, vec5, vec7);
    DUP2_ARG2(__lsx_vdp2_h_bu, vec4, filt0, vec5, filt0, tmp0, tmp1);
    tmp0 = __lsx_vssrarni_bu_h(tmp1, tmp0, FILTER_BITS);
    tmp0 = __lsx_vavgr_bu(tmp0, dst2);
    __lsx_vst(tmp0, dst, 16);

    dst_tmp1 = dst + 16;
    DUP2_ARG2(__lsx_vdp2_h_bu, vec6, filt0, vec7, filt0, tmp0, tmp1);
    tmp0 = __lsx_vssrarni_bu_h(tmp1, tmp0, FILTER_BITS);
    tmp0 = __lsx_vavgr_bu(tmp0, dst3);
    __lsx_vstx(tmp0, dst_tmp1, dst_stride);

    DUP2_ARG2(__lsx_vilvl_b, src7, src6, src8, src7, vec0, vec2);
    DUP2_ARG2(__lsx_vilvh_b, src7, src6, src8, src7, vec1, vec3);
    DUP2_ARG2(__lsx_vdp2_h_bu, vec0, filt0, vec1, filt0, tmp0, tmp1);
    tmp0 = __lsx_vssrarni_bu_h(tmp1, tmp0, FILTER_BITS);
    tmp0 = __lsx_vavgr_bu(tmp0, dst4);
    __lsx_vst(tmp0, dst, 32);

    dst_tmp1 = dst_tmp1 + 16;
    DUP2_ARG2(__lsx_vdp2_h_bu, vec2, filt0, vec3, filt0, tmp0, tmp1);
    tmp0 = __lsx_vssrarni_bu_h(tmp1, tmp0, FILTER_BITS);
    tmp0 = __lsx_vavgr_bu(tmp0, dst5);
    __lsx_vstx(tmp0, dst_tmp1, dst_stride);

    DUP2_ARG2(__lsx_vilvl_b, src10, src9, src11, src10, vec4, vec6);
    DUP2_ARG2(__lsx_vilvh_b, src10, src9, src11, src10, vec5, vec7);
    DUP2_ARG2(__lsx_vdp2_h_bu, vec4, filt0, vec5, filt0, tmp0, tmp1);
    tmp0 = __lsx_vssrarni_bu_h(tmp1, tmp0, FILTER_BITS);
    tmp0 = __lsx_vavgr_bu(tmp0, dst6);
    __lsx_vst(tmp0, dst, 48);

    dst_tmp1 = dst_tmp1 + 16;
    DUP2_ARG2(__lsx_vdp2_h_bu, vec6, filt0, vec7, filt0, tmp0, tmp1);
    tmp0 = __lsx_vssrarni_bu_h(tmp1, tmp0, FILTER_BITS);
    tmp0 = __lsx_vavgr_bu(tmp0, dst7);
    __lsx_vstx(tmp0, dst_tmp1, dst_stride);
    dst += dst_stride2;

    src0 = src2;
    src3 = src5;
    src6 = src8;
    src9 = src11;
  }
}

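/* Entry point: copies the selected vertical kernel into filt_ver, then
 * dispatches to the 2-tap or 8-tap path based on the filter tap count and
 * block width; unsupported widths fall back to the C implementation. */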
void vpx_convolve8_avg_vert_lsx(const uint8_t *src, ptrdiff_t src_stride,
                                uint8_t *dst, ptrdiff_t dst_stride,
                                const InterpKernel *filter, int x0_q4,
                                int x_step_q4, int y0_q4, int y_step_q4, int w,
                                int h) {
  const int16_t *const filter_y = filter[y0_q4];
  int8_t cnt, filt_ver[8];

  assert(y_step_q4 == 16);
  assert(((const int32_t *)filter_y)[1] != 0x800000);

  for (cnt = 0; cnt < 8; ++cnt) {
    filt_ver[cnt] = filter_y[cnt];
  }

  if (vpx_get_filter_taps(filter_y) == 2) {
    switch (w) {
      case 4:
        common_vt_2t_and_aver_dst_4w_lsx(src, (int32_t)src_stride, dst,
                                         (int32_t)dst_stride, &filt_ver[3], h);
        break;
      case 8:
        common_vt_2t_and_aver_dst_8w_lsx(src, (int32_t)src_stride, dst,
                                         (int32_t)dst_stride, &filt_ver[3], h);
        break;
      case 16:
        common_vt_2t_and_aver_dst_16w_lsx(src, (int32_t)src_stride, dst,
                                          (int32_t)dst_stride, &filt_ver[3], h);
        break;
      case 32:
        common_vt_2t_and_aver_dst_32w_lsx(src, (int32_t)src_stride, dst,
                                          (int32_t)dst_stride, &filt_ver[3], h);
        break;
      case 64:
        common_vt_2t_and_aver_dst_64w_lsx(src, (int32_t)src_stride, dst,
                                          (int32_t)dst_stride, &filt_ver[3], h);
        break;
      default:
        vpx_convolve8_avg_vert_c(src, src_stride, dst, dst_stride, filter,
                                 x0_q4, x_step_q4, y0_q4, y_step_q4, w, h);
        break;
    }
  } else {
    switch (w) {
      case 4:
        common_vt_8t_and_aver_dst_4w_lsx(src, (int32_t)src_stride, dst,
                                         (int32_t)dst_stride, filt_ver, h);
        break;
      case 8:
        common_vt_8t_and_aver_dst_8w_lsx(src, (int32_t)src_stride, dst,
                                         (int32_t)dst_stride, filt_ver, h);
        break;
      case 16:
        common_vt_8t_and_aver_dst_16w_lsx(src, (int32_t)src_stride, dst,
                                          (int32_t)dst_stride, filt_ver, h);
        break;
      case 32:
        common_vt_8t_and_aver_dst_32w_lsx(src, (int32_t)src_stride, dst,
                                          (int32_t)dst_stride, filt_ver, h);
        break;
      case 64:
        common_vt_8t_and_aver_dst_64w_lsx(src, (int32_t)src_stride, dst,
                                          (int32_t)dst_stride, filt_ver, h);
        break;
      default:
        vpx_convolve8_avg_vert_c(src, src_stride, dst, dst_stride, filter,
                                 x0_q4, x_step_q4, y0_q4, y_step_q4, w, h);
        break;
    }
  }
}