/*
 * Copyright (c) 2022 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>
#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/loongarch/vpx_convolve_lsx.h"

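/* Byte-shuffle index tables for gathering the overlapping pixel windows fed
 * to the horizontal filters: the 8-wide paths load the row at offset 0 and
 * the 4-wide paths the row at offset 16, deriving the remaining masks by
 * adding 2/4/6 to the base mask. */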
static const uint8_t mc_filt_mask_arr[16 * 3] = {
  /* 8 width cases */
  0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8,
  /* 4 width cases */
  0, 1, 1, 2, 2, 3, 3, 4, 16, 17, 17, 18, 18, 19, 19, 20,
  /* 4 width cases */
  8, 9, 9, 10, 10, 11, 11, 12, 24, 25, 25, 26, 26, 27, 27, 28
};

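/* Horizontal 8-tap filter plus destination average (convolve8_avg) for a 4x4
 * block. Source bytes are XORed with 128 so the signed dot-product
 * intrinsics can be used; the result is XORed back after the saturating
 * rounding shift. */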
static void common_hz_8t_and_aver_dst_4x4_lsx(const uint8_t *src,
                                              int32_t src_stride, uint8_t *dst,
                                              int32_t dst_stride,
                                              int8_t *filter) {
  uint8_t *dst_tmp = dst;
  __m128i src0, src1, src2, src3;
  __m128i filter0, filter1, filter2, filter3;
  __m128i mask0, mask1, mask2, mask3;
  __m128i tmp0, tmp1;
  __m128i dst0, dst1, dst2, dst3;

  mask0 = __lsx_vld(mc_filt_mask_arr, 16);
  src -= 3;
  DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
  mask3 = __lsx_vaddi_bu(mask0, 6);
  DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4, filter, 6,
            filter0, filter1, filter2, filter3);
  LSX_LD_4(src, src_stride, src0, src1, src2, src3);
  DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128, src0,
            src1, src2, src3);
  HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, mask3,
                             filter0, filter1, filter2, filter3, tmp0, tmp1);
  dst0 = __lsx_vldrepl_w(dst_tmp, 0);
  dst_tmp += dst_stride;
  dst1 = __lsx_vldrepl_w(dst_tmp, 0);
  dst_tmp += dst_stride;
  dst2 = __lsx_vldrepl_w(dst_tmp, 0);
  dst_tmp += dst_stride;
  dst3 = __lsx_vldrepl_w(dst_tmp, 0);
  dst0 = __lsx_vilvl_w(dst1, dst0);
  dst1 = __lsx_vilvl_w(dst3, dst2);
  dst0 = __lsx_vilvl_d(dst1, dst0);
  tmp0 = __lsx_vssrarni_b_h(tmp1, tmp0, 7);
  tmp0 = __lsx_vxori_b(tmp0, 128);
  dst0 = __lsx_vavgr_bu(tmp0, dst0);
  __lsx_vstelm_w(dst0, dst, 0, 0);
  dst += dst_stride;
  __lsx_vstelm_w(dst0, dst, 0, 1);
  dst += dst_stride;
  __lsx_vstelm_w(dst0, dst, 0, 2);
  dst += dst_stride;
  __lsx_vstelm_w(dst0, dst, 0, 3);
}

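/* 4x8 counterpart of the function above: filters the eight rows in two
 * four-row batches and averages against eight gathered destination rows. */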
static void common_hz_8t_and_aver_dst_4x8_lsx(const uint8_t *src,
                                              int32_t src_stride, uint8_t *dst,
                                              int32_t dst_stride,
                                              int8_t *filter) {
  uint8_t *dst_tmp = dst;
  __m128i src0, src1, src2, src3, filter0, filter1, filter2, filter3;
  __m128i mask0, mask1, mask2, mask3, tmp0, tmp1, tmp2, tmp3;
  __m128i dst0, dst1;

  mask0 = __lsx_vld(mc_filt_mask_arr, 16);
  src -= 3;
  DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
  mask3 = __lsx_vaddi_bu(mask0, 6);
  DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4, filter, 6,
            filter0, filter1, filter2, filter3);

  LSX_LD_4(src, src_stride, src0, src1, src2, src3);
  src += src_stride;
  DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128, src0,
            src1, src2, src3);
  tmp0 = __lsx_vldrepl_w(dst_tmp, 0);
  dst_tmp += dst_stride;
  tmp1 = __lsx_vldrepl_w(dst_tmp, 0);
  dst_tmp += dst_stride;
  tmp2 = __lsx_vldrepl_w(dst_tmp, 0);
  dst_tmp += dst_stride;
  tmp3 = __lsx_vldrepl_w(dst_tmp, 0);
  dst_tmp += dst_stride;
  tmp0 = __lsx_vilvl_w(tmp1, tmp0);
  tmp1 = __lsx_vilvl_w(tmp3, tmp2);
  dst0 = __lsx_vilvl_d(tmp1, tmp0);

  tmp0 = __lsx_vldrepl_w(dst_tmp, 0);
  dst_tmp += dst_stride;
  tmp1 = __lsx_vldrepl_w(dst_tmp, 0);
  dst_tmp += dst_stride;
  tmp2 = __lsx_vldrepl_w(dst_tmp, 0);
  dst_tmp += dst_stride;
  tmp3 = __lsx_vldrepl_w(dst_tmp, 0);
  tmp0 = __lsx_vilvl_w(tmp1, tmp0);
  tmp1 = __lsx_vilvl_w(tmp3, tmp2);
  dst1 = __lsx_vilvl_d(tmp1, tmp0);

  HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, mask3,
                             filter0, filter1, filter2, filter3, tmp0, tmp1);
  LSX_LD_4(src, src_stride, src0, src1, src2, src3);
  DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128, src0,
            src1, src2, src3);
  HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, mask3,
                             filter0, filter1, filter2, filter3, tmp2, tmp3);
  DUP4_ARG3(__lsx_vssrarni_b_h, tmp0, tmp0, 7, tmp1, tmp1, 7, tmp2, tmp2, 7,
            tmp3, tmp3, 7, tmp0, tmp1, tmp2, tmp3);
  DUP2_ARG2(__lsx_vilvl_d, tmp1, tmp0, tmp3, tmp2, tmp0, tmp1);
  DUP2_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp0, tmp1);
  DUP2_ARG2(__lsx_vavgr_bu, tmp0, dst0, tmp1, dst1, dst0, dst1);
  __lsx_vstelm_w(dst0, dst, 0, 0);
  dst += dst_stride;
  __lsx_vstelm_w(dst0, dst, 0, 1);
  dst += dst_stride;
  __lsx_vstelm_w(dst0, dst, 0, 2);
  dst += dst_stride;
  __lsx_vstelm_w(dst0, dst, 0, 3);
  dst += dst_stride;
  __lsx_vstelm_w(dst1, dst, 0, 0);
  dst += dst_stride;
  __lsx_vstelm_w(dst1, dst, 0, 1);
  dst += dst_stride;
  __lsx_vstelm_w(dst1, dst, 0, 2);
  dst += dst_stride;
  __lsx_vstelm_w(dst1, dst, 0, 3);
}

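/* Width-4 dispatcher for the 8-tap averaging filter (heights 4 and 8). */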
static void common_hz_8t_and_aver_dst_4w_lsx(const uint8_t *src,
                                             int32_t src_stride, uint8_t *dst,
                                             int32_t dst_stride, int8_t *filter,
                                             int32_t height) {
  if (height == 4) {
    common_hz_8t_and_aver_dst_4x4_lsx(src, src_stride, dst, dst_stride, filter);
  } else if (height == 8) {
    common_hz_8t_and_aver_dst_4x8_lsx(src, src_stride, dst, dst_stride, filter);
  }
}

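/* 8-tap filter plus destination average for 8-wide blocks, four rows per
 * iteration. */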
static void common_hz_8t_and_aver_dst_8w_lsx(const uint8_t *src,
                                             int32_t src_stride, uint8_t *dst,
                                             int32_t dst_stride, int8_t *filter,
                                             int32_t height) {
  int32_t loop_cnt = height >> 2;
  uint8_t *dst_tmp = dst;
  __m128i src0, src1, src2, src3, filter0, filter1, filter2, filter3;
  __m128i mask0, mask1, mask2, mask3;
  __m128i tmp0, tmp1, tmp2, tmp3;
  __m128i dst0, dst1, dst2, dst3;
  int32_t src_stride2 = src_stride << 1;
  int32_t src_stride3 = src_stride2 + src_stride;
  int32_t src_stride4 = src_stride2 << 1;
  uint8_t *_src = (uint8_t *)src - 3;

  mask0 = __lsx_vld(mc_filt_mask_arr, 0);
  DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
  mask3 = __lsx_vaddi_bu(mask0, 6);
  DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4, filter, 6,
            filter0, filter1, filter2, filter3);
  for (; loop_cnt--;) {
    src0 = __lsx_vld(_src, 0);
    DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src1, src2);
    src3 = __lsx_vldx(_src, src_stride3);
    _src += src_stride4;
    DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128, src0,
              src1, src2, src3);
    HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2,
                               mask3, filter0, filter1, filter2, filter3, tmp0,
                               tmp1, tmp2, tmp3);
    dst0 = __lsx_vldrepl_d(dst_tmp, 0);
    dst_tmp += dst_stride;
    dst1 = __lsx_vldrepl_d(dst_tmp, 0);
    dst_tmp += dst_stride;
    dst2 = __lsx_vldrepl_d(dst_tmp, 0);
    dst_tmp += dst_stride;
    dst3 = __lsx_vldrepl_d(dst_tmp, 0);
    dst_tmp += dst_stride;
    DUP2_ARG2(__lsx_vilvl_d, dst1, dst0, dst3, dst2, dst0, dst1);
    DUP2_ARG3(__lsx_vssrarni_b_h, tmp1, tmp0, 7, tmp3, tmp2, 7, tmp0, tmp1);
    DUP2_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp0, tmp1);
    DUP2_ARG2(__lsx_vavgr_bu, tmp0, dst0, tmp1, dst1, dst0, dst1);
    __lsx_vstelm_d(dst0, dst, 0, 0);
    dst += dst_stride;
    __lsx_vstelm_d(dst0, dst, 0, 1);
    dst += dst_stride;
    __lsx_vstelm_d(dst1, dst, 0, 0);
    dst += dst_stride;
    __lsx_vstelm_d(dst1, dst, 0, 1);
    dst += dst_stride;
  }
}

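/* 16-wide 8-tap variant: two overlapping 16-byte loads per row (offsets 0
 * and 8) cover the filter footprint for the four shuffle masks, two rows per
 * pass. */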
static void common_hz_8t_and_aver_dst_16w_lsx(const uint8_t *src,
                                              int32_t src_stride, uint8_t *dst,
                                              int32_t dst_stride,
                                              int8_t *filter, int32_t height) {
  int32_t loop_cnt = height >> 1;
  int32_t dst_stride2 = dst_stride << 1;
  uint8_t *dst_tmp = dst;
  __m128i src0, src1, src2, src3, filter0, filter1, filter2, filter3;
  __m128i mask0, mask1, mask2, mask3, dst0, dst1, dst2, dst3;
  __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
  __m128i tmp8, tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15;

  mask0 = __lsx_vld(mc_filt_mask_arr, 0);
  src -= 3;
  DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
  mask3 = __lsx_vaddi_bu(mask0, 6);
  DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4, filter, 6,
            filter0, filter1, filter2, filter3);

  for (; loop_cnt--;) {
    DUP2_ARG2(__lsx_vld, src, 0, src, 8, src0, src1);
    src += src_stride;
    DUP2_ARG2(__lsx_vld, src, 0, src, 8, src2, src3);
    src += src_stride;
    dst0 = __lsx_vld(dst_tmp, 0);
    dst1 = __lsx_vldx(dst_tmp, dst_stride);
    dst_tmp += dst_stride2;
    DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128, src0,
              src1, src2, src3);
    DUP4_ARG3(__lsx_vshuf_b, src0, src0, mask0, src1, src1, mask0, src2, src2,
              mask0, src3, src3, mask0, tmp0, tmp1, tmp2, tmp3);
    DUP4_ARG3(__lsx_vshuf_b, src0, src0, mask1, src1, src1, mask1, src2, src2,
              mask1, src3, src3, mask1, tmp4, tmp5, tmp6, tmp7);
    DUP4_ARG3(__lsx_vshuf_b, src0, src0, mask2, src1, src1, mask2, src2, src2,
              mask2, src3, src3, mask2, tmp8, tmp9, tmp10, tmp11);
    DUP4_ARG3(__lsx_vshuf_b, src0, src0, mask3, src1, src1, mask3, src2, src2,
              mask3, src3, src3, mask3, tmp12, tmp13, tmp14, tmp15);
    DUP4_ARG2(__lsx_vdp2_h_b, tmp0, filter0, tmp1, filter0, tmp2, filter0, tmp3,
              filter0, tmp0, tmp1, tmp2, tmp3);
    DUP4_ARG2(__lsx_vdp2_h_b, tmp8, filter2, tmp9, filter2, tmp10, filter2,
              tmp11, filter2, tmp8, tmp9, tmp10, tmp11);
    DUP4_ARG3(__lsx_vdp2add_h_b, tmp0, tmp4, filter1, tmp1, tmp5, filter1, tmp2,
              tmp6, filter1, tmp3, tmp7, filter1, tmp0, tmp1, tmp2, tmp3);
    DUP4_ARG3(__lsx_vdp2add_h_b, tmp8, tmp12, filter3, tmp9, tmp13, filter3,
              tmp10, tmp14, filter3, tmp11, tmp15, filter3, tmp4, tmp5, tmp6,
              tmp7);
    DUP4_ARG2(__lsx_vsadd_h, tmp0, tmp4, tmp1, tmp5, tmp2, tmp6, tmp3, tmp7,
              tmp0, tmp1, tmp2, tmp3);
    DUP2_ARG3(__lsx_vssrarni_b_h, tmp1, tmp0, 7, tmp3, tmp2, 7, dst2, dst3);
    DUP2_ARG2(__lsx_vxori_b, dst2, 128, dst3, 128, dst2, dst3);
    DUP2_ARG2(__lsx_vavgr_bu, dst0, dst2, dst1, dst3, dst0, dst1);
    __lsx_vst(dst0, dst, 0);
    __lsx_vstx(dst1, dst, dst_stride);
    dst += dst_stride2;
  }
}

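/* 32-wide 8-tap variant, one row per pass; shuff splices the unaligned
 * middle window (bytes 8..23 of the row) out of the two aligned 16-byte
 * halves. */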
static void common_hz_8t_and_aver_dst_32w_lsx(const uint8_t *src,
                                              int32_t src_stride, uint8_t *dst,
                                              int32_t dst_stride,
                                              int8_t *filter, int32_t height) {
  uint32_t loop_cnt = height;
  uint8_t *dst_tmp = dst;
  __m128i src0, src1, src2, src3, filter0, filter1, filter2, filter3;
  __m128i mask0, mask1, mask2, mask3, dst0, dst1;
  __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
  __m128i tmp8, tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15;
  __m128i shuff = { 0x0F0E0D0C0B0A0908, 0x1716151413121110 };

  mask0 = __lsx_vld(mc_filt_mask_arr, 0);
  src -= 3;
  DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
  mask3 = __lsx_vaddi_bu(mask0, 6);
  DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4, filter, 6,
            filter0, filter1, filter2, filter3);
  for (; loop_cnt--;) {
    DUP2_ARG2(__lsx_vld, src, 0, src, 16, src0, src2);
    src3 = __lsx_vld(src, 24);
    src1 = __lsx_vshuf_b(src2, src0, shuff);
    src += src_stride;
    DUP2_ARG2(__lsx_vld, dst_tmp, 0, dst, 16, dst0, dst1);
    dst_tmp += dst_stride;
    DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128, src0,
              src1, src2, src3);
    DUP4_ARG3(__lsx_vshuf_b, src0, src0, mask0, src1, src1, mask0, src2, src2,
              mask0, src3, src3, mask0, tmp0, tmp1, tmp2, tmp3);
    DUP4_ARG3(__lsx_vshuf_b, src0, src0, mask1, src1, src1, mask1, src2, src2,
              mask1, src3, src3, mask1, tmp4, tmp5, tmp6, tmp7);
    DUP4_ARG3(__lsx_vshuf_b, src0, src0, mask2, src1, src1, mask2, src2, src2,
              mask2, src3, src3, mask2, tmp8, tmp9, tmp10, tmp11);
    DUP4_ARG3(__lsx_vshuf_b, src0, src0, mask3, src1, src1, mask3, src2, src2,
              mask3, src3, src3, mask3, tmp12, tmp13, tmp14, tmp15);
    DUP4_ARG2(__lsx_vdp2_h_b, tmp0, filter0, tmp1, filter0, tmp2, filter0, tmp3,
              filter0, tmp0, tmp1, tmp2, tmp3);
    DUP4_ARG2(__lsx_vdp2_h_b, tmp8, filter2, tmp9, filter2, tmp10, filter2,
              tmp11, filter2, tmp8, tmp9, tmp10, tmp11);
    DUP4_ARG3(__lsx_vdp2add_h_b, tmp0, tmp4, filter1, tmp1, tmp5, filter1, tmp2,
              tmp6, filter1, tmp3, tmp7, filter1, tmp0, tmp1, tmp2, tmp3);
    DUP4_ARG3(__lsx_vdp2add_h_b, tmp8, tmp12, filter3, tmp9, tmp13, filter3,
              tmp10, tmp14, filter3, tmp11, tmp15, filter3, tmp4, tmp5, tmp6,
              tmp7);
    DUP4_ARG2(__lsx_vsadd_h, tmp0, tmp4, tmp1, tmp5, tmp2, tmp6, tmp3, tmp7,
              tmp0, tmp1, tmp2, tmp3);
    DUP2_ARG3(__lsx_vssrarni_b_h, tmp1, tmp0, 7, tmp3, tmp2, 7, tmp0, tmp1);
    DUP2_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp0, tmp1);
    DUP2_ARG2(__lsx_vavgr_bu, dst0, tmp0, dst1, tmp1, dst0, dst1);
    __lsx_vst(dst0, dst, 0);
    __lsx_vst(dst1, dst, 16);
    dst += dst_stride;
  }
}

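/* 64-wide 8-tap variant: each row is handled as two 32-byte halves using the
 * same splicing scheme as the 32-wide path. */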
static void common_hz_8t_and_aver_dst_64w_lsx(const uint8_t *src,
                                              int32_t src_stride, uint8_t *dst,
                                              int32_t dst_stride,
                                              int8_t *filter, int32_t height) {
  int32_t loop_cnt = height;
  __m128i src0, src1, src2, src3;
  __m128i filter0, filter1, filter2, filter3;
  __m128i mask0, mask1, mask2, mask3;
  __m128i out0, out1, out2, out3, dst0, dst1;
  __m128i shuff = { 0x0F0E0D0C0B0A0908, 0x1716151413121110 };

  mask0 = __lsx_vld(mc_filt_mask_arr, 0);
  src -= 3;
  DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
  mask3 = __lsx_vaddi_bu(mask0, 6);
  DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4, filter, 6,
            filter0, filter1, filter2, filter3);
  for (; loop_cnt--;) {
    DUP2_ARG2(__lsx_vld, src, 0, src, 16, src0, src2);
    src3 = __lsx_vld(src, 24);
    src1 = __lsx_vshuf_b(src2, src0, shuff);
    DUP2_ARG2(__lsx_vld, dst, 0, dst, 16, dst0, dst1);
    DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128, src0,
              src1, src2, src3);
    HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2,
                               mask3, filter0, filter1, filter2, filter3, out0,
                               out1, out2, out3);
    DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, 7, out3, out2, 7, out0, out1);
    DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
    DUP2_ARG2(__lsx_vavgr_bu, out0, dst0, out1, dst1, out0, out1);
    __lsx_vst(out0, dst, 0);
    __lsx_vst(out1, dst, 16);

    DUP2_ARG2(__lsx_vld, src, 32, src, 48, src0, src2);
    src3 = __lsx_vld(src, 56);
    src1 = __lsx_vshuf_b(src2, src0, shuff);
    DUP2_ARG2(__lsx_vld, dst, 32, dst, 48, dst0, dst1);
    DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128, src0,
              src1, src2, src3);
    HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2,
                               mask3, filter0, filter1, filter2, filter3, out0,
                               out1, out2, out3);
    DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, 7, out3, out2, 7, out0, out1);
    DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
    DUP2_ARG2(__lsx_vavgr_bu, out0, dst0, out1, dst1, out0, out1);
    __lsx_vst(out0, dst, 32);
    __lsx_vst(out1, dst, 48);
    src += src_stride;
    dst += dst_stride;
  }
}

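/* 2-tap (bilinear) filter plus destination average for a 4x4 block; the two
 * 8-bit taps are loaded as one halfword and replicated across the vector. */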
static void common_hz_2t_and_aver_dst_4x4_lsx(const uint8_t *src,
                                              int32_t src_stride, uint8_t *dst,
                                              int32_t dst_stride,
                                              int8_t *filter) {
  __m128i src0, src1, src2, src3, mask;
  __m128i dst0, dst1, dst2, dst3, vec0, vec1, filt0;
  int32_t src_stride2 = src_stride << 1;
  int32_t src_stride3 = src_stride2 + src_stride;
  uint8_t *dst_tmp = dst;

  mask = __lsx_vld(mc_filt_mask_arr, 16);

  /* rearranging filter */
  filt0 = __lsx_vldrepl_h(filter, 0);

  src0 = __lsx_vld(src, 0);
  DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride2, src1, src2);
  src3 = __lsx_vldx(src, src_stride3);
  dst0 = __lsx_vldrepl_w(dst_tmp, 0);
  dst_tmp += dst_stride;
  dst1 = __lsx_vldrepl_w(dst_tmp, 0);
  dst_tmp += dst_stride;
  dst2 = __lsx_vldrepl_w(dst_tmp, 0);
  dst_tmp += dst_stride;
  dst3 = __lsx_vldrepl_w(dst_tmp, 0);
  dst_tmp += dst_stride;
  DUP2_ARG2(__lsx_vilvl_w, dst1, dst0, dst3, dst2, dst0, dst1);
  dst0 = __lsx_vilvl_d(dst1, dst0);
  DUP2_ARG3(__lsx_vshuf_b, src1, src0, mask, src3, src2, mask, vec0, vec1);
  DUP2_ARG2(__lsx_vdp2_h_bu, vec0, filt0, vec1, filt0, vec0, vec1);
  vec0 = __lsx_vssrarni_bu_h(vec1, vec0, FILTER_BITS);
  vec0 = __lsx_vavgr_bu(vec0, dst0);
  __lsx_vstelm_w(vec0, dst, 0, 0);
  dst += dst_stride;
  __lsx_vstelm_w(vec0, dst, 0, 1);
  dst += dst_stride;
  __lsx_vstelm_w(vec0, dst, 0, 2);
  dst += dst_stride;
  __lsx_vstelm_w(vec0, dst, 0, 3);
}

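/* Bilinear filter plus destination average for a 4x8 block, processing all
 * eight rows in one batch. */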
static void common_hz_2t_and_aver_dst_4x8_lsx(const uint8_t *src,
                                              int32_t src_stride, uint8_t *dst,
                                              int32_t dst_stride,
                                              int8_t *filter) {
  __m128i src0, src1, src2, src3, src4, src5, src6, src7, mask;
  __m128i filt0, vec0, vec1, vec2, vec3, res0, res1, res2, res3;
  __m128i dst0, dst1, dst2, dst3, dst4;
  __m128i vec4, vec5, vec6, vec7;
  int32_t src_stride2 = src_stride << 1;
  int32_t src_stride3 = src_stride2 + src_stride;
  int32_t src_stride4 = src_stride2 << 1;
  uint8_t *src_tmp1 = (uint8_t *)src + src_stride4;
  uint8_t *dst_tmp = dst;

  mask = __lsx_vld(mc_filt_mask_arr, 16);

  /* rearranging filter */
  filt0 = __lsx_vldrepl_h(filter, 0);

  src0 = __lsx_vld(src, 0);
  DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride2, src1, src2);
  src3 = __lsx_vldx(src, src_stride3);

  src4 = __lsx_vld(src_tmp1, 0);
  DUP2_ARG2(__lsx_vldx, src_tmp1, src_stride, src_tmp1, src_stride2, src5,
            src6);
  src7 = __lsx_vldx(src_tmp1, src_stride3);

  dst0 = __lsx_vldrepl_w(dst_tmp, 0);
  dst_tmp += dst_stride;
  dst1 = __lsx_vldrepl_w(dst_tmp, 0);
  dst_tmp += dst_stride;
  dst2 = __lsx_vldrepl_w(dst_tmp, 0);
  dst_tmp += dst_stride;
  dst3 = __lsx_vldrepl_w(dst_tmp, 0);
  dst_tmp += dst_stride;
  DUP2_ARG2(__lsx_vilvl_w, dst1, dst0, dst3, dst2, dst0, dst1);
  dst0 = __lsx_vilvl_d(dst1, dst0);

  dst1 = __lsx_vldrepl_w(dst_tmp, 0);
  dst_tmp += dst_stride;
  dst2 = __lsx_vldrepl_w(dst_tmp, 0);
  dst_tmp += dst_stride;
  dst3 = __lsx_vldrepl_w(dst_tmp, 0);
  dst_tmp += dst_stride;
  dst4 = __lsx_vldrepl_w(dst_tmp, 0);
  dst_tmp += dst_stride;
  DUP2_ARG2(__lsx_vilvl_w, dst2, dst1, dst4, dst3, dst1, dst2);
  dst1 = __lsx_vilvl_d(dst2, dst1);

  DUP2_ARG3(__lsx_vshuf_b, src1, src0, mask, src3, src2, mask, vec0, vec1);
  DUP2_ARG3(__lsx_vshuf_b, src5, src4, mask, src7, src6, mask, vec2, vec3);
  DUP4_ARG2(__lsx_vdp2_h_bu, vec0, filt0, vec1, filt0, vec2, filt0, vec3, filt0,
            vec4, vec5, vec6, vec7);
  DUP4_ARG3(__lsx_vssrarni_bu_h, vec4, vec4, FILTER_BITS, vec5, vec5,
            FILTER_BITS, vec6, vec6, FILTER_BITS, vec7, vec7, FILTER_BITS, res0,
            res1, res2, res3);
  DUP2_ARG2(__lsx_vilvl_d, res1, res0, res3, res2, res0, res2);
  DUP2_ARG2(__lsx_vavgr_bu, res0, dst0, res2, dst1, res0, res2);

  __lsx_vstelm_w(res0, dst, 0, 0);
  dst += dst_stride;
  __lsx_vstelm_w(res0, dst, 0, 1);
  dst += dst_stride;
  __lsx_vstelm_w(res0, dst, 0, 2);
  dst += dst_stride;
  __lsx_vstelm_w(res0, dst, 0, 3);
  dst += dst_stride;

  __lsx_vstelm_w(res2, dst, 0, 0);
  dst += dst_stride;
  __lsx_vstelm_w(res2, dst, 0, 1);
  dst += dst_stride;
  __lsx_vstelm_w(res2, dst, 0, 2);
  dst += dst_stride;
  __lsx_vstelm_w(res2, dst, 0, 3);
  dst += dst_stride;
}

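/* Width-4 dispatcher for the bilinear averaging filter (heights 4 and 8). */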
static void common_hz_2t_and_aver_dst_4w_lsx(const uint8_t *src,
                                             int32_t src_stride, uint8_t *dst,
                                             int32_t dst_stride, int8_t *filter,
                                             int32_t height) {
  if (height == 4) {
    common_hz_2t_and_aver_dst_4x4_lsx(src, src_stride, dst, dst_stride, filter);
  } else if (height == 8) {
    common_hz_2t_and_aver_dst_4x8_lsx(src, src_stride, dst, dst_stride, filter);
  }
}

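/* Bilinear filter plus destination average for an 8x4 block. */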
static void common_hz_2t_and_aver_dst_8x4_lsx(const uint8_t *src,
                                              int32_t src_stride, uint8_t *dst,
                                              int32_t dst_stride,
                                              int8_t *filter) {
  __m128i src0, src1, src2, src3, mask;
  __m128i filt0, dst0, dst1, dst2, dst3;
  __m128i vec0, vec1, vec2, vec3;
  int32_t src_stride2 = src_stride << 1;
  int32_t src_stride3 = src_stride2 + src_stride;
  uint8_t *dst_tmp = dst;

  mask = __lsx_vld(mc_filt_mask_arr, 0);

  /* rearranging filter */
  filt0 = __lsx_vldrepl_h(filter, 0);

  src0 = __lsx_vld(src, 0);
  DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride2, src1, src2);
  src3 = __lsx_vldx(src, src_stride3);
  DUP2_ARG3(__lsx_vshuf_b, src0, src0, mask, src1, src1, mask, vec0, vec1);
  DUP2_ARG3(__lsx_vshuf_b, src2, src2, mask, src3, src3, mask, vec2, vec3);
  DUP4_ARG2(__lsx_vdp2_h_bu, vec0, filt0, vec1, filt0, vec2, filt0, vec3, filt0,
            vec0, vec1, vec2, vec3);
  DUP2_ARG3(__lsx_vssrarni_bu_h, vec1, vec0, FILTER_BITS, vec3, vec2,
            FILTER_BITS, vec0, vec1);
  dst0 = __lsx_vldrepl_d(dst_tmp, 0);
  dst_tmp += dst_stride;
  dst1 = __lsx_vldrepl_d(dst_tmp, 0);
  dst_tmp += dst_stride;
  dst2 = __lsx_vldrepl_d(dst_tmp, 0);
  dst_tmp += dst_stride;
  dst3 = __lsx_vldrepl_d(dst_tmp, 0);
  dst_tmp += dst_stride;

  DUP2_ARG2(__lsx_vilvl_d, dst1, dst0, dst3, dst2, dst0, dst1);
  DUP2_ARG2(__lsx_vavgr_bu, vec0, dst0, vec1, dst1, vec0, vec1);
  __lsx_vstelm_d(vec0, dst, 0, 0);
  dst += dst_stride;
  __lsx_vstelm_d(vec0, dst, 0, 1);
  dst += dst_stride;
  __lsx_vstelm_d(vec1, dst, 0, 0);
  dst += dst_stride;
  __lsx_vstelm_d(vec1, dst, 0, 1);
}

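/* Bilinear filter plus destination average for 8-wide blocks of height 8 or
 * 16, unrolled in four-row groups. */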
static void common_hz_2t_and_aver_dst_8x8mult_lsx(
    const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride,
    int8_t *filter, int32_t height) {
  __m128i src0, src1, src2, src3, mask;
  __m128i filt0, dst0, dst1, dst2, dst3;
  __m128i vec0, vec1, vec2, vec3;
  int32_t src_stride2 = src_stride << 1;
  int32_t src_stride3 = src_stride2 + src_stride;
  uint8_t *dst_tmp = dst;

  mask = __lsx_vld(mc_filt_mask_arr, 0);

  /* rearranging filter */
  filt0 = __lsx_vldrepl_h(filter, 0);

  LSX_LD_4(src, src_stride, src0, src1, src2, src3);
  src += src_stride;

  DUP2_ARG3(__lsx_vshuf_b, src0, src0, mask, src1, src1, mask, vec0, vec1);
  DUP2_ARG3(__lsx_vshuf_b, src2, src2, mask, src3, src3, mask, vec2, vec3);
  DUP4_ARG2(__lsx_vdp2_h_bu, vec0, filt0, vec1, filt0, vec2, filt0, vec3, filt0,
            vec0, vec1, vec2, vec3);
  DUP2_ARG3(__lsx_vssrarni_bu_h, vec1, vec0, FILTER_BITS, vec3, vec2,
            FILTER_BITS, vec0, vec2);
  dst0 = __lsx_vldrepl_d(dst_tmp, 0);
  dst_tmp += dst_stride;
  dst1 = __lsx_vldrepl_d(dst_tmp, 0);
  dst_tmp += dst_stride;
  dst2 = __lsx_vldrepl_d(dst_tmp, 0);
  dst_tmp += dst_stride;
  dst3 = __lsx_vldrepl_d(dst_tmp, 0);
  dst_tmp += dst_stride;
  DUP2_ARG2(__lsx_vilvl_d, dst1, dst0, dst3, dst2, dst0, dst1);

  DUP2_ARG2(__lsx_vavgr_bu, vec0, dst0, vec2, dst1, vec0, vec2);
  __lsx_vstelm_d(vec0, dst, 0, 0);
  dst += dst_stride;
  __lsx_vstelm_d(vec0, dst, 0, 1);
  dst += dst_stride;
  __lsx_vstelm_d(vec2, dst, 0, 0);
  dst += dst_stride;
  __lsx_vstelm_d(vec2, dst, 0, 1);
  dst += dst_stride;

  LSX_LD_4(src, src_stride, src0, src1, src2, src3);
  src += src_stride;
  DUP2_ARG3(__lsx_vshuf_b, src0, src0, mask, src1, src1, mask, vec0, vec1);
  DUP2_ARG3(__lsx_vshuf_b, src2, src2, mask, src3, src3, mask, vec2, vec3);

  DUP4_ARG2(__lsx_vdp2_h_bu, vec0, filt0, vec1, filt0, vec2, filt0, vec3, filt0,
            vec0, vec1, vec2, vec3);
  DUP2_ARG3(__lsx_vssrarni_bu_h, vec1, vec0, FILTER_BITS, vec3, vec2,
            FILTER_BITS, vec0, vec2);
  dst0 = __lsx_vldrepl_d(dst_tmp, 0);
  dst_tmp += dst_stride;
  dst1 = __lsx_vldrepl_d(dst_tmp, 0);
  dst_tmp += dst_stride;
  dst2 = __lsx_vldrepl_d(dst_tmp, 0);
  dst_tmp += dst_stride;
  dst3 = __lsx_vldrepl_d(dst_tmp, 0);
  dst_tmp += dst_stride;
  DUP2_ARG2(__lsx_vilvl_d, dst1, dst0, dst3, dst2, dst0, dst1);
  DUP2_ARG2(__lsx_vavgr_bu, vec0, dst0, vec2, dst1, vec0, vec2);
  __lsx_vstelm_d(vec0, dst, 0, 0);
  dst += dst_stride;
  __lsx_vstelm_d(vec0, dst, 0, 1);
  dst += dst_stride;
  __lsx_vstelm_d(vec2, dst, 0, 0);
  dst += dst_stride;
  __lsx_vstelm_d(vec2, dst, 0, 1);
  dst += dst_stride;

  if (height == 16) {
    LSX_LD_4(src, src_stride, src0, src1, src2, src3);
    src += src_stride;

    DUP2_ARG3(__lsx_vshuf_b, src0, src0, mask, src1, src1, mask, vec0, vec1);
    DUP2_ARG3(__lsx_vshuf_b, src2, src2, mask, src3, src3, mask, vec2, vec3);
    DUP4_ARG2(__lsx_vdp2_h_bu, vec0, filt0, vec1, filt0, vec2, filt0, vec3,
              filt0, vec0, vec1, vec2, vec3);
    DUP2_ARG3(__lsx_vssrarni_bu_h, vec1, vec0, FILTER_BITS, vec3, vec2,
              FILTER_BITS, vec0, vec2);
    dst0 = __lsx_vldrepl_d(dst_tmp, 0);
    dst_tmp += dst_stride;
    dst1 = __lsx_vldrepl_d(dst_tmp, 0);
    dst_tmp += dst_stride;
    dst2 = __lsx_vldrepl_d(dst_tmp, 0);
    dst_tmp += dst_stride;
    dst3 = __lsx_vldrepl_d(dst_tmp, 0);
    dst_tmp += dst_stride;
    DUP2_ARG2(__lsx_vilvl_d, dst1, dst0, dst3, dst2, dst0, dst1);
    DUP2_ARG2(__lsx_vavgr_bu, vec0, dst0, vec2, dst1, vec0, vec2);
    __lsx_vstelm_d(vec0, dst, 0, 0);
    dst += dst_stride;
    __lsx_vstelm_d(vec0, dst, 0, 1);
    dst += dst_stride;
    __lsx_vstelm_d(vec2, dst, 0, 0);
    dst += dst_stride;
    __lsx_vstelm_d(vec2, dst, 0, 1);
    dst += dst_stride;

    src0 = __lsx_vld(src, 0);
    DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride2, src1, src2);
    src3 = __lsx_vldx(src, src_stride3);
    DUP2_ARG3(__lsx_vshuf_b, src0, src0, mask, src1, src1, mask, vec0, vec1);
    DUP2_ARG3(__lsx_vshuf_b, src2, src2, mask, src3, src3, mask, vec2, vec3);
    DUP4_ARG2(__lsx_vdp2_h_bu, vec0, filt0, vec1, filt0, vec2, filt0, vec3,
              filt0, vec0, vec1, vec2, vec3);
    DUP2_ARG3(__lsx_vssrarni_bu_h, vec1, vec0, FILTER_BITS, vec3, vec2,
              FILTER_BITS, vec0, vec2);
    dst0 = __lsx_vldrepl_d(dst_tmp, 0);
    dst_tmp += dst_stride;
    dst1 = __lsx_vldrepl_d(dst_tmp, 0);
    dst_tmp += dst_stride;
    dst2 = __lsx_vldrepl_d(dst_tmp, 0);
    dst_tmp += dst_stride;
    dst3 = __lsx_vldrepl_d(dst_tmp, 0);
    dst_tmp += dst_stride;
    DUP2_ARG2(__lsx_vilvl_d, dst1, dst0, dst3, dst2, dst0, dst1);
    DUP2_ARG2(__lsx_vavgr_bu, vec0, dst0, vec2, dst1, vec0, vec2);
    __lsx_vstelm_d(vec0, dst, 0, 0);
    dst += dst_stride;
    __lsx_vstelm_d(vec0, dst, 0, 1);
    dst += dst_stride;
    __lsx_vstelm_d(vec2, dst, 0, 0);
    dst += dst_stride;
    __lsx_vstelm_d(vec2, dst, 0, 1);
    dst += dst_stride;
  }
}

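/* Width-8 dispatcher for the bilinear averaging filter. */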
static void common_hz_2t_and_aver_dst_8w_lsx(const uint8_t *src,
                                             int32_t src_stride, uint8_t *dst,
                                             int32_t dst_stride, int8_t *filter,
                                             int32_t height) {
  if (height == 4) {
    common_hz_2t_and_aver_dst_8x4_lsx(src, src_stride, dst, dst_stride, filter);
  } else {
    common_hz_2t_and_aver_dst_8x8mult_lsx(src, src_stride, dst, dst_stride,
                                          filter, height);
  }
}

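/* 16-wide bilinear variant: each row is filtered as two 8-byte halves, the
 * upper half through a second source pointer offset by 8 bytes. */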
static void common_hz_2t_and_aver_dst_16w_lsx(const uint8_t *src,
                                              int32_t src_stride, uint8_t *dst,
                                              int32_t dst_stride,
                                              int8_t *filter, int32_t height) {
  uint32_t loop_cnt = (height >> 2) - 1;
  __m128i src0, src1, src2, src3, src4, src5, src6, src7, mask;
  __m128i filt0, dst0;
  __m128i vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
  __m128i res0, res1, res2, res3, res4, res5, res6, res7;
  int32_t src_stride2 = src_stride << 1;
  int32_t src_stride3 = src_stride2 + src_stride;
  int32_t src_stride4 = src_stride2 << 1;
  uint8_t *src_tmp1 = (uint8_t *)src + 8;

  mask = __lsx_vld(mc_filt_mask_arr, 0);

  /* rearranging filter */
  filt0 = __lsx_vldrepl_h(filter, 0);

  src0 = __lsx_vld(src, 0);
  DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride2, src2, src4);
  src6 = __lsx_vldx(src, src_stride3);
  src += src_stride4;

  src1 = __lsx_vld(src_tmp1, 0);
  DUP2_ARG2(__lsx_vldx, src_tmp1, src_stride, src_tmp1, src_stride2, src3,
            src5);
  src7 = __lsx_vldx(src_tmp1, src_stride3);
  src_tmp1 += src_stride4;

  DUP2_ARG3(__lsx_vshuf_b, src0, src0, mask, src1, src1, mask, vec0, vec1);
  DUP2_ARG3(__lsx_vshuf_b, src2, src2, mask, src3, src3, mask, vec2, vec3);
  DUP2_ARG3(__lsx_vshuf_b, src4, src4, mask, src5, src5, mask, vec4, vec5);
  DUP2_ARG3(__lsx_vshuf_b, src6, src6, mask, src7, src7, mask, vec6, vec7);

  DUP4_ARG2(__lsx_vdp2_h_bu, vec0, filt0, vec1, filt0, vec2, filt0, vec3, filt0,
            res0, res1, res2, res3);
  DUP4_ARG2(__lsx_vdp2_h_bu, vec4, filt0, vec5, filt0, vec6, filt0, vec7, filt0,
            res4, res5, res6, res7);
  DUP4_ARG3(__lsx_vssrarni_bu_h, res1, res0, FILTER_BITS, res3, res2,
            FILTER_BITS, res5, res4, FILTER_BITS, res7, res6, FILTER_BITS, res0,
            res2, res4, res6);
  dst0 = __lsx_vld(dst, 0);
  res0 = __lsx_vavgr_bu(res0, dst0);
  __lsx_vst(res0, dst, 0);
  dst += dst_stride;

  dst0 = __lsx_vld(dst, 0);
  res2 = __lsx_vavgr_bu(res2, dst0);
  __lsx_vst(res2, dst, 0);
  dst += dst_stride;

  dst0 = __lsx_vld(dst, 0);
  res4 = __lsx_vavgr_bu(res4, dst0);
  __lsx_vst(res4, dst, 0);
  dst += dst_stride;

  dst0 = __lsx_vld(dst, 0);
  res6 = __lsx_vavgr_bu(res6, dst0);
  __lsx_vst(res6, dst, 0);
  dst += dst_stride;

  for (; loop_cnt--;) {
    src0 = __lsx_vld(src, 0);
    DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride2, src2, src4);
    src6 = __lsx_vldx(src, src_stride3);
    src += src_stride4;

    src1 = __lsx_vld(src_tmp1, 0);
    DUP2_ARG2(__lsx_vldx, src_tmp1, src_stride, src_tmp1, src_stride2, src3,
              src5);
    src7 = __lsx_vldx(src_tmp1, src_stride3);
    src_tmp1 += src_stride4;

    DUP2_ARG3(__lsx_vshuf_b, src0, src0, mask, src1, src1, mask, vec0, vec1);
    DUP2_ARG3(__lsx_vshuf_b, src2, src2, mask, src3, src3, mask, vec2, vec3);
    DUP2_ARG3(__lsx_vshuf_b, src4, src4, mask, src5, src5, mask, vec4, vec5);
    DUP2_ARG3(__lsx_vshuf_b, src6, src6, mask, src7, src7, mask, vec6, vec7);
    DUP4_ARG2(__lsx_vdp2_h_bu, vec0, filt0, vec1, filt0, vec2, filt0, vec3,
              filt0, res0, res1, res2, res3);
    DUP4_ARG2(__lsx_vdp2_h_bu, vec4, filt0, vec5, filt0, vec6, filt0, vec7,
              filt0, res4, res5, res6, res7);

    DUP4_ARG3(__lsx_vssrarni_bu_h, res1, res0, FILTER_BITS, res3, res2,
              FILTER_BITS, res5, res4, FILTER_BITS, res7, res6, FILTER_BITS,
              res0, res2, res4, res6);
    dst0 = __lsx_vld(dst, 0);
    res0 = __lsx_vavgr_bu(res0, dst0);
    __lsx_vst(res0, dst, 0);
    dst += dst_stride;

    dst0 = __lsx_vld(dst, 0);
    res2 = __lsx_vavgr_bu(res2, dst0);
    __lsx_vst(res2, dst, 0);
    dst += dst_stride;

    dst0 = __lsx_vld(dst, 0);
    res4 = __lsx_vavgr_bu(res4, dst0);
    __lsx_vst(res4, dst, 0);
    dst += dst_stride;

    dst0 = __lsx_vld(dst, 0);
    res6 = __lsx_vavgr_bu(res6, dst0);
    __lsx_vst(res6, dst, 0);
    dst += dst_stride;
  }
}

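/* 32-wide bilinear variant, two rows per pass; shuff reconstructs the
 * unaligned middle window between the two aligned 16-byte loads. */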
static void common_hz_2t_and_aver_dst_32w_lsx(const uint8_t *src,
                                              int32_t src_stride, uint8_t *dst,
                                              int32_t dst_stride,
                                              int8_t *filter, int32_t height) {
  uint32_t loop_cnt = (height >> 1);
  __m128i src0, src1, src2, src3, src4, src5, src6, src7, mask;
  __m128i filt0, dst0, dst1;
  __m128i vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
  __m128i res0, res1, res2, res3, res4, res5, res6, res7;
  __m128i shuff = { 0x0F0E0D0C0B0A0908, 0x1716151413121110 };

  mask = __lsx_vld(mc_filt_mask_arr, 0);

  /* rearranging filter */
  filt0 = __lsx_vldrepl_h(filter, 0);

  for (; loop_cnt--;) {
    src0 = __lsx_vld(src, 0);
    DUP2_ARG2(__lsx_vld, src, 16, src, 24, src2, src3);
    src1 = __lsx_vshuf_b(src2, src0, shuff);
    src += src_stride;
    src4 = __lsx_vld(src, 0);
    DUP2_ARG2(__lsx_vld, src, 16, src, 24, src6, src7);
    src5 = __lsx_vshuf_b(src6, src4, shuff);
    src += src_stride;

    DUP2_ARG3(__lsx_vshuf_b, src0, src0, mask, src1, src1, mask, vec0, vec1);
    DUP2_ARG3(__lsx_vshuf_b, src2, src2, mask, src3, src3, mask, vec2, vec3);
    DUP2_ARG3(__lsx_vshuf_b, src4, src4, mask, src5, src5, mask, vec4, vec5);
    DUP2_ARG3(__lsx_vshuf_b, src6, src6, mask, src7, src7, mask, vec6, vec7);

    DUP4_ARG2(__lsx_vdp2_h_bu, vec0, filt0, vec1, filt0, vec2, filt0, vec3,
              filt0, res0, res1, res2, res3);
    DUP4_ARG2(__lsx_vdp2_h_bu, vec4, filt0, vec5, filt0, vec6, filt0, vec7,
              filt0, res4, res5, res6, res7);
    DUP4_ARG3(__lsx_vssrarni_bu_h, res1, res0, FILTER_BITS, res3, res2,
              FILTER_BITS, res5, res4, FILTER_BITS, res7, res6, FILTER_BITS,
              res0, res2, res4, res6);

    DUP2_ARG2(__lsx_vld, dst, 0, dst, 16, dst0, dst1);
    res0 = __lsx_vavgr_bu(res0, dst0);
    __lsx_vst(res0, dst, 0);
    res2 = __lsx_vavgr_bu(res2, dst1);
    __lsx_vst(res2, dst, 16);
    dst += dst_stride;

    DUP2_ARG2(__lsx_vld, dst, 0, dst, 16, dst0, dst1);
    res4 = __lsx_vavgr_bu(res4, dst0);
    __lsx_vst(res4, dst, 0);
    res6 = __lsx_vavgr_bu(res6, dst1);
    __lsx_vst(res6, dst, 16);
    dst += dst_stride;
  }
}

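/* 64-wide bilinear variant: one row per pass, produced as four 16-byte
 * output vectors. */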
static void common_hz_2t_and_aver_dst_64w_lsx(const uint8_t *src,
                                              int32_t src_stride, uint8_t *dst,
                                              int32_t dst_stride,
                                              int8_t *filter, int32_t height) {
  uint32_t loop_cnt = height;
  __m128i src0, src1, src2, src3, src4, src5, src6, src7, mask;
  __m128i filt0, dst0, dst1, dst2, dst3;
  __m128i vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
  __m128i out0, out1, out2, out3, out4, out5, out6, out7;
  __m128i shuff = { 0x0F0E0D0C0B0A0908, 0x1716151413121110 };

  mask = __lsx_vld(mc_filt_mask_arr, 0);

  /* rearranging filter */
  filt0 = __lsx_vldrepl_h(filter, 0);

  for (; loop_cnt--;) {
    DUP4_ARG2(__lsx_vld, src, 0, src, 16, src, 32, src, 48, src0, src2, src4,
              src6);
    src7 = __lsx_vld(src, 56);
    DUP2_ARG3(__lsx_vshuf_b, src2, src0, shuff, src4, src2, shuff, src1, src3);
    src5 = __lsx_vshuf_b(src6, src4, shuff);
    src += src_stride;

    DUP2_ARG3(__lsx_vshuf_b, src0, src0, mask, src1, src1, mask, vec0, vec1);
    DUP2_ARG3(__lsx_vshuf_b, src2, src2, mask, src3, src3, mask, vec2, vec3);
    DUP2_ARG3(__lsx_vshuf_b, src4, src4, mask, src5, src5, mask, vec4, vec5);
    DUP2_ARG3(__lsx_vshuf_b, src6, src6, mask, src7, src7, mask, vec6, vec7);
    DUP4_ARG2(__lsx_vdp2_h_bu, vec0, filt0, vec1, filt0, vec2, filt0, vec3,
              filt0, out0, out1, out2, out3);
    DUP4_ARG2(__lsx_vdp2_h_bu, vec4, filt0, vec5, filt0, vec6, filt0, vec7,
              filt0, out4, out5, out6, out7);

    DUP4_ARG3(__lsx_vssrarni_bu_h, out1, out0, FILTER_BITS, out3, out2,
              FILTER_BITS, out5, out4, FILTER_BITS, out7, out6, FILTER_BITS,
              out0, out2, out4, out6);

    DUP4_ARG2(__lsx_vld, dst, 0, dst, 16, dst, 32, dst, 48, dst0, dst1, dst2,
              dst3);
    out0 = __lsx_vavgr_bu(out0, dst0);
    __lsx_vst(out0, dst, 0);
    out2 = __lsx_vavgr_bu(out2, dst1);
    __lsx_vst(out2, dst, 16);
    out4 = __lsx_vavgr_bu(out4, dst2);
    __lsx_vst(out4, dst, 32);
    out6 = __lsx_vavgr_bu(out6, dst3);
    __lsx_vst(out6, dst, 48);
    dst += dst_stride;
  }
}

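/* LSX entry point for vpx_convolve8_avg_horiz: narrows the selected kernel
 * to int8_t, then dispatches on block width. When only two taps are non-zero
 * the bilinear paths are used (the live taps sit at filt_hor[3] and
 * filt_hor[4]); unsupported widths fall back to the C implementation. */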
void vpx_convolve8_avg_horiz_lsx(const uint8_t *src, ptrdiff_t src_stride,
                                 uint8_t *dst, ptrdiff_t dst_stride,
                                 const InterpKernel *filter, int x0_q4,
                                 int x_step_q4, int y0_q4, int y_step_q4, int w,
                                 int h) {
  const int16_t *const filter_x = filter[x0_q4];
  int8_t cnt, filt_hor[8];

  assert(x_step_q4 == 16);
  assert(((const int32_t *)filter_x)[1] != 0x800000);

  for (cnt = 0; cnt < 8; ++cnt) {
    filt_hor[cnt] = filter_x[cnt];
  }

  if (vpx_get_filter_taps(filter_x) == 2) {
    switch (w) {
      case 4:
        common_hz_2t_and_aver_dst_4w_lsx(src, (int32_t)src_stride, dst,
                                         (int32_t)dst_stride, &filt_hor[3], h);
        break;
      case 8:
        common_hz_2t_and_aver_dst_8w_lsx(src, (int32_t)src_stride, dst,
                                         (int32_t)dst_stride, &filt_hor[3], h);
        break;
      case 16:
        common_hz_2t_and_aver_dst_16w_lsx(src, (int32_t)src_stride, dst,
                                          (int32_t)dst_stride, &filt_hor[3], h);
        break;
      case 32:
        common_hz_2t_and_aver_dst_32w_lsx(src, (int32_t)src_stride, dst,
                                          (int32_t)dst_stride, &filt_hor[3], h);
        break;
      case 64:
        common_hz_2t_and_aver_dst_64w_lsx(src, (int32_t)src_stride, dst,
                                          (int32_t)dst_stride, &filt_hor[3], h);
        break;
      default:
        vpx_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride, filter,
                                  x0_q4, x_step_q4, y0_q4, y_step_q4, w, h);
        break;
    }
  } else {
    switch (w) {
      case 4:
        common_hz_8t_and_aver_dst_4w_lsx(src, (int32_t)src_stride, dst,
                                         (int32_t)dst_stride, filt_hor, h);
        break;
      case 8:
        common_hz_8t_and_aver_dst_8w_lsx(src, (int32_t)src_stride, dst,
                                         (int32_t)dst_stride, filt_hor, h);
        break;
      case 16:
        common_hz_8t_and_aver_dst_16w_lsx(src, (int32_t)src_stride, dst,
                                          (int32_t)dst_stride, filt_hor, h);
        break;
      case 32:
        common_hz_8t_and_aver_dst_32w_lsx(src, (int32_t)src_stride, dst,
                                          (int32_t)dst_stride, filt_hor, h);
        break;
      case 64:
        common_hz_8t_and_aver_dst_64w_lsx(src, (int32_t)src_stride, dst,
                                          (int32_t)dst_stride, filt_hor, h);
        break;
      default:
        vpx_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride, filter,
                                  x0_q4, x_step_q4, y0_q4, y_step_q4, w, h);
        break;
    }
  }
}