/*
 * Copyright (c) 2022 Loongson Technology Corporation Limited
 * Contributed by Lu Wang <wanglu@loongson.cn>
 *                Hao Chen <chenhao@loongson.cn>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/loongarch/loongson_intrinsics.h"
#include "hevcdsp_lsx.h"

static const uint8_t ff_hevc_mask_arr[16 * 2] __attribute__((aligned(0x40))) = {
    /* 8 width cases */
    0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8,
    /* 4 width cases */
    0, 1, 1, 2, 2, 3, 3, 4, 16, 17, 17, 18, 18, 19, 19, 20
};

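/* 8-tap horizontal + vertical (hv) interpolation with unidirectional
 * weighted prediction. The block is processed in stripes of 8 columns,
 * two output rows per inner-loop iteration, so width must be a multiple
 * of 8. */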
static av_always_inline
void hevc_hv_8t_8x2_lsx(uint8_t *src, int32_t src_stride, uint8_t *dst,
                        int32_t dst_stride, const int8_t *filter_x,
                        const int8_t *filter_y, int32_t height, int32_t weight,
                        int32_t offset, int32_t rnd_val, int32_t width)
{
    uint32_t loop_cnt, cnt;
    uint8_t *src_tmp;
    uint8_t *dst_tmp;
    const int32_t src_stride_2x = (src_stride << 1);
    const int32_t dst_stride_2x = (dst_stride << 1);
    const int32_t src_stride_4x = (src_stride << 2);
    const int32_t src_stride_3x = src_stride_2x + src_stride;

    __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8;
    __m128i filt0, filt1, filt2, filt3;
    __m128i filt_h0, filt_h1, filt_h2, filt_h3;
    __m128i mask1, mask2, mask3;
    __m128i filter_vec;
    __m128i vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
    __m128i vec8, vec9, vec10, vec11, vec12, vec13, vec14, vec15;
    __m128i dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7, dst8;
    __m128i dst0_r, dst0_l, dst1_r, dst1_l;
    __m128i dst10_r, dst32_r, dst54_r, dst76_r;
    __m128i dst10_l, dst32_l, dst54_l, dst76_l;
    __m128i dst21_r, dst43_r, dst65_r, dst87_r;
    __m128i dst21_l, dst43_l, dst65_l, dst87_l;
    __m128i weight_vec, offset_vec, rnd_vec;
    __m128i mask0 = __lsx_vld(ff_hevc_mask_arr, 0);

    src -= (src_stride_3x + 3);
    weight_vec = __lsx_vreplgr2vr_w(weight);
    offset_vec = __lsx_vreplgr2vr_w(offset);
    rnd_vec    = __lsx_vreplgr2vr_w(rnd_val);

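    /* Each filtN holds one pair of adjacent 8-bit horizontal taps replicated
     * across the vector; the vertical taps are widened to 16 bit and
     * replicated pairwise, so every dot-product step below applies two
     * taps at once. */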
    DUP4_ARG2(__lsx_vldrepl_h, filter_x, 0, filter_x, 2, filter_x, 4,
              filter_x, 6, filt0, filt1, filt2, filt3);
    filter_vec = __lsx_vld(filter_y, 0);
    filter_vec = __lsx_vsllwil_h_b(filter_vec, 0);
    DUP4_ARG2(__lsx_vreplvei_w, filter_vec, 0, filter_vec, 1, filter_vec, 2,
              filter_vec, 3, filt_h0, filt_h1, filt_h2, filt_h3);
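    /* mask1..mask3 offset the byte-gather pattern by 2, 4 and 6 source
     * bytes, forming the sliding window of the 8-tap horizontal filter. */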
    DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
    mask3 = __lsx_vaddi_bu(mask0, 6);

    for (cnt = width >> 3; cnt--;) {
        src_tmp = src;
        dst_tmp = dst;

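        /* Prime the vertical filter: load the seven rows above the first
         * output row. */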
        src0 = __lsx_vld(src_tmp, 0);
        DUP2_ARG2(__lsx_vldx, src_tmp, src_stride, src_tmp, src_stride_2x,
                  src1, src2);
        src3 = __lsx_vldx(src_tmp, src_stride_3x);
        src_tmp += src_stride_4x;
        src4 = __lsx_vld(src_tmp, 0);
        DUP2_ARG2(__lsx_vldx, src_tmp, src_stride, src_tmp, src_stride_2x,
                  src5, src6);
        src_tmp += src_stride_3x;

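        /* Horizontal 8-tap filter on rows 0..6: gather the source bytes in
         * tap order, then accumulate byte dot products against filt0..filt3. */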
        DUP4_ARG3(__lsx_vshuf_b, src0, src0, mask0, src0, src0, mask1, src0,
                  src0, mask2, src0, src0, mask3, vec0, vec1, vec2, vec3);
        DUP4_ARG3(__lsx_vshuf_b, src1, src1, mask0, src1, src1, mask1, src1,
                  src1, mask2, src1, src1, mask3, vec4, vec5, vec6, vec7);
        DUP4_ARG3(__lsx_vshuf_b, src2, src2, mask0, src2, src2, mask1, src2,
                  src2, mask2, src2, src2, mask3, vec8, vec9, vec10, vec11);
        DUP4_ARG3(__lsx_vshuf_b, src3, src3, mask0, src3, src3, mask1, src3,
                  src3, mask2, src3, src3, mask3, vec12, vec13, vec14, vec15);
        DUP4_ARG2(__lsx_vdp2_h_bu_b, vec0, filt0, vec4, filt0, vec8, filt0,
                  vec12, filt0, dst0, dst1, dst2, dst3);
        DUP4_ARG3(__lsx_vdp2add_h_bu_b, dst0, vec1, filt1, dst1, vec5, filt1,
                  dst2, vec9, filt1, dst3, vec13, filt1, dst0, dst1, dst2, dst3);
        DUP4_ARG3(__lsx_vdp2add_h_bu_b, dst0, vec2, filt2, dst1, vec6, filt2,
                  dst2, vec10, filt2, dst3, vec14, filt2, dst0, dst1, dst2, dst3);
        DUP4_ARG3(__lsx_vdp2add_h_bu_b, dst0, vec3, filt3, dst1, vec7, filt3,
                  dst2, vec11, filt3, dst3, vec15, filt3, dst0, dst1, dst2, dst3);
        DUP4_ARG3(__lsx_vshuf_b, src4, src4, mask0, src4, src4, mask1, src4,
                  src4, mask2, src4, src4, mask3, vec0, vec1, vec2, vec3);
        DUP4_ARG3(__lsx_vshuf_b, src5, src5, mask0, src5, src5, mask1, src5,
                  src5, mask2, src5, src5, mask3, vec4, vec5, vec6, vec7);
        DUP4_ARG3(__lsx_vshuf_b, src6, src6, mask0, src6, src6, mask1, src6,
                  src6, mask2, src6, src6, mask3, vec8, vec9, vec10, vec11);
        DUP2_ARG2(__lsx_vdp2_h_bu_b, vec0, filt0, vec4, filt0, dst4, dst5);
        dst6 = __lsx_vdp2_h_bu_b(vec8, filt0);
        DUP4_ARG3(__lsx_vdp2add_h_bu_b, dst4, vec1, filt1, dst5, vec5, filt1,
                  dst6, vec9, filt1, dst4, vec2, filt2, dst4, dst5, dst6, dst4);
        DUP4_ARG3(__lsx_vdp2add_h_bu_b, dst5, vec6, filt2, dst6, vec10, filt2,
                  dst4, vec3, filt3, dst5, vec7, filt3, dst5, dst6, dst4, dst5);
        dst6 = __lsx_vdp2add_h_bu_b(dst6, vec11, filt3);

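        /* Interleave adjacent filtered rows so that each 32-bit lane of the
         * vertical dot products sees one pair of rows. */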
        DUP4_ARG2(__lsx_vilvl_h, dst1, dst0, dst3, dst2, dst5, dst4, dst2,
                  dst1, dst10_r, dst32_r, dst54_r, dst21_r);
        DUP2_ARG2(__lsx_vilvl_h, dst4, dst3, dst6, dst5, dst43_r, dst65_r);
        DUP4_ARG2(__lsx_vilvh_h, dst1, dst0, dst3, dst2, dst5, dst4, dst2,
                  dst1, dst10_l, dst32_l, dst54_l, dst21_l);
        DUP2_ARG2(__lsx_vilvh_h, dst4, dst3, dst6, dst5, dst43_l, dst65_l);

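        /* Two output rows per iteration: rows 7 and 8 slide into the
         * vertical window. */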
        for (loop_cnt = height >> 1; loop_cnt--;) {
            src7 = __lsx_vld(src_tmp, 0);
            src8 = __lsx_vldx(src_tmp, src_stride);
            src_tmp += src_stride_2x;
            DUP4_ARG3(__lsx_vshuf_b, src7, src7, mask0, src7, src7, mask1, src7,
                      src7, mask2, src7, src7, mask3, vec0, vec1, vec2, vec3);
            dst7 = __lsx_vdp2_h_bu_b(vec0, filt0);
            DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst7, vec1, filt1, dst7, vec2,
                      filt2, dst7, dst7);
            dst7 = __lsx_vdp2add_h_bu_b(dst7, vec3, filt3);
            dst76_r = __lsx_vilvl_h(dst7, dst6);
            dst76_l = __lsx_vilvh_h(dst7, dst6);
            DUP2_ARG2(__lsx_vdp2_w_h, dst10_r, filt_h0, dst10_l, filt_h0,
                      dst0_r, dst0_l);
            DUP4_ARG3(__lsx_vdp2add_w_h, dst0_r, dst32_r, filt_h1, dst0_l,
                      dst32_l, filt_h1, dst0_r, dst54_r, filt_h2, dst0_l,
                      dst54_l, filt_h2, dst0_r, dst0_l, dst0_r, dst0_l);
            DUP2_ARG3(__lsx_vdp2add_w_h, dst0_r, dst76_r, filt_h3, dst0_l,
                      dst76_l, filt_h3, dst0_r, dst0_l);
            DUP2_ARG2(__lsx_vsrai_w, dst0_r, 6, dst0_l, 6, dst0_r, dst0_l);

            /* row 8 */
            DUP4_ARG3(__lsx_vshuf_b, src8, src8, mask0, src8, src8, mask1, src8,
                      src8, mask2, src8, src8, mask3, vec0, vec1, vec2, vec3);
            dst8 = __lsx_vdp2_h_bu_b(vec0, filt0);
            DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst8, vec1, filt1, dst8, vec2,
                      filt2, dst8, dst8);
            dst8 = __lsx_vdp2add_h_bu_b(dst8, vec3, filt3);

            dst87_r = __lsx_vilvl_h(dst8, dst7);
            dst87_l = __lsx_vilvh_h(dst8, dst7);
            DUP2_ARG2(__lsx_vdp2_w_h, dst21_r, filt_h0, dst21_l, filt_h0,
                      dst1_r, dst1_l);
            DUP4_ARG3(__lsx_vdp2add_w_h, dst1_r, dst43_r, filt_h1, dst1_l,
                      dst43_l, filt_h1, dst1_r, dst65_r, filt_h2, dst1_l,
                      dst65_l, filt_h2, dst1_r, dst1_l, dst1_r, dst1_l);
            DUP2_ARG3(__lsx_vdp2add_w_h, dst1_r, dst87_r, filt_h3, dst1_l,
                      dst87_l, filt_h3, dst1_r, dst1_l);
            DUP2_ARG2(__lsx_vsrai_w, dst1_r, 6, dst1_l, 6, dst1_r, dst1_l);

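            /* Weighted uni prediction: multiply by the weight, round-shift
             * by rnd_val, add the offset, clip to [0, 255] and pack the two
             * rows of 8 results down to bytes. */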
            DUP2_ARG2(__lsx_vmul_w, dst0_r, weight_vec, dst0_l, weight_vec,
                      dst0_r, dst0_l);
            DUP2_ARG2(__lsx_vmul_w, dst1_r, weight_vec, dst1_l, weight_vec,
                      dst1_r, dst1_l);
            DUP4_ARG2(__lsx_vsrar_w, dst0_r, rnd_vec, dst1_r, rnd_vec, dst0_l,
                      rnd_vec, dst1_l, rnd_vec, dst0_r, dst1_r, dst0_l, dst1_l);

            DUP2_ARG2(__lsx_vadd_w, dst0_r, offset_vec, dst0_l, offset_vec,
                      dst0_r, dst0_l);
            DUP2_ARG2(__lsx_vadd_w, dst1_r, offset_vec, dst1_l, offset_vec,
                      dst1_r, dst1_l);
            DUP4_ARG1(__lsx_vclip255_w, dst0_r, dst1_r, dst0_l, dst1_l, dst0_r,
                      dst1_r, dst0_l, dst1_l);
            DUP2_ARG2(__lsx_vpickev_h, dst0_l, dst0_r, dst1_l, dst1_r,
                      dst0_r, dst1_r);
            dst0_r = __lsx_vpickev_b(dst1_r, dst0_r);

            __lsx_vstelm_d(dst0_r, dst_tmp, 0, 0);
            __lsx_vstelm_d(dst0_r, dst_tmp + dst_stride, 0, 1);
            dst_tmp += dst_stride_2x;

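            /* Slide the vertical 8-tap window down by two rows. */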
            dst10_r = dst32_r;
            dst32_r = dst54_r;
            dst54_r = dst76_r;
            dst10_l = dst32_l;
            dst32_l = dst54_l;
            dst54_l = dst76_l;
            dst21_r = dst43_r;
            dst43_r = dst65_r;
            dst65_r = dst87_r;
            dst21_l = dst43_l;
            dst43_l = dst65_l;
            dst65_l = dst87_l;
            dst6 = dst8;
        }

        src += 8;
        dst += 8;
    }
}

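/* Width-specialized wrappers around the 8-column kernel. */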
static
void hevc_hv_8t_8w_lsx(uint8_t *src, int32_t src_stride, uint8_t *dst,
                       int32_t dst_stride, const int8_t *filter_x,
                       const int8_t *filter_y, int32_t height, int32_t weight,
                       int32_t offset, int32_t rnd_val)
{
    hevc_hv_8t_8x2_lsx(src, src_stride, dst, dst_stride, filter_x,
                       filter_y, height, weight, offset, rnd_val, 8);
}

static
void hevc_hv_8t_16w_lsx(uint8_t *src, int32_t src_stride, uint8_t *dst,
                        int32_t dst_stride, const int8_t *filter_x,
                        const int8_t *filter_y, int32_t height, int32_t weight,
                        int32_t offset, int32_t rnd_val)
{
    hevc_hv_8t_8x2_lsx(src, src_stride, dst, dst_stride, filter_x,
                       filter_y, height, weight, offset, rnd_val, 16);
}

static
void hevc_hv_8t_24w_lsx(uint8_t *src, int32_t src_stride, uint8_t *dst,
                        int32_t dst_stride, const int8_t *filter_x,
                        const int8_t *filter_y, int32_t height, int32_t weight,
                        int32_t offset, int32_t rnd_val)
{
    hevc_hv_8t_8x2_lsx(src, src_stride, dst, dst_stride, filter_x,
                       filter_y, height, weight, offset, rnd_val, 24);
}

static
void hevc_hv_8t_32w_lsx(uint8_t *src, int32_t src_stride, uint8_t *dst,
                        int32_t dst_stride, const int8_t *filter_x,
                        const int8_t *filter_y, int32_t height, int32_t weight,
                        int32_t offset, int32_t rnd_val)
{
    hevc_hv_8t_8x2_lsx(src, src_stride, dst, dst_stride, filter_x,
                       filter_y, height, weight, offset, rnd_val, 32);
}

static
void hevc_hv_8t_48w_lsx(uint8_t *src, int32_t src_stride, uint8_t *dst,
                        int32_t dst_stride, const int8_t *filter_x,
                        const int8_t *filter_y, int32_t height, int32_t weight,
                        int32_t offset, int32_t rnd_val)
{
    hevc_hv_8t_8x2_lsx(src, src_stride, dst, dst_stride, filter_x,
                       filter_y, height, weight, offset, rnd_val, 48);
}

static
void hevc_hv_8t_64w_lsx(uint8_t *src, int32_t src_stride, uint8_t *dst,
                        int32_t dst_stride, const int8_t *filter_x,
                        const int8_t *filter_y, int32_t height, int32_t weight,
                        int32_t offset, int32_t rnd_val)
{
    hevc_hv_8t_8x2_lsx(src, src_stride, dst, dst_stride, filter_x,
                       filter_y, height, weight, offset, rnd_val, 64);
}

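/* Glue macro defining the public ff_hevc_put_hevc_uni_w_* entry points.
 * The weighted-prediction shift is denom plus the 14-bit intermediate
 * precision minus the 8-bit output depth. */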
#define UNI_W_MC_HV(PEL, WIDTH, TAP)                                          \
void ff_hevc_put_hevc_uni_w_##PEL##_hv##WIDTH##_8_lsx(uint8_t *dst,           \
                                                      ptrdiff_t dst_stride,   \
                                                      uint8_t *src,           \
                                                      ptrdiff_t src_stride,   \
                                                      int height,             \
                                                      int denom,              \
                                                      int weight,             \
                                                      int offset,             \
                                                      intptr_t mx,            \
                                                      intptr_t my,            \
                                                      int width)              \
{                                                                             \
    const int8_t *filter_x = ff_hevc_##PEL##_filters[mx - 1];                 \
    const int8_t *filter_y = ff_hevc_##PEL##_filters[my - 1];                 \
    int shift = denom + 14 - 8;                                               \
                                                                              \
    hevc_hv_##TAP##t_##WIDTH##w_lsx(src, src_stride, dst, dst_stride,         \
                                    filter_x, filter_y, height, weight,       \
                                    offset, shift);                           \
}

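/* UNI_W_MC_HV(qpel, 8, 8), for instance, defines
 * ff_hevc_put_hevc_uni_w_qpel_hv8_8_lsx(), which forwards to
 * hevc_hv_8t_8w_lsx(). */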
UNI_W_MC_HV(qpel, 8, 8);
UNI_W_MC_HV(qpel, 16, 8);
UNI_W_MC_HV(qpel, 24, 8);
UNI_W_MC_HV(qpel, 32, 8);
UNI_W_MC_HV(qpel, 48, 8);
UNI_W_MC_HV(qpel, 64, 8);

#undef UNI_W_MC_HV