/*
 * Copyright (c) 2016 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include <stdlib.h>

#include "./macros_msa.h"

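/* Table of pseudo-random rounding offsets used by vpx_mbpost_proc_down_msa;
 * it is defined elsewhere in vpx_dsp and is indexed below as
 * vpx_rv[(row & 127) + (col & 7)]. */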
extern const int16_t vpx_rv[];

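/* Transposes an 8x16 block of bytes: the 16 bytes of each input row in0..in7
 * are redistributed so that out0..out15 each hold one column in their low
 * 8 bytes. Used to turn filtered columns back into rows in the luma across
 * pass. */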
#define VPX_TRANSPOSE8x16_UB_UB(                                              \
    in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, out3, out4,     \
    out5, out6, out7, out8, out9, out10, out11, out12, out13, out14, out15)   \
  {                                                                           \
    v8i16 temp0, temp1, temp2, temp3, temp4;                                  \
    v8i16 temp5, temp6, temp7, temp8, temp9;                                  \
                                                                              \
    ILVR_B4_SH(in1, in0, in3, in2, in5, in4, in7, in6, temp0, temp1, temp2,   \
               temp3);                                                        \
    ILVR_H2_SH(temp1, temp0, temp3, temp2, temp4, temp5);                     \
    ILVRL_W2_SH(temp5, temp4, temp6, temp7);                                  \
    ILVL_H2_SH(temp1, temp0, temp3, temp2, temp4, temp5);                     \
    ILVRL_W2_SH(temp5, temp4, temp8, temp9);                                  \
    ILVL_B4_SH(in1, in0, in3, in2, in5, in4, in7, in6, temp0, temp1, temp2,   \
               temp3);                                                        \
    ILVR_H2_SH(temp1, temp0, temp3, temp2, temp4, temp5);                     \
    ILVRL_W2_UB(temp5, temp4, out8, out10);                                   \
    ILVL_H2_SH(temp1, temp0, temp3, temp2, temp4, temp5);                     \
    ILVRL_W2_UB(temp5, temp4, out12, out14);                                  \
    out0 = (v16u8)temp6;                                                      \
    out2 = (v16u8)temp7;                                                      \
    out4 = (v16u8)temp8;                                                      \
    out6 = (v16u8)temp9;                                                      \
    out9 = (v16u8)__msa_ilvl_d((v2i64)out8, (v2i64)out8);                     \
    out11 = (v16u8)__msa_ilvl_d((v2i64)out10, (v2i64)out10);                  \
    out13 = (v16u8)__msa_ilvl_d((v2i64)out12, (v2i64)out12);                  \
    out15 = (v16u8)__msa_ilvl_d((v2i64)out14, (v2i64)out14);                  \
    out1 = (v16u8)__msa_ilvl_d((v2i64)out0, (v2i64)out0);                     \
    out3 = (v16u8)__msa_ilvl_d((v2i64)out2, (v2i64)out2);                     \
    out5 = (v16u8)__msa_ilvl_d((v2i64)out4, (v2i64)out4);                     \
    out7 = (v16u8)__msa_ilvl_d((v2i64)out6, (v2i64)out6);                     \
  }

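/* Conditionally low-pass filters a 5-tap vertical (or, after a transpose,
 * horizontal) window. A scalar sketch of what each byte lane computes
 * (illustrative only):
 *
 *   avg = aver(src, aver(aver(above2, above1), aver(below2, below1)));
 *         where aver(a, b) = (a + b + 1) >> 1
 *   out = (|src - above2| < ref && |src - above1| < ref &&
 *          |src - below1| < ref && |src - below2| < ref) ? avg : src;
 *
 * __msa_bmz_v() copies src back into the result wherever the comparison
 * mask is zero, i.e. wherever some neighbour differs from src by >= ref. */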
#define VPX_AVER_IF_RETAIN(above2_in, above1_in, src_in, below1_in, below2_in, \
                           ref, out)                                           \
  {                                                                            \
    v16u8 temp0, temp1;                                                        \
                                                                               \
    temp1 = __msa_aver_u_b(above2_in, above1_in);                              \
    temp0 = __msa_aver_u_b(below2_in, below1_in);                              \
    temp1 = __msa_aver_u_b(temp1, temp0);                                      \
    out = __msa_aver_u_b(src_in, temp1);                                       \
    temp0 = __msa_asub_u_b(src_in, above2_in);                                 \
    temp1 = __msa_asub_u_b(src_in, above1_in);                                 \
    temp0 = (temp0 < ref);                                                     \
    temp1 = (temp1 < ref);                                                     \
    temp0 = temp0 & temp1;                                                     \
    temp1 = __msa_asub_u_b(src_in, below1_in);                                 \
    temp1 = (temp1 < ref);                                                     \
    temp0 = temp0 & temp1;                                                     \
    temp1 = __msa_asub_u_b(src_in, below2_in);                                 \
    temp1 = (temp1 < ref);                                                     \
    temp0 = temp0 & temp1;                                                     \
    out = __msa_bmz_v(out, src_in, temp0);                                     \
  }

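/* Transposes the first 12 columns of a 16x16 byte block: after the macro,
 * in0..in11 each hold one 16-pixel column (in0 = column 0, and so on). The
 * luma across pass uses this to get the 8 columns being filtered plus two
 * columns of context on either side. */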
#define TRANSPOSE12x16_B(in0, in1, in2, in3, in4, in5, in6, in7, in8, in9,    \
                         in10, in11, in12, in13, in14, in15)                  \
  {                                                                           \
    v8i16 temp0, temp1, temp2, temp3, temp4;                                  \
    v8i16 temp5, temp6, temp7, temp8, temp9;                                  \
                                                                              \
    ILVR_B2_SH(in1, in0, in3, in2, temp0, temp1);                             \
    ILVRL_H2_SH(temp1, temp0, temp2, temp3);                                  \
    ILVR_B2_SH(in5, in4, in7, in6, temp0, temp1);                             \
    ILVRL_H2_SH(temp1, temp0, temp4, temp5);                                  \
    ILVRL_W2_SH(temp4, temp2, temp0, temp1);                                  \
    ILVRL_W2_SH(temp5, temp3, temp2, temp3);                                  \
    ILVR_B2_SH(in9, in8, in11, in10, temp4, temp5);                           \
    ILVRL_H2_SH(temp5, temp4, temp6, temp7);                                  \
    ILVR_B2_SH(in13, in12, in15, in14, temp4, temp5);                         \
    ILVRL_H2_SH(temp5, temp4, temp8, temp9);                                  \
    ILVRL_W2_SH(temp8, temp6, temp4, temp5);                                  \
    ILVRL_W2_SH(temp9, temp7, temp6, temp7);                                  \
    ILVL_B2_SH(in1, in0, in3, in2, temp8, temp9);                             \
    ILVR_D2_UB(temp4, temp0, temp5, temp1, in0, in2);                         \
    in1 = (v16u8)__msa_ilvl_d((v2i64)temp4, (v2i64)temp0);                    \
    in3 = (v16u8)__msa_ilvl_d((v2i64)temp5, (v2i64)temp1);                    \
    ILVL_B2_SH(in5, in4, in7, in6, temp0, temp1);                             \
    ILVR_D2_UB(temp6, temp2, temp7, temp3, in4, in6);                         \
    in5 = (v16u8)__msa_ilvl_d((v2i64)temp6, (v2i64)temp2);                    \
    in7 = (v16u8)__msa_ilvl_d((v2i64)temp7, (v2i64)temp3);                    \
    ILVL_B4_SH(in9, in8, in11, in10, in13, in12, in15, in14, temp2, temp3,    \
               temp4, temp5);                                                 \
    ILVR_H4_SH(temp9, temp8, temp1, temp0, temp3, temp2, temp5, temp4, temp6, \
               temp7, temp8, temp9);                                          \
    ILVR_W2_SH(temp7, temp6, temp9, temp8, temp0, temp1);                     \
    in8 = (v16u8)__msa_ilvr_d((v2i64)temp1, (v2i64)temp0);                    \
    in9 = (v16u8)__msa_ilvl_d((v2i64)temp1, (v2i64)temp0);                    \
    ILVL_W2_SH(temp7, temp6, temp9, temp8, temp2, temp3);                     \
    in10 = (v16u8)__msa_ilvr_d((v2i64)temp3, (v2i64)temp2);                   \
    in11 = (v16u8)__msa_ilvl_d((v2i64)temp3, (v2i64)temp2);                   \
  }

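/* Chroma counterpart of TRANSPOSE12x16_B: transposes the first 12 columns of
 * an 8x16 byte block, leaving one 8-pixel column in the low 8 bytes of each
 * of in0..in11. */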
#define VPX_TRANSPOSE12x8_UB_UB(in0, in1, in2, in3, in4, in5, in6, in7, in8,  \
                                in9, in10, in11)                              \
  {                                                                           \
    v8i16 temp0, temp1, temp2, temp3;                                         \
    v8i16 temp4, temp5, temp6, temp7;                                         \
                                                                              \
    ILVR_B2_SH(in1, in0, in3, in2, temp0, temp1);                             \
    ILVRL_H2_SH(temp1, temp0, temp2, temp3);                                  \
    ILVR_B2_SH(in5, in4, in7, in6, temp0, temp1);                             \
    ILVRL_H2_SH(temp1, temp0, temp4, temp5);                                  \
    ILVRL_W2_SH(temp4, temp2, temp0, temp1);                                  \
    ILVRL_W2_SH(temp5, temp3, temp2, temp3);                                  \
    ILVL_B2_SH(in1, in0, in3, in2, temp4, temp5);                             \
    temp4 = __msa_ilvr_h(temp5, temp4);                                       \
    ILVL_B2_SH(in5, in4, in7, in6, temp6, temp7);                             \
    temp5 = __msa_ilvr_h(temp7, temp6);                                       \
    ILVRL_W2_SH(temp5, temp4, temp6, temp7);                                  \
    in0 = (v16u8)temp0;                                                       \
    in2 = (v16u8)temp1;                                                       \
    in4 = (v16u8)temp2;                                                       \
    in6 = (v16u8)temp3;                                                       \
    in8 = (v16u8)temp6;                                                       \
    in10 = (v16u8)temp7;                                                      \
    in1 = (v16u8)__msa_ilvl_d((v2i64)temp0, (v2i64)temp0);                    \
    in3 = (v16u8)__msa_ilvl_d((v2i64)temp1, (v2i64)temp1);                    \
    in5 = (v16u8)__msa_ilvl_d((v2i64)temp2, (v2i64)temp2);                    \
    in7 = (v16u8)__msa_ilvl_d((v2i64)temp3, (v2i64)temp3);                    \
    in9 = (v16u8)__msa_ilvl_d((v2i64)temp6, (v2i64)temp6);                    \
    in11 = (v16u8)__msa_ilvl_d((v2i64)temp7, (v2i64)temp7);                   \
  }

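/* Down-and-across deblock for an 8-row (chroma) macroblock row: a vertical
 * pass filters 16 columns per iteration straight into dst, then a horizontal
 * pass re-reads dst two columns to the left, transposes 8-column groups,
 * applies the same 5-tap filter, and transposes the result back. f supplies
 * one filter limit per column. */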
static void postproc_down_across_chroma_msa(uint8_t *src_ptr, uint8_t *dst_ptr,
                                            int32_t src_stride,
                                            int32_t dst_stride, int32_t cols,
                                            uint8_t *f) {
  uint8_t *p_src = src_ptr;
  uint8_t *p_dst = dst_ptr;
  uint8_t *f_orig = f;
  uint8_t *p_dst_st = dst_ptr;
  uint16_t col;
  uint64_t out0, out1, out2, out3;
  v16u8 above2, above1, below2, below1, src, ref, ref_temp;
  v16u8 inter0, inter1, inter2, inter3, inter4, inter5;
  v16u8 inter6, inter7, inter8, inter9, inter10, inter11;

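  /* Vertical (down) pass: filter rows 0..7, 16 columns at a time, sliding
   * the five-row window by renaming vector registers instead of reloading
   * them. */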
  for (col = (cols / 16); col--;) {
    ref = LD_UB(f);
    LD_UB2(p_src - 2 * src_stride, src_stride, above2, above1);
    src = LD_UB(p_src);
    LD_UB2(p_src + 1 * src_stride, src_stride, below1, below2);
    VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter0);
    above2 = LD_UB(p_src + 3 * src_stride);
    VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter1);
    above1 = LD_UB(p_src + 4 * src_stride);
    VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter2);
    src = LD_UB(p_src + 5 * src_stride);
    VPX_AVER_IF_RETAIN(below1, below2, above2, above1, src, ref, inter3);
    below1 = LD_UB(p_src + 6 * src_stride);
    VPX_AVER_IF_RETAIN(below2, above2, above1, src, below1, ref, inter4);
    below2 = LD_UB(p_src + 7 * src_stride);
    VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter5);
    above2 = LD_UB(p_src + 8 * src_stride);
    VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter6);
    above1 = LD_UB(p_src + 9 * src_stride);
    VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter7);
    ST_UB8(inter0, inter1, inter2, inter3, inter4, inter5, inter6, inter7,
           p_dst, dst_stride);

    p_dst += 16;
    p_src += 16;
    f += 16;
  }

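  /* Filter eight more columns past the last full 16-wide strip so that the
   * across pass below can safely load 16-byte vectors that overhang the
   * right edge of the row. Only the low 8 bytes of each result are stored. */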
  if (0 != (cols / 16)) {
    ref = LD_UB(f);
    LD_UB2(p_src - 2 * src_stride, src_stride, above2, above1);
    src = LD_UB(p_src);
    LD_UB2(p_src + 1 * src_stride, src_stride, below1, below2);
    VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter0);
    above2 = LD_UB(p_src + 3 * src_stride);
    VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter1);
    above1 = LD_UB(p_src + 4 * src_stride);
    VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter2);
    src = LD_UB(p_src + 5 * src_stride);
    VPX_AVER_IF_RETAIN(below1, below2, above2, above1, src, ref, inter3);
    below1 = LD_UB(p_src + 6 * src_stride);
    VPX_AVER_IF_RETAIN(below2, above2, above1, src, below1, ref, inter4);
    below2 = LD_UB(p_src + 7 * src_stride);
    VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter5);
    above2 = LD_UB(p_src + 8 * src_stride);
    VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter6);
    above1 = LD_UB(p_src + 9 * src_stride);
    VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter7);
    out0 = __msa_copy_u_d((v2i64)inter0, 0);
    out1 = __msa_copy_u_d((v2i64)inter1, 0);
    out2 = __msa_copy_u_d((v2i64)inter2, 0);
    out3 = __msa_copy_u_d((v2i64)inter3, 0);
    SD4(out0, out1, out2, out3, p_dst, dst_stride);

    out0 = __msa_copy_u_d((v2i64)inter4, 0);
    out1 = __msa_copy_u_d((v2i64)inter5, 0);
    out2 = __msa_copy_u_d((v2i64)inter6, 0);
    out3 = __msa_copy_u_d((v2i64)inter7, 0);
    SD4(out0, out1, out2, out3, p_dst + 4 * dst_stride, dst_stride);
  }

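  /* Horizontal (across) pass: start two columns left of dst, transpose each
   * 8-column group, run the same filter with a per-column limit splatted
   * from f, then transpose back and store. */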
  f = f_orig;
  p_dst = dst_ptr - 2;
  LD_UB8(p_dst, dst_stride, inter0, inter1, inter2, inter3, inter4, inter5,
         inter6, inter7);

  for (col = 0; col < (cols / 8); ++col) {
    ref = LD_UB(f);
    f += 8;
    VPX_TRANSPOSE12x8_UB_UB(inter0, inter1, inter2, inter3, inter4, inter5,
                            inter6, inter7, inter8, inter9, inter10, inter11);
    if (0 == col) {
      above2 = inter2;
      above1 = inter2;
    } else {
      above2 = inter0;
      above1 = inter1;
    }
    src = inter2;
    below1 = inter3;
    below2 = inter4;
    ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 0);
    VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref_temp, inter2);
    above2 = inter5;
    ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 1);
    VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref_temp, inter3);
    above1 = inter6;
    ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 2);
    VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref_temp, inter4);
    src = inter7;
    ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 3);
    VPX_AVER_IF_RETAIN(below1, below2, above2, above1, src, ref_temp, inter5);
    below1 = inter8;
    ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 4);
    VPX_AVER_IF_RETAIN(below2, above2, above1, src, below1, ref_temp, inter6);
    below2 = inter9;
    ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 5);
    VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref_temp, inter7);
    if (col == (cols / 8 - 1)) {
      above2 = inter9;
    } else {
      above2 = inter10;
    }
    ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 6);
    VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref_temp, inter8);
    if (col == (cols / 8 - 1)) {
      above1 = inter9;
    } else {
      above1 = inter11;
    }
    ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 7);
    VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref_temp, inter9);
    TRANSPOSE8x8_UB_UB(inter2, inter3, inter4, inter5, inter6, inter7, inter8,
                       inter9, inter2, inter3, inter4, inter5, inter6, inter7,
                       inter8, inter9);
    p_dst += 8;
    LD_UB2(p_dst, dst_stride, inter0, inter1);
    ST8x1_UB(inter2, p_dst_st);
    ST8x1_UB(inter3, (p_dst_st + 1 * dst_stride));
    LD_UB2(p_dst + 2 * dst_stride, dst_stride, inter2, inter3);
    ST8x1_UB(inter4, (p_dst_st + 2 * dst_stride));
    ST8x1_UB(inter5, (p_dst_st + 3 * dst_stride));
    LD_UB2(p_dst + 4 * dst_stride, dst_stride, inter4, inter5);
    ST8x1_UB(inter6, (p_dst_st + 4 * dst_stride));
    ST8x1_UB(inter7, (p_dst_st + 5 * dst_stride));
    LD_UB2(p_dst + 6 * dst_stride, dst_stride, inter6, inter7);
    ST8x1_UB(inter8, (p_dst_st + 6 * dst_stride));
    ST8x1_UB(inter9, (p_dst_st + 7 * dst_stride));
    p_dst_st += 8;
  }
}

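/* Luma variant of the above: identical structure, but macroblocks are 16
 * rows deep, so both passes cover rows 0..15 and the across pass works on a
 * 12x16 transpose. */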
static void postproc_down_across_luma_msa(uint8_t *src_ptr, uint8_t *dst_ptr,
                                          int32_t src_stride,
                                          int32_t dst_stride, int32_t cols,
                                          uint8_t *f) {
  uint8_t *p_src = src_ptr;
  uint8_t *p_dst = dst_ptr;
  uint8_t *p_dst_st = dst_ptr;
  uint8_t *f_orig = f;
  uint16_t col;
  uint64_t out0, out1, out2, out3;
  v16u8 above2, above1, below2, below1;
  v16u8 src, ref, ref_temp;
  v16u8 inter0, inter1, inter2, inter3, inter4, inter5, inter6;
  v16u8 inter7, inter8, inter9, inter10, inter11;
  v16u8 inter12, inter13, inter14, inter15;

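  /* Vertical (down) pass over rows 0..15, 16 columns per iteration. */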
  for (col = (cols / 16); col--;) {
    ref = LD_UB(f);
    LD_UB2(p_src - 2 * src_stride, src_stride, above2, above1);
    src = LD_UB(p_src);
    LD_UB2(p_src + 1 * src_stride, src_stride, below1, below2);
    VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter0);
    above2 = LD_UB(p_src + 3 * src_stride);
    VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter1);
    above1 = LD_UB(p_src + 4 * src_stride);
    VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter2);
    src = LD_UB(p_src + 5 * src_stride);
    VPX_AVER_IF_RETAIN(below1, below2, above2, above1, src, ref, inter3);
    below1 = LD_UB(p_src + 6 * src_stride);
    VPX_AVER_IF_RETAIN(below2, above2, above1, src, below1, ref, inter4);
    below2 = LD_UB(p_src + 7 * src_stride);
    VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter5);
    above2 = LD_UB(p_src + 8 * src_stride);
    VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter6);
    above1 = LD_UB(p_src + 9 * src_stride);
    VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter7);
    src = LD_UB(p_src + 10 * src_stride);
    VPX_AVER_IF_RETAIN(below1, below2, above2, above1, src, ref, inter8);
    below1 = LD_UB(p_src + 11 * src_stride);
    VPX_AVER_IF_RETAIN(below2, above2, above1, src, below1, ref, inter9);
    below2 = LD_UB(p_src + 12 * src_stride);
    VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter10);
    above2 = LD_UB(p_src + 13 * src_stride);
    VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter11);
    above1 = LD_UB(p_src + 14 * src_stride);
    VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter12);
    src = LD_UB(p_src + 15 * src_stride);
    VPX_AVER_IF_RETAIN(below1, below2, above2, above1, src, ref, inter13);
    below1 = LD_UB(p_src + 16 * src_stride);
    VPX_AVER_IF_RETAIN(below2, above2, above1, src, below1, ref, inter14);
    below2 = LD_UB(p_src + 17 * src_stride);
    VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter15);
    ST_UB8(inter0, inter1, inter2, inter3, inter4, inter5, inter6, inter7,
           p_dst, dst_stride);
    ST_UB8(inter8, inter9, inter10, inter11, inter12, inter13, inter14, inter15,
           p_dst + 8 * dst_stride, dst_stride);
    p_src += 16;
    p_dst += 16;
    f += 16;
  }

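  /* Trailing 8-wide strip, as in the chroma path, to give the across pass
   * right-edge context. */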
  if (0 != (cols / 16)) {
    ref = LD_UB(f);
    LD_UB2(p_src - 2 * src_stride, src_stride, above2, above1);
    src = LD_UB(p_src);
    LD_UB2(p_src + 1 * src_stride, src_stride, below1, below2);
    VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter0);
    above2 = LD_UB(p_src + 3 * src_stride);
    VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter1);
    above1 = LD_UB(p_src + 4 * src_stride);
    VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter2);
    src = LD_UB(p_src + 5 * src_stride);
    VPX_AVER_IF_RETAIN(below1, below2, above2, above1, src, ref, inter3);
    below1 = LD_UB(p_src + 6 * src_stride);
    VPX_AVER_IF_RETAIN(below2, above2, above1, src, below1, ref, inter4);
    below2 = LD_UB(p_src + 7 * src_stride);
    VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter5);
    above2 = LD_UB(p_src + 8 * src_stride);
    VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter6);
    above1 = LD_UB(p_src + 9 * src_stride);
    VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter7);
    src = LD_UB(p_src + 10 * src_stride);
    VPX_AVER_IF_RETAIN(below1, below2, above2, above1, src, ref, inter8);
    below1 = LD_UB(p_src + 11 * src_stride);
    VPX_AVER_IF_RETAIN(below2, above2, above1, src, below1, ref, inter9);
    below2 = LD_UB(p_src + 12 * src_stride);
    VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter10);
    above2 = LD_UB(p_src + 13 * src_stride);
    VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter11);
    above1 = LD_UB(p_src + 14 * src_stride);
    VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter12);
    src = LD_UB(p_src + 15 * src_stride);
    VPX_AVER_IF_RETAIN(below1, below2, above2, above1, src, ref, inter13);
    below1 = LD_UB(p_src + 16 * src_stride);
    VPX_AVER_IF_RETAIN(below2, above2, above1, src, below1, ref, inter14);
    below2 = LD_UB(p_src + 17 * src_stride);
    VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter15);
    out0 = __msa_copy_u_d((v2i64)inter0, 0);
    out1 = __msa_copy_u_d((v2i64)inter1, 0);
    out2 = __msa_copy_u_d((v2i64)inter2, 0);
    out3 = __msa_copy_u_d((v2i64)inter3, 0);
    SD4(out0, out1, out2, out3, p_dst, dst_stride);

    out0 = __msa_copy_u_d((v2i64)inter4, 0);
    out1 = __msa_copy_u_d((v2i64)inter5, 0);
    out2 = __msa_copy_u_d((v2i64)inter6, 0);
    out3 = __msa_copy_u_d((v2i64)inter7, 0);
    SD4(out0, out1, out2, out3, p_dst + 4 * dst_stride, dst_stride);

    out0 = __msa_copy_u_d((v2i64)inter8, 0);
    out1 = __msa_copy_u_d((v2i64)inter9, 0);
    out2 = __msa_copy_u_d((v2i64)inter10, 0);
    out3 = __msa_copy_u_d((v2i64)inter11, 0);
    SD4(out0, out1, out2, out3, p_dst + 8 * dst_stride, dst_stride);

    out0 = __msa_copy_u_d((v2i64)inter12, 0);
    out1 = __msa_copy_u_d((v2i64)inter13, 0);
    out2 = __msa_copy_u_d((v2i64)inter14, 0);
    out3 = __msa_copy_u_d((v2i64)inter15, 0);
    SD4(out0, out1, out2, out3, p_dst + 12 * dst_stride, dst_stride);
  }

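  /* Horizontal (across) pass on transposed 8-column groups, rows 0..15. */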
  f = f_orig;
  p_dst = dst_ptr - 2;
  LD_UB8(p_dst, dst_stride, inter0, inter1, inter2, inter3, inter4, inter5,
         inter6, inter7);
  LD_UB8(p_dst + 8 * dst_stride, dst_stride, inter8, inter9, inter10, inter11,
         inter12, inter13, inter14, inter15);

  for (col = 0; col < cols / 8; ++col) {
    ref = LD_UB(f);
    f += 8;
    TRANSPOSE12x16_B(inter0, inter1, inter2, inter3, inter4, inter5, inter6,
                     inter7, inter8, inter9, inter10, inter11, inter12, inter13,
                     inter14, inter15);
    if (0 == col) {
      above2 = inter2;
      above1 = inter2;
    } else {
      above2 = inter0;
      above1 = inter1;
    }

    src = inter2;
    below1 = inter3;
    below2 = inter4;
    ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 0);
    VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref_temp, inter2);
    above2 = inter5;
    ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 1);
    VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref_temp, inter3);
    above1 = inter6;
    ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 2);
    VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref_temp, inter4);
    src = inter7;
    ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 3);
    VPX_AVER_IF_RETAIN(below1, below2, above2, above1, src, ref_temp, inter5);
    below1 = inter8;
    ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 4);
    VPX_AVER_IF_RETAIN(below2, above2, above1, src, below1, ref_temp, inter6);
    below2 = inter9;
    ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 5);
    VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref_temp, inter7);
    if (col == (cols / 8 - 1)) {
      above2 = inter9;
    } else {
      above2 = inter10;
    }
    ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 6);
    VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref_temp, inter8);
    if (col == (cols / 8 - 1)) {
      above1 = inter9;
    } else {
      above1 = inter11;
    }
    ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 7);
    VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref_temp, inter9);
    VPX_TRANSPOSE8x16_UB_UB(inter2, inter3, inter4, inter5, inter6, inter7,
                            inter8, inter9, inter2, inter3, inter4, inter5,
                            inter6, inter7, inter8, inter9, inter10, inter11,
                            inter12, inter13, inter14, inter15, above2, above1);

    p_dst += 8;
    LD_UB2(p_dst, dst_stride, inter0, inter1);
    ST8x1_UB(inter2, p_dst_st);
    ST8x1_UB(inter3, (p_dst_st + 1 * dst_stride));
    LD_UB2(p_dst + 2 * dst_stride, dst_stride, inter2, inter3);
    ST8x1_UB(inter4, (p_dst_st + 2 * dst_stride));
    ST8x1_UB(inter5, (p_dst_st + 3 * dst_stride));
    LD_UB2(p_dst + 4 * dst_stride, dst_stride, inter4, inter5);
    ST8x1_UB(inter6, (p_dst_st + 4 * dst_stride));
    ST8x1_UB(inter7, (p_dst_st + 5 * dst_stride));
    LD_UB2(p_dst + 6 * dst_stride, dst_stride, inter6, inter7);
    ST8x1_UB(inter8, (p_dst_st + 6 * dst_stride));
    ST8x1_UB(inter9, (p_dst_st + 7 * dst_stride));
    LD_UB2(p_dst + 8 * dst_stride, dst_stride, inter8, inter9);
    ST8x1_UB(inter10, (p_dst_st + 8 * dst_stride));
    ST8x1_UB(inter11, (p_dst_st + 9 * dst_stride));
    LD_UB2(p_dst + 10 * dst_stride, dst_stride, inter10, inter11);
    ST8x1_UB(inter12, (p_dst_st + 10 * dst_stride));
    ST8x1_UB(inter13, (p_dst_st + 11 * dst_stride));
    LD_UB2(p_dst + 12 * dst_stride, dst_stride, inter12, inter13);
    ST8x1_UB(inter14, (p_dst_st + 12 * dst_stride));
    ST8x1_UB(inter15, (p_dst_st + 13 * dst_stride));
    LD_UB2(p_dst + 14 * dst_stride, dst_stride, inter14, inter15);
    ST8x1_UB(above2, (p_dst_st + 14 * dst_stride));
    ST8x1_UB(above1, (p_dst_st + 15 * dst_stride));
    p_dst_st += 8;
  }
}

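/* Dispatches on macroblock height: 16 for the luma plane, 8 for chroma. A
 * hedged usage sketch (names here are illustrative, not from this file):
 *
 *   uint8_t limits[MAX_COLS];              // one filter limit per column
 *   memset(limits, post_proc_limit, cols);
 *   vpx_post_proc_down_and_across_mb_row_msa(src_y, dst_y, src_pitch,
 *                                            dst_pitch, cols, limits, 16);
 */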
void vpx_post_proc_down_and_across_mb_row_msa(uint8_t *src, uint8_t *dst,
                                              int32_t src_stride,
                                              int32_t dst_stride, int32_t cols,
                                              uint8_t *f, int32_t size) {
  if (8 == size) {
    postproc_down_across_chroma_msa(src, dst, src_stride, dst_stride, cols, f);
  } else if (16 == size) {
    postproc_down_across_luma_msa(src, dst, src_stride, dst_stride, cols, f);
  }
}

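/* In-place horizontal noise filter. For each row, a 15-pixel sliding window
 * keeps a running sum and sum of squares; a pixel is replaced by the window
 * average only where the local variance stays under flimit. A scalar sketch
 * of the per-pixel update this code vectorizes (it mirrors the C reference
 * in vpx_dsp/postproc.c; treat the details as illustrative):
 *
 *   x = s[c + 7] - s[c - 8];
 *   y = s[c + 7] + s[c - 8];
 *   sum += x;
 *   sum_sq += x * y;
 *   if (sum_sq * 15 - sum * sum < flimit) d[c] = (8 + sum + s[c]) >> 4;
 *
 * The vector version computes 16 such prefix sums per iteration and uses
 * __msa_bmz_v() with the variance mask to keep unfiltered pixels. */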
void vpx_mbpost_proc_across_ip_msa(uint8_t *src, int32_t pitch, int32_t rows,
                                   int32_t cols, int32_t flimit) {
  int32_t row, col, cnt;
  uint8_t *src_dup = src;
  v16u8 src0, src1, tmp_orig;
  v16u8 tmp = { 0 };
  v16i8 zero = { 0 };
  v8u16 sum_h, src_r_h, src_l_h;
  v4u32 src_r_w;
  v4i32 flimit_vec;

  flimit_vec = __msa_fill_w(flimit);
  for (row = rows; row--;) {
    int32_t sum_sq;
    int32_t sum = 0;
    src0 = (v16u8)__msa_fill_b(src_dup[0]);
    ST8x1_UB(src0, (src_dup - 8));

    src0 = (v16u8)__msa_fill_b(src_dup[cols - 1]);
    ST_UB(src0, src_dup + cols);
    src_dup[cols + 16] = src_dup[cols - 1];
    tmp_orig = (v16u8)__msa_ldi_b(0);
    tmp_orig[15] = tmp[15];
    src1 = LD_UB(src_dup - 8);
    src1[15] = 0;
    ILVRL_B2_UH(zero, src1, src_r_h, src_l_h);
    src_r_w = __msa_dotp_u_w(src_r_h, src_r_h);
    src_r_w += __msa_dotp_u_w(src_l_h, src_l_h);
    sum_sq = HADD_SW_S32(src_r_w) + 16;
    sum_h = __msa_hadd_u_h(src1, src1);
    sum = HADD_UH_U32(sum_h);
    {
      v16u8 src7, src8, src_r, src_l;
      v16i8 mask;
      v8u16 add_r, add_l;
      v8i16 sub_r, sub_l, sum_r, sum_l, mask0, mask1;
      v4i32 sum_sq0, sum_sq1, sum_sq2, sum_sq3;
      v4i32 sub0, sub1, sub2, sub3;
      v4i32 sum0_w, sum1_w, sum2_w, sum3_w;
      v4i32 mul0, mul1, mul2, mul3;
      v4i32 total0, total1, total2, total3;
      v8i16 const8 = __msa_fill_h(8);

      src7 = LD_UB(src_dup + 7);
      src8 = LD_UB(src_dup - 8);
      for (col = 0; col < (cols >> 4); ++col) {
        ILVRL_B2_UB(src7, src8, src_r, src_l);
        HSUB_UB2_SH(src_r, src_l, sub_r, sub_l);

        sum_r[0] = sum + sub_r[0];
        for (cnt = 0; cnt < 7; ++cnt) {
          sum_r[cnt + 1] = sum_r[cnt] + sub_r[cnt + 1];
        }
        sum_l[0] = sum_r[7] + sub_l[0];
        for (cnt = 0; cnt < 7; ++cnt) {
          sum_l[cnt + 1] = sum_l[cnt] + sub_l[cnt + 1];
        }
        sum = sum_l[7];
        src1 = LD_UB(src_dup + 16 * col);
        ILVRL_B2_UH(zero, src1, src_r_h, src_l_h);
        src7 = (v16u8)((const8 + sum_r + (v8i16)src_r_h) >> 4);
        src8 = (v16u8)((const8 + sum_l + (v8i16)src_l_h) >> 4);
        tmp = (v16u8)__msa_pckev_b((v16i8)src8, (v16i8)src7);

        HADD_UB2_UH(src_r, src_l, add_r, add_l);
        UNPCK_SH_SW(sub_r, sub0, sub1);
        UNPCK_SH_SW(sub_l, sub2, sub3);
        ILVR_H2_SW(zero, add_r, zero, add_l, sum0_w, sum2_w);
        ILVL_H2_SW(zero, add_r, zero, add_l, sum1_w, sum3_w);
        MUL4(sum0_w, sub0, sum1_w, sub1, sum2_w, sub2, sum3_w, sub3, mul0, mul1,
             mul2, mul3);
        sum_sq0[0] = sum_sq + mul0[0];
        for (cnt = 0; cnt < 3; ++cnt) {
          sum_sq0[cnt + 1] = sum_sq0[cnt] + mul0[cnt + 1];
        }
        sum_sq1[0] = sum_sq0[3] + mul1[0];
        for (cnt = 0; cnt < 3; ++cnt) {
          sum_sq1[cnt + 1] = sum_sq1[cnt] + mul1[cnt + 1];
        }
        sum_sq2[0] = sum_sq1[3] + mul2[0];
        for (cnt = 0; cnt < 3; ++cnt) {
          sum_sq2[cnt + 1] = sum_sq2[cnt] + mul2[cnt + 1];
        }
        sum_sq3[0] = sum_sq2[3] + mul3[0];
        for (cnt = 0; cnt < 3; ++cnt) {
          sum_sq3[cnt + 1] = sum_sq3[cnt] + mul3[cnt + 1];
        }
        sum_sq = sum_sq3[3];

        UNPCK_SH_SW(sum_r, sum0_w, sum1_w);
        UNPCK_SH_SW(sum_l, sum2_w, sum3_w);
        total0 = sum_sq0 * __msa_ldi_w(15);
        total0 -= sum0_w * sum0_w;
        total1 = sum_sq1 * __msa_ldi_w(15);
        total1 -= sum1_w * sum1_w;
        total2 = sum_sq2 * __msa_ldi_w(15);
        total2 -= sum2_w * sum2_w;
        total3 = sum_sq3 * __msa_ldi_w(15);
        total3 -= sum3_w * sum3_w;
        total0 = (total0 < flimit_vec);
        total1 = (total1 < flimit_vec);
        total2 = (total2 < flimit_vec);
        total3 = (total3 < flimit_vec);
        PCKEV_H2_SH(total1, total0, total3, total2, mask0, mask1);
        mask = __msa_pckev_b((v16i8)mask1, (v16i8)mask0);
        tmp = __msa_bmz_v(tmp, src1, (v16u8)mask);

        if (col == 0) {
          uint64_t src_d;

          src_d = __msa_copy_u_d((v2i64)tmp_orig, 1);
          SD(src_d, (src_dup - 8));
        }

        src7 = LD_UB(src_dup + 16 * (col + 1) + 7);
        src8 = LD_UB(src_dup + 16 * (col + 1) - 8);
        ST_UB(tmp, (src_dup + (16 * col)));
      }

      src_dup += pitch;
    }
  }
}

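/* Vertical counterpart of the above: a 15-row sliding window per column,
 * with the vpx_rv table supplying a per-position rounding offset. A scalar
 * sketch of the inner update (mirroring vpx_dsp/postproc.c; illustrative):
 *
 *   x = s[7 * pitch] - s[-8 * pitch];
 *   y = s[7 * pitch] + s[-8 * pitch];
 *   sum += x;
 *   sum_sq += x * y;
 *   if (sum_sq * 15 - sum * sum < flimit)
 *     d[r & 15] = (vpx_rv[(r & 127) + (c & 7)] + sum + s[0]) >> 4;
 *
 * Results are buffered in tmp[16] and written back eight rows behind the
 * read position, like the d[] ring buffer in the C code. */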
void vpx_mbpost_proc_down_msa(uint8_t *dst_ptr, int32_t pitch, int32_t rows,
                              int32_t cols, int32_t flimit) {
  int32_t row, col, cnt, i;
  v4i32 flimit_vec;
  v16u8 dst7, dst8, dst_r_b, dst_l_b;
  v16i8 mask;
  v8u16 add_r, add_l;
  v8i16 dst_r_h, dst_l_h, sub_r, sub_l, mask0, mask1;
  v4i32 sub0, sub1, sub2, sub3, total0, total1, total2, total3;

  flimit_vec = __msa_fill_w(flimit);

  for (col = 0; col < (cols >> 4); ++col) {
    uint8_t *dst_tmp = &dst_ptr[col << 4];
    v16u8 dst;
    v16i8 zero = { 0 };
    v16u8 tmp[16];
    v8i16 mult0, mult1, rv2_0, rv2_1;
    v8i16 sum0_h = { 0 };
    v8i16 sum1_h = { 0 };
    v4i32 mul0 = { 0 };
    v4i32 mul1 = { 0 };
    v4i32 mul2 = { 0 };
    v4i32 mul3 = { 0 };
    v4i32 sum0_w, sum1_w, sum2_w, sum3_w;
    v4i32 add0, add1, add2, add3;
    const int16_t *rv2[16];

    dst = LD_UB(dst_tmp);
    for (cnt = (col << 4), i = 0; i < 16; ++cnt) {
      rv2[i] = vpx_rv + (i & 7);
      ++i;
    }
    for (cnt = -8; cnt < 0; ++cnt) {
      ST_UB(dst, dst_tmp + cnt * pitch);
    }

    dst = LD_UB((dst_tmp + (rows - 1) * pitch));
    for (cnt = rows; cnt < rows + 17; ++cnt) {
      ST_UB(dst, dst_tmp + cnt * pitch);
    }
    for (cnt = -8; cnt <= 6; ++cnt) {
      dst = LD_UB(dst_tmp + (cnt * pitch));
      UNPCK_UB_SH(dst, dst_r_h, dst_l_h);
      MUL2(dst_r_h, dst_r_h, dst_l_h, dst_l_h, mult0, mult1);
      mul0 += (v4i32)__msa_ilvr_h((v8i16)zero, (v8i16)mult0);
      mul1 += (v4i32)__msa_ilvl_h((v8i16)zero, (v8i16)mult0);
      mul2 += (v4i32)__msa_ilvr_h((v8i16)zero, (v8i16)mult1);
      mul3 += (v4i32)__msa_ilvl_h((v8i16)zero, (v8i16)mult1);
      ADD2(sum0_h, dst_r_h, sum1_h, dst_l_h, sum0_h, sum1_h);
    }

    for (row = 0; row < (rows + 8); ++row) {
      for (i = 0; i < 8; ++i) {
        rv2_0[i] = *(rv2[i] + (row & 127));
        rv2_1[i] = *(rv2[i + 8] + (row & 127));
      }
      dst7 = LD_UB(dst_tmp + (7 * pitch));
      dst8 = LD_UB(dst_tmp - (8 * pitch));
      ILVRL_B2_UB(dst7, dst8, dst_r_b, dst_l_b);

      HSUB_UB2_SH(dst_r_b, dst_l_b, sub_r, sub_l);
      UNPCK_SH_SW(sub_r, sub0, sub1);
      UNPCK_SH_SW(sub_l, sub2, sub3);
      sum0_h += sub_r;
      sum1_h += sub_l;

      HADD_UB2_UH(dst_r_b, dst_l_b, add_r, add_l);

      ILVRL_H2_SW(zero, add_r, add0, add1);
      ILVRL_H2_SW(zero, add_l, add2, add3);
      mul0 += add0 * sub0;
      mul1 += add1 * sub1;
      mul2 += add2 * sub2;
      mul3 += add3 * sub3;
      dst = LD_UB(dst_tmp);
      ILVRL_B2_SH(zero, dst, dst_r_h, dst_l_h);
      dst7 = (v16u8)((rv2_0 + sum0_h + dst_r_h) >> 4);
      dst8 = (v16u8)((rv2_1 + sum1_h + dst_l_h) >> 4);
      tmp[row & 15] = (v16u8)__msa_pckev_b((v16i8)dst8, (v16i8)dst7);

      UNPCK_SH_SW(sum0_h, sum0_w, sum1_w);
      UNPCK_SH_SW(sum1_h, sum2_w, sum3_w);
      total0 = mul0 * __msa_ldi_w(15);
      total0 -= sum0_w * sum0_w;
      total1 = mul1 * __msa_ldi_w(15);
      total1 -= sum1_w * sum1_w;
      total2 = mul2 * __msa_ldi_w(15);
      total2 -= sum2_w * sum2_w;
      total3 = mul3 * __msa_ldi_w(15);
      total3 -= sum3_w * sum3_w;
      total0 = (total0 < flimit_vec);
      total1 = (total1 < flimit_vec);
      total2 = (total2 < flimit_vec);
      total3 = (total3 < flimit_vec);
      PCKEV_H2_SH(total1, total0, total3, total2, mask0, mask1);
      mask = __msa_pckev_b((v16i8)mask1, (v16i8)mask0);
      tmp[row & 15] = __msa_bmz_v(tmp[row & 15], dst, (v16u8)mask);

      if (row >= 8) {
        ST_UB(tmp[(row - 8) & 15], (dst_tmp - 8 * pitch));
      }

      dst_tmp += pitch;
    }
  }
}