/*
 * Copyright (c) 2014 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include <arm_neon.h>

#include "./vpx_config.h"
#include "vp8/common/arm/loopfilter_arm.h"
15
/*
 * Core VP8 macroblock loop filter.  Filters one edge across 16 pixel
 * columns at once (a full luma edge, or a u/v pair packed into the
 * low/high halves of each vector).
 *
 * qblimit/qlimit/qthresh: filter parameters splatted across all lanes.
 * q3..q10:                the eight pixel rows p3,p2,p1,p0,q0,q1,q2
 *                         and q3 straddling the edge.
 * q4r..q9r:               outputs; receive the filtered rows p2..q2
 *                         (p3 and q3 are read but never modified).
 */
static INLINE void vp8_mbloop_filter_neon(uint8x16_t qblimit,  // mblimit
                                          uint8x16_t qlimit,   // limit
                                          uint8x16_t qthresh,  // thresh
                                          uint8x16_t q3,       // p3
                                          uint8x16_t q4,       // p2
                                          uint8x16_t q5,       // p1
                                          uint8x16_t q6,       // p0
                                          uint8x16_t q7,       // q0
                                          uint8x16_t q8,       // q1
                                          uint8x16_t q9,       // q2
                                          uint8x16_t q10,      // q3
                                          uint8x16_t *q4r,     // p2
                                          uint8x16_t *q5r,     // p1
                                          uint8x16_t *q6r,     // p0
                                          uint8x16_t *q7r,     // q0
                                          uint8x16_t *q8r,     // q1
                                          uint8x16_t *q9r) {   // q2
  uint8x16_t q0u8, q1u8, q11u8, q12u8, q13u8, q14u8, q15u8;
  int16x8_t q0s16, q2s16, q11s16, q12s16, q13s16, q14s16, q15s16;
  int8x16_t q1s8, q6s8, q7s8, q2s8, q11s8, q13s8;
  uint16x8_t q0u16, q11u16, q12u16, q13u16, q14u16, q15u16;
  int8x16_t q0s8, q12s8, q14s8, q15s8;
  int8x8_t d0, d1, d2, d3, d4, d5, d24, d25, d28, d29;

  // Inter-pixel absolute differences feeding the filter-limit mask.
  q11u8 = vabdq_u8(q3, q4);  // |p3 - p2|
  q12u8 = vabdq_u8(q4, q5);  // |p2 - p1|
  q13u8 = vabdq_u8(q5, q6);  // |p1 - p0|
  q14u8 = vabdq_u8(q8, q7);  // |q1 - q0|
  q1u8 = vabdq_u8(q9, q8);   // |q2 - q1|
  q0u8 = vabdq_u8(q10, q9);  // |q3 - q2|

  q11u8 = vmaxq_u8(q11u8, q12u8);
  q12u8 = vmaxq_u8(q13u8, q14u8);
  q1u8 = vmaxq_u8(q1u8, q0u8);
  q15u8 = vmaxq_u8(q11u8, q12u8);

  q12u8 = vabdq_u8(q6, q7);  // |p0 - q0|

  // vp8_hevmask: lanes where |p1-p0| or |q1-q0| exceeds thresh.
  q13u8 = vcgtq_u8(q13u8, qthresh);
  q14u8 = vcgtq_u8(q14u8, qthresh);
  q15u8 = vmaxq_u8(q15u8, q1u8);

  // Filter mask: the largest inter-pixel step must stay within limit.
  q15u8 = vcgeq_u8(qlimit, q15u8);

  q1u8 = vabdq_u8(q5, q8);          // |p1 - q1|
  q12u8 = vqaddq_u8(q12u8, q12u8);  // saturating 2 * |p0 - q0|

  // vp8_filter(): flip the sign bit to work on pixels as signed values.
  q0u8 = vdupq_n_u8(0x80);
  q9 = veorq_u8(q9, q0u8);
  q8 = veorq_u8(q8, q0u8);
  q7 = veorq_u8(q7, q0u8);
  q6 = veorq_u8(q6, q0u8);
  q5 = veorq_u8(q5, q0u8);
  q4 = veorq_u8(q4, q0u8);

  // 2*|p0-q0| + |p1-q1|/2 must stay within mblimit.
  q1u8 = vshrq_n_u8(q1u8, 1);
  q12u8 = vqaddq_u8(q12u8, q1u8);

  q14u8 = vorrq_u8(q13u8, q14u8);  // combined high-edge-variance mask
  q12u8 = vcgeq_u8(qblimit, q12u8);

  // (q0 - p0) widened to 16 bits, low and high halves.
  q2s16 = vsubl_s8(vget_low_s8(vreinterpretq_s8_u8(q7)),
                   vget_low_s8(vreinterpretq_s8_u8(q6)));
  q13s16 = vsubl_s8(vget_high_s8(vreinterpretq_s8_u8(q7)),
                    vget_high_s8(vreinterpretq_s8_u8(q6)));

  q1s8 = vqsubq_s8(vreinterpretq_s8_u8(q5), vreinterpretq_s8_u8(q8));

  q11s16 = vdupq_n_s16(3);
  q2s16 = vmulq_s16(q2s16, q11s16);
  q13s16 = vmulq_s16(q13s16, q11s16);

  q15u8 = vandq_u8(q15u8, q12u8);  // final per-lane filter mask

  q2s16 = vaddw_s8(q2s16, vget_low_s8(q1s8));
  q13s16 = vaddw_s8(q13s16, vget_high_s8(q1s8));

  q12u8 = vdupq_n_u8(3);
  q11u8 = vdupq_n_u8(4);
  // vp8_filter = clamp(vp8_filter + 3 * ( qs0 - ps0))
  d2 = vqmovn_s16(q2s16);
  d3 = vqmovn_s16(q13s16);
  q1s8 = vcombine_s8(d2, d3);
  q1s8 = vandq_s8(q1s8, vreinterpretq_s8_u8(q15u8));   // mask out lanes
  q13s8 = vandq_s8(q1s8, vreinterpretq_s8_u8(q14u8));  // hev lanes only

  // Filter1 = (vp8_filter + 4) >> 3, Filter2 = (vp8_filter + 3) >> 3.
  q2s8 = vqaddq_s8(q13s8, vreinterpretq_s8_u8(q11u8));
  q13s8 = vqaddq_s8(q13s8, vreinterpretq_s8_u8(q12u8));
  q2s8 = vshrq_n_s8(q2s8, 3);
  q13s8 = vshrq_n_s8(q13s8, 3);

  q7s8 = vqsubq_s8(vreinterpretq_s8_u8(q7), q2s8);   // q0 -= Filter1
  q6s8 = vqaddq_s8(vreinterpretq_s8_u8(q6), q13s8);  // p0 += Filter2

  q1s8 = vbicq_s8(q1s8, vreinterpretq_s8_u8(q14u8));  // non-hev lanes

  // Wider filter taps: u = (k * vp8_filter + 63) >> 7 for k = 9, 18, 27.
  // The u16 vectors carry the rounding constant 63 and are reinterpreted
  // as the signed accumulators for vmlal.
  q0u16 = q11u16 = q12u16 = q13u16 = q14u16 = q15u16 = vdupq_n_u16(63);
  d5 = vdup_n_s8(9);
  d4 = vdup_n_s8(18);

  q0s16 = vmlal_s8(vreinterpretq_s16_u16(q0u16), vget_low_s8(q1s8), d5);
  q11s16 = vmlal_s8(vreinterpretq_s16_u16(q11u16), vget_high_s8(q1s8), d5);
  d5 = vdup_n_s8(27);
  q12s16 = vmlal_s8(vreinterpretq_s16_u16(q12u16), vget_low_s8(q1s8), d4);
  q13s16 = vmlal_s8(vreinterpretq_s16_u16(q13u16), vget_high_s8(q1s8), d4);
  q14s16 = vmlal_s8(vreinterpretq_s16_u16(q14u16), vget_low_s8(q1s8), d5);
  q15s16 = vmlal_s8(vreinterpretq_s16_u16(q15u16), vget_high_s8(q1s8), d5);

  d0 = vqshrn_n_s16(q0s16, 7);    // (9 * f + 63) >> 7
  d1 = vqshrn_n_s16(q11s16, 7);
  d24 = vqshrn_n_s16(q12s16, 7);  // (18 * f + 63) >> 7
  d25 = vqshrn_n_s16(q13s16, 7);
  d28 = vqshrn_n_s16(q14s16, 7);  // (27 * f + 63) >> 7
  d29 = vqshrn_n_s16(q15s16, 7);

  q0s8 = vcombine_s8(d0, d1);
  q12s8 = vcombine_s8(d24, d25);
  q14s8 = vcombine_s8(d28, d29);

  // Apply the taps symmetrically: p2/q2 get the smallest adjustment,
  // p0/q0 the largest.
  q11s8 = vqsubq_s8(vreinterpretq_s8_u8(q9), q0s8);   // q2
  q0s8 = vqaddq_s8(vreinterpretq_s8_u8(q4), q0s8);    // p2
  q13s8 = vqsubq_s8(vreinterpretq_s8_u8(q8), q12s8);  // q1
  q12s8 = vqaddq_s8(vreinterpretq_s8_u8(q5), q12s8);  // p1
  q15s8 = vqsubq_s8(q7s8, q14s8);                     // q0
  q14s8 = vqaddq_s8(q6s8, q14s8);                     // p0

  // Flip the sign bit back to return to unsigned pixel values.
  q1u8 = vdupq_n_u8(0x80);
  *q9r = veorq_u8(vreinterpretq_u8_s8(q11s8), q1u8);
  *q8r = veorq_u8(vreinterpretq_u8_s8(q13s8), q1u8);
  *q7r = veorq_u8(vreinterpretq_u8_s8(q15s8), q1u8);
  *q6r = veorq_u8(vreinterpretq_u8_s8(q14s8), q1u8);
  *q5r = veorq_u8(vreinterpretq_u8_s8(q12s8), q1u8);
  *q4r = veorq_u8(vreinterpretq_u8_s8(q0s8), q1u8);
}
154
/*
 * Macroblock loop filter across a horizontal edge of a 16-pixel-wide
 * luma block.  src points at the first row at/below the edge (q0's
 * row); the eight rows p3..q3 straddling the edge are loaded, filtered,
 * and the six modified rows p2..q2 are written back in place.
 */
void vp8_mbloop_filter_horizontal_edge_y_neon(unsigned char *src, int pitch,
                                              unsigned char blimit,
                                              unsigned char limit,
                                              unsigned char thresh) {
  uint8x16_t qblimit, qlimit, qthresh, q3, q4;
  uint8x16_t q5, q6, q7, q8, q9, q10;

  // Splat the scalar filter parameters across all 16 lanes.
  qblimit = vdupq_n_u8(blimit);
  qlimit = vdupq_n_u8(limit);
  qthresh = vdupq_n_u8(thresh);

  src -= (pitch << 2);  // back up four rows to p3

  q3 = vld1q_u8(src);
  src += pitch;
  q4 = vld1q_u8(src);
  src += pitch;
  q5 = vld1q_u8(src);
  src += pitch;
  q6 = vld1q_u8(src);
  src += pitch;
  q7 = vld1q_u8(src);
  src += pitch;
  q8 = vld1q_u8(src);
  src += pitch;
  q9 = vld1q_u8(src);
  src += pitch;
  q10 = vld1q_u8(src);

  vp8_mbloop_filter_neon(qblimit, qlimit, qthresh, q3, q4, q5, q6, q7, q8, q9,
                         q10, &q4, &q5, &q6, &q7, &q8, &q9);

  src -= (pitch * 6);  // rewind from q3's row back to p2's row
  vst1q_u8(src, q4);
  src += pitch;
  vst1q_u8(src, q5);
  src += pitch;
  vst1q_u8(src, q6);
  src += pitch;
  vst1q_u8(src, q7);
  src += pitch;
  vst1q_u8(src, q8);
  src += pitch;
  vst1q_u8(src, q9);
}
201
/*
 * Macroblock loop filter across a horizontal edge of the two 8-wide
 * chroma planes.  The u rows fill the low half and the v rows the high
 * half of each 16-lane vector, so both planes are filtered in one pass
 * through vp8_mbloop_filter_neon.  u and v point at the q0 row of each
 * plane; rows p2..q2 of each plane are written back.
 */
void vp8_mbloop_filter_horizontal_edge_uv_neon(unsigned char *u, int pitch,
                                               unsigned char blimit,
                                               unsigned char limit,
                                               unsigned char thresh,
                                               unsigned char *v) {
  uint8x16_t qblimit, qlimit, qthresh, q3, q4;
  uint8x16_t q5, q6, q7, q8, q9, q10;
  uint8x8_t d6, d7, d8, d9, d10, d11, d12, d13, d14;
  uint8x8_t d15, d16, d17, d18, d19, d20, d21;

  // Splat the scalar filter parameters across all 16 lanes.
  qblimit = vdupq_n_u8(blimit);
  qlimit = vdupq_n_u8(limit);
  qthresh = vdupq_n_u8(thresh);

  u -= (pitch << 2);  // back up four rows to p3 in each plane
  v -= (pitch << 2);

  // Load eight rows of each plane (even d regs = u, odd = v).
  d6 = vld1_u8(u);
  u += pitch;
  d7 = vld1_u8(v);
  v += pitch;
  d8 = vld1_u8(u);
  u += pitch;
  d9 = vld1_u8(v);
  v += pitch;
  d10 = vld1_u8(u);
  u += pitch;
  d11 = vld1_u8(v);
  v += pitch;
  d12 = vld1_u8(u);
  u += pitch;
  d13 = vld1_u8(v);
  v += pitch;
  d14 = vld1_u8(u);
  u += pitch;
  d15 = vld1_u8(v);
  v += pitch;
  d16 = vld1_u8(u);
  u += pitch;
  d17 = vld1_u8(v);
  v += pitch;
  d18 = vld1_u8(u);
  u += pitch;
  d19 = vld1_u8(v);
  v += pitch;
  d20 = vld1_u8(u);
  d21 = vld1_u8(v);

  // Pack each row pair as [u | v] into one 16-lane vector.
  q3 = vcombine_u8(d6, d7);
  q4 = vcombine_u8(d8, d9);
  q5 = vcombine_u8(d10, d11);
  q6 = vcombine_u8(d12, d13);
  q7 = vcombine_u8(d14, d15);
  q8 = vcombine_u8(d16, d17);
  q9 = vcombine_u8(d18, d19);
  q10 = vcombine_u8(d20, d21);

  vp8_mbloop_filter_neon(qblimit, qlimit, qthresh, q3, q4, q5, q6, q7, q8, q9,
                         q10, &q4, &q5, &q6, &q7, &q8, &q9);

  u -= (pitch * 6);  // rewind from q3's row back to p2's row
  v -= (pitch * 6);
  vst1_u8(u, vget_low_u8(q4));
  u += pitch;
  vst1_u8(v, vget_high_u8(q4));
  v += pitch;
  vst1_u8(u, vget_low_u8(q5));
  u += pitch;
  vst1_u8(v, vget_high_u8(q5));
  v += pitch;
  vst1_u8(u, vget_low_u8(q6));
  u += pitch;
  vst1_u8(v, vget_high_u8(q6));
  v += pitch;
  vst1_u8(u, vget_low_u8(q7));
  u += pitch;
  vst1_u8(v, vget_high_u8(q7));
  v += pitch;
  vst1_u8(u, vget_low_u8(q8));
  u += pitch;
  vst1_u8(v, vget_high_u8(q8));
  v += pitch;
  vst1_u8(u, vget_low_u8(q9));
  vst1_u8(v, vget_high_u8(q9));
}
288
/*
 * Macroblock loop filter across a vertical edge of a 16-pixel-tall luma
 * block.  src points at the q0 column; a 16x8 tile starting 4 columns
 * to the left is loaded (s1 covers rows 0-7, s2 rows 8-15), transposed
 * so pixel positions p3..q3 become vectors, filtered, transposed back,
 * and all eight columns are stored again.
 */
void vp8_mbloop_filter_vertical_edge_y_neon(unsigned char *src, int pitch,
                                            unsigned char blimit,
                                            unsigned char limit,
                                            unsigned char thresh) {
  unsigned char *s1, *s2;
  uint8x16_t qblimit, qlimit, qthresh, q3, q4;
  uint8x16_t q5, q6, q7, q8, q9, q10;
  uint8x8_t d6, d7, d8, d9, d10, d11, d12, d13, d14;
  uint8x8_t d15, d16, d17, d18, d19, d20, d21;
  uint32x4x2_t q2tmp0, q2tmp1, q2tmp2, q2tmp3;
  uint16x8x2_t q2tmp4, q2tmp5, q2tmp6, q2tmp7;
  uint8x16x2_t q2tmp8, q2tmp9, q2tmp10, q2tmp11;

  // Splat the scalar filter parameters across all 16 lanes.
  qblimit = vdupq_n_u8(blimit);
  qlimit = vdupq_n_u8(limit);
  qthresh = vdupq_n_u8(thresh);

  s1 = src - 4;           // rows 0-7, starting at column p3
  s2 = s1 + 8 * pitch;    // rows 8-15
  d6 = vld1_u8(s1);
  s1 += pitch;
  d7 = vld1_u8(s2);
  s2 += pitch;
  d8 = vld1_u8(s1);
  s1 += pitch;
  d9 = vld1_u8(s2);
  s2 += pitch;
  d10 = vld1_u8(s1);
  s1 += pitch;
  d11 = vld1_u8(s2);
  s2 += pitch;
  d12 = vld1_u8(s1);
  s1 += pitch;
  d13 = vld1_u8(s2);
  s2 += pitch;
  d14 = vld1_u8(s1);
  s1 += pitch;
  d15 = vld1_u8(s2);
  s2 += pitch;
  d16 = vld1_u8(s1);
  s1 += pitch;
  d17 = vld1_u8(s2);
  s2 += pitch;
  d18 = vld1_u8(s1);
  s1 += pitch;
  d19 = vld1_u8(s2);
  s2 += pitch;
  d20 = vld1_u8(s1);
  d21 = vld1_u8(s2);

  q3 = vcombine_u8(d6, d7);
  q4 = vcombine_u8(d8, d9);
  q5 = vcombine_u8(d10, d11);
  q6 = vcombine_u8(d12, d13);
  q7 = vcombine_u8(d14, d15);
  q8 = vcombine_u8(d16, d17);
  q9 = vcombine_u8(d18, d19);
  q10 = vcombine_u8(d20, d21);

  // Transpose the 16x8 tile (rows -> pixel-position vectors) with
  // 32-bit, then 16-bit, then 8-bit interleaves.
  q2tmp0 = vtrnq_u32(vreinterpretq_u32_u8(q3), vreinterpretq_u32_u8(q7));
  q2tmp1 = vtrnq_u32(vreinterpretq_u32_u8(q4), vreinterpretq_u32_u8(q8));
  q2tmp2 = vtrnq_u32(vreinterpretq_u32_u8(q5), vreinterpretq_u32_u8(q9));
  q2tmp3 = vtrnq_u32(vreinterpretq_u32_u8(q6), vreinterpretq_u32_u8(q10));

  q2tmp4 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[0]),
                     vreinterpretq_u16_u32(q2tmp2.val[0]));
  q2tmp5 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[0]),
                     vreinterpretq_u16_u32(q2tmp3.val[0]));
  q2tmp6 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[1]),
                     vreinterpretq_u16_u32(q2tmp2.val[1]));
  q2tmp7 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[1]),
                     vreinterpretq_u16_u32(q2tmp3.val[1]));

  q2tmp8 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[0]),
                    vreinterpretq_u8_u16(q2tmp5.val[0]));
  q2tmp9 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[1]),
                    vreinterpretq_u8_u16(q2tmp5.val[1]));
  q2tmp10 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[0]),
                     vreinterpretq_u8_u16(q2tmp7.val[0]));
  q2tmp11 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[1]),
                     vreinterpretq_u8_u16(q2tmp7.val[1]));

  q3 = q2tmp8.val[0];   // p3
  q4 = q2tmp8.val[1];   // p2
  q5 = q2tmp9.val[0];   // p1
  q6 = q2tmp9.val[1];   // p0
  q7 = q2tmp10.val[0];  // q0
  q8 = q2tmp10.val[1];  // q1
  q9 = q2tmp11.val[0];  // q2
  q10 = q2tmp11.val[1]; // q3

  vp8_mbloop_filter_neon(qblimit, qlimit, qthresh, q3, q4, q5, q6, q7, q8, q9,
                         q10, &q4, &q5, &q6, &q7, &q8, &q9);

  // Transpose back to row order before storing.
  q2tmp0 = vtrnq_u32(vreinterpretq_u32_u8(q3), vreinterpretq_u32_u8(q7));
  q2tmp1 = vtrnq_u32(vreinterpretq_u32_u8(q4), vreinterpretq_u32_u8(q8));
  q2tmp2 = vtrnq_u32(vreinterpretq_u32_u8(q5), vreinterpretq_u32_u8(q9));
  q2tmp3 = vtrnq_u32(vreinterpretq_u32_u8(q6), vreinterpretq_u32_u8(q10));

  q2tmp4 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[0]),
                     vreinterpretq_u16_u32(q2tmp2.val[0]));
  q2tmp5 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[0]),
                     vreinterpretq_u16_u32(q2tmp3.val[0]));
  q2tmp6 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[1]),
                     vreinterpretq_u16_u32(q2tmp2.val[1]));
  q2tmp7 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[1]),
                     vreinterpretq_u16_u32(q2tmp3.val[1]));

  q2tmp8 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[0]),
                    vreinterpretq_u8_u16(q2tmp5.val[0]));
  q2tmp9 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[1]),
                    vreinterpretq_u8_u16(q2tmp5.val[1]));
  q2tmp10 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[0]),
                     vreinterpretq_u8_u16(q2tmp7.val[0]));
  q2tmp11 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[1]),
                     vreinterpretq_u8_u16(q2tmp7.val[1]));

  q3 = q2tmp8.val[0];
  q4 = q2tmp8.val[1];
  q5 = q2tmp9.val[0];
  q6 = q2tmp9.val[1];
  q7 = q2tmp10.val[0];
  q8 = q2tmp10.val[1];
  q9 = q2tmp11.val[0];
  q10 = q2tmp11.val[1];

  s1 -= 7 * pitch;  // rewind to row 0 / row 8 of the tile
  s2 -= 7 * pitch;

  vst1_u8(s1, vget_low_u8(q3));
  s1 += pitch;
  vst1_u8(s2, vget_high_u8(q3));
  s2 += pitch;
  vst1_u8(s1, vget_low_u8(q4));
  s1 += pitch;
  vst1_u8(s2, vget_high_u8(q4));
  s2 += pitch;
  vst1_u8(s1, vget_low_u8(q5));
  s1 += pitch;
  vst1_u8(s2, vget_high_u8(q5));
  s2 += pitch;
  vst1_u8(s1, vget_low_u8(q6));
  s1 += pitch;
  vst1_u8(s2, vget_high_u8(q6));
  s2 += pitch;
  vst1_u8(s1, vget_low_u8(q7));
  s1 += pitch;
  vst1_u8(s2, vget_high_u8(q7));
  s2 += pitch;
  vst1_u8(s1, vget_low_u8(q8));
  s1 += pitch;
  vst1_u8(s2, vget_high_u8(q8));
  s2 += pitch;
  vst1_u8(s1, vget_low_u8(q9));
  s1 += pitch;
  vst1_u8(s2, vget_high_u8(q9));
  s2 += pitch;
  vst1_u8(s1, vget_low_u8(q10));
  vst1_u8(s2, vget_high_u8(q10));
}
450
/*
 * Macroblock loop filter across a vertical edge of the two 8x8 chroma
 * blocks.  Eight rows of u fill the low halves and eight rows of v the
 * high halves of each vector; the combined 16x8 tile (starting 4
 * columns left of the edge) is transposed, filtered by
 * vp8_mbloop_filter_neon, transposed back, and stored to both planes.
 */
void vp8_mbloop_filter_vertical_edge_uv_neon(unsigned char *u, int pitch,
                                             unsigned char blimit,
                                             unsigned char limit,
                                             unsigned char thresh,
                                             unsigned char *v) {
  unsigned char *us, *ud;
  unsigned char *vs, *vd;
  uint8x16_t qblimit, qlimit, qthresh, q3, q4;
  uint8x16_t q5, q6, q7, q8, q9, q10;
  uint8x8_t d6, d7, d8, d9, d10, d11, d12, d13, d14;
  uint8x8_t d15, d16, d17, d18, d19, d20, d21;
  uint32x4x2_t q2tmp0, q2tmp1, q2tmp2, q2tmp3;
  uint16x8x2_t q2tmp4, q2tmp5, q2tmp6, q2tmp7;
  uint8x16x2_t q2tmp8, q2tmp9, q2tmp10, q2tmp11;

  // Splat the scalar filter parameters across all 16 lanes.
  qblimit = vdupq_n_u8(blimit);
  qlimit = vdupq_n_u8(limit);
  qthresh = vdupq_n_u8(thresh);

  us = u - 4;  // start four columns left of the edge (p3)
  vs = v - 4;
  d6 = vld1_u8(us);
  us += pitch;
  d7 = vld1_u8(vs);
  vs += pitch;
  d8 = vld1_u8(us);
  us += pitch;
  d9 = vld1_u8(vs);
  vs += pitch;
  d10 = vld1_u8(us);
  us += pitch;
  d11 = vld1_u8(vs);
  vs += pitch;
  d12 = vld1_u8(us);
  us += pitch;
  d13 = vld1_u8(vs);
  vs += pitch;
  d14 = vld1_u8(us);
  us += pitch;
  d15 = vld1_u8(vs);
  vs += pitch;
  d16 = vld1_u8(us);
  us += pitch;
  d17 = vld1_u8(vs);
  vs += pitch;
  d18 = vld1_u8(us);
  us += pitch;
  d19 = vld1_u8(vs);
  vs += pitch;
  d20 = vld1_u8(us);
  d21 = vld1_u8(vs);

  // Pack each row pair as [u | v] into one 16-lane vector.
  q3 = vcombine_u8(d6, d7);
  q4 = vcombine_u8(d8, d9);
  q5 = vcombine_u8(d10, d11);
  q6 = vcombine_u8(d12, d13);
  q7 = vcombine_u8(d14, d15);
  q8 = vcombine_u8(d16, d17);
  q9 = vcombine_u8(d18, d19);
  q10 = vcombine_u8(d20, d21);

  // Transpose the 16x8 tile (rows -> pixel-position vectors) with
  // 32-bit, then 16-bit, then 8-bit interleaves.
  q2tmp0 = vtrnq_u32(vreinterpretq_u32_u8(q3), vreinterpretq_u32_u8(q7));
  q2tmp1 = vtrnq_u32(vreinterpretq_u32_u8(q4), vreinterpretq_u32_u8(q8));
  q2tmp2 = vtrnq_u32(vreinterpretq_u32_u8(q5), vreinterpretq_u32_u8(q9));
  q2tmp3 = vtrnq_u32(vreinterpretq_u32_u8(q6), vreinterpretq_u32_u8(q10));

  q2tmp4 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[0]),
                     vreinterpretq_u16_u32(q2tmp2.val[0]));
  q2tmp5 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[0]),
                     vreinterpretq_u16_u32(q2tmp3.val[0]));
  q2tmp6 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[1]),
                     vreinterpretq_u16_u32(q2tmp2.val[1]));
  q2tmp7 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[1]),
                     vreinterpretq_u16_u32(q2tmp3.val[1]));

  q2tmp8 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[0]),
                    vreinterpretq_u8_u16(q2tmp5.val[0]));
  q2tmp9 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[1]),
                    vreinterpretq_u8_u16(q2tmp5.val[1]));
  q2tmp10 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[0]),
                     vreinterpretq_u8_u16(q2tmp7.val[0]));
  q2tmp11 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[1]),
                     vreinterpretq_u8_u16(q2tmp7.val[1]));

  q3 = q2tmp8.val[0];   // p3
  q4 = q2tmp8.val[1];   // p2
  q5 = q2tmp9.val[0];   // p1
  q6 = q2tmp9.val[1];   // p0
  q7 = q2tmp10.val[0];  // q0
  q8 = q2tmp10.val[1];  // q1
  q9 = q2tmp11.val[0];  // q2
  q10 = q2tmp11.val[1]; // q3

  vp8_mbloop_filter_neon(qblimit, qlimit, qthresh, q3, q4, q5, q6, q7, q8, q9,
                         q10, &q4, &q5, &q6, &q7, &q8, &q9);

  // Transpose back to row order before storing.
  q2tmp0 = vtrnq_u32(vreinterpretq_u32_u8(q3), vreinterpretq_u32_u8(q7));
  q2tmp1 = vtrnq_u32(vreinterpretq_u32_u8(q4), vreinterpretq_u32_u8(q8));
  q2tmp2 = vtrnq_u32(vreinterpretq_u32_u8(q5), vreinterpretq_u32_u8(q9));
  q2tmp3 = vtrnq_u32(vreinterpretq_u32_u8(q6), vreinterpretq_u32_u8(q10));

  q2tmp4 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[0]),
                     vreinterpretq_u16_u32(q2tmp2.val[0]));
  q2tmp5 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[0]),
                     vreinterpretq_u16_u32(q2tmp3.val[0]));
  q2tmp6 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[1]),
                     vreinterpretq_u16_u32(q2tmp2.val[1]));
  q2tmp7 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[1]),
                     vreinterpretq_u16_u32(q2tmp3.val[1]));

  q2tmp8 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[0]),
                    vreinterpretq_u8_u16(q2tmp5.val[0]));
  q2tmp9 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[1]),
                    vreinterpretq_u8_u16(q2tmp5.val[1]));
  q2tmp10 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[0]),
                     vreinterpretq_u8_u16(q2tmp7.val[0]));
  q2tmp11 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[1]),
                     vreinterpretq_u8_u16(q2tmp7.val[1]));

  q3 = q2tmp8.val[0];
  q4 = q2tmp8.val[1];
  q5 = q2tmp9.val[0];
  q6 = q2tmp9.val[1];
  q7 = q2tmp10.val[0];
  q8 = q2tmp10.val[1];
  q9 = q2tmp11.val[0];
  q10 = q2tmp11.val[1];

  // u rows live in the low halves.
  ud = u - 4;
  vst1_u8(ud, vget_low_u8(q3));
  ud += pitch;
  vst1_u8(ud, vget_low_u8(q4));
  ud += pitch;
  vst1_u8(ud, vget_low_u8(q5));
  ud += pitch;
  vst1_u8(ud, vget_low_u8(q6));
  ud += pitch;
  vst1_u8(ud, vget_low_u8(q7));
  ud += pitch;
  vst1_u8(ud, vget_low_u8(q8));
  ud += pitch;
  vst1_u8(ud, vget_low_u8(q9));
  ud += pitch;
  vst1_u8(ud, vget_low_u8(q10));

  // v rows live in the high halves.
  vd = v - 4;
  vst1_u8(vd, vget_high_u8(q3));
  vd += pitch;
  vst1_u8(vd, vget_high_u8(q4));
  vd += pitch;
  vst1_u8(vd, vget_high_u8(q5));
  vd += pitch;
  vst1_u8(vd, vget_high_u8(q6));
  vd += pitch;
  vst1_u8(vd, vget_high_u8(q7));
  vd += pitch;
  vst1_u8(vd, vget_high_u8(q8));
  vd += pitch;
  vst1_u8(vd, vget_high_u8(q9));
  vd += pitch;
  vst1_u8(vd, vget_high_u8(q10));
}
614