/*
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include <emmintrin.h>  // SSE2

#include "config/aom_dsp_rtcd.h"

#include "aom_dsp/x86/lpf_common_sse2.h"

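// Clamps each signed 16-bit lane of *pixel to the range [*min, *max].
// Callers pass bounds derived from the bit depth so intermediate filter
// values stay representable as pixels.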
static AOM_FORCE_INLINE void pixel_clamp(const __m128i *min, const __m128i *max,
                                         __m128i *pixel) {
  *pixel = _mm_min_epi16(*pixel, *max);
  *pixel = _mm_max_epi16(*pixel, *min);
}

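// Per-lane |a - b| on unsigned 16-bit lanes. _mm_subs_epu16 saturates at
// zero, so one operand of the OR holds the one-sided difference and the
// other holds zero. A scalar sketch of the same operation:
//   d = (a >= b) ? (a - b) : (b - a);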
static AOM_FORCE_INLINE __m128i abs_diff16(__m128i a, __m128i b) {
  return _mm_or_si128(_mm_subs_epu16(a, b), _mm_subs_epu16(b, a));
}

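// Loads the 8-bit blimit/limit/thresh values, zero-extends them to 16 bits
// and shifts them up by (bd - 8) so the thresholds scale with the bit depth.
// t80_out is set to 1 << (bd - 1), the offset used to move pixel values
// between the unsigned and signed ranges in the filter math.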
static INLINE void get_limit(const uint8_t *bl, const uint8_t *l,
                             const uint8_t *t, int bd, __m128i *blt,
                             __m128i *lt, __m128i *thr, __m128i *t80_out) {
  const int shift = bd - 8;
  const __m128i zero = _mm_setzero_si128();

  __m128i x = _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)bl), zero);
  *blt = _mm_slli_epi16(x, shift);

  x = _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)l), zero);
  *lt = _mm_slli_epi16(x, shift);

  x = _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)t), zero);
  *thr = _mm_slli_epi16(x, shift);

  *t80_out = _mm_set1_epi16(1 << (bd - 1));
}

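// Same as get_limit(), but packs the thresholds for two independent 4-pixel
// blocks into one register (block 0 in the low 64 bits, block 1 in the high
// 64 bits) so both blocks can be filtered with the same instructions.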
static INLINE void get_limit_dual(
    const uint8_t *_blimit0, const uint8_t *_limit0, const uint8_t *_thresh0,
    const uint8_t *_blimit1, const uint8_t *_limit1, const uint8_t *_thresh1,
    int bd, __m128i *blt_out, __m128i *lt_out, __m128i *thr_out,
    __m128i *t80_out) {
  const int shift = bd - 8;
  const __m128i zero = _mm_setzero_si128();

  __m128i x0 =
      _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_blimit0), zero);
  __m128i x1 =
      _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_blimit1), zero);
  x0 = _mm_unpacklo_epi64(x0, x1);
  *blt_out = _mm_slli_epi16(x0, shift);

  x0 = _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_limit0), zero);
  x1 = _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_limit1), zero);
  x0 = _mm_unpacklo_epi64(x0, x1);
  *lt_out = _mm_slli_epi16(x0, shift);

  x0 = _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_thresh0), zero);
  x1 = _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_thresh1), zero);
  x0 = _mm_unpacklo_epi64(x0, x1);
  *thr_out = _mm_slli_epi16(x0, shift);

  *t80_out = _mm_set1_epi16(1 << (bd - 1));
}

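// Gathers `size` rows on each side of the edge at `s`: p[i] holds the row
// (i + 1) lines above the edge and q[i] the row i lines below it, so
// p[0]/q[0] are the two rows adjacent to the edge.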
static INLINE void load_highbd_pixel(const uint16_t *s, int size, int pitch,
                                     __m128i *p, __m128i *q) {
  int i;
  for (i = 0; i < size; i++) {
    p[i] = _mm_loadu_si128((__m128i *)(s - (i + 1) * pitch));
    q[i] = _mm_loadu_si128((__m128i *)(s + i * pitch));
  }
}

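// Builds the filter on/off mask for two 8-pixel rows at once. A sketch of
// the per-pixel condition being vectorized (from the scalar loop filter):
//   mask = (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 <= blimit) &&
//          abs(p[i] - p[i - 1]) <= limit &&
//          abs(q[i] - q[i - 1]) <= limit, for i = 1..3
// The result is all-ones for pixels that may be filtered, all-zeros
// otherwise.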
static INLINE void highbd_filter_mask_dual(const __m128i *p, const __m128i *q,
                                           const __m128i *l, const __m128i *bl,
                                           __m128i *mask) {
  __m128i abs_p0q0 = abs_diff16(p[0], q[0]);
  __m128i abs_p1q1 = abs_diff16(p[1], q[1]);
  abs_p0q0 = _mm_adds_epu16(abs_p0q0, abs_p0q0);
  abs_p1q1 = _mm_srli_epi16(abs_p1q1, 1);

  const __m128i zero = _mm_setzero_si128();
  const __m128i one = _mm_set1_epi16(1);
  const __m128i ffff = _mm_set1_epi16((short)0xFFFF);

  __m128i max = _mm_subs_epu16(_mm_adds_epu16(abs_p0q0, abs_p1q1), *bl);
  max = _mm_xor_si128(_mm_cmpeq_epi16(max, zero), ffff);
  max = _mm_and_si128(max, _mm_adds_epu16(*l, one));

  int i;
  for (i = 1; i < 4; ++i) {
    max = _mm_max_epi16(max, abs_diff16(p[i], p[i - 1]));
    max = _mm_max_epi16(max, abs_diff16(q[i], q[i - 1]));
  }
  max = _mm_subs_epu16(max, *l);
  *mask = _mm_cmpeq_epi16(max, zero);  // return ~mask
}

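// Computes the filter mask and the high-edge-variance (hev) mask for the
// "merged" register layout, where pq[i] carries p[i] in its low 64 bits and
// q[i] in its high 64 bits. `x` is the number of pq pairs that feed the
// limit check (2, 3 or 4 depending on the filter length). hev flags pixels
// where |p1 - p0| or |q1 - q0| exceeds thresh; filter4 uses it to decide
// whether the outer taps contribute to the filter value.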
static INLINE void highbd_hev_filter_mask_x_sse2(__m128i *pq, int x,
                                                 __m128i *p1p0, __m128i *q1q0,
                                                 __m128i *abs_p1p0, __m128i *l,
                                                 __m128i *bl, __m128i *t,
                                                 __m128i *hev, __m128i *mask) {
  const __m128i zero = _mm_setzero_si128();
  const __m128i one = _mm_set1_epi16(1);
  const __m128i ffff = _mm_set1_epi16((short)0xFFFF);
  __m128i abs_p0q0_p1q1, abs_p0q0, abs_p1q1, abs_q1q0;
  __m128i max, max01, h;

  *p1p0 = _mm_unpacklo_epi64(pq[0], pq[1]);
  *q1q0 = _mm_unpackhi_epi64(pq[0], pq[1]);

  abs_p0q0_p1q1 = abs_diff16(*p1p0, *q1q0);
  abs_p0q0 = _mm_adds_epu16(abs_p0q0_p1q1, abs_p0q0_p1q1);
  abs_p0q0 = _mm_unpacklo_epi64(abs_p0q0, zero);

  abs_p1q1 = _mm_srli_si128(abs_p0q0_p1q1, 8);
  abs_p1q1 = _mm_srli_epi16(abs_p1q1, 1);  // divide by 2

  max = _mm_subs_epu16(_mm_adds_epu16(abs_p0q0, abs_p1q1), *bl);
  max = _mm_xor_si128(_mm_cmpeq_epi16(max, zero), ffff);
  // mask |= (abs(*p0 - *q0) * 2 + abs(*p1 - *q1) / 2 > blimit) * -1;
  // So taking maximums continues to work:
  max = _mm_and_si128(max, _mm_adds_epu16(*l, one));

  *abs_p1p0 = abs_diff16(pq[0], pq[1]);
  abs_q1q0 = _mm_srli_si128(*abs_p1p0, 8);
  max01 = _mm_max_epi16(*abs_p1p0, abs_q1q0);
  // mask |= (abs(*p1 - *p0) > limit) * -1;
  // mask |= (abs(*q1 - *q0) > limit) * -1;
  h = _mm_subs_epu16(max01, *t);

  *hev = _mm_xor_si128(_mm_cmpeq_epi16(h, zero), ffff);
  // replicate for the further "merged variables" usage
  *hev = _mm_unpacklo_epi64(*hev, *hev);

  max = _mm_max_epi16(max, max01);
  int i;
  for (i = 2; i < x; ++i) {
    max = _mm_max_epi16(max, abs_diff16(pq[i], pq[i - 1]));
  }
  max = _mm_max_epi16(max, _mm_srli_si128(max, 8));

  max = _mm_subs_epu16(max, *l);
  *mask = _mm_cmpeq_epi16(max, zero);  // ~mask
}

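// Marks pixels as "flat" when every sample pq[start..end-1] is within a
// bit-depth-scaled threshold of pq[0], i.e. when the neighborhood is smooth
// enough that the longer flat filters may be applied.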
static INLINE void flat_mask_internal(const __m128i *th, const __m128i *pq,
                                      int start, int end, __m128i *flat) {
  int i;
  __m128i max = _mm_max_epi16(abs_diff16(pq[start], pq[0]),
                              abs_diff16(pq[start + 1], pq[0]));

  for (i = start + 2; i < end; ++i) {
    max = _mm_max_epi16(max, abs_diff16(pq[i], pq[0]));
  }
  max = _mm_max_epi16(max, _mm_srli_si128(max, 8));

  __m128i ft;
  ft = _mm_subs_epu16(max, *th);

  const __m128i zero = _mm_setzero_si128();
  *flat = _mm_cmpeq_epi16(ft, zero);
}

static INLINE void flat_mask_internal_dual(const __m128i *th, const __m128i *p,
                                           const __m128i *q, int start, int end,
                                           __m128i *flat) {
  int i;
  __m128i max =
      _mm_max_epi16(abs_diff16(q[start], q[0]), abs_diff16(p[start], p[0]));

  for (i = start + 1; i < end; ++i) {
    max = _mm_max_epi16(max, abs_diff16(p[i], p[0]));
    max = _mm_max_epi16(max, abs_diff16(q[i], q[0]));
  }

  __m128i ft;
  ft = _mm_subs_epu16(max, *th);

  const __m128i zero = _mm_setzero_si128();
  *flat = _mm_cmpeq_epi16(ft, zero);
}

static INLINE void highbd_flat_mask4_sse2(__m128i *pq, __m128i *flat,
                                          __m128i *flat2, int bd) {
  // check the distance 1,2,3 against 0
  __m128i th = _mm_set1_epi16(1);
  th = _mm_slli_epi16(th, bd - 8);
  flat_mask_internal(&th, pq, 1, 4, flat);
  flat_mask_internal(&th, pq, 4, 7, flat2);
}

static INLINE void highbd_flat_mask4_dual_sse2(const __m128i *p,
                                               const __m128i *q, __m128i *flat,
                                               __m128i *flat2, int bd) {
  // check the distance 1,2,3 against 0
  __m128i th = _mm_set1_epi16(1);
  th = _mm_slli_epi16(th, bd - 8);
  flat_mask_internal_dual(&th, p, q, 1, 4, flat);
  flat_mask_internal_dual(&th, p, q, 4, 7, flat2);
}

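// The 4-tap filter on the merged p1p0/q1q0 layout. A scalar sketch of the
// computation each lane performs (values biased to the signed range by t80):
//   filt = clamp(ps1 - qs1) & hev;
//   filt = clamp(filt + 3 * (qs0 - ps0)) & mask;
//   filter1 = clamp(filt + 4) >> 3;  qs0 -= filter1;
//   filter2 = clamp(filt + 3) >> 3;  ps0 += filter2;
//   filt = ((filter1 + 1) >> 1) & ~hev;  qs1 -= filt;  ps1 += filt;
// where clamp() is pixel_clamp() with bounds derived from the bit depth.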
static AOM_FORCE_INLINE void highbd_filter4_sse2(__m128i *p1p0, __m128i *q1q0,
                                                 __m128i *hev, __m128i *mask,
                                                 __m128i *qs1qs0,
                                                 __m128i *ps1ps0, __m128i *t80,
                                                 int bd) {
  const __m128i zero = _mm_setzero_si128();
  const __m128i one = _mm_set1_epi16(1);
  const __m128i pmax =
      _mm_subs_epi16(_mm_subs_epi16(_mm_slli_epi16(one, bd), one), *t80);
  const __m128i pmin = _mm_subs_epi16(zero, *t80);

  const __m128i t3t4 = _mm_set_epi16(3, 3, 3, 3, 4, 4, 4, 4);
  __m128i ps1ps0_work, qs1qs0_work, work;
  __m128i filt, filter2filter1, filter2filt, filter1filt;

  ps1ps0_work = _mm_subs_epi16(*p1p0, *t80);
  qs1qs0_work = _mm_subs_epi16(*q1q0, *t80);

  work = _mm_subs_epi16(ps1ps0_work, qs1qs0_work);
  pixel_clamp(&pmin, &pmax, &work);
  filt = _mm_and_si128(_mm_srli_si128(work, 8), *hev);

  filt = _mm_subs_epi16(filt, work);
  filt = _mm_subs_epi16(filt, work);
  filt = _mm_subs_epi16(filt, work);
  // (aom_filter + 3 * (qs0 - ps0)) & mask
  pixel_clamp(&pmin, &pmax, &filt);
  filt = _mm_and_si128(filt, *mask);
  filt = _mm_unpacklo_epi64(filt, filt);

  filter2filter1 = _mm_adds_epi16(filt, t3t4); /* signed_short_clamp */
  pixel_clamp(&pmin, &pmax, &filter2filter1);
  filter2filter1 = _mm_srai_epi16(filter2filter1, 3); /* >> 3 */

  filt = _mm_unpacklo_epi64(filter2filter1, filter2filter1);

  // filt >> 1
  filt = _mm_adds_epi16(filt, one);
  filt = _mm_srai_epi16(filt, 1);
  filt = _mm_andnot_si128(*hev, filt);

  filter2filt = _mm_unpackhi_epi64(filter2filter1, filt);
  filter1filt = _mm_unpacklo_epi64(filter2filter1, filt);

  qs1qs0_work = _mm_subs_epi16(qs1qs0_work, filter1filt);
  ps1ps0_work = _mm_adds_epi16(ps1ps0_work, filter2filt);

  pixel_clamp(&pmin, &pmax, &qs1qs0_work);
  pixel_clamp(&pmin, &pmax, &ps1ps0_work);

  *qs1qs0 = _mm_adds_epi16(qs1qs0_work, *t80);
  *ps1ps0 = _mm_adds_epi16(ps1ps0_work, *t80);
}

static INLINE void highbd_filter4_dual_sse2(__m128i *p, __m128i *q, __m128i *ps,
                                            __m128i *qs, const __m128i *mask,
                                            const __m128i *th, int bd,
                                            __m128i *t80) {
  __m128i ps0 = _mm_subs_epi16(p[0], *t80);
  __m128i ps1 = _mm_subs_epi16(p[1], *t80);
  __m128i qs0 = _mm_subs_epi16(q[0], *t80);
  __m128i qs1 = _mm_subs_epi16(q[1], *t80);
  const __m128i one = _mm_set1_epi16(1);
  const __m128i pmax =
      _mm_subs_epi16(_mm_subs_epi16(_mm_slli_epi16(one, bd), one), *t80);

  const __m128i zero = _mm_setzero_si128();
  const __m128i pmin = _mm_subs_epi16(zero, *t80);
  __m128i filter = _mm_subs_epi16(ps1, qs1);
  pixel_clamp(&pmin, &pmax, &filter);

  // hev_filter
  __m128i hev;
  const __m128i abs_p1p0 = abs_diff16(p[1], p[0]);
  const __m128i abs_q1q0 = abs_diff16(q[1], q[0]);
  __m128i h = _mm_max_epi16(abs_p1p0, abs_q1q0);
  h = _mm_subs_epu16(h, *th);
  const __m128i ffff = _mm_cmpeq_epi16(h, h);
  hev = _mm_xor_si128(_mm_cmpeq_epi16(h, zero), ffff);

  filter = _mm_and_si128(filter, hev);

  const __m128i x = _mm_subs_epi16(qs0, ps0);
  filter = _mm_adds_epi16(filter, x);
  filter = _mm_adds_epi16(filter, x);
  filter = _mm_adds_epi16(filter, x);
  pixel_clamp(&pmin, &pmax, &filter);
  filter = _mm_and_si128(filter, *mask);
  const __m128i t3 = _mm_set1_epi16(3);
  const __m128i t4 = _mm_set1_epi16(4);
  __m128i filter1 = _mm_adds_epi16(filter, t4);
  __m128i filter2 = _mm_adds_epi16(filter, t3);
  pixel_clamp(&pmin, &pmax, &filter1);
  pixel_clamp(&pmin, &pmax, &filter2);
  filter1 = _mm_srai_epi16(filter1, 3);
  filter2 = _mm_srai_epi16(filter2, 3);
  qs0 = _mm_subs_epi16(qs0, filter1);
  pixel_clamp(&pmin, &pmax, &qs0);
  ps0 = _mm_adds_epi16(ps0, filter2);
  pixel_clamp(&pmin, &pmax, &ps0);
  qs[0] = _mm_adds_epi16(qs0, *t80);
  ps[0] = _mm_adds_epi16(ps0, *t80);
  filter = _mm_adds_epi16(filter1, one);
  filter = _mm_srai_epi16(filter, 1);
  filter = _mm_andnot_si128(hev, filter);
  qs1 = _mm_subs_epi16(qs1, filter);
  pixel_clamp(&pmin, &pmax, &qs1);
  ps1 = _mm_adds_epi16(ps1, filter);
  pixel_clamp(&pmin, &pmax, &ps1);
  qs[1] = _mm_adds_epi16(qs1, *t80);
  ps[1] = _mm_adds_epi16(ps1, *t80);
}

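// 14-wide filter for one 4-pixel edge segment. The narrow filter4 result is
// always computed; where the flat mask is set it is replaced by the
// "filter8" flat output, and where flat2 is also set by the wide-flat
// output. The long filters are evaluated as running sums: each output is
// derived from the previous one by subtracting the sample that leaves the
// averaging window and adding the one that enters it.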
static AOM_FORCE_INLINE void highbd_lpf_internal_14_sse2(
    __m128i *p, __m128i *q, __m128i *pq, const unsigned char *blt,
    const unsigned char *lt, const unsigned char *thr, int bd) {
  int i;
  const __m128i zero = _mm_setzero_si128();
  __m128i blimit, limit, thresh;
  __m128i t80;
  get_limit(blt, lt, thr, bd, &blimit, &limit, &thresh, &t80);

  for (i = 0; i < 7; i++) {
    pq[i] = _mm_unpacklo_epi64(p[i], q[i]);
  }
  __m128i mask, hevhev;
  __m128i p1p0, q1q0, abs_p1p0;

  highbd_hev_filter_mask_x_sse2(pq, 4, &p1p0, &q1q0, &abs_p1p0, &limit, &blimit,
                                &thresh, &hevhev, &mask);

  __m128i ps0ps1, qs0qs1;
  // filter4
  highbd_filter4_sse2(&p1p0, &q1q0, &hevhev, &mask, &qs0qs1, &ps0ps1, &t80, bd);

  __m128i flat, flat2;
  highbd_flat_mask4_sse2(pq, &flat, &flat2, bd);

  flat = _mm_and_si128(flat, mask);
  flat2 = _mm_and_si128(flat2, flat);

  // replicate for the further "merged variables" usage
  flat = _mm_unpacklo_epi64(flat, flat);
  flat2 = _mm_unpacklo_epi64(flat2, flat2);

  // flat and wide flat calculations

  // if flat == 0 then flat2 is zero as well and we don't need any calc below
  // sse4.1 if (0==_mm_test_all_zeros(flat,ff))
  if (0xffff != _mm_movemask_epi8(_mm_cmpeq_epi16(flat, zero))) {
    __m128i flat_p[3], flat_q[3], flat_pq[3];
    __m128i flat2_p[6], flat2_q[6];
    __m128i flat2_pq[6];
    __m128i sum_p6, sum_p3;
    const __m128i eight = _mm_set1_epi16(8);
    const __m128i four = _mm_set1_epi16(4);

    __m128i work0, work0_0, work0_1, sum_p_0;
    __m128i sum_p = _mm_add_epi16(pq[5], _mm_add_epi16(pq[4], pq[3]));
    __m128i sum_lp = _mm_add_epi16(pq[0], _mm_add_epi16(pq[2], pq[1]));
    sum_p = _mm_add_epi16(sum_p, sum_lp);

    __m128i sum_lq = _mm_srli_si128(sum_lp, 8);
    __m128i sum_q = _mm_srli_si128(sum_p, 8);

    sum_p_0 = _mm_add_epi16(eight, _mm_add_epi16(sum_p, sum_q));
    sum_lp = _mm_add_epi16(four, _mm_add_epi16(sum_lp, sum_lq));

    flat_p[0] = _mm_add_epi16(sum_lp, _mm_add_epi16(pq[3], pq[0]));
    flat_q[0] = _mm_add_epi16(sum_lp, _mm_add_epi16(q[3], q[0]));

    sum_p6 = _mm_add_epi16(pq[6], pq[6]);
    sum_p3 = _mm_add_epi16(pq[3], pq[3]);

    sum_q = _mm_sub_epi16(sum_p_0, pq[5]);
    sum_p = _mm_sub_epi16(sum_p_0, q[5]);

    work0_0 = _mm_add_epi16(_mm_add_epi16(pq[6], pq[0]), pq[1]);
    work0_1 = _mm_add_epi16(sum_p6,
                            _mm_add_epi16(pq[1], _mm_add_epi16(pq[2], pq[0])));

    sum_lq = _mm_sub_epi16(sum_lp, pq[2]);
    sum_lp = _mm_sub_epi16(sum_lp, q[2]);

    work0 = _mm_add_epi16(sum_p3, pq[1]);
    flat_p[1] = _mm_add_epi16(sum_lp, work0);
    flat_q[1] = _mm_add_epi16(sum_lq, _mm_srli_si128(work0, 8));

    flat_pq[0] = _mm_srli_epi16(_mm_unpacklo_epi64(flat_p[0], flat_q[0]), 3);
    flat_pq[1] = _mm_srli_epi16(_mm_unpacklo_epi64(flat_p[1], flat_q[1]), 3);

    sum_lp = _mm_sub_epi16(sum_lp, q[1]);
    sum_lq = _mm_sub_epi16(sum_lq, pq[1]);

    sum_p3 = _mm_add_epi16(sum_p3, pq[3]);
    work0 = _mm_add_epi16(sum_p3, pq[2]);

    flat_p[2] = _mm_add_epi16(sum_lp, work0);
    flat_q[2] = _mm_add_epi16(sum_lq, _mm_srli_si128(work0, 8));
    flat_pq[2] = _mm_srli_epi16(_mm_unpacklo_epi64(flat_p[2], flat_q[2]), 3);

    int flat2_mask =
        (0xffff != _mm_movemask_epi8(_mm_cmpeq_epi16(flat2, zero)));
    if (flat2_mask) {
      flat2_p[0] = _mm_add_epi16(sum_p_0, _mm_add_epi16(work0_0, q[0]));
      flat2_q[0] = _mm_add_epi16(
          sum_p_0, _mm_add_epi16(_mm_srli_si128(work0_0, 8), pq[0]));

      flat2_p[1] = _mm_add_epi16(sum_p, work0_1);
      flat2_q[1] = _mm_add_epi16(sum_q, _mm_srli_si128(work0_1, 8));

      flat2_pq[0] =
          _mm_srli_epi16(_mm_unpacklo_epi64(flat2_p[0], flat2_q[0]), 4);
      flat2_pq[1] =
          _mm_srli_epi16(_mm_unpacklo_epi64(flat2_p[1], flat2_q[1]), 4);

      sum_p = _mm_sub_epi16(sum_p, q[4]);
      sum_q = _mm_sub_epi16(sum_q, pq[4]);

      sum_p6 = _mm_add_epi16(sum_p6, pq[6]);
      work0 = _mm_add_epi16(sum_p6,
                            _mm_add_epi16(pq[2], _mm_add_epi16(pq[3], pq[1])));
      flat2_p[2] = _mm_add_epi16(sum_p, work0);
      flat2_q[2] = _mm_add_epi16(sum_q, _mm_srli_si128(work0, 8));
      flat2_pq[2] =
          _mm_srli_epi16(_mm_unpacklo_epi64(flat2_p[2], flat2_q[2]), 4);

      sum_p6 = _mm_add_epi16(sum_p6, pq[6]);
      sum_p = _mm_sub_epi16(sum_p, q[3]);
      sum_q = _mm_sub_epi16(sum_q, pq[3]);

      work0 = _mm_add_epi16(sum_p6,
                            _mm_add_epi16(pq[3], _mm_add_epi16(pq[4], pq[2])));
      flat2_p[3] = _mm_add_epi16(sum_p, work0);
      flat2_q[3] = _mm_add_epi16(sum_q, _mm_srli_si128(work0, 8));
      flat2_pq[3] =
          _mm_srli_epi16(_mm_unpacklo_epi64(flat2_p[3], flat2_q[3]), 4);

      sum_p6 = _mm_add_epi16(sum_p6, pq[6]);
      sum_p = _mm_sub_epi16(sum_p, q[2]);
      sum_q = _mm_sub_epi16(sum_q, pq[2]);

      work0 = _mm_add_epi16(sum_p6,
                            _mm_add_epi16(pq[4], _mm_add_epi16(pq[5], pq[3])));
      flat2_p[4] = _mm_add_epi16(sum_p, work0);
      flat2_q[4] = _mm_add_epi16(sum_q, _mm_srli_si128(work0, 8));
      flat2_pq[4] =
          _mm_srli_epi16(_mm_unpacklo_epi64(flat2_p[4], flat2_q[4]), 4);

      sum_p6 = _mm_add_epi16(sum_p6, pq[6]);
      sum_p = _mm_sub_epi16(sum_p, q[1]);
      sum_q = _mm_sub_epi16(sum_q, pq[1]);

      work0 = _mm_add_epi16(sum_p6,
                            _mm_add_epi16(pq[5], _mm_add_epi16(pq[6], pq[4])));
      flat2_p[5] = _mm_add_epi16(sum_p, work0);
      flat2_q[5] = _mm_add_epi16(sum_q, _mm_srli_si128(work0, 8));
      flat2_pq[5] =
          _mm_srli_epi16(_mm_unpacklo_epi64(flat2_p[5], flat2_q[5]), 4);
    }  // flat2
    // ~~~~~~~~~~ apply flat ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    // highbd_filter8
    pq[0] = _mm_unpacklo_epi64(ps0ps1, qs0qs1);
    pq[1] = _mm_unpackhi_epi64(ps0ps1, qs0qs1);

    for (i = 0; i < 3; i++) {
      pq[i] = _mm_andnot_si128(flat, pq[i]);
      flat_pq[i] = _mm_and_si128(flat, flat_pq[i]);
      pq[i] = _mm_or_si128(pq[i], flat_pq[i]);
    }

    // wide flat
    // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    if (flat2_mask) {
      for (i = 0; i < 6; i++) {
        pq[i] = _mm_andnot_si128(flat2, pq[i]);
        flat2_pq[i] = _mm_and_si128(flat2, flat2_pq[i]);
        pq[i] = _mm_or_si128(pq[i], flat2_pq[i]);  // full list of pq values
      }
    }
  } else {
    pq[0] = _mm_unpacklo_epi64(ps0ps1, qs0qs1);
    pq[1] = _mm_unpackhi_epi64(ps0ps1, qs0qs1);
  }
}

void aom_highbd_lpf_horizontal_14_sse2(uint16_t *s, int pitch,
                                       const uint8_t *blimit,
                                       const uint8_t *limit,
                                       const uint8_t *thresh, int bd) {
  __m128i p[7], q[7], pq[7];
  int i;

  for (i = 0; i < 7; i++) {
    p[i] = _mm_loadl_epi64((__m128i *)(s - (i + 1) * pitch));
    q[i] = _mm_loadl_epi64((__m128i *)(s + i * pitch));
  }

  highbd_lpf_internal_14_sse2(p, q, pq, blimit, limit, thresh, bd);

  for (i = 0; i < 6; i++) {
    _mm_storel_epi64((__m128i *)(s - (i + 1) * pitch), pq[i]);
    _mm_storel_epi64((__m128i *)(s + i * pitch), _mm_srli_si128(pq[i], 8));
  }
}

static AOM_FORCE_INLINE void highbd_lpf_internal_14_dual_sse2(
    __m128i *p, __m128i *q, const uint8_t *blt0, const uint8_t *lt0,
    const uint8_t *thr0, const uint8_t *blt1, const uint8_t *lt1,
    const uint8_t *thr1, int bd) {
  __m128i blimit, limit, thresh, t80;
  const __m128i zero = _mm_setzero_si128();

  get_limit_dual(blt0, lt0, thr0, blt1, lt1, thr1, bd, &blimit, &limit, &thresh,
                 &t80);
  __m128i mask;
  highbd_filter_mask_dual(p, q, &limit, &blimit, &mask);
  __m128i flat, flat2;
  highbd_flat_mask4_dual_sse2(p, q, &flat, &flat2, bd);

  flat = _mm_and_si128(flat, mask);
  flat2 = _mm_and_si128(flat2, flat);
  __m128i ps[2], qs[2];
  highbd_filter4_dual_sse2(p, q, ps, qs, &mask, &thresh, bd, &t80);
  // flat and wide flat calculations

  // if flat == 0 then flat2 is zero as well and we don't need any calc below
  // sse4.1 if (0==_mm_test_all_zeros(flat,ff))
  if (0xffff != _mm_movemask_epi8(_mm_cmpeq_epi16(flat, zero))) {
    __m128i flat_p[3], flat_q[3];
    __m128i flat2_p[6], flat2_q[6];
    const __m128i eight = _mm_set1_epi16(8);
    const __m128i four = _mm_set1_epi16(4);
    __m128i sum_p_0 = _mm_add_epi16(p[5], _mm_add_epi16(p[4], p[3]));
    __m128i sum_q = _mm_add_epi16(q[5], _mm_add_epi16(q[4], q[3]));
    __m128i sum_lp = _mm_add_epi16(p[0], _mm_add_epi16(p[2], p[1]));
    sum_p_0 = _mm_add_epi16(sum_p_0, sum_lp);
    __m128i sum_lq = _mm_add_epi16(q[0], _mm_add_epi16(q[2], q[1]));
    sum_q = _mm_add_epi16(sum_q, sum_lq);
    sum_p_0 = _mm_add_epi16(eight, _mm_add_epi16(sum_p_0, sum_q));
    sum_lp = _mm_add_epi16(four, _mm_add_epi16(sum_lp, sum_lq));
    flat_p[0] =
        _mm_srli_epi16(_mm_add_epi16(sum_lp, _mm_add_epi16(p[3], p[0])), 3);
    flat_q[0] =
        _mm_srli_epi16(_mm_add_epi16(sum_lp, _mm_add_epi16(q[3], q[0])), 3);
    __m128i sum_p6 = _mm_add_epi16(p[6], p[6]);
    __m128i sum_q6 = _mm_add_epi16(q[6], q[6]);
    __m128i sum_p3 = _mm_add_epi16(p[3], p[3]);
    __m128i sum_q3 = _mm_add_epi16(q[3], q[3]);

    sum_q = _mm_sub_epi16(sum_p_0, p[5]);
    __m128i sum_p = _mm_sub_epi16(sum_p_0, q[5]);

    sum_lq = _mm_sub_epi16(sum_lp, p[2]);
    sum_lp = _mm_sub_epi16(sum_lp, q[2]);
    flat_p[1] =
        _mm_srli_epi16(_mm_add_epi16(sum_lp, _mm_add_epi16(sum_p3, p[1])), 3);
    flat_q[1] =
        _mm_srli_epi16(_mm_add_epi16(sum_lq, _mm_add_epi16(sum_q3, q[1])), 3);

    sum_lp = _mm_sub_epi16(sum_lp, q[1]);
    sum_lq = _mm_sub_epi16(sum_lq, p[1]);
    sum_p3 = _mm_add_epi16(sum_p3, p[3]);
    sum_q3 = _mm_add_epi16(sum_q3, q[3]);
    flat_p[2] =
        _mm_srli_epi16(_mm_add_epi16(sum_lp, _mm_add_epi16(sum_p3, p[2])), 3);
    flat_q[2] =
        _mm_srli_epi16(_mm_add_epi16(sum_lq, _mm_add_epi16(sum_q3, q[2])), 3);

    int flat2_mask =
        (0xffff != _mm_movemask_epi8(_mm_cmpeq_epi16(flat2, zero)));
    if (flat2_mask) {
      flat2_p[0] = _mm_srli_epi16(
          _mm_add_epi16(sum_p_0, _mm_add_epi16(_mm_add_epi16(p[6], p[0]),
                                               _mm_add_epi16(p[1], q[0]))),
          4);
      flat2_q[0] = _mm_srli_epi16(
          _mm_add_epi16(sum_p_0, _mm_add_epi16(_mm_add_epi16(q[6], q[0]),
                                               _mm_add_epi16(p[0], q[1]))),
          4);

      flat2_p[1] = _mm_srli_epi16(
          _mm_add_epi16(
              sum_p,
              _mm_add_epi16(sum_p6,
                            _mm_add_epi16(p[1], _mm_add_epi16(p[2], p[0])))),
          4);
      flat2_q[1] = _mm_srli_epi16(
          _mm_add_epi16(
              sum_q,
              _mm_add_epi16(sum_q6,
                            _mm_add_epi16(q[1], _mm_add_epi16(q[0], q[2])))),
          4);
      sum_p6 = _mm_add_epi16(sum_p6, p[6]);
      sum_q6 = _mm_add_epi16(sum_q6, q[6]);
      sum_p = _mm_sub_epi16(sum_p, q[4]);
      sum_q = _mm_sub_epi16(sum_q, p[4]);
      flat2_p[2] = _mm_srli_epi16(
          _mm_add_epi16(
              sum_p,
              _mm_add_epi16(sum_p6,
                            _mm_add_epi16(p[2], _mm_add_epi16(p[3], p[1])))),
          4);
      flat2_q[2] = _mm_srli_epi16(
          _mm_add_epi16(
              sum_q,
              _mm_add_epi16(sum_q6,
                            _mm_add_epi16(q[2], _mm_add_epi16(q[1], q[3])))),
          4);
      sum_p6 = _mm_add_epi16(sum_p6, p[6]);
      sum_q6 = _mm_add_epi16(sum_q6, q[6]);
      sum_p = _mm_sub_epi16(sum_p, q[3]);
      sum_q = _mm_sub_epi16(sum_q, p[3]);
      flat2_p[3] = _mm_srli_epi16(
          _mm_add_epi16(
              sum_p,
              _mm_add_epi16(sum_p6,
                            _mm_add_epi16(p[3], _mm_add_epi16(p[4], p[2])))),
          4);
      flat2_q[3] = _mm_srli_epi16(
          _mm_add_epi16(
              sum_q,
              _mm_add_epi16(sum_q6,
                            _mm_add_epi16(q[3], _mm_add_epi16(q[2], q[4])))),
          4);
      sum_p6 = _mm_add_epi16(sum_p6, p[6]);
      sum_q6 = _mm_add_epi16(sum_q6, q[6]);
      sum_p = _mm_sub_epi16(sum_p, q[2]);
      sum_q = _mm_sub_epi16(sum_q, p[2]);
      flat2_p[4] = _mm_srli_epi16(
          _mm_add_epi16(
              sum_p,
              _mm_add_epi16(sum_p6,
                            _mm_add_epi16(p[4], _mm_add_epi16(p[5], p[3])))),
          4);
      flat2_q[4] = _mm_srli_epi16(
          _mm_add_epi16(
              sum_q,
              _mm_add_epi16(sum_q6,
                            _mm_add_epi16(q[4], _mm_add_epi16(q[3], q[5])))),
          4);
      sum_p6 = _mm_add_epi16(sum_p6, p[6]);
      sum_q6 = _mm_add_epi16(sum_q6, q[6]);
      sum_p = _mm_sub_epi16(sum_p, q[1]);
      sum_q = _mm_sub_epi16(sum_q, p[1]);
      flat2_p[5] = _mm_srli_epi16(
          _mm_add_epi16(
              sum_p,
              _mm_add_epi16(sum_p6,
                            _mm_add_epi16(p[5], _mm_add_epi16(p[6], p[4])))),
          4);
      flat2_q[5] = _mm_srli_epi16(
          _mm_add_epi16(
              sum_q,
              _mm_add_epi16(sum_q6,
                            _mm_add_epi16(q[5], _mm_add_epi16(q[4], q[6])))),
          4);
    }
    // highbd_filter8
    int i;
    for (i = 0; i < 2; i++) {
      ps[i] = _mm_andnot_si128(flat, ps[i]);
      flat_p[i] = _mm_and_si128(flat, flat_p[i]);
      p[i] = _mm_or_si128(ps[i], flat_p[i]);
      qs[i] = _mm_andnot_si128(flat, qs[i]);
      flat_q[i] = _mm_and_si128(flat, flat_q[i]);
      q[i] = _mm_or_si128(qs[i], flat_q[i]);
    }
    p[2] = _mm_andnot_si128(flat, p[2]);
    // p2 remains unchanged if !(flat && mask)
    flat_p[2] = _mm_and_si128(flat, flat_p[2]);
    // when (flat && mask)
    p[2] = _mm_or_si128(p[2], flat_p[2]);  // full list of p2 values
    q[2] = _mm_andnot_si128(flat, q[2]);
    flat_q[2] = _mm_and_si128(flat, flat_q[2]);
    q[2] = _mm_or_si128(q[2], flat_q[2]);  // full list of q2 values

    for (i = 0; i < 2; i++) {
      ps[i] = _mm_andnot_si128(flat, ps[i]);
      flat_p[i] = _mm_and_si128(flat, flat_p[i]);
      p[i] = _mm_or_si128(ps[i], flat_p[i]);
      qs[i] = _mm_andnot_si128(flat, qs[i]);
      flat_q[i] = _mm_and_si128(flat, flat_q[i]);
      q[i] = _mm_or_si128(qs[i], flat_q[i]);
    }
    // highbd_filter16
    if (flat2_mask) {
      for (i = 0; i < 6; i++) {
        // p[i] remains unchanged if !(flat2 && flat && mask)
        p[i] = _mm_andnot_si128(flat2, p[i]);
        flat2_p[i] = _mm_and_si128(flat2, flat2_p[i]);
        // get values for when (flat2 && flat && mask)
        p[i] = _mm_or_si128(p[i], flat2_p[i]);  // full list of p values
        q[i] = _mm_andnot_si128(flat2, q[i]);
        flat2_q[i] = _mm_and_si128(flat2, flat2_q[i]);
        q[i] = _mm_or_si128(q[i], flat2_q[i]);
      }
    }
  } else {
    p[0] = ps[0];
    q[0] = qs[0];
    p[1] = ps[1];
    q[1] = qs[1];
  }
}

void aom_highbd_lpf_horizontal_14_dual_sse2(
    uint16_t *s, int pitch, const uint8_t *_blimit0, const uint8_t *_limit0,
    const uint8_t *_thresh0, const uint8_t *_blimit1, const uint8_t *_limit1,
    const uint8_t *_thresh1, int bd) {
  __m128i p[7], q[7];
  int i;
  load_highbd_pixel(s, 7, pitch, p, q);

  highbd_lpf_internal_14_dual_sse2(p, q, _blimit0, _limit0, _thresh0, _blimit1,
                                   _limit1, _thresh1, bd);

  for (i = 0; i < 6; i++) {
    _mm_storeu_si128((__m128i *)(s - (i + 1) * pitch), p[i]);
    _mm_storeu_si128((__m128i *)(s + i * pitch), q[i]);
  }
}

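// 6-wide filter: filter4 plus a 5-tap flat filter over p2..q2. The weights
// can be read off the comments in the body; e.g. the op0 output is
// (p2 + 2 * p1 + 2 * p0 + 2 * q0 + q1 + 4) >> 3.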
static AOM_FORCE_INLINE void highbd_lpf_internal_6_sse2(
    __m128i *p2, __m128i *p1, __m128i *p0, __m128i *q0, __m128i *q1,
    __m128i *q2, __m128i *p1p0_out, __m128i *q1q0_out, const uint8_t *_blimit,
    const uint8_t *_limit, const uint8_t *_thresh, int bd) {
  __m128i blimit, limit, thresh;
  __m128i mask, hev, flat;
  __m128i pq[3];
  __m128i p1p0, q1q0, abs_p1p0, ps1ps0, qs1qs0;
  __m128i flat_p1p0, flat_q0q1;

  pq[0] = _mm_unpacklo_epi64(*p0, *q0);
  pq[1] = _mm_unpacklo_epi64(*p1, *q1);
  pq[2] = _mm_unpacklo_epi64(*p2, *q2);

  const __m128i zero = _mm_setzero_si128();
  const __m128i four = _mm_set1_epi16(4);
  __m128i t80;
  const __m128i one = _mm_set1_epi16(0x1);

  get_limit(_blimit, _limit, _thresh, bd, &blimit, &limit, &thresh, &t80);

  highbd_hev_filter_mask_x_sse2(pq, 3, &p1p0, &q1q0, &abs_p1p0, &limit, &blimit,
                                &thresh, &hev, &mask);

  // lp filter
  highbd_filter4_sse2(&p1p0, &q1q0, &hev, &mask, q1q0_out, p1p0_out, &t80, bd);

  // flat_mask
  flat = _mm_max_epi16(abs_diff16(pq[2], pq[0]), abs_p1p0);
  flat = _mm_max_epi16(flat, _mm_srli_si128(flat, 8));

  flat = _mm_subs_epu16(flat, _mm_slli_epi16(one, bd - 8));

  flat = _mm_cmpeq_epi16(flat, zero);
  flat = _mm_and_si128(flat, mask);
  // replicate for the further "merged variables" usage
  flat = _mm_unpacklo_epi64(flat, flat);

  // 5 tap filter
  // need it only if flat != 0
  if (0xffff != _mm_movemask_epi8(_mm_cmpeq_epi16(flat, zero))) {
    __m128i workp_a, workp_b, workp_c;
    __m128i pq0x2_pq1, pq1_pq2;

    // op1
    pq0x2_pq1 =
        _mm_add_epi16(_mm_add_epi16(pq[0], pq[0]), pq[1]);  // p0 * 2 + p1
    pq1_pq2 = _mm_add_epi16(pq[1], pq[2]);                  // p1 + p2
    workp_a = _mm_add_epi16(_mm_add_epi16(pq0x2_pq1, four),
                            pq1_pq2);  // p2 + p0 * 2 + p1 * 2 + 4

    workp_b = _mm_add_epi16(_mm_add_epi16(pq[2], pq[2]), *q0);
    workp_b =
        _mm_add_epi16(workp_a, workp_b);  // p2 * 3 + p1 * 2 + p0 * 2 + q0 + 4

    // op0
    workp_c = _mm_srli_si128(pq0x2_pq1, 8);  // q0 * 2 + q1
    workp_a = _mm_add_epi16(workp_a,
                            workp_c);  // p2 + p0 * 2 + p1 * 2 + q0 * 2 + q1 + 4
    workp_b = _mm_unpacklo_epi64(workp_a, workp_b);
    flat_p1p0 = _mm_srli_epi16(workp_b, 3);

    // oq0
    workp_a = _mm_sub_epi16(_mm_sub_epi16(workp_a, pq[2]),
                            pq[1]);  // p0 * 2 + p1 + q0 * 2 + q1 + 4
    workp_b = _mm_srli_si128(pq1_pq2, 8);
    workp_a = _mm_add_epi16(
        workp_a, workp_b);  // p0 * 2 + p1 + q0 * 2 + q1 * 2 + q2 + 4
    // workp_shft0 = _mm_srli_epi16(workp_a, 3);

    // oq1
    workp_c = _mm_sub_epi16(_mm_sub_epi16(workp_a, pq[1]),
                            pq[0]);  // p0 + q0 * 2 + q1 * 2 + q2 + 4
    workp_b = _mm_add_epi16(*q2, *q2);
    workp_b =
        _mm_add_epi16(workp_c, workp_b);  // p0 + q0 * 2 + q1 * 2 + q2 * 3 + 4

    workp_a = _mm_unpacklo_epi64(workp_a, workp_b);
    flat_q0q1 = _mm_srli_epi16(workp_a, 3);

    qs1qs0 = _mm_andnot_si128(flat, *q1q0_out);
    q1q0 = _mm_and_si128(flat, flat_q0q1);
    *q1q0_out = _mm_or_si128(qs1qs0, q1q0);

    ps1ps0 = _mm_andnot_si128(flat, *p1p0_out);
    p1p0 = _mm_and_si128(flat, flat_p1p0);
    *p1p0_out = _mm_or_si128(ps1ps0, p1p0);
  }
}

static AOM_FORCE_INLINE void highbd_lpf_internal_6_dual_sse2(
    __m128i *p2, __m128i *p1, __m128i *p0, __m128i *q0, __m128i *q1,
    __m128i *q2, const unsigned char *_blimit0, const unsigned char *_limit0,
    const unsigned char *_thresh0, const unsigned char *_blimit1,
    const unsigned char *_limit1, const unsigned char *_thresh1, int bd) {
  const __m128i zero = _mm_setzero_si128();
  __m128i blimit0, limit0, thresh0;
  __m128i t80;
  __m128i mask, flat, work;
  __m128i abs_p1q1, abs_p0q0, abs_p1p0, abs_p2p1, abs_q1q0, abs_q2q1;
  __m128i op1, op0, oq0, oq1;
  const __m128i four = _mm_set1_epi16(4);
  const __m128i one = _mm_set1_epi16(0x1);
  const __m128i ffff = _mm_cmpeq_epi16(one, one);

  get_limit_dual(_blimit0, _limit0, _thresh0, _blimit1, _limit1, _thresh1, bd,
                 &blimit0, &limit0, &thresh0, &t80);

  abs_p2p1 = abs_diff16(*p2, *p1);
  abs_p1p0 = abs_diff16(*p1, *p0);
  abs_q1q0 = abs_diff16(*q1, *q0);
  abs_q2q1 = abs_diff16(*q2, *q1);

  abs_p0q0 = abs_diff16(*p0, *q0);
  abs_p1q1 = abs_diff16(*p1, *q1);

  abs_p0q0 = _mm_adds_epu16(abs_p0q0, abs_p0q0);
  abs_p1q1 = _mm_srli_epi16(abs_p1q1, 1);
  mask = _mm_subs_epu16(_mm_adds_epu16(abs_p0q0, abs_p1q1), blimit0);
  mask = _mm_xor_si128(_mm_cmpeq_epi16(mask, zero), ffff);
  // mask |= (abs(*p0 - *q0) * 2 + abs(*p1 - *q1) / 2 > blimit) * -1;
  // So taking maximums continues to work:
  mask = _mm_and_si128(mask, _mm_adds_epu16(limit0, one));

  mask = _mm_max_epi16(abs_q2q1, mask);
  work = _mm_max_epi16(abs_p1p0, abs_q1q0);
  mask = _mm_max_epi16(work, mask);
  mask = _mm_max_epi16(mask, abs_p2p1);
  mask = _mm_subs_epu16(mask, limit0);
  mask = _mm_cmpeq_epi16(mask, zero);

  // lp filter
  __m128i ps[2], qs[2], p[2], q[2];
  {
    p[0] = *p0;
    p[1] = *p1;
    q[0] = *q0;
    q[1] = *q1;
    // filter_mask and hev_mask
    highbd_filter4_dual_sse2(p, q, ps, qs, &mask, &thresh0, bd, &t80);
  }

  // flat_mask
  flat = _mm_max_epi16(abs_diff16(*q2, *q0), abs_diff16(*p2, *p0));
  flat = _mm_max_epi16(flat, work);

  flat = _mm_subs_epu16(flat, _mm_slli_epi16(one, bd - 8));

  flat = _mm_cmpeq_epi16(flat, zero);
  flat = _mm_and_si128(flat, mask);  // flat & mask

  // 5 tap filter
  // need it only if flat != 0
  if (0xffff != _mm_movemask_epi8(_mm_cmpeq_epi16(flat, zero))) {
    __m128i workp_a, workp_b, workp_shft0, workp_shft1;

    // op1
    workp_a = _mm_add_epi16(_mm_add_epi16(*p0, *p0),
                            _mm_add_epi16(*p1, *p1));  // *p0 * 2 + *p1 * 2
    workp_a = _mm_add_epi16(_mm_add_epi16(workp_a, four),
                            *p2);  // *p2 + *p0 * 2 + *p1 * 2 + 4

    workp_b = _mm_add_epi16(_mm_add_epi16(*p2, *p2), *q0);
    workp_shft0 = _mm_add_epi16(
        workp_a, workp_b);  // *p2 * 3 + *p1 * 2 + *p0 * 2 + *q0 + 4
    op1 = _mm_srli_epi16(workp_shft0, 3);

    // op0
    workp_b = _mm_add_epi16(_mm_add_epi16(*q0, *q0), *q1);  // *q0 * 2 + *q1
    workp_a =
        _mm_add_epi16(workp_a,
                      workp_b);  // *p2 + *p0 * 2 + *p1 * 2 + *q0 * 2 + *q1 + 4
    op0 = _mm_srli_epi16(workp_a, 3);

    // oq0
    workp_a = _mm_sub_epi16(_mm_sub_epi16(workp_a, *p2),
                            *p1);  // *p0 * 2 + *p1 + *q0 * 2 + *q1 + 4
    workp_b = _mm_add_epi16(*q1, *q2);
    workp_shft0 = _mm_add_epi16(
        workp_a, workp_b);  // *p0 * 2 + *p1 + *q0 * 2 + *q1 * 2 + *q2 + 4
    oq0 = _mm_srli_epi16(workp_shft0, 3);

    // oq1
    workp_a = _mm_sub_epi16(_mm_sub_epi16(workp_shft0, *p1),
                            *p0);  // *p0 + *q0 * 2 + *q1 * 2 + *q2 + 4
    workp_b = _mm_add_epi16(*q2, *q2);
    workp_shft1 = _mm_add_epi16(
        workp_a, workp_b);  // *p0 + *q0 * 2 + *q1 * 2 + *q2 * 3 + 4
    oq1 = _mm_srli_epi16(workp_shft1, 3);

    qs[0] = _mm_andnot_si128(flat, qs[0]);
    oq0 = _mm_and_si128(flat, oq0);
    *q0 = _mm_or_si128(qs[0], oq0);

    qs[1] = _mm_andnot_si128(flat, qs[1]);
    oq1 = _mm_and_si128(flat, oq1);
    *q1 = _mm_or_si128(qs[1], oq1);

    ps[0] = _mm_andnot_si128(flat, ps[0]);
    op0 = _mm_and_si128(flat, op0);
    *p0 = _mm_or_si128(ps[0], op0);

    ps[1] = _mm_andnot_si128(flat, ps[1]);
    op1 = _mm_and_si128(flat, op1);
    *p1 = _mm_or_si128(ps[1], op1);
  } else {
    *q0 = qs[0];
    *q1 = qs[1];
    *p0 = ps[0];
    *p1 = ps[1];
  }
}

void aom_highbd_lpf_horizontal_6_sse2(uint16_t *s, int p,
                                      const uint8_t *_blimit,
                                      const uint8_t *_limit,
                                      const uint8_t *_thresh, int bd) {
  __m128i p2, p1, p0, q0, q1, q2, p1p0_out, q1q0_out;

  p2 = _mm_loadl_epi64((__m128i *)(s - 3 * p));
  p1 = _mm_loadl_epi64((__m128i *)(s - 2 * p));
  p0 = _mm_loadl_epi64((__m128i *)(s - 1 * p));
  q0 = _mm_loadl_epi64((__m128i *)(s + 0 * p));
  q1 = _mm_loadl_epi64((__m128i *)(s + 1 * p));
  q2 = _mm_loadl_epi64((__m128i *)(s + 2 * p));

  highbd_lpf_internal_6_sse2(&p2, &p1, &p0, &q0, &q1, &q2, &p1p0_out, &q1q0_out,
                             _blimit, _limit, _thresh, bd);

  _mm_storel_epi64((__m128i *)(s - 2 * p), _mm_srli_si128(p1p0_out, 8));
  _mm_storel_epi64((__m128i *)(s - 1 * p), p1p0_out);
  _mm_storel_epi64((__m128i *)(s + 0 * p), q1q0_out);
  _mm_storel_epi64((__m128i *)(s + 1 * p), _mm_srli_si128(q1q0_out, 8));
}

void aom_highbd_lpf_horizontal_6_dual_sse2(
    uint16_t *s, int p, const uint8_t *_blimit0, const uint8_t *_limit0,
    const uint8_t *_thresh0, const uint8_t *_blimit1, const uint8_t *_limit1,
    const uint8_t *_thresh1, int bd) {
  __m128i p2, p1, p0, q0, q1, q2;

  p2 = _mm_loadu_si128((__m128i *)(s - 3 * p));
  p1 = _mm_loadu_si128((__m128i *)(s - 2 * p));
  p0 = _mm_loadu_si128((__m128i *)(s - 1 * p));
  q0 = _mm_loadu_si128((__m128i *)(s + 0 * p));
  q1 = _mm_loadu_si128((__m128i *)(s + 1 * p));
  q2 = _mm_loadu_si128((__m128i *)(s + 2 * p));

  highbd_lpf_internal_6_dual_sse2(&p2, &p1, &p0, &q0, &q1, &q2, _blimit0,
                                  _limit0, _thresh0, _blimit1, _limit1,
                                  _thresh1, bd);

  _mm_storeu_si128((__m128i *)(s - 2 * p), p1);
  _mm_storeu_si128((__m128i *)(s - 1 * p), p0);
  _mm_storeu_si128((__m128i *)(s + 0 * p), q0);
  _mm_storeu_si128((__m128i *)(s + 1 * p), q1);
}

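// 8-wide filter: filter4 plus a 7-tap flat filter over p3..q3. As in the
// 6-wide case, each flat output is a rounded average; e.g. the op2 output is
// (3 * p3 + 2 * p2 + p1 + p0 + q0 + 4) >> 3.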
static AOM_FORCE_INLINE void highbd_lpf_internal_8_sse2(
    __m128i *p3, __m128i *q3, __m128i *p2, __m128i *q2, __m128i *p1,
    __m128i *q1, __m128i *p0, __m128i *q0, __m128i *q1q0_out, __m128i *p1p0_out,
    const unsigned char *_blimit, const unsigned char *_limit,
    const unsigned char *_thresh, int bd) {
  const __m128i zero = _mm_setzero_si128();
  __m128i blimit, limit, thresh;
  __m128i mask, hev, flat;
  __m128i pq[4];
  __m128i p1p0, q1q0, ps1ps0, qs1qs0;
  __m128i work_a, opq2, flat_p1p0, flat_q0q1;

  pq[0] = _mm_unpacklo_epi64(*p0, *q0);
  pq[1] = _mm_unpacklo_epi64(*p1, *q1);
  pq[2] = _mm_unpacklo_epi64(*p2, *q2);
  pq[3] = _mm_unpacklo_epi64(*p3, *q3);

  __m128i abs_p1p0;

  const __m128i four = _mm_set1_epi16(4);
  __m128i t80;
  const __m128i one = _mm_set1_epi16(0x1);

  get_limit(_blimit, _limit, _thresh, bd, &blimit, &limit, &thresh, &t80);

  highbd_hev_filter_mask_x_sse2(pq, 4, &p1p0, &q1q0, &abs_p1p0, &limit, &blimit,
                                &thresh, &hev, &mask);

  // lp filter
  highbd_filter4_sse2(&p1p0, &q1q0, &hev, &mask, q1q0_out, p1p0_out, &t80, bd);

  // flat_mask4
  flat = _mm_max_epi16(abs_diff16(pq[2], pq[0]), abs_diff16(pq[3], pq[0]));
  flat = _mm_max_epi16(abs_p1p0, flat);
  flat = _mm_max_epi16(flat, _mm_srli_si128(flat, 8));

  flat = _mm_subs_epu16(flat, _mm_slli_epi16(one, bd - 8));

  flat = _mm_cmpeq_epi16(flat, zero);
  flat = _mm_and_si128(flat, mask);
  // replicate for the further "merged variables" usage
  flat = _mm_unpacklo_epi64(flat, flat);

  if (0xffff != _mm_movemask_epi8(_mm_cmpeq_epi16(flat, zero))) {
    __m128i workp_a, workp_b, workp_c, workp_shft0, workp_shft1;
    // Added before shift for rounding part of ROUND_POWER_OF_TWO

    // o*p2
    workp_a = _mm_add_epi16(_mm_add_epi16(*p3, *p3), _mm_add_epi16(*p2, *p1));
    workp_a = _mm_add_epi16(_mm_add_epi16(workp_a, four), *p0);
    workp_c = _mm_add_epi16(_mm_add_epi16(*q0, *p2), *p3);
    workp_c = _mm_add_epi16(workp_a, workp_c);

    // o*p1
    workp_b = _mm_add_epi16(_mm_add_epi16(*q0, *q1), *p1);
    workp_shft0 = _mm_add_epi16(workp_a, workp_b);

    // o*p0
    workp_a = _mm_add_epi16(_mm_sub_epi16(workp_a, *p3), *q2);
    workp_b = _mm_add_epi16(_mm_sub_epi16(workp_b, *p1), *p0);
    workp_shft1 = _mm_add_epi16(workp_a, workp_b);

    flat_p1p0 = _mm_srli_epi16(_mm_unpacklo_epi64(workp_shft1, workp_shft0), 3);

    // oq0
    workp_a = _mm_add_epi16(_mm_sub_epi16(workp_a, *p3), *q3);
    workp_b = _mm_add_epi16(_mm_sub_epi16(workp_b, *p0), *q0);
    workp_shft0 = _mm_add_epi16(workp_a, workp_b);

    // oq1
    workp_a = _mm_add_epi16(_mm_sub_epi16(workp_a, *p2), *q3);
    workp_b = _mm_add_epi16(_mm_sub_epi16(workp_b, *q0), *q1);
    workp_shft1 = _mm_add_epi16(workp_a, workp_b);

    flat_q0q1 = _mm_srli_epi16(_mm_unpacklo_epi64(workp_shft0, workp_shft1), 3);

    // oq2
    workp_a = _mm_add_epi16(_mm_sub_epi16(workp_a, *p1), *q3);
    workp_b = _mm_add_epi16(_mm_sub_epi16(workp_b, *q1), *q2);
    workp_a = _mm_add_epi16(workp_a, workp_b);
    opq2 = _mm_srli_epi16(_mm_unpacklo_epi64(workp_c, workp_a), 3);

    qs1qs0 = _mm_andnot_si128(flat, *q1q0_out);
    q1q0 = _mm_and_si128(flat, flat_q0q1);
    *q1q0_out = _mm_or_si128(qs1qs0, q1q0);

    ps1ps0 = _mm_andnot_si128(flat, *p1p0_out);
    p1p0 = _mm_and_si128(flat, flat_p1p0);
    *p1p0_out = _mm_or_si128(ps1ps0, p1p0);

    work_a = _mm_andnot_si128(flat, pq[2]);
    *p2 = _mm_and_si128(flat, opq2);
    *p2 = _mm_or_si128(work_a, *p2);
    *q2 = _mm_srli_si128(*p2, 8);
  }
}

static AOM_FORCE_INLINE void highbd_lpf_internal_8_dual_sse2(
    __m128i *p3, __m128i *q3, __m128i *p2, __m128i *q2, __m128i *p1,
    __m128i *q1, __m128i *p0, __m128i *q0, const unsigned char *_blimit0,
    const unsigned char *_limit0, const unsigned char *_thresh0,
    const unsigned char *_blimit1, const unsigned char *_limit1,
    const unsigned char *_thresh1, int bd) {
  __m128i blimit0, limit0, thresh0;
  __m128i t80;
  __m128i mask, flat;
  __m128i work_a, op2, oq2, op1, op0, oq0, oq1;
  __m128i abs_p1q1, abs_p0q0, work0, work1, work2;

  const __m128i zero = _mm_setzero_si128();
  const __m128i four = _mm_set1_epi16(4);
  const __m128i one = _mm_set1_epi16(0x1);
  const __m128i ffff = _mm_cmpeq_epi16(one, one);

  get_limit_dual(_blimit0, _limit0, _thresh0, _blimit1, _limit1, _thresh1, bd,
                 &blimit0, &limit0, &thresh0, &t80);

  abs_p0q0 = abs_diff16(*p0, *q0);
  abs_p1q1 = abs_diff16(*p1, *q1);

  abs_p0q0 = _mm_adds_epu16(abs_p0q0, abs_p0q0);
  abs_p1q1 = _mm_srli_epi16(abs_p1q1, 1);
  mask = _mm_subs_epu16(_mm_adds_epu16(abs_p0q0, abs_p1q1), blimit0);
  mask = _mm_xor_si128(_mm_cmpeq_epi16(mask, zero), ffff);
  // mask |= (abs(*p0 - q0) * 2 + abs(*p1 - q1) / 2 > blimit) * -1;

  // So taking maximums continues to work:
  mask = _mm_and_si128(mask, _mm_adds_epu16(limit0, one));

  work0 = _mm_max_epi16(abs_diff16(*p3, *p2), abs_diff16(*p2, *p1));
  work1 =
      _mm_max_epi16(abs_diff16(*p1, *p0), abs_diff16(*q1, *q0));  // tbu 4 flat
  work0 = _mm_max_epi16(work0, work1);
  work2 = _mm_max_epi16(abs_diff16(*q2, *q1), abs_diff16(*q2, *q3));
  work2 = _mm_max_epi16(work2, work0);
  mask = _mm_max_epi16(work2, mask);

  mask = _mm_subs_epu16(mask, limit0);
  mask = _mm_cmpeq_epi16(mask, zero);

  // lp filter
  __m128i ps[2], qs[2], p[2], q[2];
  {
    p[0] = *p0;
    p[1] = *p1;
    q[0] = *q0;
    q[1] = *q1;
    // filter_mask and hev_mask
    highbd_filter4_dual_sse2(p, q, ps, qs, &mask, &thresh0, bd, &t80);
  }

  flat = _mm_max_epi16(abs_diff16(*p2, *p0), abs_diff16(*q2, *q0));
  flat = _mm_max_epi16(work1, flat);
  work0 = _mm_max_epi16(abs_diff16(*p3, *p0), abs_diff16(*q3, *q0));
  flat = _mm_max_epi16(work0, flat);

  flat = _mm_subs_epu16(flat, _mm_slli_epi16(one, bd - 8));
  flat = _mm_cmpeq_epi16(flat, zero);
  flat = _mm_and_si128(flat, mask);  // flat & mask

  // filter8; need it only if flat != 0
  if (0xffff != _mm_movemask_epi8(_mm_cmpeq_epi16(flat, zero))) {
    __m128i workp_a, workp_b;
    // Added before shift for rounding part of ROUND_POWER_OF_TWO

    // o*p2
    workp_a = _mm_add_epi16(_mm_add_epi16(*p3, *p3), _mm_add_epi16(*p2, *p1));
    workp_a = _mm_add_epi16(_mm_add_epi16(workp_a, four), *p0);
    workp_b = _mm_add_epi16(_mm_add_epi16(*q0, *p2), *p3);
    op2 = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 3);

    // o*p1
    workp_b = _mm_add_epi16(_mm_add_epi16(*q0, *q1), *p1);
    op1 = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 3);

    // o*p0
    workp_a = _mm_add_epi16(_mm_sub_epi16(workp_a, *p3), *q2);
    workp_b = _mm_add_epi16(_mm_sub_epi16(workp_b, *p1), *p0);
    op0 = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 3);

    // oq0
    workp_a = _mm_add_epi16(_mm_sub_epi16(workp_a, *p3), *q3);
    workp_b = _mm_add_epi16(_mm_sub_epi16(workp_b, *p0), *q0);
    oq0 = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 3);

    // oq1
    workp_a = _mm_add_epi16(_mm_sub_epi16(workp_a, *p2), *q3);
    workp_b = _mm_add_epi16(_mm_sub_epi16(workp_b, *q0), *q1);
    oq1 = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 3);

    // oq2
    workp_a = _mm_add_epi16(_mm_sub_epi16(workp_a, *p1), *q3);
    workp_b = _mm_add_epi16(_mm_sub_epi16(workp_b, *q1), *q2);
    oq2 = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 3);

    qs[0] = _mm_andnot_si128(flat, qs[0]);
    oq0 = _mm_and_si128(flat, oq0);
    *q0 = _mm_or_si128(qs[0], oq0);

    qs[1] = _mm_andnot_si128(flat, qs[1]);
    oq1 = _mm_and_si128(flat, oq1);
    *q1 = _mm_or_si128(qs[1], oq1);

    ps[0] = _mm_andnot_si128(flat, ps[0]);
    op0 = _mm_and_si128(flat, op0);
    *p0 = _mm_or_si128(ps[0], op0);

    ps[1] = _mm_andnot_si128(flat, ps[1]);
    op1 = _mm_and_si128(flat, op1);
    *p1 = _mm_or_si128(ps[1], op1);

    work_a = _mm_andnot_si128(flat, *q2);
    *q2 = _mm_and_si128(flat, oq2);
    *q2 = _mm_or_si128(work_a, *q2);

    work_a = _mm_andnot_si128(flat, *p2);
    *p2 = _mm_and_si128(flat, op2);
    *p2 = _mm_or_si128(work_a, *p2);
  } else {
    *q0 = qs[0];
    *q1 = qs[1];
    *p0 = ps[0];
    *p1 = ps[1];
  }
}

void aom_highbd_lpf_horizontal_8_sse2(uint16_t *s, int p,
                                      const uint8_t *_blimit,
                                      const uint8_t *_limit,
                                      const uint8_t *_thresh, int bd) {
  __m128i p2, p1, p0, q0, q1, q2, p3, q3;
  __m128i q1q0, p1p0;

  p3 = _mm_loadl_epi64((__m128i *)(s - 4 * p));
  q3 = _mm_loadl_epi64((__m128i *)(s + 3 * p));
  p2 = _mm_loadl_epi64((__m128i *)(s - 3 * p));
  q2 = _mm_loadl_epi64((__m128i *)(s + 2 * p));
  p1 = _mm_loadl_epi64((__m128i *)(s - 2 * p));
  q1 = _mm_loadl_epi64((__m128i *)(s + 1 * p));
  p0 = _mm_loadl_epi64((__m128i *)(s - 1 * p));
  q0 = _mm_loadl_epi64((__m128i *)(s + 0 * p));

  highbd_lpf_internal_8_sse2(&p3, &q3, &p2, &q2, &p1, &q1, &p0, &q0, &q1q0,
                             &p1p0, _blimit, _limit, _thresh, bd);

  _mm_storel_epi64((__m128i *)(s - 3 * p), p2);
  _mm_storel_epi64((__m128i *)(s - 2 * p), _mm_srli_si128(p1p0, 8));
  _mm_storel_epi64((__m128i *)(s - 1 * p), p1p0);
  _mm_storel_epi64((__m128i *)(s + 0 * p), q1q0);
  _mm_storel_epi64((__m128i *)(s + 1 * p), _mm_srli_si128(q1q0, 8));
  _mm_storel_epi64((__m128i *)(s + 2 * p), q2);
}

void aom_highbd_lpf_horizontal_8_dual_sse2(
    uint16_t *s, int p, const uint8_t *_blimit0, const uint8_t *_limit0,
    const uint8_t *_thresh0, const uint8_t *_blimit1, const uint8_t *_limit1,
    const uint8_t *_thresh1, int bd) {
  __m128i p2, p1, p0, q0, q1, q2, p3, q3;

  p3 = _mm_loadu_si128((__m128i *)(s - 4 * p));
  q3 = _mm_loadu_si128((__m128i *)(s + 3 * p));
  p2 = _mm_loadu_si128((__m128i *)(s - 3 * p));
  q2 = _mm_loadu_si128((__m128i *)(s + 2 * p));
  p1 = _mm_loadu_si128((__m128i *)(s - 2 * p));
  q1 = _mm_loadu_si128((__m128i *)(s + 1 * p));
  p0 = _mm_loadu_si128((__m128i *)(s - 1 * p));
  q0 = _mm_loadu_si128((__m128i *)(s + 0 * p));

  highbd_lpf_internal_8_dual_sse2(&p3, &q3, &p2, &q2, &p1, &q1, &p0, &q0,
                                  _blimit0, _limit0, _thresh0, _blimit1,
                                  _limit1, _thresh1, bd);

  _mm_storeu_si128((__m128i *)(s - 3 * p), p2);
  _mm_storeu_si128((__m128i *)(s - 2 * p), p1);
  _mm_storeu_si128((__m128i *)(s - 1 * p), p0);
  _mm_storeu_si128((__m128i *)(s + 0 * p), q0);
  _mm_storeu_si128((__m128i *)(s + 1 * p), q1);
  _mm_storeu_si128((__m128i *)(s + 2 * p), q2);
}

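// 4-wide filter: just the mask/hev computation followed by filter4. There is
// no flat path at this width, so the outputs are always the filter4 results.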
static AOM_FORCE_INLINE void highbd_lpf_internal_4_sse2(
    __m128i *p1, __m128i *p0, __m128i *q0, __m128i *q1, __m128i *q1q0_out,
    __m128i *p1p0_out, const uint8_t *_blimit, const uint8_t *_limit,
    const uint8_t *_thresh, int bd) {
  __m128i blimit, limit, thresh;
  __m128i mask, hev;
  __m128i p1p0, q1q0;
  __m128i pq[2];

  __m128i abs_p1p0;

  __m128i t80;
  get_limit(_blimit, _limit, _thresh, bd, &blimit, &limit, &thresh, &t80);

  pq[0] = _mm_unpacklo_epi64(*p0, *q0);
  pq[1] = _mm_unpacklo_epi64(*p1, *q1);

  highbd_hev_filter_mask_x_sse2(pq, 2, &p1p0, &q1q0, &abs_p1p0, &limit, &blimit,
                                &thresh, &hev, &mask);

  highbd_filter4_sse2(&p1p0, &q1q0, &hev, &mask, q1q0_out, p1p0_out, &t80, bd);
}

static AOM_FORCE_INLINE void highbd_lpf_internal_4_dual_sse2(
    __m128i *p1, __m128i *p0, __m128i *q0, __m128i *q1, __m128i *ps,
    __m128i *qs, const uint8_t *_blimit0, const uint8_t *_limit0,
    const uint8_t *_thresh0, const uint8_t *_blimit1, const uint8_t *_limit1,
    const uint8_t *_thresh1, int bd) {
  __m128i blimit0, limit0, thresh0;
  __m128i mask, flat;
  __m128i p[2], q[2];

  const __m128i zero = _mm_setzero_si128();
  __m128i abs_p0q0 = abs_diff16(*q0, *p0);
  __m128i abs_p1q1 = abs_diff16(*q1, *p1);

  __m128i abs_p1p0 = abs_diff16(*p1, *p0);
  __m128i abs_q1q0 = abs_diff16(*q1, *q0);

  const __m128i ffff = _mm_cmpeq_epi16(abs_p1p0, abs_p1p0);
  const __m128i one = _mm_set1_epi16(1);

  __m128i t80;

  get_limit_dual(_blimit0, _limit0, _thresh0, _blimit1, _limit1, _thresh1, bd,
                 &blimit0, &limit0, &thresh0, &t80);

  // filter_mask and hev_mask
  flat = _mm_max_epi16(abs_p1p0, abs_q1q0);

  abs_p0q0 = _mm_adds_epu16(abs_p0q0, abs_p0q0);
  abs_p1q1 = _mm_srli_epi16(abs_p1q1, 1);

  mask = _mm_subs_epu16(_mm_adds_epu16(abs_p0q0, abs_p1q1), blimit0);
  mask = _mm_xor_si128(_mm_cmpeq_epi16(mask, zero), ffff);
  // mask |= (abs(*p0 - *q0) * 2 + abs(*p1 - *q1) / 2 > blimit) * -1;
  // So taking maximums continues to work:
  mask = _mm_and_si128(mask, _mm_adds_epu16(limit0, one));
  mask = _mm_max_epi16(flat, mask);

  mask = _mm_subs_epu16(mask, limit0);
  mask = _mm_cmpeq_epi16(mask, zero);

  p[0] = *p0;
  p[1] = *p1;
  q[0] = *q0;
  q[1] = *q1;

  highbd_filter4_dual_sse2(p, q, ps, qs, &mask, &thresh0, bd, &t80);
}

void aom_highbd_lpf_horizontal_4_sse2(uint16_t *s, int p,
                                      const uint8_t *_blimit,
                                      const uint8_t *_limit,
                                      const uint8_t *_thresh, int bd) {
  __m128i p1p0, q1q0;
  __m128i p1 = _mm_loadl_epi64((__m128i *)(s - 2 * p));
  __m128i p0 = _mm_loadl_epi64((__m128i *)(s - 1 * p));
  __m128i q0 = _mm_loadl_epi64((__m128i *)(s - 0 * p));
  __m128i q1 = _mm_loadl_epi64((__m128i *)(s + 1 * p));

  highbd_lpf_internal_4_sse2(&p1, &p0, &q0, &q1, &q1q0, &p1p0, _blimit, _limit,
                             _thresh, bd);

  _mm_storel_epi64((__m128i *)(s - 2 * p), _mm_srli_si128(p1p0, 8));
  _mm_storel_epi64((__m128i *)(s - 1 * p), p1p0);
  _mm_storel_epi64((__m128i *)(s + 0 * p), q1q0);
  _mm_storel_epi64((__m128i *)(s + 1 * p), _mm_srli_si128(q1q0, 8));
}

void aom_highbd_lpf_horizontal_4_dual_sse2(
    uint16_t *s, int p, const uint8_t *_blimit0, const uint8_t *_limit0,
    const uint8_t *_thresh0, const uint8_t *_blimit1, const uint8_t *_limit1,
    const uint8_t *_thresh1, int bd) {
  __m128i p1 = _mm_loadu_si128((__m128i *)(s - 2 * p));
  __m128i p0 = _mm_loadu_si128((__m128i *)(s - 1 * p));
  __m128i q0 = _mm_loadu_si128((__m128i *)(s - 0 * p));
  __m128i q1 = _mm_loadu_si128((__m128i *)(s + 1 * p));
  __m128i ps[2], qs[2];

  highbd_lpf_internal_4_dual_sse2(&p1, &p0, &q0, &q1, ps, qs, _blimit0, _limit0,
                                  _thresh0, _blimit1, _limit1, _thresh1, bd);

  _mm_storeu_si128((__m128i *)(s - 2 * p), ps[1]);
  _mm_storeu_si128((__m128i *)(s - 1 * p), ps[0]);
  _mm_storeu_si128((__m128i *)(s + 0 * p), qs[0]);
  _mm_storeu_si128((__m128i *)(s + 1 * p), qs[1]);
}

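// The vertical filters below reuse the horizontal kernels: load a few rows
// around the vertical edge, transpose them so the edge becomes horizontal,
// filter, then transpose the result back before storing. The transpose
// helpers come from aom_dsp/x86/lpf_common_sse2.h.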
void aom_highbd_lpf_vertical_4_sse2(uint16_t *s, int p, const uint8_t *blimit,
                                    const uint8_t *limit, const uint8_t *thresh,
                                    int bd) {
  __m128i x0, x1, x2, x3, d0, d1, d2, d3;
  __m128i p1p0, q1q0;
  __m128i p1, q1;

  x0 = _mm_loadl_epi64((__m128i *)(s - 2 + 0 * p));
  x1 = _mm_loadl_epi64((__m128i *)(s - 2 + 1 * p));
  x2 = _mm_loadl_epi64((__m128i *)(s - 2 + 2 * p));
  x3 = _mm_loadl_epi64((__m128i *)(s - 2 + 3 * p));

  highbd_transpose4x8_8x4_low_sse2(&x0, &x1, &x2, &x3, &d0, &d1, &d2, &d3);

  highbd_lpf_internal_4_sse2(&d0, &d1, &d2, &d3, &q1q0, &p1p0, blimit, limit,
                             thresh, bd);

  p1 = _mm_srli_si128(p1p0, 8);
  q1 = _mm_srli_si128(q1q0, 8);

  // transpose from 8x4 to 4x8
  highbd_transpose4x8_8x4_low_sse2(&p1, &p1p0, &q1q0, &q1, &d0, &d1, &d2, &d3);

  _mm_storel_epi64((__m128i *)(s - 2 + 0 * p), d0);
  _mm_storel_epi64((__m128i *)(s - 2 + 1 * p), d1);
  _mm_storel_epi64((__m128i *)(s - 2 + 2 * p), d2);
  _mm_storel_epi64((__m128i *)(s - 2 + 3 * p), d3);
}

void aom_highbd_lpf_vertical_4_dual_sse2(
    uint16_t *s, int p, const uint8_t *blimit0, const uint8_t *limit0,
    const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1,
    const uint8_t *thresh1, int bd) {
  __m128i x0, x1, x2, x3, x4, x5, x6, x7;
  __m128i d0, d1, d2, d3, d4, d5, d6, d7;
  __m128i ps[2], qs[2];

  x0 = _mm_loadl_epi64((__m128i *)(s - 2 + 0 * p));
  x1 = _mm_loadl_epi64((__m128i *)(s - 2 + 1 * p));
  x2 = _mm_loadl_epi64((__m128i *)(s - 2 + 2 * p));
  x3 = _mm_loadl_epi64((__m128i *)(s - 2 + 3 * p));
  x4 = _mm_loadl_epi64((__m128i *)(s - 2 + 4 * p));
  x5 = _mm_loadl_epi64((__m128i *)(s - 2 + 5 * p));
  x6 = _mm_loadl_epi64((__m128i *)(s - 2 + 6 * p));
  x7 = _mm_loadl_epi64((__m128i *)(s - 2 + 7 * p));

  highbd_transpose8x8_low_sse2(&x0, &x1, &x2, &x3, &x4, &x5, &x6, &x7, &d0, &d1,
                               &d2, &d3);

  highbd_lpf_internal_4_dual_sse2(&d0, &d1, &d2, &d3, ps, qs, blimit0, limit0,
                                  thresh0, blimit1, limit1, thresh1, bd);

  highbd_transpose4x8_8x4_sse2(&ps[1], &ps[0], &qs[0], &qs[1], &d0, &d1, &d2,
                               &d3, &d4, &d5, &d6, &d7);

  _mm_storel_epi64((__m128i *)(s - 2 + 0 * p), d0);
  _mm_storel_epi64((__m128i *)(s - 2 + 1 * p), d1);
  _mm_storel_epi64((__m128i *)(s - 2 + 2 * p), d2);
  _mm_storel_epi64((__m128i *)(s - 2 + 3 * p), d3);
  _mm_storel_epi64((__m128i *)(s - 2 + 4 * p), d4);
  _mm_storel_epi64((__m128i *)(s - 2 + 5 * p), d5);
  _mm_storel_epi64((__m128i *)(s - 2 + 6 * p), d6);
  _mm_storel_epi64((__m128i *)(s - 2 + 7 * p), d7);
}

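// Size-6 filter on a vertical edge: reads four 8-pixel rows starting at
// s - 3, transposes them into the columns p2..q2 (d0..d5), filters, and
// writes back only the four columns the filter can change (p1, p0, q0, q1),
// hence the stores at s - 2.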
void aom_highbd_lpf_vertical_6_sse2(uint16_t *s, int p, const uint8_t *blimit,
                                    const uint8_t *limit, const uint8_t *thresh,
                                    int bd) {
  __m128i d0, d1, d2, d3, d4, d5, d6, d7;
  __m128i x3, x2, x1, x0, p0, q0;
  __m128i p1p0, q1q0;

  x3 = _mm_loadu_si128((__m128i *)((s - 3) + 0 * p));
  x2 = _mm_loadu_si128((__m128i *)((s - 3) + 1 * p));
  x1 = _mm_loadu_si128((__m128i *)((s - 3) + 2 * p));
  x0 = _mm_loadu_si128((__m128i *)((s - 3) + 3 * p));

  highbd_transpose4x8_8x4_sse2(&x3, &x2, &x1, &x0, &d0, &d1, &d2, &d3, &d4, &d5,
                               &d6, &d7);

  highbd_lpf_internal_6_sse2(&d0, &d1, &d2, &d3, &d4, &d5, &p1p0, &q1q0, blimit,
                             limit, thresh, bd);

  p0 = _mm_srli_si128(p1p0, 8);
  q0 = _mm_srli_si128(q1q0, 8);

  highbd_transpose4x8_8x4_low_sse2(&p0, &p1p0, &q1q0, &q0, &d0, &d1, &d2, &d3);

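  // Only the middle four columns (p1..q1) are written back, at s - 2.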
  _mm_storel_epi64((__m128i *)(s - 2 + 0 * p), d0);
  _mm_storel_epi64((__m128i *)(s - 2 + 1 * p), d1);
  _mm_storel_epi64((__m128i *)(s - 2 + 2 * p), d2);
  _mm_storel_epi64((__m128i *)(s - 2 + 3 * p), d3);
}

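// Dual size-6 vertical filter: a full 8x8 transpose yields the p2..q2
// columns for 8 rows; after filtering, only p1..q1 are transposed back and
// stored.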
void aom_highbd_lpf_vertical_6_dual_sse2(
    uint16_t *s, int p, const uint8_t *_blimit0, const uint8_t *_limit0,
    const uint8_t *_thresh0, const uint8_t *_blimit1, const uint8_t *_limit1,
    const uint8_t *_thresh1, int bd) {
  __m128i d0, d1, d2, d3, d4, d5, d6, d7;
  __m128i x0, x1, x2, x3, x4, x5, x6, x7;
  __m128i p0, q0, p1, q1, p2, q2;

  x0 = _mm_loadu_si128((__m128i *)((s - 3) + 0 * p));
  x1 = _mm_loadu_si128((__m128i *)((s - 3) + 1 * p));
  x2 = _mm_loadu_si128((__m128i *)((s - 3) + 2 * p));
  x3 = _mm_loadu_si128((__m128i *)((s - 3) + 3 * p));
  x4 = _mm_loadu_si128((__m128i *)((s - 3) + 4 * p));
  x5 = _mm_loadu_si128((__m128i *)((s - 3) + 5 * p));
  x6 = _mm_loadu_si128((__m128i *)((s - 3) + 6 * p));
  x7 = _mm_loadu_si128((__m128i *)((s - 3) + 7 * p));

  highbd_transpose8x8_sse2(&x0, &x1, &x2, &x3, &x4, &x5, &x6, &x7, &p2, &p1,
                           &p0, &q0, &q1, &q2, &d6, &d7);

  highbd_lpf_internal_6_dual_sse2(&p2, &p1, &p0, &q0, &q1, &q2, _blimit0,
                                  _limit0, _thresh0, _blimit1, _limit1,
                                  _thresh1, bd);

  highbd_transpose4x8_8x4_sse2(&p1, &p0, &q0, &q1, &d0, &d1, &d2, &d3, &d4, &d5,
                               &d6, &d7);

  _mm_storel_epi64((__m128i *)(s - 2 + 0 * p), d0);
  _mm_storel_epi64((__m128i *)(s - 2 + 1 * p), d1);
  _mm_storel_epi64((__m128i *)(s - 2 + 2 * p), d2);
  _mm_storel_epi64((__m128i *)(s - 2 + 3 * p), d3);
  _mm_storel_epi64((__m128i *)(s - 2 + 4 * p), d4);
  _mm_storel_epi64((__m128i *)(s - 2 + 5 * p), d5);
  _mm_storel_epi64((__m128i *)(s - 2 + 6 * p), d6);
  _mm_storel_epi64((__m128i *)(s - 2 + 7 * p), d7);
}

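// Size-8 filter on a vertical edge: four 8-pixel rows from s - 4 are
// transposed into the columns p3..q3 (d0..d7) and filtered; p3 and q3 pass
// through unchanged, but all 8 columns are transposed back and rewritten.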
void aom_highbd_lpf_vertical_8_sse2(uint16_t *s, int p, const uint8_t *blimit,
                                    const uint8_t *limit, const uint8_t *thresh,
                                    int bd) {
  __m128i d0, d1, d2, d3, d4, d5, d6, d7;
  __m128i p2, p1, p0, p3, q0;
  __m128i q1q0, p1p0;

  p3 = _mm_loadu_si128((__m128i *)((s - 4) + 0 * p));
  p2 = _mm_loadu_si128((__m128i *)((s - 4) + 1 * p));
  p1 = _mm_loadu_si128((__m128i *)((s - 4) + 2 * p));
  p0 = _mm_loadu_si128((__m128i *)((s - 4) + 3 * p));

  highbd_transpose4x8_8x4_sse2(&p3, &p2, &p1, &p0, &d0, &d1, &d2, &d3, &d4, &d5,
                               &d6, &d7);

  // Loop filtering
  highbd_lpf_internal_8_sse2(&d0, &d7, &d1, &d6, &d2, &d5, &d3, &d4, &q1q0,
                             &p1p0, blimit, limit, thresh, bd);

  p0 = _mm_srli_si128(p1p0, 8);
  q0 = _mm_srli_si128(q1q0, 8);

  highbd_transpose8x8_low_sse2(&d0, &d1, &p0, &p1p0, &q1q0, &q0, &d6, &d7, &d0,
                               &d1, &d2, &d3);

  _mm_storeu_si128((__m128i *)(s - 4 + 0 * p), d0);
  _mm_storeu_si128((__m128i *)(s - 4 + 1 * p), d1);
  _mm_storeu_si128((__m128i *)(s - 4 + 2 * p), d2);
  _mm_storeu_si128((__m128i *)(s - 4 + 3 * p), d3);
}

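// Dual size-8 vertical filter: 8 rows, full 8x8 transposes in and out, with
// independent parameters for the two 4-row halves.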
void aom_highbd_lpf_vertical_8_dual_sse2(
    uint16_t *s, int p, const uint8_t *blimit0, const uint8_t *limit0,
    const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1,
    const uint8_t *thresh1, int bd) {
  __m128i x0, x1, x2, x3, x4, x5, x6, x7;
  __m128i d0, d1, d2, d3, d4, d5, d6, d7;

  x0 = _mm_loadu_si128((__m128i *)(s - 4 + 0 * p));
  x1 = _mm_loadu_si128((__m128i *)(s - 4 + 1 * p));
  x2 = _mm_loadu_si128((__m128i *)(s - 4 + 2 * p));
  x3 = _mm_loadu_si128((__m128i *)(s - 4 + 3 * p));
  x4 = _mm_loadu_si128((__m128i *)(s - 4 + 4 * p));
  x5 = _mm_loadu_si128((__m128i *)(s - 4 + 5 * p));
  x6 = _mm_loadu_si128((__m128i *)(s - 4 + 6 * p));
  x7 = _mm_loadu_si128((__m128i *)(s - 4 + 7 * p));

  highbd_transpose8x8_sse2(&x0, &x1, &x2, &x3, &x4, &x5, &x6, &x7, &d0, &d1,
                           &d2, &d3, &d4, &d5, &d6, &d7);

  highbd_lpf_internal_8_dual_sse2(&d0, &d7, &d1, &d6, &d2, &d5, &d3, &d4,
                                  blimit0, limit0, thresh0, blimit1, limit1,
                                  thresh1, bd);

  highbd_transpose8x8_sse2(&d0, &d1, &d2, &d3, &d4, &d5, &d6, &d7, &x0, &x1,
                           &x2, &x3, &x4, &x5, &x6, &x7);

  _mm_storeu_si128((__m128i *)(s - 4 + 0 * p), x0);
  _mm_storeu_si128((__m128i *)(s - 4 + 1 * p), x1);
  _mm_storeu_si128((__m128i *)(s - 4 + 2 * p), x2);
  _mm_storeu_si128((__m128i *)(s - 4 + 3 * p), x3);
  _mm_storeu_si128((__m128i *)(s - 4 + 4 * p), x4);
  _mm_storeu_si128((__m128i *)(s - 4 + 5 * p), x5);
  _mm_storeu_si128((__m128i *)(s - 4 + 6 * p), x6);
  _mm_storeu_si128((__m128i *)(s - 4 + 7 * p), x7);
}

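// Size-14 filter (taps spanning p6..q6) on a vertical edge, 4 rows. Each side
// of the edge is loaded as four 8-pixel rows and transposed separately; the
// outermost columns d0 (p7) and d7_2 (q7) are not filtered and are carried
// through the transposes so each full 8-column half can be stored back.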
void aom_highbd_lpf_vertical_14_sse2(uint16_t *s, int pitch,
                                     const uint8_t *blimit,
                                     const uint8_t *limit,
                                     const uint8_t *thresh, int bd) {
  __m128i q[7], p[7], pq[7];
  __m128i p6, p5, p4, p3;
  __m128i p6_2, p5_2, p4_2, p3_2;
  __m128i d0, d1, d2, d3;
  __m128i d0_2, d1_2, d2_2, d3_2, d7_2;

  p6 = _mm_loadu_si128((__m128i *)((s - 8) + 0 * pitch));
  p5 = _mm_loadu_si128((__m128i *)((s - 8) + 1 * pitch));
  p4 = _mm_loadu_si128((__m128i *)((s - 8) + 2 * pitch));
  p3 = _mm_loadu_si128((__m128i *)((s - 8) + 3 * pitch));

  highbd_transpose4x8_8x4_sse2(&p6, &p5, &p4, &p3, &d0, &p[6], &p[5], &p[4],
                               &p[3], &p[2], &p[1], &p[0]);

  p6_2 = _mm_loadu_si128((__m128i *)(s + 0 * pitch));
  p5_2 = _mm_loadu_si128((__m128i *)(s + 1 * pitch));
  p4_2 = _mm_loadu_si128((__m128i *)(s + 2 * pitch));
  p3_2 = _mm_loadu_si128((__m128i *)(s + 3 * pitch));

  highbd_transpose4x8_8x4_sse2(&p6_2, &p5_2, &p4_2, &p3_2, &q[0], &q[1], &q[2],
                               &q[3], &q[4], &q[5], &q[6], &d7_2);

  highbd_lpf_internal_14_sse2(p, q, pq, blimit, limit, thresh, bd);

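  // pq[i] packs the filtered p_i in the low 64 bits and q_i in the high 64
  // bits: transpose the p side back first, then extract the q side and do
  // the same.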
  highbd_transpose8x8_low_sse2(&d0, &p[6], &pq[5], &pq[4], &pq[3], &pq[2],
                               &pq[1], &pq[0], &d0, &d1, &d2, &d3);

  q[0] = _mm_srli_si128(pq[0], 8);
  q[1] = _mm_srli_si128(pq[1], 8);
  q[2] = _mm_srli_si128(pq[2], 8);
  q[3] = _mm_srli_si128(pq[3], 8);
  q[4] = _mm_srli_si128(pq[4], 8);
  q[5] = _mm_srli_si128(pq[5], 8);

  highbd_transpose8x8_low_sse2(&q[0], &q[1], &q[2], &q[3], &q[4], &q[5], &q[6],
                               &d7_2, &d0_2, &d1_2, &d2_2, &d3_2);

  _mm_storeu_si128((__m128i *)(s - 8 + 0 * pitch), d0);
  _mm_storeu_si128((__m128i *)(s + 0 * pitch), d0_2);

  _mm_storeu_si128((__m128i *)(s - 8 + 1 * pitch), d1);
  _mm_storeu_si128((__m128i *)(s + 1 * pitch), d1_2);

  _mm_storeu_si128((__m128i *)(s - 8 + 2 * pitch), d2);
  _mm_storeu_si128((__m128i *)(s + 2 * pitch), d2_2);

  _mm_storeu_si128((__m128i *)(s - 8 + 3 * pitch), d3);
  _mm_storeu_si128((__m128i *)(s + 3 * pitch), d3_2);
}

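// Dual size-14 vertical filter: 8 rows, handled as full 8x8 transposes on
// each side of the edge. The p7 column (d0) and q7 column (d7) pass through
// unfiltered so each 8-column half can be stored back intact.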
void aom_highbd_lpf_vertical_14_dual_sse2(
    uint16_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0,
    const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1,
    const uint8_t *thresh1, int bd) {
  __m128i q[7], p[7];
  __m128i p6, p5, p4, p3, p2, p1, p0, q0;
  __m128i p6_2, p5_2, p4_2, p3_2, p2_2, p1_2, q0_2, p0_2;
  __m128i d0, d7;
  __m128i d0_out, d1_out, d2_out, d3_out, d4_out, d5_out, d6_out, d7_out;

  p6 = _mm_loadu_si128((__m128i *)((s - 8) + 0 * pitch));
  p5 = _mm_loadu_si128((__m128i *)((s - 8) + 1 * pitch));
  p4 = _mm_loadu_si128((__m128i *)((s - 8) + 2 * pitch));
  p3 = _mm_loadu_si128((__m128i *)((s - 8) + 3 * pitch));
  p2 = _mm_loadu_si128((__m128i *)((s - 8) + 4 * pitch));
  p1 = _mm_loadu_si128((__m128i *)((s - 8) + 5 * pitch));
  p0 = _mm_loadu_si128((__m128i *)((s - 8) + 6 * pitch));
  q0 = _mm_loadu_si128((__m128i *)((s - 8) + 7 * pitch));

  highbd_transpose8x8_sse2(&p6, &p5, &p4, &p3, &p2, &p1, &p0, &q0, &d0, &p[6],
                           &p[5], &p[4], &p[3], &p[2], &p[1], &p[0]);

  p6_2 = _mm_loadu_si128((__m128i *)(s + 0 * pitch));
  p5_2 = _mm_loadu_si128((__m128i *)(s + 1 * pitch));
  p4_2 = _mm_loadu_si128((__m128i *)(s + 2 * pitch));
  p3_2 = _mm_loadu_si128((__m128i *)(s + 3 * pitch));
  p2_2 = _mm_loadu_si128((__m128i *)(s + 4 * pitch));
  p1_2 = _mm_loadu_si128((__m128i *)(s + 5 * pitch));
  p0_2 = _mm_loadu_si128((__m128i *)(s + 6 * pitch));
  q0_2 = _mm_loadu_si128((__m128i *)(s + 7 * pitch));

  highbd_transpose8x8_sse2(&p6_2, &p5_2, &p4_2, &p3_2, &p2_2, &p1_2, &p0_2,
                           &q0_2, &q[0], &q[1], &q[2], &q[3], &q[4], &q[5],
                           &q[6], &d7);

  highbd_lpf_internal_14_dual_sse2(p, q, blimit0, limit0, thresh0, blimit1,
                                   limit1, thresh1, bd);

  highbd_transpose8x8_sse2(&d0, &p[6], &p[5], &p[4], &p[3], &p[2], &p[1], &p[0],
                           &d0_out, &d1_out, &d2_out, &d3_out, &d4_out, &d5_out,
                           &d6_out, &d7_out);

  _mm_storeu_si128((__m128i *)(s - 8 + 0 * pitch), d0_out);
  _mm_storeu_si128((__m128i *)(s - 8 + 1 * pitch), d1_out);
  _mm_storeu_si128((__m128i *)(s - 8 + 2 * pitch), d2_out);
  _mm_storeu_si128((__m128i *)(s - 8 + 3 * pitch), d3_out);
  _mm_storeu_si128((__m128i *)(s - 8 + 4 * pitch), d4_out);
  _mm_storeu_si128((__m128i *)(s - 8 + 5 * pitch), d5_out);
  _mm_storeu_si128((__m128i *)(s - 8 + 6 * pitch), d6_out);
  _mm_storeu_si128((__m128i *)(s - 8 + 7 * pitch), d7_out);

  highbd_transpose8x8_sse2(&q[0], &q[1], &q[2], &q[3], &q[4], &q[5], &q[6], &d7,
                           &d0_out, &d1_out, &d2_out, &d3_out, &d4_out, &d5_out,
                           &d6_out, &d7_out);

  _mm_storeu_si128((__m128i *)(s + 0 * pitch), d0_out);
  _mm_storeu_si128((__m128i *)(s + 1 * pitch), d1_out);
  _mm_storeu_si128((__m128i *)(s + 2 * pitch), d2_out);
  _mm_storeu_si128((__m128i *)(s + 3 * pitch), d3_out);
  _mm_storeu_si128((__m128i *)(s + 4 * pitch), d4_out);
  _mm_storeu_si128((__m128i *)(s + 5 * pitch), d5_out);
  _mm_storeu_si128((__m128i *)(s + 6 * pitch), d6_out);
  _mm_storeu_si128((__m128i *)(s + 7 * pitch), d7_out);
}
