// Copyright 2011 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// SSE2 version of some decoding functions (idct, loop filtering).
//
// Author: somnath@google.com (Somnath Banerjee)
//         cduvivier@google.com (Christian Duvivier)

#include "./dsp.h"

#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif

#if defined(WEBP_USE_SSE2)

#include <emmintrin.h>
#include "../dec/vp8i.h"

//------------------------------------------------------------------------------
// Transforms (Paragraph 14.4)

static void TransformSSE2(const int16_t* in, uint8_t* dst, int do_two) {
  // This implementation makes use of 16-bit fixed point versions of two
  // multiply constants:
  //    K1 = sqrt(2) * cos (pi/8) ~= 85627 / 2^16
  //    K2 = sqrt(2) * sin (pi/8) ~= 35468 / 2^16
  //
  // To be able to use signed 16-bit integers, we use the following trick to
  // have constants within range:
  // - Associated constants are obtained by subtracting the 16-bit fixed point
  //   version of one:
  //      k = K - (1 << 16)  =>  K = k + (1 << 16)
  //      K1 = 85627  =>  k1 =  20091
  //      K2 = 35468  =>  k2 = -30068
  // - The multiplication of a variable by a constant becomes the sum of the
  //   variable and the multiplication of that variable by the associated
  //   constant:
  //      (x * K) >> 16 = (x * (k + (1 << 16))) >> 16 = ((x * k ) >> 16) + x
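  //      e.g. with x = 100:   (100 * K1) >> 16 = (100 * 85627) >> 16 = 130
  //      and ((100 * k1) >> 16) + 100 = ((100 * 20091) >> 16) + 100 = 130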
  const __m128i k1 = _mm_set1_epi16(20091);
  const __m128i k2 = _mm_set1_epi16(-30068);
  __m128i T0, T1, T2, T3;

  // Load and concatenate the transform coefficients (we'll do two transforms
  // in parallel). In the case of only one transform, the second half of the
  // vectors will just contain random values we'll never use nor store.
  __m128i in0, in1, in2, in3;
  {
    in0 = _mm_loadl_epi64((__m128i*)&in[0]);
    in1 = _mm_loadl_epi64((__m128i*)&in[4]);
    in2 = _mm_loadl_epi64((__m128i*)&in[8]);
    in3 = _mm_loadl_epi64((__m128i*)&in[12]);
    // a00 a10 a20 a30   x x x x
    // a01 a11 a21 a31   x x x x
    // a02 a12 a22 a32   x x x x
    // a03 a13 a23 a33   x x x x
    if (do_two) {
      const __m128i inB0 = _mm_loadl_epi64((__m128i*)&in[16]);
      const __m128i inB1 = _mm_loadl_epi64((__m128i*)&in[20]);
      const __m128i inB2 = _mm_loadl_epi64((__m128i*)&in[24]);
      const __m128i inB3 = _mm_loadl_epi64((__m128i*)&in[28]);
      in0 = _mm_unpacklo_epi64(in0, inB0);
      in1 = _mm_unpacklo_epi64(in1, inB1);
      in2 = _mm_unpacklo_epi64(in2, inB2);
      in3 = _mm_unpacklo_epi64(in3, inB3);
      // a00 a10 a20 a30   b00 b10 b20 b30
      // a01 a11 a21 a31   b01 b11 b21 b31
      // a02 a12 a22 a32   b02 b12 b22 b32
      // a03 a13 a23 a33   b03 b13 b23 b33
    }
  }

  // Vertical pass and subsequent transpose.
  {
    // First pass, c and d calculations are longer because of the "trick"
    // multiplications.
    const __m128i a = _mm_add_epi16(in0, in2);
    const __m128i b = _mm_sub_epi16(in0, in2);
    // c = MUL(in1, K2) - MUL(in3, K1) = MUL(in1, k2) - MUL(in3, k1) + in1 - in3
    const __m128i c1 = _mm_mulhi_epi16(in1, k2);
    const __m128i c2 = _mm_mulhi_epi16(in3, k1);
    const __m128i c3 = _mm_sub_epi16(in1, in3);
    const __m128i c4 = _mm_sub_epi16(c1, c2);
    const __m128i c = _mm_add_epi16(c3, c4);
    // d = MUL(in1, K1) + MUL(in3, K2) = MUL(in1, k1) + MUL(in3, k2) + in1 + in3
    const __m128i d1 = _mm_mulhi_epi16(in1, k1);
    const __m128i d2 = _mm_mulhi_epi16(in3, k2);
    const __m128i d3 = _mm_add_epi16(in1, in3);
    const __m128i d4 = _mm_add_epi16(d1, d2);
    const __m128i d = _mm_add_epi16(d3, d4);

    // Second pass.
    const __m128i tmp0 = _mm_add_epi16(a, d);
    const __m128i tmp1 = _mm_add_epi16(b, c);
    const __m128i tmp2 = _mm_sub_epi16(b, c);
    const __m128i tmp3 = _mm_sub_epi16(a, d);

    // Transpose the two 4x4.
    // a00 a01 a02 a03   b00 b01 b02 b03
    // a10 a11 a12 a13   b10 b11 b12 b13
    // a20 a21 a22 a23   b20 b21 b22 b23
    // a30 a31 a32 a33   b30 b31 b32 b33
    const __m128i transpose0_0 = _mm_unpacklo_epi16(tmp0, tmp1);
    const __m128i transpose0_1 = _mm_unpacklo_epi16(tmp2, tmp3);
    const __m128i transpose0_2 = _mm_unpackhi_epi16(tmp0, tmp1);
    const __m128i transpose0_3 = _mm_unpackhi_epi16(tmp2, tmp3);
    // a00 a10 a01 a11   a02 a12 a03 a13
    // a20 a30 a21 a31   a22 a32 a23 a33
    // b00 b10 b01 b11   b02 b12 b03 b13
    // b20 b30 b21 b31   b22 b32 b23 b33
    const __m128i transpose1_0 = _mm_unpacklo_epi32(transpose0_0, transpose0_1);
    const __m128i transpose1_1 = _mm_unpacklo_epi32(transpose0_2, transpose0_3);
    const __m128i transpose1_2 = _mm_unpackhi_epi32(transpose0_0, transpose0_1);
    const __m128i transpose1_3 = _mm_unpackhi_epi32(transpose0_2, transpose0_3);
    // a00 a10 a20 a30 a01 a11 a21 a31
    // b00 b10 b20 b30 b01 b11 b21 b31
    // a02 a12 a22 a32 a03 a13 a23 a33
    // b02 b12 b22 b32 b03 b13 b23 b33
    T0 = _mm_unpacklo_epi64(transpose1_0, transpose1_1);
    T1 = _mm_unpackhi_epi64(transpose1_0, transpose1_1);
    T2 = _mm_unpacklo_epi64(transpose1_2, transpose1_3);
    T3 = _mm_unpackhi_epi64(transpose1_2, transpose1_3);
    // a00 a10 a20 a30   b00 b10 b20 b30
    // a01 a11 a21 a31   b01 b11 b21 b31
    // a02 a12 a22 a32   b02 b12 b22 b32
    // a03 a13 a23 a33   b03 b13 b23 b33
  }

  // Horizontal pass and subsequent transpose.
  {
    // First pass, c and d calculations are longer because of the "trick"
    // multiplications.
    const __m128i four = _mm_set1_epi16(4);
    const __m128i dc = _mm_add_epi16(T0, four);
    const __m128i a =  _mm_add_epi16(dc, T2);
    const __m128i b =  _mm_sub_epi16(dc, T2);
    // c = MUL(T1, K2) - MUL(T3, K1) = MUL(T1, k2) - MUL(T3, k1) + T1 - T3
    const __m128i c1 = _mm_mulhi_epi16(T1, k2);
    const __m128i c2 = _mm_mulhi_epi16(T3, k1);
    const __m128i c3 = _mm_sub_epi16(T1, T3);
    const __m128i c4 = _mm_sub_epi16(c1, c2);
    const __m128i c = _mm_add_epi16(c3, c4);
    // d = MUL(T1, K1) + MUL(T3, K2) = MUL(T1, k1) + MUL(T3, k2) + T1 + T3
    const __m128i d1 = _mm_mulhi_epi16(T1, k1);
    const __m128i d2 = _mm_mulhi_epi16(T3, k2);
    const __m128i d3 = _mm_add_epi16(T1, T3);
    const __m128i d4 = _mm_add_epi16(d1, d2);
    const __m128i d = _mm_add_epi16(d3, d4);

    // Second pass.
    const __m128i tmp0 = _mm_add_epi16(a, d);
    const __m128i tmp1 = _mm_add_epi16(b, c);
    const __m128i tmp2 = _mm_sub_epi16(b, c);
    const __m128i tmp3 = _mm_sub_epi16(a, d);
    const __m128i shifted0 = _mm_srai_epi16(tmp0, 3);
    const __m128i shifted1 = _mm_srai_epi16(tmp1, 3);
    const __m128i shifted2 = _mm_srai_epi16(tmp2, 3);
    const __m128i shifted3 = _mm_srai_epi16(tmp3, 3);

    // Transpose the two 4x4.
    // a00 a01 a02 a03   b00 b01 b02 b03
    // a10 a11 a12 a13   b10 b11 b12 b13
    // a20 a21 a22 a23   b20 b21 b22 b23
    // a30 a31 a32 a33   b30 b31 b32 b33
    const __m128i transpose0_0 = _mm_unpacklo_epi16(shifted0, shifted1);
    const __m128i transpose0_1 = _mm_unpacklo_epi16(shifted2, shifted3);
    const __m128i transpose0_2 = _mm_unpackhi_epi16(shifted0, shifted1);
    const __m128i transpose0_3 = _mm_unpackhi_epi16(shifted2, shifted3);
    // a00 a10 a01 a11   a02 a12 a03 a13
    // a20 a30 a21 a31   a22 a32 a23 a33
    // b00 b10 b01 b11   b02 b12 b03 b13
    // b20 b30 b21 b31   b22 b32 b23 b33
    const __m128i transpose1_0 = _mm_unpacklo_epi32(transpose0_0, transpose0_1);
    const __m128i transpose1_1 = _mm_unpacklo_epi32(transpose0_2, transpose0_3);
    const __m128i transpose1_2 = _mm_unpackhi_epi32(transpose0_0, transpose0_1);
    const __m128i transpose1_3 = _mm_unpackhi_epi32(transpose0_2, transpose0_3);
    // a00 a10 a20 a30 a01 a11 a21 a31
    // b00 b10 b20 b30 b01 b11 b21 b31
    // a02 a12 a22 a32 a03 a13 a23 a33
    // b02 b12 b22 b32 b03 b13 b23 b33
    T0 = _mm_unpacklo_epi64(transpose1_0, transpose1_1);
    T1 = _mm_unpackhi_epi64(transpose1_0, transpose1_1);
    T2 = _mm_unpacklo_epi64(transpose1_2, transpose1_3);
    T3 = _mm_unpackhi_epi64(transpose1_2, transpose1_3);
    // a00 a10 a20 a30   b00 b10 b20 b30
    // a01 a11 a21 a31   b01 b11 b21 b31
    // a02 a12 a22 a32   b02 b12 b22 b32
    // a03 a13 a23 a33   b03 b13 b23 b33
  }

  // Add inverse transform to 'dst' and store.
  {
    const __m128i zero = _mm_setzero_si128();
    // Load the reference(s).
    __m128i dst0, dst1, dst2, dst3;
    if (do_two) {
      // Load eight bytes/pixels per line.
      dst0 = _mm_loadl_epi64((__m128i*)&dst[0 * BPS]);
      dst1 = _mm_loadl_epi64((__m128i*)&dst[1 * BPS]);
      dst2 = _mm_loadl_epi64((__m128i*)&dst[2 * BPS]);
      dst3 = _mm_loadl_epi64((__m128i*)&dst[3 * BPS]);
    } else {
      // Load four bytes/pixels per line.
      dst0 = _mm_cvtsi32_si128(*(int*)&dst[0 * BPS]);
      dst1 = _mm_cvtsi32_si128(*(int*)&dst[1 * BPS]);
      dst2 = _mm_cvtsi32_si128(*(int*)&dst[2 * BPS]);
      dst3 = _mm_cvtsi32_si128(*(int*)&dst[3 * BPS]);
    }
    // Convert to 16b.
    dst0 = _mm_unpacklo_epi8(dst0, zero);
    dst1 = _mm_unpacklo_epi8(dst1, zero);
    dst2 = _mm_unpacklo_epi8(dst2, zero);
    dst3 = _mm_unpacklo_epi8(dst3, zero);
    // Add the inverse transform(s).
    dst0 = _mm_add_epi16(dst0, T0);
    dst1 = _mm_add_epi16(dst1, T1);
    dst2 = _mm_add_epi16(dst2, T2);
    dst3 = _mm_add_epi16(dst3, T3);
    // Unsigned saturate to 8b.
    dst0 = _mm_packus_epi16(dst0, dst0);
    dst1 = _mm_packus_epi16(dst1, dst1);
    dst2 = _mm_packus_epi16(dst2, dst2);
    dst3 = _mm_packus_epi16(dst3, dst3);
    // Store the results.
    if (do_two) {
      // Store eight bytes/pixels per line.
      _mm_storel_epi64((__m128i*)&dst[0 * BPS], dst0);
      _mm_storel_epi64((__m128i*)&dst[1 * BPS], dst1);
      _mm_storel_epi64((__m128i*)&dst[2 * BPS], dst2);
      _mm_storel_epi64((__m128i*)&dst[3 * BPS], dst3);
    } else {
      // Store four bytes/pixels per line.
      *((int32_t *)&dst[0 * BPS]) = _mm_cvtsi128_si32(dst0);
      *((int32_t *)&dst[1 * BPS]) = _mm_cvtsi128_si32(dst1);
      *((int32_t *)&dst[2 * BPS]) = _mm_cvtsi128_si32(dst2);
      *((int32_t *)&dst[3 * BPS]) = _mm_cvtsi128_si32(dst3);
    }
  }
}

//------------------------------------------------------------------------------
// Loop Filter (Paragraph 15)

// Compute abs(p - q) = subs(p - q) OR subs(q - p)
#define MM_ABS(p, q)  _mm_or_si128(                                            \
    _mm_subs_epu8((q), (p)),                                                   \
    _mm_subs_epu8((p), (q)))
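// (One of the two saturated differences is always zero, so the OR yields the
// absolute difference; e.g. p = 3, q = 10: subs(3 - 10) = 0, subs(10 - 3) = 7.)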

// Shift each byte of "a" by N bits while preserving the sign bit.
//
// It first shifts the lower bytes of the words and then the upper bytes and
// then merges the results together.
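// (SSE2 has 16-bit arithmetic shifts but no per-byte one, hence the split
// into the low and high bytes of each 16-bit lane.)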
#define SIGNED_SHIFT_N(a, N) {                                                 \
  __m128i t = a;                                                               \
  t = _mm_slli_epi16(t, 8);                                                    \
  t = _mm_srai_epi16(t, N);                                                    \
  t = _mm_srli_epi16(t, 8);                                                    \
                                                                               \
  a = _mm_srai_epi16(a, N + 8);                                                \
  a = _mm_slli_epi16(a, 8);                                                    \
                                                                               \
  a = _mm_or_si128(t, a);                                                      \
}

#define FLIP_SIGN_BIT2(a, b) {                                                 \
  a = _mm_xor_si128(a, sign_bit);                                              \
  b = _mm_xor_si128(b, sign_bit);                                              \
}

#define FLIP_SIGN_BIT4(a, b, c, d) {                                           \
  FLIP_SIGN_BIT2(a, b);                                                        \
  FLIP_SIGN_BIT2(c, d);                                                        \
}

#define GET_NOTHEV(p1, p0, q0, q1, hev_thresh, not_hev) {                      \
  const __m128i zero = _mm_setzero_si128();                                    \
  const __m128i t_1 = MM_ABS(p1, p0);                                          \
  const __m128i t_2 = MM_ABS(q1, q0);                                          \
                                                                               \
  const __m128i h = _mm_set1_epi8(hev_thresh);                                 \
  const __m128i t_3 = _mm_subs_epu8(t_1, h);  /* abs(p1 - p0) - hev_thresh */  \
  const __m128i t_4 = _mm_subs_epu8(t_2, h);  /* abs(q1 - q0) - hev_thresh */  \
                                                                               \
  not_hev = _mm_or_si128(t_3, t_4);                                            \
  not_hev = _mm_cmpeq_epi8(not_hev, zero);  /* not_hev: t_1 <= h && t_2 <= h */\
}

#define GET_BASE_DELTA(p1, p0, q0, q1, o) {                                    \
  const __m128i qp0 = _mm_subs_epi8(q0, p0);  /* q0 - p0 */                    \
  o = _mm_subs_epi8(p1, q1);            /* p1 - q1 */                          \
  o = _mm_adds_epi8(o, qp0);            /* p1 - q1 + 1 * (q0 - p0) */          \
  o = _mm_adds_epi8(o, qp0);            /* p1 - q1 + 2 * (q0 - p0) */          \
  o = _mm_adds_epi8(o, qp0);            /* p1 - q1 + 3 * (q0 - p0) */          \
}

#define DO_SIMPLE_FILTER(p0, q0, fl) {                                         \
  const __m128i three = _mm_set1_epi8(3);                                      \
  const __m128i four = _mm_set1_epi8(4);                                       \
  __m128i v3 = _mm_adds_epi8(fl, three);                                       \
  __m128i v4 = _mm_adds_epi8(fl, four);                                        \
                                                                               \
  /* Do +4 side */                                                             \
  SIGNED_SHIFT_N(v4, 3);                /* v4 >> 3  */                         \
  q0 = _mm_subs_epi8(q0, v4);           /* q0 -= v4 */                         \
                                                                               \
  /* Now do +3 side */                                                         \
  SIGNED_SHIFT_N(v3, 3);                /* v3 >> 3  */                         \
  p0 = _mm_adds_epi8(p0, v3);           /* p0 += v3 */                         \
}
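// (The +4/+3 biases give the rounding of the VP8 simple filter update:
// q0 -= (fl + 4) >> 3 and p0 += (fl + 3) >> 3.)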

// Updates values of 2 pixels at MB edge during complex filtering.
// Update operations:
// q = q - delta and p = p + delta; where delta = [(a_hi >> 7), (a_lo >> 7)]
#define UPDATE_2PIXELS(pi, qi, a_lo, a_hi) {                                   \
  const __m128i a_lo7 = _mm_srai_epi16(a_lo, 7);                               \
  const __m128i a_hi7 = _mm_srai_epi16(a_hi, 7);                               \
  const __m128i delta = _mm_packs_epi16(a_lo7, a_hi7);                         \
  pi = _mm_adds_epi8(pi, delta);                                               \
  qi = _mm_subs_epi8(qi, delta);                                               \
}

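// Sets *mask to 0xff for the positions where the edge should be filtered,
// i.e. where  abs(p0 - q0) * 2 + abs(p1 - q1) / 2 <= thresh.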
static void NeedsFilter(const __m128i* p1, const __m128i* p0, const __m128i* q0,
                        const __m128i* q1, int thresh, __m128i *mask) {
  __m128i t1 = MM_ABS(*p1, *q1);        // abs(p1 - q1)
  *mask = _mm_set1_epi8(0xFE);
  t1 = _mm_and_si128(t1, *mask);        // set lsb of each byte to zero
  t1 = _mm_srli_epi16(t1, 1);           // abs(p1 - q1) / 2

  *mask = MM_ABS(*p0, *q0);             // abs(p0 - q0)
  *mask = _mm_adds_epu8(*mask, *mask);  // abs(p0 - q0) * 2
  *mask = _mm_adds_epu8(*mask, t1);     // abs(p0 - q0) * 2 + abs(p1 - q1) / 2

  t1 = _mm_set1_epi8(thresh);
  *mask = _mm_subs_epu8(*mask, t1);     // mask <= thresh
  *mask = _mm_cmpeq_epi8(*mask, _mm_setzero_si128());
}

//------------------------------------------------------------------------------
// Edge filtering functions

// Applies filter on 2 pixels (p0 and q0)
static WEBP_INLINE void DoFilter2(const __m128i* p1, __m128i* p0, __m128i* q0,
                                  const __m128i* q1, int thresh) {
  __m128i a, mask;
  const __m128i sign_bit = _mm_set1_epi8(0x80);
  const __m128i p1s = _mm_xor_si128(*p1, sign_bit);
  const __m128i q1s = _mm_xor_si128(*q1, sign_bit);

  NeedsFilter(p1, p0, q0, q1, thresh, &mask);

  // convert to signed values
  FLIP_SIGN_BIT2(*p0, *q0);

  GET_BASE_DELTA(p1s, *p0, *q0, q1s, a);
  a = _mm_and_si128(a, mask);     // mask filter values we don't care about
  DO_SIMPLE_FILTER(*p0, *q0, a);

  // unoffset
  FLIP_SIGN_BIT2(*p0, *q0);
}

// Applies filter on 4 pixels (p1, p0, q0 and q1)
static WEBP_INLINE void DoFilter4(__m128i* p1, __m128i *p0,
                                  __m128i* q0, __m128i* q1,
                                  const __m128i* mask, int hev_thresh) {
  __m128i not_hev;
  __m128i t1, t2, t3;
  const __m128i sign_bit = _mm_set1_epi8(0x80);

  // compute hev mask
  GET_NOTHEV(*p1, *p0, *q0, *q1, hev_thresh, not_hev);

  // convert to signed values
  FLIP_SIGN_BIT4(*p1, *p0, *q0, *q1);

  t1 = _mm_subs_epi8(*p1, *q1);        // p1 - q1
  t1 = _mm_andnot_si128(not_hev, t1);  // hev(p1 - q1)
  t2 = _mm_subs_epi8(*q0, *p0);        // q0 - p0
  t1 = _mm_adds_epi8(t1, t2);          // hev(p1 - q1) + 1 * (q0 - p0)
  t1 = _mm_adds_epi8(t1, t2);          // hev(p1 - q1) + 2 * (q0 - p0)
  t1 = _mm_adds_epi8(t1, t2);          // hev(p1 - q1) + 3 * (q0 - p0)
  t1 = _mm_and_si128(t1, *mask);       // mask filter values we don't care about

  // Do +4 side
  t2 = _mm_set1_epi8(4);
  t2 = _mm_adds_epi8(t1, t2);        // 3 * (q0 - p0) + (p1 - q1) + 4
  SIGNED_SHIFT_N(t2, 3);             // (3 * (q0 - p0) + hev(p1 - q1) + 4) >> 3
  t3 = t2;                           // save t2
  *q0 = _mm_subs_epi8(*q0, t2);      // q0 -= t2

  // Now do +3 side
  t2 = _mm_set1_epi8(3);
  t2 = _mm_adds_epi8(t1, t2);        // +3 instead of +4
  SIGNED_SHIFT_N(t2, 3);             // (3 * (q0 - p0) + hev(p1 - q1) + 3) >> 3
  *p0 = _mm_adds_epi8(*p0, t2);      // p0 += t2

  t2 = _mm_set1_epi8(1);
  t3 = _mm_adds_epi8(t3, t2);
  SIGNED_SHIFT_N(t3, 1);             // (3 * (q0 - p0) + hev(p1 - q1) + 4) >> 4

  t3 = _mm_and_si128(not_hev, t3);   // if !hev
  *q1 = _mm_subs_epi8(*q1, t3);      // q1 -= t3
  *p1 = _mm_adds_epi8(*p1, t3);      // p1 += t3

  // unoffset
  FLIP_SIGN_BIT4(*p1, *p0, *q0, *q1);
}

// Applies filter on 6 pixels (p2, p1, p0, q0, q1 and q2)
static WEBP_INLINE void DoFilter6(__m128i *p2, __m128i* p1, __m128i *p0,
                                  __m128i* q0, __m128i* q1, __m128i *q2,
                                  const __m128i* mask, int hev_thresh) {
  __m128i a, not_hev;
  const __m128i sign_bit = _mm_set1_epi8(0x80);

  // compute hev mask
  GET_NOTHEV(*p1, *p0, *q0, *q1, hev_thresh, not_hev);

  // convert to signed values
  FLIP_SIGN_BIT4(*p1, *p0, *q0, *q1);
  FLIP_SIGN_BIT2(*p2, *q2);

  GET_BASE_DELTA(*p1, *p0, *q0, *q1, a);

  { // do simple filter on pixels with hev
    const __m128i m = _mm_andnot_si128(not_hev, *mask);
    const __m128i f = _mm_and_si128(a, m);
    DO_SIMPLE_FILTER(*p0, *q0, f);
  }
  { // do strong filter on pixels with not hev
    const __m128i zero = _mm_setzero_si128();
    const __m128i nine = _mm_set1_epi16(0x0900);
    const __m128i sixty_three = _mm_set1_epi16(63);

    const __m128i m = _mm_and_si128(not_hev, *mask);
    const __m128i f = _mm_and_si128(a, m);
    const __m128i f_lo = _mm_unpacklo_epi8(zero, f);
    const __m128i f_hi = _mm_unpackhi_epi8(zero, f);

    const __m128i f9_lo = _mm_mulhi_epi16(f_lo, nine);   // Filter (lo) * 9
    const __m128i f9_hi = _mm_mulhi_epi16(f_hi, nine);   // Filter (hi) * 9
    const __m128i f18_lo = _mm_add_epi16(f9_lo, f9_lo);  // Filter (lo) * 18
    const __m128i f18_hi = _mm_add_epi16(f9_hi, f9_hi);  // Filter (hi) * 18

    const __m128i a2_lo = _mm_add_epi16(f9_lo, sixty_three);  // Filter * 9 + 63
    const __m128i a2_hi = _mm_add_epi16(f9_hi, sixty_three);  // Filter * 9 + 63

    const __m128i a1_lo = _mm_add_epi16(f18_lo, sixty_three);  // F... * 18 + 63
    const __m128i a1_hi = _mm_add_epi16(f18_hi, sixty_three);  // F... * 18 + 63

    const __m128i a0_lo = _mm_add_epi16(f18_lo, a2_lo);  // Filter * 27 + 63
    const __m128i a0_hi = _mm_add_epi16(f18_hi, a2_hi);  // Filter * 27 + 63

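    // (After UPDATE_2PIXELS' >> 7, these become the strong-filter updates
    // (9 * f + 63) >> 7, (18 * f + 63) >> 7 and (27 * f + 63) >> 7.)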
    UPDATE_2PIXELS(*p2, *q2, a2_lo, a2_hi);
    UPDATE_2PIXELS(*p1, *q1, a1_lo, a1_hi);
    UPDATE_2PIXELS(*p0, *q0, a0_lo, a0_hi);
  }

  // unoffset
  FLIP_SIGN_BIT4(*p1, *p0, *q0, *q1);
  FLIP_SIGN_BIT2(*p2, *q2);
}

// reads 8 rows across a vertical edge.
//
// TODO(somnath): Investigate _mm_shuffle* also see if it can be broken into
// two Load4x4() to avoid code duplication.
static WEBP_INLINE void Load8x4(const uint8_t* b, int stride,
                                __m128i* p, __m128i* q) {
  __m128i t1, t2;

  // Load 0th, 1st, 4th and 5th rows
  __m128i r0 =  _mm_cvtsi32_si128(*((int*)&b[0 * stride]));  // 03 02 01 00
  __m128i r1 =  _mm_cvtsi32_si128(*((int*)&b[1 * stride]));  // 13 12 11 10
  __m128i r4 =  _mm_cvtsi32_si128(*((int*)&b[4 * stride]));  // 43 42 41 40
  __m128i r5 =  _mm_cvtsi32_si128(*((int*)&b[5 * stride]));  // 53 52 51 50

  r0 = _mm_unpacklo_epi32(r0, r4);               // 43 42 41 40 03 02 01 00
  r1 = _mm_unpacklo_epi32(r1, r5);               // 53 52 51 50 13 12 11 10

  // t1 = 53 43 52 42 51 41 50 40 13 03 12 02 11 01 10 00
  t1 = _mm_unpacklo_epi8(r0, r1);

  // Load 2nd, 3rd, 6th and 7th rows
  r0 =  _mm_cvtsi32_si128(*((int*)&b[2 * stride]));          // 23 22 21 20
  r1 =  _mm_cvtsi32_si128(*((int*)&b[3 * stride]));          // 33 32 31 30
  r4 =  _mm_cvtsi32_si128(*((int*)&b[6 * stride]));          // 63 62 61 60
  r5 =  _mm_cvtsi32_si128(*((int*)&b[7 * stride]));          // 73 72 71 70

  r0 = _mm_unpacklo_epi32(r0, r4);               // 63 62 61 60 23 22 21 20
  r1 = _mm_unpacklo_epi32(r1, r5);               // 73 72 71 70 33 32 31 30

  // t2 = 73 63 72 62 71 61 70 60 33 23 32 22 31 21 30 20
  t2 = _mm_unpacklo_epi8(r0, r1);

  // t1 = 33 23 13 03 32 22 12 02 31 21 11 01 30 20 10 00
  // t2 = 73 63 53 43 72 62 52 42 71 61 51 41 70 60 50 40
  r0 = t1;
  t1 = _mm_unpacklo_epi16(t1, t2);
  t2 = _mm_unpackhi_epi16(r0, t2);

  // *p = 71 61 51 41 31 21 11 01 70 60 50 40 30 20 10 00
  // *q = 73 63 53 43 33 23 13 03 72 62 52 42 32 22 12 02
  *p = _mm_unpacklo_epi32(t1, t2);
  *q = _mm_unpackhi_epi32(t1, t2);
}

static WEBP_INLINE void Load16x4(const uint8_t* r0, const uint8_t* r8,
                                 int stride,
                                 __m128i* p1, __m128i* p0,
                                 __m128i* q0, __m128i* q1) {
  __m128i t1, t2;
  // Assume the pixels around the edge (|) are numbered as follows
  //                00 01 | 02 03
  //                10 11 | 12 13
  //                 ...  |  ...
  //                e0 e1 | e2 e3
  //                f0 f1 | f2 f3
  //
  // r0 is pointing to the 0th row (00)
  // r8 is pointing to the 8th row (80)

  // Load
  // p1 = 71 61 51 41 31 21 11 01 70 60 50 40 30 20 10 00
  // q0 = 73 63 53 43 33 23 13 03 72 62 52 42 32 22 12 02
  // p0 = f1 e1 d1 c1 b1 a1 91 81 f0 e0 d0 c0 b0 a0 90 80
  // q1 = f3 e3 d3 c3 b3 a3 93 83 f2 e2 d2 c2 b2 a2 92 82
  Load8x4(r0, stride, p1, q0);
  Load8x4(r8, stride, p0, q1);

  t1 = *p1;
  t2 = *q0;
  // p1 = f0 e0 d0 c0 b0 a0 90 80 70 60 50 40 30 20 10 00
  // p0 = f1 e1 d1 c1 b1 a1 91 81 71 61 51 41 31 21 11 01
  // q0 = f2 e2 d2 c2 b2 a2 92 82 72 62 52 42 32 22 12 02
  // q1 = f3 e3 d3 c3 b3 a3 93 83 73 63 53 43 33 23 13 03
  *p1 = _mm_unpacklo_epi64(t1, *p0);
  *p0 = _mm_unpackhi_epi64(t1, *p0);
  *q0 = _mm_unpacklo_epi64(t2, *q1);
  *q1 = _mm_unpackhi_epi64(t2, *q1);
}

static WEBP_INLINE void Store4x4(__m128i* x, uint8_t* dst, int stride) {
  int i;
  for (i = 0; i < 4; ++i, dst += stride) {
    *((int32_t*)dst) = _mm_cvtsi128_si32(*x);
    *x = _mm_srli_si128(*x, 4);
  }
}

// Transpose back and store
static WEBP_INLINE void Store16x4(uint8_t* r0, uint8_t* r8, int stride,
                                  __m128i* p1, __m128i* p0,
                                  __m128i* q0, __m128i* q1) {
  __m128i t1;

  // p0 = 71 70 61 60 51 50 41 40 31 30 21 20 11 10 01 00
  // p1 = f1 f0 e1 e0 d1 d0 c1 c0 b1 b0 a1 a0 91 90 81 80
  t1 = *p0;
  *p0 = _mm_unpacklo_epi8(*p1, t1);
  *p1 = _mm_unpackhi_epi8(*p1, t1);

  // q0 = 73 72 63 62 53 52 43 42 33 32 23 22 13 12 03 02
  // q1 = f3 f2 e3 e2 d3 d2 c3 c2 b3 b2 a3 a2 93 92 83 82
  t1 = *q0;
  *q0 = _mm_unpacklo_epi8(t1, *q1);
  *q1 = _mm_unpackhi_epi8(t1, *q1);

  // p0 = 33 32 31 30 23 22 21 20 13 12 11 10 03 02 01 00
  // q0 = 73 72 71 70 63 62 61 60 53 52 51 50 43 42 41 40
  t1 = *p0;
  *p0 = _mm_unpacklo_epi16(t1, *q0);
  *q0 = _mm_unpackhi_epi16(t1, *q0);

  // p1 = b3 b2 b1 b0 a3 a2 a1 a0 93 92 91 90 83 82 81 80
  // q1 = f3 f2 f1 f0 e3 e2 e1 e0 d3 d2 d1 d0 c3 c2 c1 c0
  t1 = *p1;
  *p1 = _mm_unpacklo_epi16(t1, *q1);
  *q1 = _mm_unpackhi_epi16(t1, *q1);

  Store4x4(p0, r0, stride);
  r0 += 4 * stride;
  Store4x4(q0, r0, stride);

  Store4x4(p1, r8, stride);
  r8 += 4 * stride;
  Store4x4(q1, r8, stride);
}

//------------------------------------------------------------------------------
// Simple In-loop filtering (Paragraph 15.2)

static void SimpleVFilter16SSE2(uint8_t* p, int stride, int thresh) {
  // Load
  __m128i p1 = _mm_loadu_si128((__m128i*)&p[-2 * stride]);
  __m128i p0 = _mm_loadu_si128((__m128i*)&p[-stride]);
  __m128i q0 = _mm_loadu_si128((__m128i*)&p[0]);
  __m128i q1 = _mm_loadu_si128((__m128i*)&p[stride]);

  DoFilter2(&p1, &p0, &q0, &q1, thresh);

  // Store
  _mm_storeu_si128((__m128i*)&p[-stride], p0);
  _mm_storeu_si128((__m128i*)p, q0);
}

static void SimpleHFilter16SSE2(uint8_t* p, int stride, int thresh) {
  __m128i p1, p0, q0, q1;

  p -= 2;  // beginning of p1

  Load16x4(p, p + 8 * stride,  stride, &p1, &p0, &q0, &q1);
  DoFilter2(&p1, &p0, &q0, &q1, thresh);
  Store16x4(p, p + 8 * stride, stride, &p1, &p0, &q0, &q1);
}

static void SimpleVFilter16iSSE2(uint8_t* p, int stride, int thresh) {
  int k;
  for (k = 3; k > 0; --k) {
    p += 4 * stride;
    SimpleVFilter16SSE2(p, stride, thresh);
  }
}

static void SimpleHFilter16iSSE2(uint8_t* p, int stride, int thresh) {
  int k;
  for (k = 3; k > 0; --k) {
    p += 4;
    SimpleHFilter16SSE2(p, stride, thresh);
  }
}

//------------------------------------------------------------------------------
// Complex In-loop filtering (Paragraph 15.3)

#define MAX_DIFF1(p3, p2, p1, p0, m) {                                         \
  m = MM_ABS(p3, p2);                                                          \
  m = _mm_max_epu8(m, MM_ABS(p2, p1));                                         \
  m = _mm_max_epu8(m, MM_ABS(p1, p0));                                         \
}

#define MAX_DIFF2(p3, p2, p1, p0, m) {                                         \
  m = _mm_max_epu8(m, MM_ABS(p3, p2));                                         \
  m = _mm_max_epu8(m, MM_ABS(p2, p1));                                         \
  m = _mm_max_epu8(m, MM_ABS(p1, p0));                                         \
}

#define LOAD_H_EDGES4(p, stride, e1, e2, e3, e4) {                             \
  e1 = _mm_loadu_si128((__m128i*)&(p)[0 * stride]);                            \
  e2 = _mm_loadu_si128((__m128i*)&(p)[1 * stride]);                            \
  e3 = _mm_loadu_si128((__m128i*)&(p)[2 * stride]);                            \
  e4 = _mm_loadu_si128((__m128i*)&(p)[3 * stride]);                            \
}

#define LOADUV_H_EDGE(p, u, v, stride) {                                       \
  p = _mm_loadl_epi64((__m128i*)&(u)[(stride)]);                               \
  p = _mm_unpacklo_epi64(p, _mm_loadl_epi64((__m128i*)&(v)[(stride)]));        \
}

#define LOADUV_H_EDGES4(u, v, stride, e1, e2, e3, e4) {                        \
  LOADUV_H_EDGE(e1, u, v, 0 * stride);                                         \
  LOADUV_H_EDGE(e2, u, v, 1 * stride);                                         \
  LOADUV_H_EDGE(e3, u, v, 2 * stride);                                         \
  LOADUV_H_EDGE(e4, u, v, 3 * stride);                                         \
}

#define STOREUV(p, u, v, stride) {                                             \
  _mm_storel_epi64((__m128i*)&u[(stride)], p);                                 \
  p = _mm_srli_si128(p, 8);                                                    \
  _mm_storel_epi64((__m128i*)&v[(stride)], p);                                 \
}

#define COMPLEX_FL_MASK(p1, p0, q0, q1, thresh, ithresh, mask) {               \
  __m128i fl_yes;                                                              \
  const __m128i it = _mm_set1_epi8(ithresh);                                   \
  mask = _mm_subs_epu8(mask, it);                                              \
  mask = _mm_cmpeq_epi8(mask, _mm_setzero_si128());                            \
  NeedsFilter(&p1, &p0, &q0, &q1, thresh, &fl_yes);                            \
  mask = _mm_and_si128(mask, fl_yes);                                          \
}

// on macroblock edges
static void VFilter16SSE2(uint8_t* p, int stride,
                          int thresh, int ithresh, int hev_thresh) {
  __m128i t1;
  __m128i mask;
  __m128i p2, p1, p0, q0, q1, q2;

  // Load p3, p2, p1, p0
  LOAD_H_EDGES4(p - 4 * stride, stride, t1, p2, p1, p0);
  MAX_DIFF1(t1, p2, p1, p0, mask);

  // Load q0, q1, q2, q3
  LOAD_H_EDGES4(p, stride, q0, q1, q2, t1);
  MAX_DIFF2(t1, q2, q1, q0, mask);

  COMPLEX_FL_MASK(p1, p0, q0, q1, thresh, ithresh, mask);
  DoFilter6(&p2, &p1, &p0, &q0, &q1, &q2, &mask, hev_thresh);

  // Store
  _mm_storeu_si128((__m128i*)&p[-3 * stride], p2);
  _mm_storeu_si128((__m128i*)&p[-2 * stride], p1);
  _mm_storeu_si128((__m128i*)&p[-1 * stride], p0);
  _mm_storeu_si128((__m128i*)&p[0 * stride], q0);
  _mm_storeu_si128((__m128i*)&p[1 * stride], q1);
  _mm_storeu_si128((__m128i*)&p[2 * stride], q2);
}

static void HFilter16SSE2(uint8_t* p, int stride,
                          int thresh, int ithresh, int hev_thresh) {
  __m128i mask;
  __m128i p3, p2, p1, p0, q0, q1, q2, q3;

  uint8_t* const b = p - 4;
  Load16x4(b, b + 8 * stride, stride, &p3, &p2, &p1, &p0);  // p3, p2, p1, p0
  MAX_DIFF1(p3, p2, p1, p0, mask);

  Load16x4(p, p + 8 * stride, stride, &q0, &q1, &q2, &q3);  // q0, q1, q2, q3
  MAX_DIFF2(q3, q2, q1, q0, mask);

  COMPLEX_FL_MASK(p1, p0, q0, q1, thresh, ithresh, mask);
  DoFilter6(&p2, &p1, &p0, &q0, &q1, &q2, &mask, hev_thresh);

  Store16x4(b, b + 8 * stride, stride, &p3, &p2, &p1, &p0);
  Store16x4(p, p + 8 * stride, stride, &q0, &q1, &q2, &q3);
}

// on three inner edges
static void VFilter16iSSE2(uint8_t* p, int stride,
                           int thresh, int ithresh, int hev_thresh) {
  int k;
  __m128i mask;
  __m128i t1, t2, p1, p0, q0, q1;

  for (k = 3; k > 0; --k) {
    // Load p3, p2, p1, p0
    LOAD_H_EDGES4(p, stride, t2, t1, p1, p0);
    MAX_DIFF1(t2, t1, p1, p0, mask);

    p += 4 * stride;

    // Load q0, q1, q2, q3
    LOAD_H_EDGES4(p, stride, q0, q1, t1, t2);
    MAX_DIFF2(t2, t1, q1, q0, mask);

    COMPLEX_FL_MASK(p1, p0, q0, q1, thresh, ithresh, mask);
    DoFilter4(&p1, &p0, &q0, &q1, &mask, hev_thresh);

    // Store
    _mm_storeu_si128((__m128i*)&p[-2 * stride], p1);
    _mm_storeu_si128((__m128i*)&p[-1 * stride], p0);
    _mm_storeu_si128((__m128i*)&p[0 * stride], q0);
    _mm_storeu_si128((__m128i*)&p[1 * stride], q1);
  }
}

static void HFilter16iSSE2(uint8_t* p, int stride,
                           int thresh, int ithresh, int hev_thresh) {
  int k;
  uint8_t* b;
  __m128i mask;
  __m128i t1, t2, p1, p0, q0, q1;

  for (k = 3; k > 0; --k) {
    b = p;
    Load16x4(b, b + 8 * stride, stride, &t2, &t1, &p1, &p0);  // p3, p2, p1, p0
    MAX_DIFF1(t2, t1, p1, p0, mask);

    b += 4;  // beginning of q0
    Load16x4(b, b + 8 * stride, stride, &q0, &q1, &t1, &t2);  // q0, q1, q2, q3
    MAX_DIFF2(t2, t1, q1, q0, mask);

    COMPLEX_FL_MASK(p1, p0, q0, q1, thresh, ithresh, mask);
    DoFilter4(&p1, &p0, &q0, &q1, &mask, hev_thresh);

    b -= 2;  // beginning of p1
    Store16x4(b, b + 8 * stride, stride, &p1, &p0, &q0, &q1);

    p += 4;
  }
}

// 8-pixels wide variant, for chroma filtering
static void VFilter8SSE2(uint8_t* u, uint8_t* v, int stride,
                         int thresh, int ithresh, int hev_thresh) {
  __m128i mask;
  __m128i t1, p2, p1, p0, q0, q1, q2;

  // Load p3, p2, p1, p0
  LOADUV_H_EDGES4(u - 4 * stride, v - 4 * stride, stride, t1, p2, p1, p0);
  MAX_DIFF1(t1, p2, p1, p0, mask);

  // Load q0, q1, q2, q3
  LOADUV_H_EDGES4(u, v, stride, q0, q1, q2, t1);
  MAX_DIFF2(t1, q2, q1, q0, mask);

  COMPLEX_FL_MASK(p1, p0, q0, q1, thresh, ithresh, mask);
  DoFilter6(&p2, &p1, &p0, &q0, &q1, &q2, &mask, hev_thresh);

  // Store
  STOREUV(p2, u, v, -3 * stride);
  STOREUV(p1, u, v, -2 * stride);
  STOREUV(p0, u, v, -1 * stride);
  STOREUV(q0, u, v, 0 * stride);
  STOREUV(q1, u, v, 1 * stride);
  STOREUV(q2, u, v, 2 * stride);
}

static void HFilter8SSE2(uint8_t* u, uint8_t* v, int stride,
                         int thresh, int ithresh, int hev_thresh) {
  __m128i mask;
  __m128i p3, p2, p1, p0, q0, q1, q2, q3;

  uint8_t* const tu = u - 4;
  uint8_t* const tv = v - 4;
  Load16x4(tu, tv, stride, &p3, &p2, &p1, &p0);  // p3, p2, p1, p0
  MAX_DIFF1(p3, p2, p1, p0, mask);

  Load16x4(u, v, stride, &q0, &q1, &q2, &q3);    // q0, q1, q2, q3
  MAX_DIFF2(q3, q2, q1, q0, mask);

  COMPLEX_FL_MASK(p1, p0, q0, q1, thresh, ithresh, mask);
  DoFilter6(&p2, &p1, &p0, &q0, &q1, &q2, &mask, hev_thresh);

  Store16x4(tu, tv, stride, &p3, &p2, &p1, &p0);
  Store16x4(u, v, stride, &q0, &q1, &q2, &q3);
}

static void VFilter8iSSE2(uint8_t* u, uint8_t* v, int stride,
                          int thresh, int ithresh, int hev_thresh) {
  __m128i mask;
  __m128i t1, t2, p1, p0, q0, q1;

  // Load p3, p2, p1, p0
  LOADUV_H_EDGES4(u, v, stride, t2, t1, p1, p0);
  MAX_DIFF1(t2, t1, p1, p0, mask);

  u += 4 * stride;
  v += 4 * stride;

  // Load q0, q1, q2, q3
  LOADUV_H_EDGES4(u, v, stride, q0, q1, t1, t2);
  MAX_DIFF2(t2, t1, q1, q0, mask);

  COMPLEX_FL_MASK(p1, p0, q0, q1, thresh, ithresh, mask);
  DoFilter4(&p1, &p0, &q0, &q1, &mask, hev_thresh);

  // Store
  STOREUV(p1, u, v, -2 * stride);
  STOREUV(p0, u, v, -1 * stride);
  STOREUV(q0, u, v, 0 * stride);
  STOREUV(q1, u, v, 1 * stride);
}

static void HFilter8iSSE2(uint8_t* u, uint8_t* v, int stride,
                          int thresh, int ithresh, int hev_thresh) {
  __m128i mask;
  __m128i t1, t2, p1, p0, q0, q1;
  Load16x4(u, v, stride, &t2, &t1, &p1, &p0);   // p3, p2, p1, p0
  MAX_DIFF1(t2, t1, p1, p0, mask);

  u += 4;  // beginning of q0
  v += 4;
  Load16x4(u, v, stride, &q0, &q1, &t1, &t2);  // q0, q1, q2, q3
  MAX_DIFF2(t2, t1, q1, q0, mask);

  COMPLEX_FL_MASK(p1, p0, q0, q1, thresh, ithresh, mask);
  DoFilter4(&p1, &p0, &q0, &q1, &mask, hev_thresh);

  u -= 2;  // beginning of p1
  v -= 2;
  Store16x4(u, v, stride, &p1, &p0, &q0, &q1);
}

#endif   // WEBP_USE_SSE2

//------------------------------------------------------------------------------
// Entry point

extern void VP8DspInitSSE2(void);

void VP8DspInitSSE2(void) {
#if defined(WEBP_USE_SSE2)
  VP8Transform = TransformSSE2;

  VP8VFilter16 = VFilter16SSE2;
  VP8HFilter16 = HFilter16SSE2;
  VP8VFilter8 = VFilter8SSE2;
  VP8HFilter8 = HFilter8SSE2;
  VP8VFilter16i = VFilter16iSSE2;
  VP8HFilter16i = HFilter16iSSE2;
  VP8VFilter8i = VFilter8iSSE2;
  VP8HFilter8i = HFilter8iSSE2;

  VP8SimpleVFilter16 = SimpleVFilter16SSE2;
  VP8SimpleHFilter16 = SimpleHFilter16SSE2;
  VP8SimpleVFilter16i = SimpleVFilter16iSSE2;
  VP8SimpleHFilter16i = SimpleHFilter16iSSE2;
#endif   // WEBP_USE_SSE2
}
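
// Note: this initializer is not meant to be called directly; it is expected to
// be invoked by the generic VP8DspInit() once SSE2 support has been detected
// at runtime.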

#if defined(__cplusplus) || defined(c_plusplus)
}    // extern "C"
#endif