/*
 * Copyright 2012 The Android Open Source Project
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "SkBitmapProcState_opts_SSSE3.h"
#include "SkColorPriv.h"
#include "SkPaint.h"
#include "SkUtils.h"

#include <tmmintrin.h>  // SSSE3

// Wrapping the implementation in an anonymous namespace seems to force gcc to
// inline the template instantiations directly, instead of emitting
// S32_generic_D32_filter_DX_SSSE3<true> and
// S32_generic_D32_filter_DX_SSSE3<false> as separate functions that are then
// called by the external wrappers.
namespace {
// In this file, the alpha and non-alpha variations are implemented with a
// template, as it makes the code more compact and a bit easier to maintain,
// while letting the compiler generate the exact same code as two functions
// that differ only by a few lines.


// Prepare all necessary constants for a round of processing for two pixel
// pairs.
// @param xy is the location where the xy parameters for four pixels should be
//           read from. It is identical in concept to argument two of
//           S32_{opaque}_D32_filter_DX methods.
// @param mask_3FFF vector of 32 bit constants containing 3FFF,
//                  suitable to mask the bottom 14 bits of an XY value.
// @param mask_000F vector of 32 bit constants containing 000F,
//                  suitable to mask the bottom 4 bits of an XY value.
// @param sixteen_8bit vector of 8 bit components containing the value 16.
// @param mask_dist_select vector of 8 bit components containing the shuffling
//                         parameters to reorder x[0-3] parameters.
// @param all_x_result vector of 8 bit components that will contain
//              (4x(x3), 4x(x2), 4x(x1), 4x(x0)) upon return.
// @param sixteen_minus_x vector of 8 bit components, containing
//              (4x(16 - x3), 4x(16 - x2), 4x(16 - x1), 4x(16 - x0))
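// @param x0, x1 storage (4 ints each) for the integer offsets of the first
//               and second source pixel of each of the four pairs.
// Note: each 32 bit entry of xy packs one pixel pair as
// (x0:14 | sub_x:4 | x1:14), the same "x0:14 | 4 | x1:14" layout decoded by
// the shifts and masks below and in the remainder loops.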
inline void PrepareConstantsTwoPixelPairs(const uint32_t* xy,
                                          const __m128i& mask_3FFF,
                                          const __m128i& mask_000F,
                                          const __m128i& sixteen_8bit,
                                          const __m128i& mask_dist_select,
                                          __m128i* all_x_result,
                                          __m128i* sixteen_minus_x,
                                          int* x0,
                                          int* x1) {
    const __m128i xx = _mm_loadu_si128(reinterpret_cast<const __m128i *>(xy));

    // 4 delta X
    // (x03, x02, x01, x00)
    const __m128i x0_wide = _mm_srli_epi32(xx, 18);
    // (x13, x12, x11, x10)
    const __m128i x1_wide = _mm_and_si128(xx, mask_3FFF);

    _mm_storeu_si128(reinterpret_cast<__m128i *>(x0), x0_wide);
    _mm_storeu_si128(reinterpret_cast<__m128i *>(x1), x1_wide);

    __m128i all_x = _mm_and_si128(_mm_srli_epi32(xx, 14), mask_000F);

    // (4x(x3), 4x(x2), 4x(x1), 4x(x0))
    all_x = _mm_shuffle_epi8(all_x, mask_dist_select);

    *all_x_result = all_x;
    // (4x(16-x3), 4x(16-x2), 4x(16-x1), 4x(16-x0))
    *sixteen_minus_x = _mm_sub_epi8(sixteen_8bit, all_x);
}

// Prepare all necessary constants for a round of processing for two pixel
// pairs.
// @param xy is the location where the xy parameters for four pixels should be
//           read from. It is identical in concept to argument two of
//           S32_{opaque}_D32_filter_DXDY methods.
// @param mask_3FFF vector of 32 bit constants containing 3FFF,
//                  suitable to mask the bottom 14 bits of an XY value.
// @param mask_000F vector of 32 bit constants containing 000F,
//                  suitable to mask the bottom 4 bits of an XY value.
// @param sixteen_8bit vector of 8 bit components containing the value 16.
// @param mask_dist_select vector of 8 bit components containing the shuffling
//                         parameters to reorder x[0-3] parameters.
// @param all_xy_result vector of 8 bit components that will contain
//              (4x(y1), 4x(y0), 4x(x1), 4x(x0)) upon return.
// @param sixteen_minus_xy vector of 8 bit components, containing
//              (4x(16-y1), 4x(16-y0), 4x(16-x1), 4x(16-x0)).
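// @param xy0, xy1 storage (4 ints each) for the decoded integer offsets: on
//                 return they hold (x00, x10, y00, y10) and
//                 (x01, x11, y01, y11) respectively.
// Note: for the DXDY case the xy stream alternates a y word and an x word per
// output pixel, each packed as (coord0:14 | frac:4 | coord1:14); this matches
// the scalar decode in the remainder loop of the DXDY filter below.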
inline void PrepareConstantsTwoPixelPairsDXDY(const uint32_t* xy,
                                              const __m128i& mask_3FFF,
                                              const __m128i& mask_000F,
                                              const __m128i& sixteen_8bit,
                                              const __m128i& mask_dist_select,
                                              __m128i* all_xy_result,
                                              __m128i* sixteen_minus_xy,
                                              int* xy0, int* xy1) {
    const __m128i xy_wide =
                        _mm_loadu_si128(reinterpret_cast<const __m128i *>(xy));

    // (x10, y10, x00, y00)
    __m128i xy0_wide = _mm_srli_epi32(xy_wide, 18);
    // (y10, y00, x10, x00)
    xy0_wide = _mm_shuffle_epi32(xy0_wide, _MM_SHUFFLE(2, 0, 3, 1));
    // (x11, y11, x01, y01)
    __m128i xy1_wide = _mm_and_si128(xy_wide, mask_3FFF);
    // (y11, y01, x11, x01)
    xy1_wide = _mm_shuffle_epi32(xy1_wide, _MM_SHUFFLE(2, 0, 3, 1));

    _mm_storeu_si128(reinterpret_cast<__m128i *>(xy0), xy0_wide);
    _mm_storeu_si128(reinterpret_cast<__m128i *>(xy1), xy1_wide);

    // (x1, y1, x0, y0)
    __m128i all_xy = _mm_and_si128(_mm_srli_epi32(xy_wide, 14), mask_000F);
    // (y1, y0, x1, x0)
    all_xy = _mm_shuffle_epi32(all_xy, _MM_SHUFFLE(2, 0, 3, 1));
    // (4x(y1), 4x(y0), 4x(x1), 4x(x0))
    all_xy = _mm_shuffle_epi8(all_xy, mask_dist_select);

    *all_xy_result = all_xy;
    // (4x(16-y1), 4x(16-y0), 4x(16-x1), 4x(16-x0))
    *sixteen_minus_xy = _mm_sub_epi8(sixteen_8bit, all_xy);
}

// Helper function used when processing one pixel pair.
// @param pixel0..3 are the four input pixels
// @param scale_x vector of 8 bit components to multiply the pixel[0:3]. This
//                will contain (4x(x1, 16-x1), 4x(x0, 16-x0))
//                or (4x(x3, 16-x3), 4x(x2, 16-x2))
// @return a vector of 16 bit components containing:
// (Aa2 * (16 - x1) + Aa3 * x1, ... , Ra0 * (16 - x0) + Ra1 * x0)
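// Implementation note: _mm_maddubs_epi16 (pmaddubsw) multiplies the unsigned
// 8 bit pixel bytes by the weights in scale_x (treated as signed 8 bit
// values) and sums each adjacent pair of products into a signed 16 bit lane.
// Since the weights are x and 16-x (both in [0, 16]) and the pixel bytes are
// at most 255, every sum fits well within 16 bits and no saturation occurs.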
inline __m128i ProcessPixelPairHelper(uint32_t pixel0,
                                      uint32_t pixel1,
                                      uint32_t pixel2,
                                      uint32_t pixel3,
                                      const __m128i& scale_x) {
    __m128i a0, a1, a2, a3;
    // Load 2 pairs of pixels
    a0 = _mm_cvtsi32_si128(pixel0);
    a1 = _mm_cvtsi32_si128(pixel1);

    // Interleave pixels.
    // (0, 0, 0, 0, 0, 0, 0, 0, Aa1, Aa0, Ba1, Ba0, Ga1, Ga0, Ra1, Ra0)
    a0 = _mm_unpacklo_epi8(a0, a1);

    a2 = _mm_cvtsi32_si128(pixel2);
    a3 = _mm_cvtsi32_si128(pixel3);
    // (0, 0, 0, 0, 0, 0, 0, 0, Aa3, Aa2, Ba3, Ba2, Ga3, Ga2, Ra3, Ra2)
    a2 = _mm_unpacklo_epi8(a2, a3);

    // two pairs of pixel pairs, interleaved.
    // (Aa3, Aa2, Ba3, Ba2, Ga3, Ga2, Ra3, Ra2,
    //  Aa1, Aa0, Ba1, Ba0, Ga1, Ga0, Ra1, Ra0)
    a0 = _mm_unpacklo_epi64(a0, a2);

    // multiply and sum to 16 bit components.
    // (Aa2 * (16 - x1) + Aa3 * x1, ... , Ra0 * (16 - x0) + Ra1 * x0)
    // At that point, we use up a bit less than 12 bits for each 16 bit
    // component:
    // All components are less than 255. So,
    // C0 * (16 - x) + C1 * x <= 255 * (16 - x) + 255 * x = 255 * 16.
    return _mm_maddubs_epi16(a0, scale_x);
}

// Scale back the results after multiplications to the [0:255] range, and scale
// by alpha when has_alpha is true.
// Depending on whether one set or two sets of multiplications had been applied,
// the results have to be shifted by four places (dividing by 16), or shifted
// by eight places (dividing by 256), since each multiplication is by a quantity
// in the range [0:16].
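// When has_alpha is true, the callers assert fAlphaScale < 256, so after the
// first shift each component is at most 255 and the 16 bit multiply by alpha
// cannot overflow before the final shift by 8.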
template<bool has_alpha, int scale>
inline __m128i ScaleFourPixels(__m128i* pixels,
                               const __m128i& alpha) {
    // Divide each 16 bit component by 16 (or 256 depending on scale).
    *pixels = _mm_srli_epi16(*pixels, scale);

    if (has_alpha) {
        // Multiply by alpha.
        *pixels = _mm_mullo_epi16(*pixels, alpha);

        // Divide each 16 bit component by 256.
        *pixels = _mm_srli_epi16(*pixels, 8);
    }
    return *pixels;
}

// Wrapper to calculate two output pixels from four input pixels. The
// arguments are the same as ProcessPixelPairHelper. Technically, there are
// eight input pixels, but since sub_y == 0, the factor applied to half of
// those pixels is zero (sub_y), so they are omitted here to save on some
// processing.
// @param alpha when has_alpha is true, scale all resulting components by this
//              value.
// @return a vector of 16 bit components containing:
// ((Aa2 * (16 - x1) + Aa3 * x1) * alpha, ...,
// (Ra0 * (16 - x0) + Ra1 * x0) * alpha) (when has_alpha is true)
// otherwise
// (Aa2 * (16 - x1) + Aa3 * x1, ... , Ra0 * (16 - x0) + Ra1 * x0)
// In both cases, the results are renormalized (divided by 16) to match the
// expected formats when storing back the results into memory.
template<bool has_alpha>
inline __m128i ProcessPixelPairZeroSubY(uint32_t pixel0,
                                        uint32_t pixel1,
                                        uint32_t pixel2,
                                        uint32_t pixel3,
                                        const __m128i& scale_x,
                                        const __m128i& alpha) {
    __m128i sum = ProcessPixelPairHelper(pixel0, pixel1, pixel2, pixel3,
                                         scale_x);
    return ScaleFourPixels<has_alpha, 4>(&sum, alpha);
}

// Same as ProcessPixelPairZeroSubY, except processing one output pixel at a
// time instead of two. As in the above function, only two pixels are needed
// to generate a single pixel since sub_y == 0.
// @return same as ProcessPixelPairZeroSubY, except that only the bottom 4
// 16 bit components are set.
template<bool has_alpha>
inline __m128i ProcessOnePixelZeroSubY(uint32_t pixel0,
                                       uint32_t pixel1,
                                       __m128i scale_x,
                                       __m128i alpha) {
    __m128i a0 = _mm_cvtsi32_si128(pixel0);
    __m128i a1 = _mm_cvtsi32_si128(pixel1);

    // Interleave
    a0 = _mm_unpacklo_epi8(a0, a1);

    // (a0 * (16-x) + a1 * x)
    __m128i sum = _mm_maddubs_epi16(a0, scale_x);

    return ScaleFourPixels<has_alpha, 4>(&sum, alpha);
}

// Methods when sub_y != 0


// Same as ProcessPixelPairHelper, except that the values are scaled by y.
// @param y vector of 16 bit components containing 'y' values. There are two
//        cases in practice, where y will contain the sub_y constant, or will
//        contain the 16 - sub_y constant.
// @return vector of 16 bit components containing:
// (y * (Aa2 * (16 - x1) + Aa3 * x1), ... , y * (Ra0 * (16 - x0) + Ra1 * x0))
inline __m128i ProcessPixelPair(uint32_t pixel0,
                                uint32_t pixel1,
                                uint32_t pixel2,
                                uint32_t pixel3,
                                const __m128i& scale_x,
                                const __m128i& y) {
    __m128i sum = ProcessPixelPairHelper(pixel0, pixel1, pixel2, pixel3,
                                         scale_x);

    // first row times 16-y or y depending on whether 'y' represents one or
    // the other.
    // Values will be up to 255 * 16 * 16 = 65280.
    // (y * (Aa2 * (16 - x1) + Aa3 * x1), ... ,
    //  y * (Ra0 * (16 - x0) + Ra1 * x0))
    sum = _mm_mullo_epi16(sum, y);

    return sum;
}

// Process two pixel pairs out of eight input pixels.
// In other methods, the individual pixels are passed in directly; here, the
// two rows and the index offsets into those rows are passed instead, and the
// eight pixels are fetched from them.
// @param row0..1 top and bottom row where to find input pixels.
// @param x0..1 offsets into the row for all eight input pixels.
// @param all_y vector of 16 bit components containing the constant sub_y
// @param neg_y vector of 16 bit components containing the constant 16 - sub_y
// @param alpha vector of 16 bit components containing the alpha value to scale
//        the results by, when has_alpha is true.
// @return
// (alpha * ((16-y) * (Aa2  * (16-x1) + Aa3  * x1) +
//             y    * (Aa2' * (16-x1) + Aa3' * x1)),
// ...
//  alpha * ((16-y) * (Ra0  * (16-x0) + Ra1 * x0) +
//             y    * (Ra0' * (16-x0) + Ra1' * x0)))
// With the factor alpha removed when has_alpha is false.
// The values are scaled back to 16 bit components, but with only the bottom
// 8 bits being set.
template<bool has_alpha>
inline __m128i ProcessTwoPixelPairs(const uint32_t* row0,
                                    const uint32_t* row1,
                                    const int* x0,
                                    const int* x1,
                                    const __m128i& scale_x,
                                    const __m128i& all_y,
                                    const __m128i& neg_y,
                                    const __m128i& alpha) {
    __m128i sum0 = ProcessPixelPair(
        row0[x0[0]], row0[x1[0]], row0[x0[1]], row0[x1[1]],
        scale_x, neg_y);
    __m128i sum1 = ProcessPixelPair(
        row1[x0[0]], row1[x1[0]], row1[x0[1]], row1[x1[1]],
        scale_x, all_y);

    // 2 samples fully summed.
    // ((16-y) * (Aa2 * (16-x1) + Aa3 * x1) +
    //  y * (Aa2' * (16-x1) + Aa3' * x1),
    // ...
    //  (16-y) * (Ra0 * (16 - x0) + Ra1 * x0) +
    //  y * (Ra0' * (16-x0) + Ra1' * x0))
    // Each component, again, can be at most 256 * 255 = 65280, so no overflow.
    sum0 = _mm_add_epi16(sum0, sum1);

    return ScaleFourPixels<has_alpha, 8>(&sum0, alpha);
}

// Similar to ProcessTwoPixelPairs, except that the two output pixels can come
// from different row pairs, selected via the xy0/xy1 index arrays.
template<bool has_alpha>
inline __m128i ProcessTwoPixelPairsDXDY(const uint32_t* row00,
                                        const uint32_t* row01,
                                        const uint32_t* row10,
                                        const uint32_t* row11,
                                        const int* xy0,
                                        const int* xy1,
                                        const __m128i& scale_x,
                                        const __m128i& all_y,
                                        const __m128i& neg_y,
                                        const __m128i& alpha) {
    // first row
    __m128i sum0 = ProcessPixelPair(
        row00[xy0[0]], row00[xy1[0]], row10[xy0[1]], row10[xy1[1]],
        scale_x, neg_y);
    // second row
    __m128i sum1 = ProcessPixelPair(
        row01[xy0[0]], row01[xy1[0]], row11[xy0[1]], row11[xy1[1]],
        scale_x, all_y);

    // 2 samples fully summed.
    // ((16-y1) * (Aa2 * (16-x1) + Aa3 * x1) +
    //  y1 * (Aa2' * (16-x1) + Aa3' * x1),
    // ...
    //  (16-y0) * (Ra0 * (16 - x0) + Ra1 * x0) +
    //  y0 * (Ra0' * (16-x0) + Ra1' * x0))
    // Each component, again, can be at most 256 * 255 = 65280, so no overflow.
    sum0 = _mm_add_epi16(sum0, sum1);

    return ScaleFourPixels<has_alpha, 8>(&sum0, alpha);
}


// Same as ProcessPixelPair, except that it performs the math one output pixel
// at a time. This means that only the bottom four 16 bit components are set.
inline __m128i ProcessOnePixel(uint32_t pixel0, uint32_t pixel1,
                               const __m128i& scale_x, const __m128i& y) {
    __m128i a0 = _mm_cvtsi32_si128(pixel0);
    __m128i a1 = _mm_cvtsi32_si128(pixel1);

    // Interleave
    // (0, 0, 0, 0, 0, 0, 0, 0, Aa1, Aa0, Ba1, Ba0, Ga1, Ga0, Ra1, Ra0)
    a0 = _mm_unpacklo_epi8(a0, a1);

    // (a0 * (16-x) + a1 * x)
    a0 = _mm_maddubs_epi16(a0, scale_x);

    // scale row by y
    return _mm_mullo_epi16(a0, y);
}

// Notes about the various tricks that are used in this implementation:
// - specialization for sub_y == 0.
// Statistically, 1/16th of the samples will have sub_y == 0. When this
// happens, the math goes from:
// (16 - x)*(16 - y)*a00 + x*(16 - y)*a01 + (16 - x)*y*a10 + x*y*a11
// to:
// 16*((16 - x)*a00 + x*a01)
// which is much simpler. The simplification makes for an easy boost in
// performance.
// - calculating 4 output pixels at a time.
// This allows loading the coefficients x0 and x1 and shuffling them to the
// optimum location only once per loop, instead of twice per loop.
// This also allows us to store the four pixels with a single store.
// - Use of 2 special SSSE3 instructions (compared to the SSE2 version):
// _mm_shuffle_epi8 : this allows us to spread the coefficients x[0-3] loaded
// in 32 bit values to 8 bit values repeated four times.
// _mm_maddubs_epi16 : this allows us to perform multiplications and additions
// in one go on 8 bit values, storing the results in 16 bit values. This
// instruction is crucial for the speed of the implementation since, as one
// can see in the SSE2 implementation, all inputs have to be widened to 16
// bits because the results are 16 bits. This basically allows us to process
// twice as many pixel components per iteration.
//
// As a result, this method is faster than the traditional SSE2 version. The
// actual boost varies greatly with the underlying architecture.
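// Note on renormalization: the full bilinear sum carries a total weight of
// 16 * 16 = 256 and is therefore shifted right by 8, while the sub_y == 0 path
// only carries a weight of 16 and is shifted right by 4 (compare the
// ScaleFourPixels<has_alpha, 4> and ScaleFourPixels<has_alpha, 8> calls above).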
template<bool has_alpha>
void S32_generic_D32_filter_DX_SSSE3(const SkBitmapProcState& s,
                                     const uint32_t* xy,
                                     int count, uint32_t* colors) {
    SkASSERT(count > 0 && colors != nullptr);
    SkASSERT(s.fFilterQuality != kNone_SkFilterQuality);
    SkASSERT(kN32_SkColorType == s.fPixmap.colorType());
    if (has_alpha) {
        SkASSERT(s.fAlphaScale < 256);
    } else {
        SkASSERT(s.fAlphaScale == 256);
    }

    const uint8_t* src_addr =
            static_cast<const uint8_t*>(s.fPixmap.addr());
    const size_t rb = s.fPixmap.rowBytes();
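    // The first xy entry packs the two source rows and the vertical weight as
    // (row0:14 | sub_y:4 | row1:14); it is decoded once here since the row
    // pair is constant for the whole DX span.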
    const uint32_t XY = *xy++;
    const unsigned y0 = XY >> 14;
    const uint32_t* row0 =
            reinterpret_cast<const uint32_t*>(src_addr + (y0 >> 4) * rb);
    const uint32_t* row1 =
            reinterpret_cast<const uint32_t*>(src_addr + (XY & 0x3FFF) * rb);
    const unsigned sub_y = y0 & 0xF;

    // vector constants
    const __m128i mask_dist_select = _mm_set_epi8(12, 12, 12, 12,
                                                  8,  8,  8,  8,
                                                  4,  4,  4,  4,
                                                  0,  0,  0,  0);
    const __m128i mask_3FFF = _mm_set1_epi32(0x3FFF);
    const __m128i mask_000F = _mm_set1_epi32(0x000F);
    const __m128i sixteen_8bit = _mm_set1_epi8(16);
    // (0, 0, 0, 0, 0, 0, 0, 0)
    const __m128i zero = _mm_setzero_si128();

    __m128i alpha = _mm_setzero_si128();
    if (has_alpha) {
        // 8x(alpha)
        alpha = _mm_set1_epi16(s.fAlphaScale);
    }

    if (sub_y == 0) {
        // Unroll 4x, interleave bytes, use pmaddubsw (all_x is small)
        while (count > 3) {
            count -= 4;

            int x0[4];
            int x1[4];
            __m128i all_x, sixteen_minus_x;
            PrepareConstantsTwoPixelPairs(xy, mask_3FFF, mask_000F,
                                          sixteen_8bit, mask_dist_select,
                                          &all_x, &sixteen_minus_x, x0, x1);
            xy += 4;

            // First pair of pixel pairs.
            // (4x(x1, 16-x1), 4x(x0, 16-x0))
            __m128i scale_x;
            scale_x = _mm_unpacklo_epi8(sixteen_minus_x, all_x);

            __m128i sum0 = ProcessPixelPairZeroSubY<has_alpha>(
                row0[x0[0]], row0[x1[0]], row0[x0[1]], row0[x1[1]],
                scale_x, alpha);

            // second pair of pixel pairs
            // (4x(x3, 16-x3), 4x(x2, 16-x2))
            scale_x = _mm_unpackhi_epi8(sixteen_minus_x, all_x);

            __m128i sum1 = ProcessPixelPairZeroSubY<has_alpha>(
                row0[x0[2]], row0[x1[2]], row0[x0[3]], row0[x1[3]],
                scale_x, alpha);

            // Pack the 16 bit values of both sums down to 8 bit values.
            sum0 = _mm_packus_epi16(sum0, sum1);

            // Store the four output pixels with a single 128 bit store.
            _mm_storeu_si128(reinterpret_cast<__m128i *>(colors), sum0);

            colors += 4;
        }

        // handle remainder
        while (count-- > 0) {
            uint32_t xx = *xy++;  // x0:14 | 4 | x1:14
            unsigned x0 = xx >> 18;
            unsigned x1 = xx & 0x3FFF;

            // 16x(x)
            const __m128i all_x = _mm_set1_epi8((xx >> 14) & 0x0F);

            // (16x(16-x))
            __m128i scale_x = _mm_sub_epi8(sixteen_8bit, all_x);

            scale_x = _mm_unpacklo_epi8(scale_x, all_x);

            __m128i sum = ProcessOnePixelZeroSubY<has_alpha>(
                row0[x0], row0[x1],
                scale_x, alpha);

            // Pack lower 4 16 bit values of sum into lower 4 bytes.
            sum = _mm_packus_epi16(sum, zero);

            // Extract low int and store.
            *colors++ = _mm_cvtsi128_si32(sum);
        }
    } else {  // more general case, y != 0
        // 8x(16)
        const __m128i sixteen_16bit = _mm_set1_epi16(16);

        // 8x (y)
        const __m128i all_y = _mm_set1_epi16(sub_y);

        // 8x (16-y)
        const __m128i neg_y = _mm_sub_epi16(sixteen_16bit, all_y);

        // Unroll 4x, interleave bytes, use pmaddubsw (all_x is small)
        while (count > 3) {
            count -= 4;

            int x0[4];
            int x1[4];
            __m128i all_x, sixteen_minus_x;
            PrepareConstantsTwoPixelPairs(xy, mask_3FFF, mask_000F,
                                          sixteen_8bit, mask_dist_select,
                                          &all_x, &sixteen_minus_x, x0, x1);
            xy += 4;

            // First pair of pixel pairs
            // (4x(x1, 16-x1), 4x(x0, 16-x0))
            __m128i scale_x;
            scale_x = _mm_unpacklo_epi8(sixteen_minus_x, all_x);

            __m128i sum0 = ProcessTwoPixelPairs<has_alpha>(
                row0, row1, x0, x1,
                scale_x, all_y, neg_y, alpha);

            // second pair of pixel pairs
            // (4x(x3, 16-x3), 4x(x2, 16-x2))
            scale_x = _mm_unpackhi_epi8(sixteen_minus_x, all_x);

            __m128i sum1 = ProcessTwoPixelPairs<has_alpha>(
                row0, row1, x0 + 2, x1 + 2,
                scale_x, all_y, neg_y, alpha);

            // Do the final packing of the two results

            // Pack the 16 bit values of both sums down to 8 bit values.
            sum0 = _mm_packus_epi16(sum0, sum1);

            // Store the four output pixels with a single 128 bit store.
            _mm_storeu_si128(reinterpret_cast<__m128i *>(colors), sum0);

            colors += 4;
        }

        // Left over.
        while (count-- > 0) {
            const uint32_t xx = *xy++;  // x0:14 | 4 | x1:14
            const unsigned x0 = xx >> 18;
            const unsigned x1 = xx & 0x3FFF;

            // 16x(x)
            const __m128i all_x = _mm_set1_epi8((xx >> 14) & 0x0F);

            // 16x (16-x)
            __m128i scale_x = _mm_sub_epi8(sixteen_8bit, all_x);

            // (8x (x, 16-x))
            scale_x = _mm_unpacklo_epi8(scale_x, all_x);

            // first row.
            __m128i sum0 = ProcessOnePixel(row0[x0], row0[x1], scale_x, neg_y);
            // second row.
            __m128i sum1 = ProcessOnePixel(row1[x0], row1[x1], scale_x, all_y);

            // Add both rows for full sample
            sum0 = _mm_add_epi16(sum0, sum1);

            sum0 = ScaleFourPixels<has_alpha, 8>(&sum0, alpha);

            // Pack lower 4 16 bit values of sum into lower 4 bytes.
            sum0 = _mm_packus_epi16(sum0, zero);

            // Extract low int and store.
            *colors++ = _mm_cvtsi128_si32(sum0);
        }
    }
}

/*
 * Similar to S32_generic_D32_filter_DX_SSSE3, except that we do not need to
 * handle the special case sub_y == 0, since sub_y changes on every iteration.
 */
template<bool has_alpha>
void S32_generic_D32_filter_DXDY_SSSE3(const SkBitmapProcState& s,
                                       const uint32_t* xy,
                                       int count, uint32_t* colors) {
    SkASSERT(count > 0 && colors != nullptr);
    SkASSERT(s.fFilterQuality != kNone_SkFilterQuality);
    SkASSERT(kN32_SkColorType == s.fPixmap.colorType());
    if (has_alpha) {
        SkASSERT(s.fAlphaScale < 256);
    } else {
        SkASSERT(s.fAlphaScale == 256);
    }

    const uint8_t* src_addr =
                        static_cast<const uint8_t*>(s.fPixmap.addr());
    const size_t rb = s.fPixmap.rowBytes();

    // vector constants
    const __m128i mask_dist_select = _mm_set_epi8(12, 12, 12, 12,
                                                  8,  8,  8,  8,
                                                  4,  4,  4,  4,
                                                  0,  0,  0,  0);
    const __m128i mask_3FFF = _mm_set1_epi32(0x3FFF);
    const __m128i mask_000F = _mm_set1_epi32(0x000F);
    const __m128i sixteen_8bit = _mm_set1_epi8(16);

    __m128i alpha = _mm_setzero_si128();
    if (has_alpha) {
        // 8x(alpha)
        alpha = _mm_set1_epi16(s.fAlphaScale);
    }

    // Unroll 2x, interleave bytes, use pmaddubsw (all_x is small)
    while (count >= 2) {
        int xy0[4];
        int xy1[4];
        __m128i all_xy, sixteen_minus_xy;
        PrepareConstantsTwoPixelPairsDXDY(xy, mask_3FFF, mask_000F,
                                          sixteen_8bit, mask_dist_select,
                                         &all_xy, &sixteen_minus_xy, xy0, xy1);

        // (4x(x1, 16-x1), 4x(x0, 16-x0))
        __m128i scale_x = _mm_unpacklo_epi8(sixteen_minus_xy, all_xy);
        // (4x(0, y1), 4x(0, y0))
        __m128i all_y = _mm_unpackhi_epi8(all_xy, _mm_setzero_si128());
        __m128i neg_y = _mm_sub_epi16(_mm_set1_epi16(16), all_y);

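        // xy0[2]/xy0[3] and xy1[2]/xy1[3] hold the integer row indices for the
        // two output pixels (see the lane layout produced by
        // PrepareConstantsTwoPixelPairsDXDY); xy0[0..1]/xy1[0..1] are the x
        // offsets used to fetch the pixels below.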
        const uint32_t* row00 =
                    reinterpret_cast<const uint32_t*>(src_addr + xy0[2] * rb);
        const uint32_t* row01 =
                    reinterpret_cast<const uint32_t*>(src_addr + xy1[2] * rb);
        const uint32_t* row10 =
                    reinterpret_cast<const uint32_t*>(src_addr + xy0[3] * rb);
        const uint32_t* row11 =
                    reinterpret_cast<const uint32_t*>(src_addr + xy1[3] * rb);

        __m128i sum0 = ProcessTwoPixelPairsDXDY<has_alpha>(
                                        row00, row01, row10, row11, xy0, xy1,
                                        scale_x, all_y, neg_y, alpha);

        // Pack the 16 bit values of sum into 8 bit values (the two pixels end
        // up in the low half of the register).
        sum0 = _mm_packus_epi16(sum0, _mm_setzero_si128());

        // Store the two output pixels with a single 64 bit store.
        _mm_storel_epi64(reinterpret_cast<__m128i *>(colors), sum0);

        xy += 4;
        colors += 2;
        count -= 2;
    }

    // Handle the remainder
    while (count-- > 0) {
        uint32_t data = *xy++;
        unsigned y0 = data >> 14;
        unsigned y1 = data & 0x3FFF;
        unsigned subY = y0 & 0xF;
        y0 >>= 4;

        data = *xy++;
        unsigned x0 = data >> 14;
        unsigned x1 = data & 0x3FFF;
        unsigned subX = x0 & 0xF;
        x0 >>= 4;

        const uint32_t* row0 =
                        reinterpret_cast<const uint32_t*>(src_addr + y0 * rb);
        const uint32_t* row1 =
                        reinterpret_cast<const uint32_t*>(src_addr + y1 * rb);

        // 16x(x)
        const __m128i all_x = _mm_set1_epi8(subX);

        // 16x (16-x)
        __m128i scale_x = _mm_sub_epi8(sixteen_8bit, all_x);

        // (8x (x, 16-x))
        scale_x = _mm_unpacklo_epi8(scale_x, all_x);

        // 8x(16)
        const __m128i sixteen_16bit = _mm_set1_epi16(16);

        // 8x (y)
        const __m128i all_y = _mm_set1_epi16(subY);

        // 8x (16-y)
        const __m128i neg_y = _mm_sub_epi16(sixteen_16bit, all_y);

        // first row.
        __m128i sum0 = ProcessOnePixel(row0[x0], row0[x1], scale_x, neg_y);
        // second row.
        __m128i sum1 = ProcessOnePixel(row1[x0], row1[x1], scale_x, all_y);

        // Add both rows for full sample
        sum0 = _mm_add_epi16(sum0, sum1);

        sum0 = ScaleFourPixels<has_alpha, 8>(&sum0, alpha);

        // Pack lower 4 16 bit values of sum into lower 4 bytes.
        sum0 = _mm_packus_epi16(sum0, _mm_setzero_si128());

        // Extract low int and store.
        *colors++ = _mm_cvtsi128_si32(sum0);
    }
}
}  // namespace

void S32_opaque_D32_filter_DX_SSSE3(const SkBitmapProcState& s,
                                    const uint32_t* xy,
                                    int count, uint32_t* colors) {
    S32_generic_D32_filter_DX_SSSE3<false>(s, xy, count, colors);
}

void S32_alpha_D32_filter_DX_SSSE3(const SkBitmapProcState& s,
                                   const uint32_t* xy,
                                   int count, uint32_t* colors) {
    S32_generic_D32_filter_DX_SSSE3<true>(s, xy, count, colors);
}

void S32_opaque_D32_filter_DXDY_SSSE3(const SkBitmapProcState& s,
                                      const uint32_t* xy,
                                      int count, uint32_t* colors) {
    S32_generic_D32_filter_DXDY_SSSE3<false>(s, xy, count, colors);
}

void S32_alpha_D32_filter_DXDY_SSSE3(const SkBitmapProcState& s,
                                     const uint32_t* xy,
                                     int count, uint32_t* colors) {
    S32_generic_D32_filter_DXDY_SSSE3<true>(s, xy, count, colors);
}
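
// Usage sketch: callers are expected to pick the opaque or alpha variant based
// on s.fAlphaScale, mirroring the asserts above. The actual selection is done
// by SkBitmapProcState's proc-choosing code; the function pointer type below
// is only illustrative.
//
//   using FilterProc = void (*)(const SkBitmapProcState&,
//                               const uint32_t*, int, uint32_t*);
//   FilterProc proc = (s.fAlphaScale == 256) ? S32_opaque_D32_filter_DX_SSSE3
//                                            : S32_alpha_D32_filter_DX_SSSE3;
//   proc(s, xy, count, colors);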