/**************************************************************************
 *
 * Copyright 2008-2021 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * @file
 * SSE intrinsics portability header.
 *
 * Although the SSE intrinsics are supported by all modern x86 and x86-64
 * compilers, some intrinsics are missing in some implementations
 * (especially older MSVC versions). This header abstracts that away.
 */

#ifndef U_SSE_H_
#define U_SSE_H_

#include "pipe/p_config.h"
#include "pipe/p_compiler.h"
#include "util/u_debug.h"

#if defined(PIPE_ARCH_SSE)

#include <emmintrin.h>


union m128i {
   __m128i m;
   ubyte ub[16];
   ushort us[8];
   uint ui[4];
};

static inline void u_print_epi8(const char *name, __m128i r)
{
   union { __m128i m; ubyte ub[16]; } u;
   u.m = r;

   debug_printf("%s: "
                "%02x/"
                "%02x/"
                "%02x/"
                "%02x/"
                "%02x/"
                "%02x/"
                "%02x/"
                "%02x/"
                "%02x/"
                "%02x/"
                "%02x/"
                "%02x/"
                "%02x/"
                "%02x/"
                "%02x/"
                "%02x\n",
                name,
                u.ub[0],  u.ub[1],  u.ub[2],  u.ub[3],
                u.ub[4],  u.ub[5],  u.ub[6],  u.ub[7],
                u.ub[8],  u.ub[9],  u.ub[10], u.ub[11],
                u.ub[12], u.ub[13], u.ub[14], u.ub[15]);
}

static inline void u_print_epi16(const char *name, __m128i r)
{
   union { __m128i m; ushort us[8]; } u;
   u.m = r;

   debug_printf("%s: "
                "%04x/"
                "%04x/"
                "%04x/"
                "%04x/"
                "%04x/"
                "%04x/"
                "%04x/"
                "%04x\n",
                name,
                u.us[0],  u.us[1],  u.us[2],  u.us[3],
                u.us[4],  u.us[5],  u.us[6],  u.us[7]);
}

static inline void u_print_epi32(const char *name, __m128i r)
{
   union { __m128i m; uint ui[4]; } u;
   u.m = r;

   debug_printf("%s: "
                "%08x/"
                "%08x/"
                "%08x/"
                "%08x\n",
                name,
                u.ui[0],  u.ui[1],  u.ui[2],  u.ui[3]);
}

static inline void u_print_ps(const char *name, __m128 r)
{
   union { __m128 m; float f[4]; } u;
   u.m = r;

   debug_printf("%s: "
                "%f/"
                "%f/"
                "%f/"
                "%f\n",
                name,
                u.f[0],  u.f[1],  u.f[2],  u.f[3]);
}


#define U_DUMP_EPI32(a) u_print_epi32(#a, a)
#define U_DUMP_EPI16(a) u_print_epi16(#a, a)
#define U_DUMP_EPI8(a)  u_print_epi8(#a, a)
#define U_DUMP_PS(a)    u_print_ps(#a, a)


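#if 0
/* Usage sketch (illustrative only): dump intermediate SIMD values while
 * debugging.
 */
static inline void
u_sse_dump_example(void)
{
   __m128i v = _mm_setr_epi32(1, 2, 3, 4);
   __m128  f = _mm_setr_ps(0.5f, 1.5f, 2.5f, 3.5f);

   U_DUMP_EPI32(v);   /* prints "v: 00000001/00000002/00000003/00000004" */
   U_DUMP_PS(f);      /* prints "f: 0.500000/1.500000/2.500000/3.500000" */
}
#endif
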
#if defined(PIPE_ARCH_SSSE3)

#include <tmmintrin.h>

#else /* !PIPE_ARCH_SSSE3 */

/**
 * Provide _mm_shuffle_epi8() via gcc extended inline assembly, for cases
 * where -mssse3 is not supported/enabled.
 *
 * MSVC will never get in here as its intrinsics support does not depend on
 * compiler command line options.
 */
static __inline __m128i
#ifdef __clang__
   __attribute__((__always_inline__, __nodebug__))
#else
   __attribute__((__gnu_inline__, __always_inline__, __artificial__))
#endif
_mm_shuffle_epi8(__m128i a, __m128i mask)
{
    __m128i result;
    __asm__("pshufb %1, %0"
            : "=x" (result)
            : "xm" (mask), "0" (a));
    return result;
}

#endif /* !PIPE_ARCH_SSSE3 */


/*
 * Provide an SSE implementation of _mm_mul_epi32() in terms of
 * _mm_mul_epu32().
 *
 * Basically, albeit surprising at first (and second, and third...) look,
 * to get a signed a * b out of an unsigned multiply one can simply subtract
 * b from the high bits of the result if a is negative (and likewise subtract
 * a if b is negative). Modular arithmetic at its best!
 *
 * So for int32 a, b, in crude pseudo-code ("*" here denoting a widening mul):
 * fixupb = (signmask(b) & a) << 32ULL
 * fixupa = (signmask(a) & b) << 32ULL
 * a * b = (unsigned)a * (unsigned)b - fixupb - fixupa
 *       = (unsigned)a * (unsigned)b - (fixupb + fixupa)
 *
 * This handles both the lo parts (dwords 0/2) and the hi parts (dwords 1/3)
 * at the same time, which opens up some optimization potential.
 */
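/*
 * Worked example (small numbers, for illustration): take a = -2, b = 3.
 * The unsigned product is (2^32 - 2) * 3 = 3 * 2^32 - 6.  Since a is
 * negative, fixupa = b << 32 = 3 * 2^32 and fixupb = 0, so subtracting the
 * fixups leaves 3 * 2^32 - 6 - 3 * 2^32 = -6, the correct signed result.
 */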
static inline __m128i
mm_mullohi_epi32(const __m128i a, const __m128i b, __m128i *res13)
{
   __m128i a13, b13, mul02, mul13;
   __m128i anegmask, bnegmask, fixup, fixup02, fixup13;
   a13 = _mm_shuffle_epi32(a, _MM_SHUFFLE(2,3,0,1));
   b13 = _mm_shuffle_epi32(b, _MM_SHUFFLE(2,3,0,1));
   anegmask = _mm_srai_epi32(a, 31);
   bnegmask = _mm_srai_epi32(b, 31);
   fixup = _mm_add_epi32(_mm_and_si128(anegmask, b),
                         _mm_and_si128(bnegmask, a));
   mul02 = _mm_mul_epu32(a, b);
   mul13 = _mm_mul_epu32(a13, b13);
   fixup02 = _mm_slli_epi64(fixup, 32);
   fixup13 = _mm_and_si128(fixup, _mm_set_epi32(-1,0,-1,0));
   *res13 = _mm_sub_epi64(mul13, fixup13);
   return _mm_sub_epi64(mul02, fixup02);
}


/* Provide an SSE2 implementation of _mm_mullo_epi32() in terms of
 * _mm_mul_epu32().
 *
 * This always works regardless of the signs of the operands, since
 * the high bits (which would be different) aren't used.
 *
 * This seems close enough to the speed of the real SSE4 _mm_mullo_epi32()
 * intrinsic as to not justify adding an SSE4 dependency at this point.
 */
static inline __m128i mm_mullo_epi32(const __m128i a, const __m128i b)
{
   __m128i a4   = _mm_srli_epi64(a, 32);  /* shift by one dword */
   __m128i b4   = _mm_srli_epi64(b, 32);  /* shift by one dword */
   __m128i ba   = _mm_mul_epu32(b, a);   /* multiply dwords 0, 2 */
   __m128i b4a4 = _mm_mul_epu32(b4, a4); /* multiply dwords 1, 3 */

   /* Interleave the results, either with shuffles or (slightly
    * faster) direct bit operations:
    * XXX: this might only be true for some cpus (in particular 65nm
    * Core 2). On most cpus (including that Core 2, but not Nehalem...)
    * using _mm_shuffle_ps/_mm_shuffle_epi32 might also be faster
    * than using the 3 instructions below. But the logic should be fine
    * as well; we can't have an optimal solution for all cpus (if anything,
    * we should just use _mm_mullo_epi32() if sse41 is available...).
    */
#if 0
   __m128i ba8             = _mm_shuffle_epi32(ba, 8);
   __m128i b4a48           = _mm_shuffle_epi32(b4a4, 8);
   __m128i result          = _mm_unpacklo_epi32(ba8, b4a48);
#else
   __m128i mask            = _mm_setr_epi32(~0,0,~0,0);
   __m128i ba_mask         = _mm_and_si128(ba, mask);
   __m128i b4a4_mask_shift = _mm_slli_epi64(b4a4, 32);
   __m128i result          = _mm_or_si128(ba_mask, b4a4_mask_shift);
#endif

   return result;
}
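
#if 0
/* Verification sketch (illustrative): each lane of mm_mullo_epi32() should
 * match the corresponding truncating 32-bit scalar multiply.
 */
static inline void
u_sse_mullo_epi32_example(void)
{
   union m128i r;
   r.m = mm_mullo_epi32(_mm_setr_epi32(3, -7, -2, 6),
                        _mm_setr_epi32(5, 11, -8, 7));
   assert((int)r.ui[0] == 15);
   assert((int)r.ui[1] == -77);
   assert((int)r.ui[2] == 16);
   assert((int)r.ui[3] == 42);
}
#endif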


static inline void
transpose4_epi32(const __m128i * restrict a,
                 const __m128i * restrict b,
                 const __m128i * restrict c,
                 const __m128i * restrict d,
                 __m128i * restrict o,
                 __m128i * restrict p,
                 __m128i * restrict q,
                 __m128i * restrict r)
{
   __m128i t0 = _mm_unpacklo_epi32(*a, *b);
   __m128i t1 = _mm_unpacklo_epi32(*c, *d);
   __m128i t2 = _mm_unpackhi_epi32(*a, *b);
   __m128i t3 = _mm_unpackhi_epi32(*c, *d);

   *o = _mm_unpacklo_epi64(t0, t1);
   *p = _mm_unpackhi_epi64(t0, t1);
   *q = _mm_unpacklo_epi64(t2, t3);
   *r = _mm_unpackhi_epi64(t2, t3);
}


/*
 * Same as above, except the first two values are already interleaved
 * (i.e. contain 64bit values).
 */
static inline void
transpose2_64_2_32(const __m128i * restrict a01,
                   const __m128i * restrict a23,
                   const __m128i * restrict c,
                   const __m128i * restrict d,
                   __m128i * restrict o,
                   __m128i * restrict p,
                   __m128i * restrict q,
                   __m128i * restrict r)
{
   __m128i t0 = *a01;
   __m128i t1 = _mm_unpacklo_epi32(*c, *d);
   __m128i t2 = *a23;
   __m128i t3 = _mm_unpackhi_epi32(*c, *d);

   *o = _mm_unpacklo_epi64(t0, t1);
   *p = _mm_unpackhi_epi64(t0, t1);
   *q = _mm_unpacklo_epi64(t2, t3);
   *r = _mm_unpackhi_epi64(t2, t3);
}


#define SCALAR_EPI32(m, i) _mm_shuffle_epi32((m), _MM_SHUFFLE(i,i,i,i))
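/* E.g. SCALAR_EPI32(m, 2) broadcasts dword 2 of m to all four lanes. */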


/*
 * Implements (1-w)*a + w*b = a - w*a + w*b = w*(b-a) + a,
 * computed as ((b-a)*w >> 8) + a.
 * The math behind negative subtraction results (logical shift/mask) is tricky.
 *
 * w -- weight values
 * a -- src0 values
 * b -- src1 values
 */
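/*
 * For example, with a = 0, b = 255 and w = 128 (roughly 0.5 in 8.8 fixed
 * point), the result is (((255 - 0) * 128) >> 8) + 0 = 127.
 */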
static ALWAYS_INLINE __m128i
util_sse2_lerp_epi16(__m128i w, __m128i a, __m128i b)
{
   __m128i res;

   res = _mm_sub_epi16(b, a);
   res = _mm_mullo_epi16(res, w);
   res = _mm_srli_epi16(res, 8);
   /* use add_epi8 instead of add_epi16 so no need to mask off upper bits */
   res = _mm_add_epi8(res, a);

   return res;
}


/* Apply premultiplied-alpha blending on two pixels simultaneously.
 * All parameters are packed as 8.8 fixed point values in __m128i SSE
 * registers, with the upper 8 bits all zero.
 *
 * a -- src alpha values
 * d -- dst color values
 * s -- src color values
 */
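/*
 * For example, with a = 0x0080 (~0.5), d = 0x00ff and s = 0x0040:
 * da = (0xff * 0x80) >> 8 = 0x7f, so the result is 0x40 + (0xff - 0x7f) = 0xc0.
 */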
static inline __m128i
util_sse2_premul_blend_epi16(__m128i a, __m128i d, __m128i s)
{
   __m128i da, d_sub_da, tmp;
   tmp      = _mm_mullo_epi16(d, a);
   da       = _mm_srli_epi16(tmp, 8);
   d_sub_da = _mm_sub_epi16(d, da);

   return _mm_add_epi16(s, d_sub_da);
}


/* Apply premultiplied-alpha blending on four pixels in packed BGRA
 * format (one/inv_src_alpha blend mode).
 *
 * src    -- four pixels (bgra8 format)
 * dst    -- four destination pixels (bgra8)
 * return -- blended pixels (bgra8)
 */
static ALWAYS_INLINE __m128i
util_sse2_blend_premul_4(const __m128i src,
                         const __m128i dst)
{
   __m128i al, ah, dl, dh, sl, sh, rl, rh;
   __m128i zero = _mm_setzero_si128();

   /* Blend first two pixels:
    */
   sl = _mm_unpacklo_epi8(src, zero);
   dl = _mm_unpacklo_epi8(dst, zero);

   al = _mm_shufflehi_epi16(sl, 0xff);
   al = _mm_shufflelo_epi16(al, 0xff);

   rl = util_sse2_premul_blend_epi16(al, dl, sl);

   /* Blend second two pixels:
    */
   sh = _mm_unpackhi_epi8(src, zero);
   dh = _mm_unpackhi_epi8(dst, zero);

   ah = _mm_shufflehi_epi16(sh, 0xff);
   ah = _mm_shufflelo_epi16(ah, 0xff);

   rh = util_sse2_premul_blend_epi16(ah, dh, sh);

   /* Pack the results down to four bgra8 pixels:
    */
   return _mm_packus_epi16(rl, rh);
}
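
#if 0
/* Usage sketch (illustrative): blend four packed bgra8 pixels from src over
 * dst in place, assuming unaligned loads/stores are acceptable here.
 */
static inline void
u_sse_blend_premul_4_example(uint32_t *dst, const uint32_t *src)
{
   __m128i s = _mm_loadu_si128((const __m128i *)src);
   __m128i d = _mm_loadu_si128((const __m128i *)dst);
   _mm_storeu_si128((__m128i *)dst, util_sse2_blend_premul_4(s, d));
}
#endif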


/* Apply src-alpha blending on four pixels in packed BGRA
 * format (srcalpha/inv_src_alpha blend mode).
 *
 * src    -- four pixels (bgra8 format)
 * dst    -- four destination pixels (bgra8)
 * return -- blended pixels (bgra8)
 */
static ALWAYS_INLINE __m128i
util_sse2_blend_srcalpha_4(const __m128i src,
                           const __m128i dst)
{
   __m128i al, ah, dl, dh, sl, sh, rl, rh;
   __m128i zero = _mm_setzero_si128();

   /* Blend first two pixels:
    */
   sl = _mm_unpacklo_epi8(src, zero);
   dl = _mm_unpacklo_epi8(dst, zero);

   al = _mm_shufflehi_epi16(sl, 0xff);
   al = _mm_shufflelo_epi16(al, 0xff);

   rl = util_sse2_lerp_epi16(al, dl, sl);

   /* Blend second two pixels:
    */
   sh = _mm_unpackhi_epi8(src, zero);
   dh = _mm_unpackhi_epi8(dst, zero);

   ah = _mm_shufflehi_epi16(sh, 0xff);
   ah = _mm_shufflelo_epi16(ah, 0xff);

   rh = util_sse2_lerp_epi16(ah, dh, sh);

   /* Pack the results down to four bgra8 pixels:
    */
   return _mm_packus_epi16(rl, rh);
}


/**
 * Premultiplies src with a constant alpha, then
 * does a one/inv_src_alpha blend.
 *
 * src 16xi8 (normalized)
 * dst 16xi8 (normalized)
 * cst_alpha (constant alpha (u8 value))
 */
static ALWAYS_INLINE __m128i
util_sse2_blend_premul_src_4(const __m128i src,
                             const __m128i dst,
                             const unsigned cst_alpha)
{
   __m128i srca, d, s, rl, rh;
   __m128i zero = _mm_setzero_si128();
   __m128i cst_alpha_vec = _mm_set1_epi16(cst_alpha);

   /* Blend first two pixels:
    */
   s = _mm_unpacklo_epi8(src, zero);
   s = _mm_mullo_epi16(s, cst_alpha_vec);
   /* the shift will cause some precision loss */
   s = _mm_srli_epi16(s, 8);

   srca = _mm_shufflehi_epi16(s, 0xff);
   srca = _mm_shufflelo_epi16(srca, 0xff);

   d = _mm_unpacklo_epi8(dst, zero);
   rl = util_sse2_premul_blend_epi16(srca, d, s);

   /* Blend second two pixels:
    */
   s = _mm_unpackhi_epi8(src, zero);
   s = _mm_mullo_epi16(s, cst_alpha_vec);
   /* the shift will cause some precision loss */
   s = _mm_srli_epi16(s, 8);

   srca = _mm_shufflehi_epi16(s, 0xff);
   srca = _mm_shufflelo_epi16(srca, 0xff);

   d = _mm_unpackhi_epi8(dst, zero);
   rh = util_sse2_premul_blend_epi16(srca, d, s);

   /* Pack the results down to four bgra8 pixels:
    */
   return _mm_packus_epi16(rl, rh);
}


/**
 * Linear interpolation with SSE2.
 *
 * dst, src0, src1 are 16 x i8 vectors, with [0..255] normalized values.
 *
 * weight_lo and weight_hi should be 8 x i16 vectors, in 8.8 fixed point
 * format, for the low and high components.
 * We'd want to pass these by value, but an MSVC limitation forces us to pass
 * them as pointers since it complains if more than 3 __m128 are passed by
 * value.
 */
static ALWAYS_INLINE __m128i
util_sse2_lerp_epi8_fixed88(__m128i src0, __m128i src1,
                            const __m128i * restrict weight_lo,
                            const __m128i * restrict weight_hi)
{
   const __m128i zero = _mm_setzero_si128();

   __m128i src0_lo = _mm_unpacklo_epi8(src0, zero);
   __m128i src0_hi = _mm_unpackhi_epi8(src0, zero);

   __m128i src1_lo = _mm_unpacklo_epi8(src1, zero);
   __m128i src1_hi = _mm_unpackhi_epi8(src1, zero);

   __m128i dst_lo;
   __m128i dst_hi;

   dst_lo = util_sse2_lerp_epi16(*weight_lo, src0_lo, src1_lo);
   dst_hi = util_sse2_lerp_epi16(*weight_hi, src0_hi, src1_hi);

   return _mm_packus_epi16(dst_lo, dst_hi);
}


/**
 * Linear interpolation with SSE2.
 *
 * dst, src0, src1 are 16 x i8 vectors, with [0..255] normalized values.
 *
 * weight should be a 16 x i8 vector, with 0.8 fixed point values.
 */
static ALWAYS_INLINE __m128i
util_sse2_lerp_epi8_fixed08(__m128i src0, __m128i src1,
                            __m128i weight)
{
   const __m128i zero = _mm_setzero_si128();
   __m128i weight_lo = _mm_unpacklo_epi8(weight, zero);
   __m128i weight_hi = _mm_unpackhi_epi8(weight, zero);

   return util_sse2_lerp_epi8_fixed88(src0, src1,
                                      &weight_lo, &weight_hi);
}
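
#if 0
/* Usage sketch (illustrative): a roughly 50/50 blend of two vectors of
 * sixteen unorm8 values, using a constant 0.8 fixed point weight of 127.
 */
static inline __m128i
u_sse_lerp_half_example(__m128i src0, __m128i src1)
{
   return util_sse2_lerp_epi8_fixed08(src0, src1, _mm_set1_epi8(127));
}
#endif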


/**
 * Linear interpolation with SSE2.
 *
 * dst, src0, src1, and weight are 16 x i8 vectors, with [0..255] normalized
 * values.
 */
static ALWAYS_INLINE __m128i
util_sse2_lerp_unorm8(__m128i src0, __m128i src1,
                      __m128i weight)
{
   const __m128i zero = _mm_setzero_si128();
   __m128i weight_lo = _mm_unpacklo_epi8(weight, zero);
   __m128i weight_hi = _mm_unpackhi_epi8(weight, zero);

#if 0
   /*
    * Rescale from [0..255] to [0..256].
    */
   weight_lo = _mm_add_epi16(weight_lo, _mm_srli_epi16(weight_lo, 7));
   weight_hi = _mm_add_epi16(weight_hi, _mm_srli_epi16(weight_hi, 7));
#endif

   return util_sse2_lerp_epi8_fixed88(src0, src1,
                                      &weight_lo, &weight_hi);
}


/**
 * Linear interpolation with SSE2.
 *
 * dst, src0, src1, src2, src3 are 16 x i8 vectors, with [0..255] normalized
 * values.
 *
 * ws_lo, ws_hi, wt_lo, wt_hi should be 8 x i16 vectors, in 8.8 fixed point
 * format, for the low and high components.
 * We'd want to pass these by value, but an MSVC limitation forces us to pass
 * them as pointers since it complains if more than 3 __m128 are passed by
 * value.
 *
 * This uses ws_lo, ws_hi to interpolate between src0 and src1, as well as to
 * interpolate between src2 and src3, then uses wt_lo and wt_hi to interpolate
 * between the resulting vectors.
 */
static ALWAYS_INLINE __m128i
util_sse2_lerp_2d_epi8_fixed88(__m128i src0, __m128i src1,
                               const __m128i * restrict src2,
                               const __m128i * restrict src3,
                               const __m128i * restrict ws_lo,
                               const __m128i * restrict ws_hi,
                               const __m128i * restrict wt_lo,
                               const __m128i * restrict wt_hi)
{
   const __m128i zero = _mm_setzero_si128();

   __m128i src0_lo = _mm_unpacklo_epi8(src0, zero);
   __m128i src0_hi = _mm_unpackhi_epi8(src0, zero);

   __m128i src1_lo = _mm_unpacklo_epi8(src1, zero);
   __m128i src1_hi = _mm_unpackhi_epi8(src1, zero);

   __m128i src2_lo = _mm_unpacklo_epi8(*src2, zero);
   __m128i src2_hi = _mm_unpackhi_epi8(*src2, zero);

   __m128i src3_lo = _mm_unpacklo_epi8(*src3, zero);
   __m128i src3_hi = _mm_unpackhi_epi8(*src3, zero);

   __m128i dst_lo, dst01_lo, dst23_lo;
   __m128i dst_hi, dst01_hi, dst23_hi;

   dst01_lo = util_sse2_lerp_epi16(*ws_lo, src0_lo, src1_lo);
   dst01_hi = util_sse2_lerp_epi16(*ws_hi, src0_hi, src1_hi);
   dst23_lo = util_sse2_lerp_epi16(*ws_lo, src2_lo, src3_lo);
   dst23_hi = util_sse2_lerp_epi16(*ws_hi, src2_hi, src3_hi);

   dst_lo = util_sse2_lerp_epi16(*wt_lo, dst01_lo, dst23_lo);
   dst_hi = util_sse2_lerp_epi16(*wt_hi, dst01_hi, dst23_hi);

   return _mm_packus_epi16(dst_lo, dst_hi);
}

/**
 * Stretch a row of pixels using a linear filter.
 *
 * Uses Bresenham's line algorithm with a 16.16 fixed point representation
 * for the error term.
 *
 * @param dst_width destination width in pixels
 * @param src_x     start x0 in 16.16 fixed point format
 * @param src_xstep step in 16.16 fixed point format
 *
 * @return final src_x value (i.e., src_x + dst_width*src_xstep)
 */
static ALWAYS_INLINE int32_t
util_sse2_stretch_row_8unorm(__m128i * restrict dst,
                             int32_t dst_width,
                             const uint32_t * restrict src,
                             int32_t src_x,
                             int32_t src_xstep)
{
   int16_t error0, error1, error2, error3;
   __m128i error_lo, error_hi, error_step;

   assert(dst_width >= 0);
   assert(dst_width % 4 == 0);

   error0 = src_x;
   error1 = error0 + src_xstep;
   error2 = error1 + src_xstep;
   error3 = error2 + src_xstep;

   error_lo   = _mm_setr_epi16(error0, error0, error0, error0,
                               error1, error1, error1, error1);
   error_hi   = _mm_setr_epi16(error2, error2, error2, error2,
                               error3, error3, error3, error3);
   error_step = _mm_set1_epi16(src_xstep << 2);

   dst_width >>= 2;
   while (dst_width) {
      uint16_t src_x0;
      uint16_t src_x1;
      uint16_t src_x2;
      uint16_t src_x3;
      __m128i src0, src1;
      __m128i weight_lo, weight_hi;

      /*
       * It is faster to re-compute the coordinates in the scalar integer unit
       * here than to fetch the values from the SIMD integer unit.
       */

      src_x0 = src_x >> 16;
      src_x += src_xstep;
      src_x1 = src_x >> 16;
      src_x += src_xstep;
      src_x2 = src_x >> 16;
      src_x += src_xstep;
      src_x3 = src_x >> 16;
      src_x += src_xstep;

      /*
       * Fetch pairs of pixels 64 bits at a time, and then swizzle them in place.
       */

      {
         __m128i src_00_10 = _mm_loadl_epi64((const __m128i *)&src[src_x0]);
         __m128i src_01_11 = _mm_loadl_epi64((const __m128i *)&src[src_x1]);
         __m128i src_02_12 = _mm_loadl_epi64((const __m128i *)&src[src_x2]);
         __m128i src_03_13 = _mm_loadl_epi64((const __m128i *)&src[src_x3]);

         __m128i src_00_01_10_11 = _mm_unpacklo_epi32(src_00_10, src_01_11);
         __m128i src_02_03_12_13 = _mm_unpacklo_epi32(src_02_12, src_03_13);

         src0 = _mm_unpacklo_epi64(src_00_01_10_11, src_02_03_12_13);
         src1 = _mm_unpackhi_epi64(src_00_01_10_11, src_02_03_12_13);
      }

      weight_lo = _mm_srli_epi16(error_lo, 8);
      weight_hi = _mm_srli_epi16(error_hi, 8);

      *dst = util_sse2_lerp_epi8_fixed88(src0, src1,
                                         &weight_lo, &weight_hi);

      error_lo = _mm_add_epi16(error_lo, error_step);
      error_hi = _mm_add_epi16(error_hi, error_step);

      ++dst;
      --dst_width;
   }

   return src_x;
}
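
#if 0
/* Usage sketch (illustrative): 2x upscale of an 8-pixel bgra8 row into a
 * 16-pixel row, i.e. src_xstep = 0.5 in 16.16 fixed point.  dst must be
 * 16-byte aligned, and src must provide at least 9 readable pixels (the
 * right-hand filter tap of the last output pixel).
 */
static inline void
u_sse_stretch_row_example(__m128i *dst /* 4 x __m128i */,
                          const uint32_t *src)
{
   util_sse2_stretch_row_8unorm(dst, 16, src, 0, 0x8000);
}
#endif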



#endif /* PIPE_ARCH_SSE */

#endif /* U_SSE_H_ */