/**************************************************************************
 *
 * Copyright 2008-2021 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * @file
 * SSE intrinsics portability header.
 *
 * Although the SSE intrinsics are supported by all modern x86 and x86-64
 * compilers, some intrinsics are missing from some implementations
 * (especially older MSVC versions). This header abstracts that away.
 */

#ifndef U_SSE_H_
#define U_SSE_H_

#include "pipe/p_config.h"
#include "pipe/p_compiler.h"
#include "util/u_debug.h"

#if defined(PIPE_ARCH_SSE)

#include <emmintrin.h>


union m128i {
   __m128i m;
   ubyte ub[16];
   ushort us[8];
   uint ui[4];
};

static inline void u_print_epi8(const char *name, __m128i r)
{
   union { __m128i m; ubyte ub[16]; } u;
   u.m = r;

   debug_printf("%s: "
                "%02x/"
                "%02x/"
                "%02x/"
                "%02x/"
                "%02x/"
                "%02x/"
                "%02x/"
                "%02x/"
                "%02x/"
                "%02x/"
                "%02x/"
                "%02x/"
                "%02x/"
                "%02x/"
                "%02x/"
                "%02x\n",
                name,
                u.ub[0],  u.ub[1],  u.ub[2],  u.ub[3],
                u.ub[4],  u.ub[5],  u.ub[6],  u.ub[7],
                u.ub[8],  u.ub[9],  u.ub[10], u.ub[11],
                u.ub[12], u.ub[13], u.ub[14], u.ub[15]);
}

static inline void u_print_epi16(const char *name, __m128i r)
{
   union { __m128i m; ushort us[8]; } u;
   u.m = r;

   debug_printf("%s: "
                "%04x/"
                "%04x/"
                "%04x/"
                "%04x/"
                "%04x/"
                "%04x/"
                "%04x/"
                "%04x\n",
                name,
                u.us[0], u.us[1], u.us[2], u.us[3],
                u.us[4], u.us[5], u.us[6], u.us[7]);
}

static inline void u_print_epi32(const char *name, __m128i r)
{
   union { __m128i m; uint ui[4]; } u;
   u.m = r;

   debug_printf("%s: "
                "%08x/"
                "%08x/"
                "%08x/"
                "%08x\n",
                name,
                u.ui[0], u.ui[1], u.ui[2], u.ui[3]);
}

static inline void u_print_ps(const char *name, __m128 r)
{
   union { __m128 m; float f[4]; } u;
   u.m = r;

   debug_printf("%s: "
                "%f/"
                "%f/"
                "%f/"
                "%f\n",
                name,
                u.f[0], u.f[1], u.f[2], u.f[3]);
}


#define U_DUMP_EPI32(a) u_print_epi32(#a, a)
#define U_DUMP_EPI16(a) u_print_epi16(#a, a)
#define U_DUMP_EPI8(a)  u_print_epi8(#a, a)
#define U_DUMP_PS(a)    u_print_ps(#a, a)

/*
 * Provide an SSE2 implementation of _mm_mul_epi32() in terms of
 * _mm_mul_epu32().
 *
 * Basically, albeit surprising at first (and second, and third...) look,
 * if a * b is done signed instead of unsigned, we can simply subtract b
 * from the high bits of the result when a is negative (and likewise
 * subtract a when b is negative). Modular arithmetic at its best!
 *
 * So for int32 a, b, in crude pseudo-code ("*" here denoting a widening mul):
 *    fixupb = (signmask(b) & a) << 32ULL
 *    fixupa = (signmask(a) & b) << 32ULL
 *    a * b  = (unsigned)a * (unsigned)b - fixupb - fixupa
 *           = (unsigned)a * (unsigned)b - (fixupb + fixupa)
 *
 * This does both the lo (dwords 0/2) and hi (dwords 1/3) parts at the same
 * time because of the optimization potential.
 */
static inline __m128i
mm_mullohi_epi32(const __m128i a, const __m128i b, __m128i *res13)
{
   __m128i a13, b13, mul02, mul13;
   __m128i anegmask, bnegmask, fixup, fixup02, fixup13;
   a13 = _mm_shuffle_epi32(a, _MM_SHUFFLE(2,3,0,1));
   b13 = _mm_shuffle_epi32(b, _MM_SHUFFLE(2,3,0,1));
   anegmask = _mm_srai_epi32(a, 31);
   bnegmask = _mm_srai_epi32(b, 31);
   fixup = _mm_add_epi32(_mm_and_si128(anegmask, b),
                         _mm_and_si128(bnegmask, a));
   mul02 = _mm_mul_epu32(a, b);
   mul13 = _mm_mul_epu32(a13, b13);
   fixup02 = _mm_slli_epi64(fixup, 32);
   fixup13 = _mm_and_si128(fixup, _mm_set_epi32(-1,0,-1,0));
   *res13 = _mm_sub_epi64(mul13, fixup13);
   return _mm_sub_epi64(mul02, fixup02);
}
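
/*
 * Illustrative usage sketch (kept disabled, not part of the original header):
 * read back the full signed 64-bit products computed by mm_mullohi_epi32().
 * The function and variable names below are hypothetical.
 */
#if 0
static inline void
example_mullohi_epi32(void)
{
   __m128i a = _mm_setr_epi32(-2, 3, 100000, -70000);
   __m128i b = _mm_setr_epi32( 5, -7, 100000,  70000);
   __m128i prod13;
   __m128i prod02 = mm_mullohi_epi32(a, b, &prod13);
   union { __m128i m; long long s64[2]; } lo, hi;
   lo.m = prod02;   /* products of dwords 0 and 2: -10, 10000000000 */
   hi.m = prod13;   /* products of dwords 1 and 3: -21, -4900000000 */
   debug_printf("%lld %lld %lld %lld\n",
                lo.s64[0], hi.s64[0], lo.s64[1], hi.s64[1]);
}
#endif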


/* Provide an SSE2 implementation of _mm_mullo_epi32() in terms of
 * _mm_mul_epu32().
 *
 * This always works regardless of the signs of the operands, since
 * the high bits (which would differ) aren't used.
 *
 * This seems close enough to the speed of SSE4 and the real
 * _mm_mullo_epi32() intrinsic as to not justify adding an SSE4
 * dependency at this point.
 */
static inline __m128i mm_mullo_epi32(const __m128i a, const __m128i b)
{
   __m128i a4   = _mm_srli_epi64(a, 32);  /* shift by one dword */
   __m128i b4   = _mm_srli_epi64(b, 32);  /* shift by one dword */
   __m128i ba   = _mm_mul_epu32(b, a);    /* multiply dwords 0, 2 */
   __m128i b4a4 = _mm_mul_epu32(b4, a4);  /* multiply dwords 1, 3 */

   /* Interleave the results, either with shuffles or (slightly
    * faster) direct bit operations:
    * XXX: might only be true for some CPUs (in particular 65nm
    * Core 2). On most CPUs (including that Core 2, but not Nehalem...)
    * using _mm_shuffle_ps/_mm_shuffle_epi32 might also be faster
    * than using the 3 instructions below. But the logic should be fine
    * as well; we can't have an optimal solution for all CPUs (if anything,
    * we should just use _mm_mullo_epi32() if SSE4.1 is available...).
    */
#if 0
   __m128i ba8             = _mm_shuffle_epi32(ba, 8);
   __m128i b4a48           = _mm_shuffle_epi32(b4a4, 8);
   __m128i result          = _mm_unpacklo_epi32(ba8, b4a48);
#else
   __m128i mask            = _mm_setr_epi32(~0,0,~0,0);
   __m128i ba_mask         = _mm_and_si128(ba, mask);
   __m128i b4a4_mask_shift = _mm_slli_epi64(b4a4, 32);
   __m128i result          = _mm_or_si128(ba_mask, b4a4_mask_shift);
#endif

   return result;
}
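
/*
 * Illustrative sketch (disabled): the low 32 bits of a 32x32 product are the
 * same whether the inputs are treated as signed or unsigned, so
 * mm_mullo_epi32() handles both. Names below are hypothetical.
 */
#if 0
static inline void
example_mullo_epi32(void)
{
   union m128i r;
   __m128i a = _mm_setr_epi32(-3,  7, 0x10000, -1);
   __m128i b = _mm_setr_epi32( 4, -6, 0x10000, -1);
   r.m = mm_mullo_epi32(a, b);
   /* expected (as signed ints): -12, -42, 0 (0x100000000 truncated), 1 */
   debug_printf("%i %i %i %i\n",
                (int)r.ui[0], (int)r.ui[1], (int)r.ui[2], (int)r.ui[3]);
}
#endif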


static inline void
transpose4_epi32(const __m128i * restrict a,
                 const __m128i * restrict b,
                 const __m128i * restrict c,
                 const __m128i * restrict d,
                 __m128i * restrict o,
                 __m128i * restrict p,
                 __m128i * restrict q,
                 __m128i * restrict r)
{
   __m128i t0 = _mm_unpacklo_epi32(*a, *b);
   __m128i t1 = _mm_unpacklo_epi32(*c, *d);
   __m128i t2 = _mm_unpackhi_epi32(*a, *b);
   __m128i t3 = _mm_unpackhi_epi32(*c, *d);

   *o = _mm_unpacklo_epi64(t0, t1);
   *p = _mm_unpackhi_epi64(t0, t1);
   *q = _mm_unpacklo_epi64(t2, t3);
   *r = _mm_unpackhi_epi64(t2, t3);
}


/*
 * Same as above, except the first two values are already interleaved
 * (i.e. contain 64-bit values).
 */
static inline void
transpose2_64_2_32(const __m128i * restrict a01,
                   const __m128i * restrict a23,
                   const __m128i * restrict c,
                   const __m128i * restrict d,
                   __m128i * restrict o,
                   __m128i * restrict p,
                   __m128i * restrict q,
                   __m128i * restrict r)
{
   __m128i t0 = *a01;
   __m128i t1 = _mm_unpacklo_epi32(*c, *d);
   __m128i t2 = *a23;
   __m128i t3 = _mm_unpackhi_epi32(*c, *d);

   *o = _mm_unpacklo_epi64(t0, t1);
   *p = _mm_unpackhi_epi64(t0, t1);
   *q = _mm_unpacklo_epi64(t2, t3);
   *r = _mm_unpackhi_epi64(t2, t3);
}


#define SCALAR_EPI32(m, i) _mm_shuffle_epi32((m), _MM_SHUFFLE(i,i,i,i))


/*
 * Implements (1-w)*a + w*b = a - w*a + w*b = w*(b-a) + a,
 * computed as ((b-a)*w >> 8) + a.
 * The math behind negative subtraction results (logical shift/mask) is tricky.
 *
 * w -- weight values
 * a -- src0 values
 * b -- src1 values
 */
static ALWAYS_INLINE __m128i
util_sse2_lerp_epi16(__m128i w, __m128i a, __m128i b)
{
   __m128i res;

   res = _mm_sub_epi16(b, a);
   res = _mm_mullo_epi16(res, w);
   res = _mm_srli_epi16(res, 8);
   /* use add_epi8 instead of add_epi16 so no need to mask off upper bits */
   res = _mm_add_epi8(res, a);

   return res;
}
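
/*
 * Scalar sketch (disabled) of the same computation for one 8-bit channel,
 * mirroring each intrinsic above step by step; it is meant only to show why
 * the logical shift plus the 8-bit add still yields the correct low byte when
 * b - a is negative. The helper name is hypothetical.
 */
#if 0
static inline ubyte
example_lerp_u8_scalar(ushort w, ubyte a, ubyte b)
{
   ushort diff    = (ushort)(b - a);      /* modular, like _mm_sub_epi16 */
   ushort prod    = (ushort)(diff * w);   /* low 16 bits, like _mm_mullo_epi16 */
   ushort shifted = (ushort)(prod >> 8);  /* logical shift, like _mm_srli_epi16 */
   return (ubyte)(shifted + a);           /* wraps like _mm_add_epi8 */
}
#endif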


/* Apply premultiplied-alpha blending on two pixels simultaneously.
 * All parameters are packed as 8.8 fixed point values in __m128i SSE
 * registers, with the upper 8 bits all zero.
 *
 * a -- src alpha values
 * d -- dst color values
 * s -- src color values
 */
static inline __m128i
util_sse2_premul_blend_epi16(__m128i a, __m128i d, __m128i s)
{
   __m128i da, d_sub_da, tmp;
   tmp      = _mm_mullo_epi16(d, a);
   da       = _mm_srli_epi16(tmp, 8);
   d_sub_da = _mm_sub_epi16(d, da);

   return _mm_add_epi16(s, d_sub_da);
}
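
/*
 * Per-channel scalar sketch (disabled) of the blend above, assuming the
 * 16-bit lanes hold [0..255] values with zero upper bytes; illustrative only,
 * with a hypothetical helper name.
 */
#if 0
static inline ushort
example_premul_blend_scalar(ushort a, ushort d, ushort s)
{
   ushort da = (ushort)((d * a) >> 8);   /* dst scaled by src alpha */
   return (ushort)(s + (d - da));        /* src + dst * (1 - alpha) */
}
#endif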


/* Apply premultiplied-alpha blending on four pixels in packed BGRA
 * format (one/inv_src_alpha blend mode).
 *
 * src -- four pixels (bgra8 format)
 * dst -- four destination pixels (bgra8)
 * return -- blended pixels (bgra8)
 */
static ALWAYS_INLINE __m128i
util_sse2_blend_premul_4(const __m128i src,
                         const __m128i dst)
{

   __m128i al, ah, dl, dh, sl, sh, rl, rh;
   __m128i zero = _mm_setzero_si128();

   /* Blend first two pixels:
    */
   sl = _mm_unpacklo_epi8(src, zero);
   dl = _mm_unpacklo_epi8(dst, zero);

   al = _mm_shufflehi_epi16(sl, 0xff);
   al = _mm_shufflelo_epi16(al, 0xff);

   rl = util_sse2_premul_blend_epi16(al, dl, sl);

   /* Blend second two pixels:
    */
   sh = _mm_unpackhi_epi8(src, zero);
   dh = _mm_unpackhi_epi8(dst, zero);

   ah = _mm_shufflehi_epi16(sh, 0xff);
   ah = _mm_shufflelo_epi16(ah, 0xff);

   rh = util_sse2_premul_blend_epi16(ah, dh, sh);

   /* Pack the results down to four bgra8 pixels:
    */
   return _mm_packus_epi16(rl, rh);
}
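
/*
 * Illustrative usage sketch (disabled): blend four packed bgra8 pixels from
 * hypothetical src/dst buffers. Unaligned loads/stores are used so the
 * buffers need not be 16-byte aligned.
 */
#if 0
static inline void
example_blend_premul_4(uint32_t *dst4, const uint32_t *src4)
{
   __m128i s = _mm_loadu_si128((const __m128i *)src4);
   __m128i d = _mm_loadu_si128((const __m128i *)dst4);
   _mm_storeu_si128((__m128i *)dst4, util_sse2_blend_premul_4(s, d));
}
#endif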


/* Apply src-alpha blending on four pixels in packed BGRA
 * format (srcalpha/inv_src_alpha blend mode).
 *
 * src -- four pixels (bgra8 format)
 * dst -- four destination pixels (bgra8)
 * return -- blended pixels (bgra8)
 */
static ALWAYS_INLINE __m128i
util_sse2_blend_srcalpha_4(const __m128i src,
                           const __m128i dst)
{

   __m128i al, ah, dl, dh, sl, sh, rl, rh;
   __m128i zero = _mm_setzero_si128();

   /* Blend first two pixels:
    */
   sl = _mm_unpacklo_epi8(src, zero);
   dl = _mm_unpacklo_epi8(dst, zero);

   al = _mm_shufflehi_epi16(sl, 0xff);
   al = _mm_shufflelo_epi16(al, 0xff);

   rl = util_sse2_lerp_epi16(al, dl, sl);

   /* Blend second two pixels:
    */
   sh = _mm_unpackhi_epi8(src, zero);
   dh = _mm_unpackhi_epi8(dst, zero);

   ah = _mm_shufflehi_epi16(sh, 0xff);
   ah = _mm_shufflelo_epi16(ah, 0xff);

   rh = util_sse2_lerp_epi16(ah, dh, sh);

   /* Pack the results down to four bgra8 pixels:
    */
   return _mm_packus_epi16(rl, rh);
}


/**
 * Premultiplies src with constant alpha, then
 * does a one/inv_src_alpha blend.
 *
 * src 16xi8 (normalized)
 * dst 16xi8 (normalized)
 * cst_alpha (constant alpha (u8 value))
 */
static ALWAYS_INLINE __m128i
util_sse2_blend_premul_src_4(const __m128i src,
                             const __m128i dst,
                             const unsigned cst_alpha)
{

   __m128i srca, d, s, rl, rh;
   __m128i zero = _mm_setzero_si128();
   __m128i cst_alpha_vec = _mm_set1_epi16(cst_alpha);

   /* Blend first two pixels:
    */
   s = _mm_unpacklo_epi8(src, zero);
   s = _mm_mullo_epi16(s, cst_alpha_vec);
   /* the shift will cause some precision loss */
   s = _mm_srli_epi16(s, 8);

   srca = _mm_shufflehi_epi16(s, 0xff);
   srca = _mm_shufflelo_epi16(srca, 0xff);

   d = _mm_unpacklo_epi8(dst, zero);
   rl = util_sse2_premul_blend_epi16(srca, d, s);

   /* Blend second two pixels:
    */
   s = _mm_unpackhi_epi8(src, zero);
   s = _mm_mullo_epi16(s, cst_alpha_vec);
   /* the shift will cause some precision loss */
   s = _mm_srli_epi16(s, 8);

   srca = _mm_shufflehi_epi16(s, 0xff);
   srca = _mm_shufflelo_epi16(srca, 0xff);

   d = _mm_unpackhi_epi8(dst, zero);
   rh = util_sse2_premul_blend_epi16(srca, d, s);

   /* Pack the results down to four bgra8 pixels:
    */
   return _mm_packus_epi16(rl, rh);
}


/**
 * Linear interpolation with SSE2.
 *
 * dst, src0, src1 are 16 x i8 vectors, with [0..255] normalized values.
 *
 * weight_lo and weight_hi should be 8 x i16 vectors, in 8.8 fixed point
 * format, for the low and high components.
 * We'd prefer to pass these by value, but an MSVC limitation forces us to
 * pass them as pointers, since it complains if more than 3 __m128 arguments
 * are passed by value.
 */
static ALWAYS_INLINE __m128i
util_sse2_lerp_epi8_fixed88(__m128i src0, __m128i src1,
                            const __m128i * restrict weight_lo,
                            const __m128i * restrict weight_hi)
{
   const __m128i zero = _mm_setzero_si128();

   __m128i src0_lo = _mm_unpacklo_epi8(src0, zero);
   __m128i src0_hi = _mm_unpackhi_epi8(src0, zero);

   __m128i src1_lo = _mm_unpacklo_epi8(src1, zero);
   __m128i src1_hi = _mm_unpackhi_epi8(src1, zero);

   __m128i dst_lo;
   __m128i dst_hi;

   dst_lo = util_sse2_lerp_epi16(*weight_lo, src0_lo, src1_lo);
   dst_hi = util_sse2_lerp_epi16(*weight_hi, src0_hi, src1_hi);

   return _mm_packus_epi16(dst_lo, dst_hi);
}
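
/*
 * Illustrative sketch (disabled): build a single 8.8 weight, replicate it for
 * the low and high halves, and pass both by pointer as the note above
 * requires. Names are hypothetical.
 */
#if 0
static inline __m128i
example_lerp_epi8_fixed88(__m128i src0, __m128i src1, ushort w)
{
   __m128i weight_lo = _mm_set1_epi16(w);   /* weight for the low 8 channels */
   __m128i weight_hi = _mm_set1_epi16(w);   /* weight for the high 8 channels */
   return util_sse2_lerp_epi8_fixed88(src0, src1, &weight_lo, &weight_hi);
}
#endif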


/**
 * Linear interpolation with SSE2.
 *
 * dst, src0, src1 are 16 x i8 vectors, with [0..255] normalized values.
 *
 * weight should be a 16 x i8 vector, in 0.8 fixed point format.
 */
static ALWAYS_INLINE __m128i
util_sse2_lerp_epi8_fixed08(__m128i src0, __m128i src1,
                            __m128i weight)
{
   const __m128i zero = _mm_setzero_si128();
   __m128i weight_lo = _mm_unpacklo_epi8(weight, zero);
   __m128i weight_hi = _mm_unpackhi_epi8(weight, zero);

   return util_sse2_lerp_epi8_fixed88(src0, src1,
                                      &weight_lo, &weight_hi);
}


/**
 * Linear interpolation with SSE2.
 *
 * dst, src0, src1, and weight are 16 x i8 vectors, with [0..255] normalized
 * values.
 */
static ALWAYS_INLINE __m128i
util_sse2_lerp_unorm8(__m128i src0, __m128i src1,
                      __m128i weight)
{
   const __m128i zero = _mm_setzero_si128();
   __m128i weight_lo = _mm_unpacklo_epi8(weight, zero);
   __m128i weight_hi = _mm_unpackhi_epi8(weight, zero);

#if 0
   /*
    * Rescale from [0..255] to [0..256].
    */
   weight_lo = _mm_add_epi16(weight_lo, _mm_srli_epi16(weight_lo, 7));
   weight_hi = _mm_add_epi16(weight_hi, _mm_srli_epi16(weight_hi, 7));
#endif

   return util_sse2_lerp_epi8_fixed88(src0, src1,
                                      &weight_lo, &weight_hi);
}


/**
 * Linear interpolation with SSE2.
 *
 * dst, src0, src1, src2, src3 are 16 x i8 vectors, with [0..255] normalized
 * values.
 *
 * ws_lo, ws_hi, wt_lo, wt_hi should be 8 x i16 vectors, in 8.8 fixed point
 * format, for the low and high components.
 * We'd prefer to pass these by value, but an MSVC limitation forces us to
 * pass them as pointers, since it complains if more than 3 __m128 arguments
 * are passed by value.
 *
 * This uses ws_lo, ws_hi to interpolate between src0 and src1, as well as to
 * interpolate between src2 and src3, then uses wt_lo and wt_hi to interpolate
 * between the resulting vectors.
 */
static ALWAYS_INLINE __m128i
util_sse2_lerp_2d_epi8_fixed88(__m128i src0, __m128i src1,
                               const __m128i * restrict src2,
                               const __m128i * restrict src3,
                               const __m128i * restrict ws_lo,
                               const __m128i * restrict ws_hi,
                               const __m128i * restrict wt_lo,
                               const __m128i * restrict wt_hi)
{
   const __m128i zero = _mm_setzero_si128();

   __m128i src0_lo = _mm_unpacklo_epi8(src0, zero);
   __m128i src0_hi = _mm_unpackhi_epi8(src0, zero);

   __m128i src1_lo = _mm_unpacklo_epi8(src1, zero);
   __m128i src1_hi = _mm_unpackhi_epi8(src1, zero);

   __m128i src2_lo = _mm_unpacklo_epi8(*src2, zero);
   __m128i src2_hi = _mm_unpackhi_epi8(*src2, zero);

   __m128i src3_lo = _mm_unpacklo_epi8(*src3, zero);
   __m128i src3_hi = _mm_unpackhi_epi8(*src3, zero);

   __m128i dst_lo, dst01_lo, dst23_lo;
   __m128i dst_hi, dst01_hi, dst23_hi;

   dst01_lo = util_sse2_lerp_epi16(*ws_lo, src0_lo, src1_lo);
   dst01_hi = util_sse2_lerp_epi16(*ws_hi, src0_hi, src1_hi);
   dst23_lo = util_sse2_lerp_epi16(*ws_lo, src2_lo, src3_lo);
   dst23_hi = util_sse2_lerp_epi16(*ws_hi, src2_hi, src3_hi);

   dst_lo = util_sse2_lerp_epi16(*wt_lo, dst01_lo, dst23_lo);
   dst_hi = util_sse2_lerp_epi16(*wt_hi, dst01_hi, dst23_hi);

   return _mm_packus_epi16(dst_lo, dst_hi);
}
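
/*
 * Per-channel scalar outline (disabled) of the bilinear blend above: two
 * lerps along s followed by one lerp along t, reusing the scalar lerp sketch
 * shown earlier in this file. Illustrative only; names are hypothetical.
 */
#if 0
static inline ubyte
example_lerp_2d_scalar(ushort ws, ushort wt,
                       ubyte s0, ubyte s1, ubyte s2, ubyte s3)
{
   ubyte d01 = example_lerp_u8_scalar(ws, s0, s1);   /* lerp src0/src1 along s */
   ubyte d23 = example_lerp_u8_scalar(ws, s2, s3);   /* lerp src2/src3 along s */
   return example_lerp_u8_scalar(wt, d01, d23);      /* lerp the results along t */
}
#endif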

/**
 * Stretch a row of pixels using a linear filter.
 *
 * Uses Bresenham's line algorithm with a 16.16 fixed point representation
 * for the error term.
 *
 * @param dst_width destination width in pixels
 * @param src_x start x0 in 16.16 fixed point format
 * @param src_xstep step in 16.16 fixed point format
 *
 * @return final src_x value (i.e., src_x + dst_width*src_xstep)
 */
static ALWAYS_INLINE int32_t
util_sse2_stretch_row_8unorm(__m128i * restrict dst,
                             int32_t dst_width,
                             const uint32_t * restrict src,
                             int32_t src_x,
                             int32_t src_xstep)
{
   int16_t error0, error1, error2, error3;
   __m128i error_lo, error_hi, error_step;

   assert(dst_width >= 0);
   assert(dst_width % 4 == 0);

   error0 = src_x;
   error1 = error0 + src_xstep;
   error2 = error1 + src_xstep;
   error3 = error2 + src_xstep;

   error_lo   = _mm_setr_epi16(error0, error0, error0, error0,
                               error1, error1, error1, error1);
   error_hi   = _mm_setr_epi16(error2, error2, error2, error2,
                               error3, error3, error3, error3);
   error_step = _mm_set1_epi16(src_xstep << 2);

   dst_width >>= 2;
   while (dst_width) {
      uint16_t src_x0;
      uint16_t src_x1;
      uint16_t src_x2;
      uint16_t src_x3;
      __m128i src0, src1;
      __m128i weight_lo, weight_hi;

      /*
       * It is faster to re-compute the coordinates in the scalar integer unit
       * here than to fetch the values from the SIMD integer unit.
       */

      src_x0 = src_x >> 16;
      src_x += src_xstep;
      src_x1 = src_x >> 16;
      src_x += src_xstep;
      src_x2 = src_x >> 16;
      src_x += src_xstep;
      src_x3 = src_x >> 16;
      src_x += src_xstep;

      /*
       * Fetch pairs of pixels 64 bits at a time, and then swizzle them in
       * place.
       */

      {
         __m128i src_00_10 = _mm_loadl_epi64((const __m128i *)&src[src_x0]);
         __m128i src_01_11 = _mm_loadl_epi64((const __m128i *)&src[src_x1]);
         __m128i src_02_12 = _mm_loadl_epi64((const __m128i *)&src[src_x2]);
         __m128i src_03_13 = _mm_loadl_epi64((const __m128i *)&src[src_x3]);

         __m128i src_00_01_10_11 = _mm_unpacklo_epi32(src_00_10, src_01_11);
         __m128i src_02_03_12_13 = _mm_unpacklo_epi32(src_02_12, src_03_13);

         src0 = _mm_unpacklo_epi64(src_00_01_10_11, src_02_03_12_13);
         src1 = _mm_unpackhi_epi64(src_00_01_10_11, src_02_03_12_13);
      }

      weight_lo = _mm_srli_epi16(error_lo, 8);
      weight_hi = _mm_srli_epi16(error_hi, 8);

      *dst = util_sse2_lerp_epi8_fixed88(src0, src1,
                                         &weight_lo, &weight_hi);

      error_lo = _mm_add_epi16(error_lo, error_step);
      error_hi = _mm_add_epi16(error_hi, error_step);

      ++dst;
      --dst_width;
   }

   return src_x;
}
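
/*
 * Illustrative usage sketch (disabled): stretch a whole row of src_width
 * bgra8 pixels to dst_width pixels. Assumes dst is 16-byte aligned,
 * dst_width is a multiple of 4, and the source row has one extra pixel
 * readable past the end (the filter fetches pixel pairs). Names are
 * hypothetical.
 */
#if 0
static inline void
example_stretch_row(uint32_t *dst, int32_t dst_width,
                    const uint32_t *src, int32_t src_width)
{
   /* step so the whole source row maps onto the whole destination row */
   int32_t src_xstep = (int32_t)(((int64_t)src_width << 16) / dst_width);
   util_sse2_stretch_row_8unorm((__m128i *)dst, dst_width,
                                src, 0, src_xstep);
}
#endif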


#endif /* PIPE_ARCH_SSE */

#endif /* U_SSE_H_ */