#include "SkBlitRow_opts_SSE4.h"

// Some compilers can't compile SSSE3 or SSE4 intrinsics. We give them stub methods.
// The stubs should never be called, so we make them crash just to confirm that.
#if SK_CPU_SSE_LEVEL < SK_CPU_SSE_LEVEL_SSE41
void S32A_Opaque_BlitRow32_SSE4(SkPMColor* SK_RESTRICT, const SkPMColor* SK_RESTRICT, int, U8CPU) {
    sk_throw();
}

#else

#include <smmintrin.h> // SSE4.1 intrinsics
#include "SkColorPriv.h"
#include "SkColor_opts_SSE2.h"

void S32A_Opaque_BlitRow32_SSE4(SkPMColor* SK_RESTRICT dst,
                                const SkPMColor* SK_RESTRICT src,
                                int count,
                                U8CPU alpha) {
    SkASSERT(alpha == 255);
    // As long as we can, we'll work on 16 pixels at a time.
    int count16 = count / 16;
    __m128i* dst4 = (__m128i*)dst;
    const __m128i* src4 = (const __m128i*)src;

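    // Each __m128i holds four 32-bit pixels, so stepping i by 4 registers covers
    // 16 pixels per iteration. The unaligned loadu/storeu variants are used
    // because dst and src aren't assumed to be 16-byte aligned.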
    for (int i = 0; i < count16 * 4; i += 4) {
        // Load 16 source pixels.
        __m128i s0 = _mm_loadu_si128(src4+i+0),
                s1 = _mm_loadu_si128(src4+i+1),
                s2 = _mm_loadu_si128(src4+i+2),
                s3 = _mm_loadu_si128(src4+i+3);

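        // alphaMask selects the alpha byte of each of the four pixels in a register.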
        const __m128i alphaMask = _mm_set1_epi32(0xFF << SK_A32_SHIFT);
        const __m128i ORed = _mm_or_si128(s3, _mm_or_si128(s2, _mm_or_si128(s1, s0)));
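        // _mm_testz_si128(a, b) returns 1 iff (a & b) == 0, i.e. no pixel in this
        // batch has any alpha bits set.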
        if (_mm_testz_si128(ORed, alphaMask)) {
            // All 16 source pixels are fully transparent. There's nothing to do!
            continue;
        }
        const __m128i ANDed = _mm_and_si128(s3, _mm_and_si128(s2, _mm_and_si128(s1, s0)));
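        // _mm_testc_si128(a, b) returns 1 iff (~a & b) == 0, i.e. every bit set in
        // b is also set in a: here, every pixel's alpha byte is 0xFF.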
        if (_mm_testc_si128(ANDed, alphaMask)) {
            // All 16 source pixels are fully opaque. There's no need to read dst or blend it.
            _mm_storeu_si128(dst4+i+0, s0);
            _mm_storeu_si128(dst4+i+1, s1);
            _mm_storeu_si128(dst4+i+2, s2);
            _mm_storeu_si128(dst4+i+3, s3);
            continue;
        }
        // The general slow case: do the blend for all 16 pixels.
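        // SkPMSrcOver_SSE2 applies the premultiplied src-over equation,
        // dst = src + dst * (1 - srcAlpha), to four pixels per register.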
        _mm_storeu_si128(dst4+i+0, SkPMSrcOver_SSE2(s0, _mm_loadu_si128(dst4+i+0)));
        _mm_storeu_si128(dst4+i+1, SkPMSrcOver_SSE2(s1, _mm_loadu_si128(dst4+i+1)));
        _mm_storeu_si128(dst4+i+2, SkPMSrcOver_SSE2(s2, _mm_loadu_si128(dst4+i+2)));
        _mm_storeu_si128(dst4+i+3, SkPMSrcOver_SSE2(s3, _mm_loadu_si128(dst4+i+3)));
    }

    // Wrap up the last <= 15 pixels.
    for (int i = count16*16; i < count; i++) {
        // This check is not strictly necessary, but it prevents pointless autovectorization.
        if (src[i] & (0xFF << SK_A32_SHIFT)) {
            dst[i] = SkPMSrcOver(src[i], dst[i]);
        }
    }
}

#endif