/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

namespace {  // NOLINT(google-build-namespaces)

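// Unpacking against a zero register interleaves each byte with a zero byte,
// which on little-endian x86 is exactly zero-extension to 16 bits; unpacklo
// covers pixels 0-1, unpackhi pixels 2-3.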
inline Sk4px::Wide Sk4px::widen() const {
    return Sk16h(_mm_unpacklo_epi8(this->fVec, _mm_setzero_si128()),
                 _mm_unpackhi_epi8(this->fVec, _mm_setzero_si128()));
}

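// Both operands widen first: a product of two 8-bit values needs at most
// 16 bits (255*255 = 65025), so the lane-wise multiply below is exact.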
inline Sk4px::Wide Sk4px::mulWiden(const Sk16b& other) const {
    return this->widen() * Sk4px(other).widen();
}

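// addNarrowHi() adds in 16-bit space, keeps the high byte of each lane
// ((x + o) >> 8), and _mm_packus_epi16 narrows the result back to 8 bits
// with unsigned saturation.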
inline Sk4px Sk4px::Wide::addNarrowHi(const Sk16h& other) const {
    Sk4px::Wide r = (*this + other) >> 8;
    return Sk4px(_mm_packus_epi16(r.fLo.fVec, r.fHi.fVec));
}

inline Sk4px Sk4px::Wide::div255() const {
    // (x + 127) / 255 == ((x+128) * 257)>>16,
    // and _mm_mulhi_epu16 makes the (_ * 257)>>16 part very convenient.
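    //
    // Spot check: x = 0     gives (128*257)>>16   = 32896>>16    = 0, and
    //             x = 65025 gives (65153*257)>>16 = 16744321>>16 = 255,
    // so the identity holds at both ends of the 8-bit product range
    // (255*255 = 65025).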
    const __m128i _128 = _mm_set1_epi16(128),
                  _257 = _mm_set1_epi16(257);
    return Sk4px(_mm_packus_epi16(_mm_mulhi_epu16(_mm_add_epi16(fLo.fVec, _128), _257),
                                  _mm_mulhi_epu16(_mm_add_epi16(fHi.fVec, _128), _257)));
}

// Load4Alphas and Load2Alphas use possibly-unaligned loads (SkAlpha[] -> uint16_t or uint32_t).
// These are safe on x86, often with no speed penalty.
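// (The fixed-size memcpys below are the portable way to spell those loads;
// with optimizations on, compilers lower them to a single mov, not a call.)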
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
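    // _mm_shuffle_epi8 broadcasts each pixel's alpha byte (byte 3, 7, 11, or
    // 15 on little-endian) across all four bytes of that pixel.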
    inline Sk4px Sk4px::alphas() const {
        static_assert(SK_A32_SHIFT == 24, "Intel's always little-endian.");
        __m128i splat = _mm_set_epi8(15,15,15,15, 11,11,11,11, 7,7,7,7, 3,3,3,3);
        return Sk16b(_mm_shuffle_epi8(this->fVec, splat));
    }

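    // Same trick for four loose alphas: land them in bytes 0-3, then
    // broadcast byte i across pixel i's four channels.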
    inline Sk4px Sk4px::Load4Alphas(const SkAlpha a[4]) {
        uint32_t as;
        memcpy(&as, a, 4);
        __m128i splat = _mm_set_epi8(3,3,3,3, 2,2,2,2, 1,1,1,1, 0,0,0,0);
        return Sk16b(_mm_shuffle_epi8(_mm_cvtsi32_si128(as), splat));
    }
#else
    inline Sk4px Sk4px::alphas() const {
        static_assert(SK_A32_SHIFT == 24, "Intel's always little-endian.");
        // We exploit that A >= rgb for any premul pixel.
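        // (Each premul channel is c*a/255 <= a, so a byte-wise max within
        // every 32-bit pixel leaves that pixel's alpha in all four bytes.)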
        __m128i as = fVec;                             // 3xxx 2xxx 1xxx 0xxx
        as = _mm_max_epu8(as, _mm_srli_epi32(as,  8)); // 33xx 22xx 11xx 00xx
        as = _mm_max_epu8(as, _mm_srli_epi32(as, 16)); // 3333 2222 1111 0000
        return Sk16b(as);
    }

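    // Without pshufb we splat by doubling: an 8-bit unpack against itself
    // duplicates each byte, and a 16-bit unpack then duplicates each pair.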
    inline Sk4px Sk4px::Load4Alphas(const SkAlpha a[4]) {
        uint32_t a4;
        memcpy(&a4, a, 4);
        __m128i as = _mm_cvtsi32_si128(a4);  // ____ ____ ____ 3210
        as = _mm_unpacklo_epi8 (as, as);     // ____ ____ 3322 1100
        as = _mm_unpacklo_epi16(as, as);     // 3333 2222 1111 0000
        return Sk16b(as);
    }
#endif

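// Load2Alphas zero-extends the two alphas to four (Aa -> Aa00) and reuses
// Load4Alphas; the top two pixel lanes come back as zero.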
inline Sk4px Sk4px::Load2Alphas(const SkAlpha a[2]) {
    uint16_t alphas;
    memcpy(&alphas, a, 2);
    uint32_t alphas_and_two_zeros = alphas;   // Aa -> Aa00

    return Load4Alphas((const SkAlpha*)&alphas_and_two_zeros);
}

}  // namespace