• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright 2006 The Android Open Source Project
3  *
4  * Use of this source code is governed by a BSD-style license that can be
5  * found in the LICENSE file.
6  */
7 
8 #ifndef SkColorPriv_DEFINED
9 #define SkColorPriv_DEFINED
10 
11 #include "include/core/SkColor.h"
12 #include "include/core/SkScalar.h"
13 #include "include/core/SkTypes.h"
14 #include "include/private/base/SkCPUTypes.h"
15 #include "include/private/base/SkMath.h"
16 #include "include/private/base/SkTPin.h"
17 #include "include/private/base/SkTo.h"
18 
19 #include <algorithm>
20 #include <cstdint>
21 
22 /** Turn 0..255 into 0..256 by adding 1 at the half-way point. Used to turn a
23     byte into a scale value, so that we can say scale * value >> 8 instead of
24     alpha * value / 255.
25 
26     In debugging, asserts that alpha is 0..255
27 */
SkAlpha255To256(U8CPU alpha)28 static inline unsigned SkAlpha255To256(U8CPU alpha) {
29     SkASSERT(SkToU8(alpha) == alpha);
30     // this one assues that blending on top of an opaque dst keeps it that way
31     // even though it is less accurate than a+(a>>7) for non-opaque dsts
32     return alpha + 1;
33 }
34 
/** Multiply value by 0..256, and shift the result down 8
    (i.e. return (value * alpha256) >> 8)
 */
#define SkAlphaMul(value, alpha256)     (((value) * (alpha256)) >> 8)
39 
SkUnitScalarClampToByte(SkScalar x)40 static inline U8CPU SkUnitScalarClampToByte(SkScalar x) {
41     return static_cast<U8CPU>(SkTPin(x, 0.0f, 1.0f) * 255 + 0.5);
42 }
43 
44 #define SK_A32_BITS     8
45 #define SK_R32_BITS     8
46 #define SK_G32_BITS     8
47 #define SK_B32_BITS     8
48 
49 #define SK_A32_MASK     ((1 << SK_A32_BITS) - 1)
50 #define SK_R32_MASK     ((1 << SK_R32_BITS) - 1)
51 #define SK_G32_MASK     ((1 << SK_G32_BITS) - 1)
52 #define SK_B32_MASK     ((1 << SK_B32_BITS) - 1)
53 
54 /*
55  *  Skia's 32bit backend only supports 1 swizzle order at a time (compile-time).
56  *  This is specified by SK_R32_SHIFT=0 or SK_R32_SHIFT=16.
57  *
58  *  For easier compatibility with Skia's GPU backend, we further restrict these
59  *  to either (in memory-byte-order) RGBA or BGRA. Note that this "order" does
60  *  not directly correspond to the same shift-order, since we have to take endianess
61  *  into account.
62  *
63  *  Here we enforce this constraint.
64  */
65 
66 #define SK_RGBA_R32_SHIFT   0
67 #define SK_RGBA_G32_SHIFT   8
68 #define SK_RGBA_B32_SHIFT   16
69 #define SK_RGBA_A32_SHIFT   24
70 
71 #define SK_BGRA_B32_SHIFT   0
72 #define SK_BGRA_G32_SHIFT   8
73 #define SK_BGRA_R32_SHIFT   16
74 #define SK_BGRA_A32_SHIFT   24
75 
76 #if defined(SK_PMCOLOR_IS_RGBA) || defined(SK_PMCOLOR_IS_BGRA)
77     #error "Configure PMCOLOR by setting SK_R32_SHIFT."
78 #endif
79 
80 // Deduce which SK_PMCOLOR_IS_ to define from the _SHIFT defines
81 
82 #if (SK_A32_SHIFT == SK_RGBA_A32_SHIFT && \
83      SK_R32_SHIFT == SK_RGBA_R32_SHIFT && \
84      SK_G32_SHIFT == SK_RGBA_G32_SHIFT && \
85      SK_B32_SHIFT == SK_RGBA_B32_SHIFT)
86     #define SK_PMCOLOR_IS_RGBA
87 #elif (SK_A32_SHIFT == SK_BGRA_A32_SHIFT && \
88        SK_R32_SHIFT == SK_BGRA_R32_SHIFT && \
89        SK_G32_SHIFT == SK_BGRA_G32_SHIFT && \
90        SK_B32_SHIFT == SK_BGRA_B32_SHIFT)
91     #define SK_PMCOLOR_IS_BGRA
92 #else
93     #error "need 32bit packing to be either RGBA or BGRA"
94 #endif
95 
96 #define SkGetPackedA32(packed)      ((uint32_t)((packed) << (24 - SK_A32_SHIFT)) >> 24)
97 #define SkGetPackedR32(packed)      ((uint32_t)((packed) << (24 - SK_R32_SHIFT)) >> 24)
98 #define SkGetPackedG32(packed)      ((uint32_t)((packed) << (24 - SK_G32_SHIFT)) >> 24)
99 #define SkGetPackedB32(packed)      ((uint32_t)((packed) << (24 - SK_B32_SHIFT)) >> 24)
100 
101 #define SkA32Assert(a)  SkASSERT((unsigned)(a) <= SK_A32_MASK)
102 #define SkR32Assert(r)  SkASSERT((unsigned)(r) <= SK_R32_MASK)
103 #define SkG32Assert(g)  SkASSERT((unsigned)(g) <= SK_G32_MASK)
104 #define SkB32Assert(b)  SkASSERT((unsigned)(b) <= SK_B32_MASK)
105 
106 /**
107  *  Pack the components into a SkPMColor
108  */
SkPackARGB32(U8CPU a,U8CPU r,U8CPU g,U8CPU b)109 static inline SkPMColor SkPackARGB32(U8CPU a, U8CPU r, U8CPU g, U8CPU b) {
110     SkA32Assert(a);
111     SkR32Assert(r);
112     SkG32Assert(g);
113     SkB32Assert(b);
114 
115     return (a << SK_A32_SHIFT) | (r << SK_R32_SHIFT) |
116            (g << SK_G32_SHIFT) | (b << SK_B32_SHIFT);
117 }
118 
119 static inline
SkPremultiplyARGBInline(U8CPU a,U8CPU r,U8CPU g,U8CPU b)120 SkPMColor SkPremultiplyARGBInline(U8CPU a, U8CPU r, U8CPU g, U8CPU b) {
121     SkA32Assert(a);
122     SkR32Assert(r);
123     SkG32Assert(g);
124     SkB32Assert(b);
125 
126     if (a != 255) {
127         r = SkMulDiv255Round(r, a);
128         g = SkMulDiv255Round(g, a);
129         b = SkMulDiv255Round(b, a);
130     }
131     return SkPackARGB32(a, r, g, b);
132 }
133 
134 // When Android is compiled optimizing for size, SkAlphaMulQ doesn't get
135 // inlined; forcing inlining significantly improves performance.
SkAlphaMulQ(uint32_t c,unsigned scale)136 static SK_ALWAYS_INLINE uint32_t SkAlphaMulQ(uint32_t c, unsigned scale) {
137     static constexpr uint32_t kMask = 0x00FF00FF;
138 
139     uint32_t rb = ((c & kMask) * scale) >> 8;
140     uint32_t ag = ((c >> 8) & kMask) * scale;
141     return (rb & kMask) | (ag & ~kMask);
142 }
143 
// Porter-Duff src-over for premultiplied colors: src + dst * (1 - src.alpha),
// computed two 8-bit channels at a time (SIMD-in-a-register).
static inline SkPMColor SkPMSrcOver(SkPMColor src, SkPMColor dst) {
    // scale is 1..256, the "inverse alpha" multiplier for dst.
    uint32_t scale = SkAlpha255To256(255 - SkGetPackedA32(src));

    static constexpr uint32_t kMask = 0x00FF00FF;
    // Scale red/blue (even lanes) and alpha/green (odd lanes) separately;
    // each 8-bit channel times a 0..256 scale fits in its 16-bit lane.
    uint32_t rb = (((dst & kMask) * scale) >> 8) & kMask;
    uint32_t ag = (((dst >> 8) & kMask) * scale) & ~kMask;

    // Add the src channels in the matching lanes. Each lane can now hold up
    // to 9 bits (0x1FE), so neighbors are not corrupted but may overflow 0xFF.
    rb += (src &  kMask);
    ag += (src & ~kMask);

    // Color channels (but not alpha) can overflow, so we have to saturate to 0xFF in each lane.
    return std::min(rb & 0x000001FF, 0x000000FFU) |
           std::min(ag & 0x0001FF00, 0x0000FF00U) |
           std::min(rb & 0x01FF0000, 0x00FF0000U) |
                   (ag & 0xFF000000);
}
160 
161 #endif
162