1 /*
2 * Copyright 2017 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #ifndef SkShaderBase_DEFINED
9 #define SkShaderBase_DEFINED
10
11 #include "include/core/SkMatrix.h"
12 #include "include/core/SkPaint.h"
13 #include "include/core/SkSamplingOptions.h"
14 #include "include/core/SkShader.h"
15 #include "include/core/SkSurfaceProps.h"
16 #include "include/private/base/SkNoncopyable.h"
17 #include "src/base/SkTLazy.h"
18 #include "src/core/SkEffectPriv.h"
19 #include "src/core/SkMask.h"
20 #include "src/core/SkVM_fwd.h"
21
22 class GrFragmentProcessor;
23 struct GrFPArgs;
24 class SkArenaAlloc;
25 class SkColorSpace;
26 class SkImage;
27 struct SkImageInfo;
28 class SkPaint;
29 class SkRasterPipeline;
30 class SkRuntimeEffect;
31 class SkStageUpdater;
32 class SkUpdatableShader;
33
34 namespace skgpu::graphite {
35 class KeyContext;
36 class PaintParamsKeyBuilder;
37 class PipelineDataGatherer;
38 }
39
40 #if defined(SK_GANESH)
41 using GrFPResult = std::tuple<bool /*success*/, std::unique_ptr<GrFragmentProcessor>>;
42 #endif
43
class SkShaderBase : public SkShader {
public:
    ~SkShaderBase() override;

    // Returns a shader producing this shader's color with its alpha inverted.
    // NOTE(review): behavior inferred from the name — confirm in the .cpp.
    sk_sp<SkShader> makeInvertAlpha() const;
    // Wraps this shader with the given matrix baked in.
    sk_sp<SkShader> makeWithCTM(const SkMatrix&) const; // owns its own ctm

    /**
     * Returns true if the shader is guaranteed to produce only a single color.
     * Subclasses can override this to allow loop-hoisting optimization.
     */
    virtual bool isConstant() const { return false; }

    // Classification reported by asGradient() below.
    enum class GradientType {
        kNone,
        kColor,
        kLinear,
        kRadial,
        kSweep,
        kConical
    };

    /**
     * If the shader subclass can be represented as a gradient, asGradient
     * returns the matching GradientType enum (or GradientType::kNone if it
     * cannot). Also, if info is not null, asGradient populates info with
     * the relevant (see below) parameters for the gradient. fColorCount
     * is both an input and output parameter. On input, it indicates how
     * many entries in fColors and fColorOffsets can be used, if they are
     * non-NULL. After asGradient has run, fColorCount indicates how
     * many color-offset pairs there are in the gradient. If there is
     * insufficient space to store all of the color-offset pairs, fColors
     * and fColorOffsets will not be altered. fColorOffsets specifies
     * where on the range of 0 to 1 to transition to the given color.
     * The meaning of fPoint and fRadius is dependent on the type of gradient.
     *
     * None:
     *     info is ignored.
     * Color:
     *     fColorOffsets[0] is meaningless.
     * Linear:
     *     fPoint[0] and fPoint[1] are the end-points of the gradient
     * Radial:
     *     fPoint[0] and fRadius[0] are the center and radius
     * Conical:
     *     fPoint[0] and fRadius[0] are the center and radius of the 1st circle
     *     fPoint[1] and fRadius[1] are the center and radius of the 2nd circle
     * Sweep:
     *     fPoint[0] is the center of the sweep.
     */
    struct GradientInfo {
        int        fColorCount    = 0;       //!< In-out parameter, specifies passed size
                                             //   of fColors/fColorOffsets on input, and
                                             //   actual number of colors/offsets on
                                             //   output.
        SkColor*   fColors        = nullptr; //!< The colors in the gradient.
        SkScalar*  fColorOffsets  = nullptr; //!< The unit offset for color transitions.
        SkPoint    fPoint[2];                //!< Type specific, see above.
        SkScalar   fRadius[2];               //!< Type specific, see above.
        SkTileMode fTileMode;
        uint32_t   fGradientFlags = 0;       //!< see SkGradientShader::Flags
    };

    // Default: this shader is not representable as a gradient.
    virtual GradientType asGradient(GradientInfo* info = nullptr,
                                    SkMatrix* localMatrix = nullptr) const {
        return GradientType::kNone;
    }

    // Bitmask values returned by Context::getFlags().
    enum Flags {
        //!< set if all of the colors will be opaque
        kOpaqueAlpha_Flag = 1 << 0,

        /** set if the spans only vary in X (const in Y).
            e.g. an Nx1 bitmap that is being tiled in Y, or a linear-gradient
            that varies from left-to-right. This flag specifies this for
            shadeSpan().
        */
        kConstInY32_Flag = 1 << 1,

        /** hint for the blitter that 4f is the preferred shading mode.
        */
        kPrefers4f_Flag = 1 << 2,
    };

    /**
     * ContextRec acts as a parameter bundle for creating Contexts.
     */
    struct ContextRec {
        ContextRec(const SkPaint& paint, const SkMatrix& matrix, const SkMatrix* localM,
                   SkColorType dstColorType, SkColorSpace* dstColorSpace, SkSurfaceProps props)
                : fMatrix(&matrix)
                , fLocalMatrix(localM)
                , fDstColorType(dstColorType)
                , fDstColorSpace(dstColorSpace)
                , fProps(props) {
            // Only the paint's alpha and dither setting are needed by shader contexts,
            // so they are copied out rather than retaining the whole paint.
            fPaintAlpha = paint.getAlpha();
            fPaintDither = paint.isDither();
        }

        const SkMatrix* fMatrix;        // the current matrix in the canvas
        const SkMatrix* fLocalMatrix;   // optional local matrix
        SkColorType     fDstColorType;  // the color type of the dest surface
        SkColorSpace*   fDstColorSpace; // the color space of the dest surface (if any)
        SkSurfaceProps  fProps;         // props of the dest surface
        SkAlpha         fPaintAlpha;
        bool            fPaintDither;

        bool isLegacyCompatible(SkColorSpace* shadersColorSpace) const;
    };

    // Per-draw shading state for the legacy (non-pipeline) raster path.
    class Context : public ::SkNoncopyable {
    public:
        Context(const SkShaderBase& shader, const ContextRec&);

        virtual ~Context();

        /**
         * Called sometimes before drawing with this shader. Return the type of
         * alpha your shader will return. The default implementation returns 0.
         * Your subclass should override if it can (even sometimes) report a
         * non-zero value, since that will enable various blitters to perform
         * faster.
         */
        virtual uint32_t getFlags() const { return 0; }

        /**
         * Called for each span of the object being drawn. Your subclass should
         * set the appropriate colors (with premultiplied alpha) that correspond
         * to the specified device coordinates.
         */
        virtual void shadeSpan(int x, int y, SkPMColor[], int count) = 0;

    protected:
        // Reference to shader, so we don't have to dupe information.
        const SkShaderBase& fShader;

        uint8_t         getPaintAlpha() const { return fPaintAlpha; }
        const SkMatrix& getTotalInverse() const { return fTotalInverse; }
        const SkMatrix& getCTM() const { return fCTM; }

    private:
        SkMatrix fCTM;
        SkMatrix fTotalInverse;
        uint8_t  fPaintAlpha;

        using INHERITED = SkNoncopyable;
    };

    /**
     * This is used to accumulate matrices, starting with the CTM, when building up
     * SkRasterPipeline, SkVM, and GrFragmentProcessor by walking the SkShader tree. It avoids
     * adding a matrix multiply for each individual matrix. It also handles the reverse matrix
     * concatenation order required by Android Framework, see b/256873449.
     *
     * This also tracks the dubious concept of a "total matrix", which includes all the matrices
     * encountered during traversal to the current shader, including ones that have already been
     * applied. The total matrix represents the transformation from the current shader's coordinate
     * space to device space. It is dubious because it doesn't account for SkShaders that manipulate
     * the coordinates passed to their children, which may not even be representable by a matrix.
     *
     * The total matrix is used for mipmap level selection and a filter downgrade optimizations in
     * SkImageShader and sizing of the SkImage created by SkPictureShader. If we can remove usages
     * of the "total matrix" and if Android Framework could be updated to not use backwards local
     * matrix concatenation this could just be replaced by a simple SkMatrix or SkM44 passed down
     * during traversal.
     */
    class MatrixRec {
    public:
        MatrixRec() = default;

        explicit MatrixRec(const SkMatrix& ctm);

        /**
         * Returns a new MatrixRec that represents the existing total and pending matrix
         * pre-concat'ed with m.
         */
        MatrixRec SK_WARN_UNUSED_RESULT concat(const SkMatrix& m) const;

        /**
         * Appends a mul by the inverse of the pending local matrix to the pipeline. 'postInv' is an
         * additional matrix to post-apply to the inverted pending matrix. If the pending matrix is
         * not invertible the std::optional result won't have a value and the pipeline will be
         * unmodified.
         */
        std::optional<MatrixRec> SK_WARN_UNUSED_RESULT apply(const SkStageRec& rec,
                                                             const SkMatrix& postInv = {}) const;

        /**
         * Muls local by the inverse of the pending matrix. 'postInv' is an additional matrix to
         * post-apply to the inverted pending matrix. If the pending matrix is not invertible the
         * std::optional result won't have a value and the Builder will be unmodified.
         */
        std::optional<MatrixRec> SK_WARN_UNUSED_RESULT apply(skvm::Builder*,
                                                             skvm::Coord* local,  // inout
                                                             skvm::Uniforms*,
                                                             const SkMatrix& postInv = {}) const;

#if defined(SK_GANESH)
        /**
         * Produces an FP that muls its input coords by the inverse of the pending matrix and then
         * samples the passed FP with those coordinates. 'postInv' is an additional matrix to
         * post-apply to the inverted pending matrix. If the pending matrix is not invertible the
         * GrFPResult's bool will be false and the passed FP will be returned to the caller in the
         * GrFPResult.
         */
        GrFPResult SK_WARN_UNUSED_RESULT apply(std::unique_ptr<GrFragmentProcessor>,
                                               const SkMatrix& postInv = {}) const;
        /**
         * A parent FP may need to create a FP for its child by calling
         * SkShaderBase::asFragmentProcessor() and then pass the result to the apply() above.
         * This comes up when the parent needs to ensure pending matrices are applied before the
         * child because the parent is going to manipulate the coordinates *after* any pending
         * matrix and pass the resulting coords to the child. This function gets a MatrixRec that
         * reflects the state after this MatrixRec has been applied but it does not apply it!
         * Example:
         *   auto childFP = fChild->asFragmentProcessor(args, mrec.applied());
         *   childFP = MakeAWrappingFPThatModifiesChildsCoords(std::move(childFP));
         *   auto [success, parentFP] = mrec.apply(std::move(childFP));
         */
        MatrixRec applied() const;
#endif

        /** Call to indicate that the mapping from shader to device space is not known. */
        void markTotalMatrixInvalid() { fTotalMatrixIsValid = false; }

        /** Marks the CTM as already applied; can avoid re-seeding the shader unnecessarily. */
        void markCTMApplied() { fCTMApplied = true; }

        /**
         * Indicates whether the total matrix of a MatrixRec passed to a SkShader actually
         * represents the full transform between that shader's coordinate space and device space.
         */
        bool totalMatrixIsValid() const { return fTotalMatrixIsValid; }

        /**
         * Gets the total transform from the current shader's space to device space. This may or
         * may not be valid. Shaders should avoid making decisions based on this matrix if
         * totalMatrixIsValid() is false.
         */
        SkMatrix totalMatrix() const { return SkMatrix::Concat(fCTM, fTotalLocalMatrix); }

        /** Gets the inverse of totalMatrix(), if invertible. */
        bool SK_WARN_UNUSED_RESULT totalInverse(SkMatrix* out) const {
            return this->totalMatrix().invert(out);
        }

        /** Is there a transform that has not yet been applied by a parent shader? */
        bool hasPendingMatrix() const {
            // The CTM only counts as pending if it hasn't already been baked into the pipeline.
            return (!fCTMApplied && !fCTM.isIdentity()) || !fPendingLocalMatrix.isIdentity();
        }

        /** When generating raster pipeline, have the device coordinates been seeded? */
        bool rasterPipelineCoordsAreSeeded() const { return fCTMApplied; }

    private:
        MatrixRec(const SkMatrix& ctm,
                  const SkMatrix& totalLocalMatrix,
                  const SkMatrix& pendingLocalMatrix,
                  bool totalIsValid,
                  bool ctmApplied)
                : fCTM(ctm)
                , fTotalLocalMatrix(totalLocalMatrix)
                , fPendingLocalMatrix(pendingLocalMatrix)
                , fTotalMatrixIsValid(totalIsValid)
                , fCTMApplied(ctmApplied) {}

        const SkMatrix fCTM;

        // Concatenation of all local matrices, including those already applied.
        const SkMatrix fTotalLocalMatrix;

        // The accumulated local matrices from walking down the shader hierarchy that have NOT yet
        // been incorporated into the SkRasterPipeline.
        const SkMatrix fPendingLocalMatrix;

        bool fTotalMatrixIsValid = true;

        // Tracks whether the CTM has already been applied (and in raster pipeline whether the
        // device coords have been seeded.)
        bool fCTMApplied = false;
    };

    /**
     * Make a context using the memory provided by the arena.
     *
     * @return pointer to context or nullptr if can't be created
     */
    Context* makeContext(const ContextRec&, SkArenaAlloc*) const;

#if defined(SK_GANESH)
    /**
     * Call on the root SkShader to produce a GrFragmentProcessor.
     *
     * The returned GrFragmentProcessor expects an unpremultiplied input color and produces a
     * premultiplied output.
     */
    std::unique_ptr<GrFragmentProcessor> asRootFragmentProcessor(const GrFPArgs&,
                                                                 const SkMatrix& ctm) const;
    /**
     * Virtualized implementation of above. Any pending matrix in the MatrixRec should be applied
     * to the coords if the SkShader uses its coordinates. This can be done by calling
     * MatrixRec::apply() to wrap a GrFragmentProcessor in a GrMatrixEffect.
     */
    virtual std::unique_ptr<GrFragmentProcessor> asFragmentProcessor(const GrFPArgs&,
                                                                     const MatrixRec&) const;
#endif

    /**
     * If the shader can represent its "average" luminance in a single color, return true and
     * if color is not NULL, return that color. If it cannot, return false and ignore the color
     * parameter.
     *
     * Note: if this returns true, the returned color will always be opaque, as only the RGB
     * components are used to compute luminance.
     */
    bool asLuminanceColor(SkColor*) const;

    /**
     * If this returns false, then we draw nothing (do not fall back to shader context). This should
     * only be called on a root-level effect. It assumes that the initial device coordinates have
     * not yet been seeded.
     */
    SK_WARN_UNUSED_RESULT
    bool appendRootStages(const SkStageRec& rec, const SkMatrix& ctm) const;

    /**
     * Adds stages to implement this shader. To ensure that the correct input coords are present
     * in r,g MatrixRec::apply() must be called (unless the shader doesn't require its input
     * coords). The default impl creates a shader context and calls that (not very efficient).
     */
    virtual bool appendStages(const SkStageRec&, const MatrixRec&) const;

    // Computes the inverse of the combined ctm/localMatrix transform into totalInverse.
    // Returns false if the combined matrix is not invertible.
    // NOTE(review): exact combination order is in the .cpp — confirm there.
    bool SK_WARN_UNUSED_RESULT computeTotalInverse(const SkMatrix& ctm,
                                                   const SkMatrix* localMatrix,
                                                   SkMatrix* totalInverse) const;

    // Subclass hook: return the backing image (and optionally its matrix/tile modes) if this
    // shader draws an image. The default reports "not an image".
    virtual SkImage* onIsAImage(SkMatrix*, SkTileMode[2]) const {
        return nullptr;
    }

    // Returns the SkRuntimeEffect backing this shader, or nullptr if it isn't effect-based.
    virtual SkRuntimeEffect* asRuntimeEffect() const { return nullptr; }

    // SkFlattenable (serialization) plumbing.
    static Type GetFlattenableType() { return kSkShader_Type; }
    Type getFlattenableType() const override { return GetFlattenableType(); }

    // Reconstructs a shader previously written via flatten(); returns nullptr on failure.
    static sk_sp<SkShaderBase> Deserialize(const void* data, size_t size,
                                           const SkDeserialProcs* procs = nullptr) {
        return sk_sp<SkShaderBase>(static_cast<SkShaderBase*>(
                SkFlattenable::Deserialize(GetFlattenableType(), data, size, procs).release()));
    }
    static void RegisterFlattenables();

    /** DEPRECATED. skbug.com/8941
     *  If this shader can be represented by another shader + a localMatrix, return that shader and
     *  the localMatrix. If not, return nullptr and ignore the localMatrix parameter.
     */
    virtual sk_sp<SkShader> makeAsALocalMatrixShader(SkMatrix* localMatrix) const;

    /**
     * Called at the root of a shader tree to build a VM that produces color. The device coords
     * should be initialized to the centers of device space pixels being shaded and the inverse of
     * ctm should be the transform of those coords to local space.
     */
    SK_WARN_UNUSED_RESULT
    skvm::Color rootProgram(skvm::Builder*,
                            skvm::Coord device,
                            skvm::Color paint,
                            const SkMatrix& ctm,
                            const SkColorInfo& dst,
                            skvm::Uniforms* uniforms,
                            SkArenaAlloc* alloc) const;

    /**
     * Virtualized implementation of above. A note on the local coords param: it must be transformed
     * by the inverse of the "pending" matrix in MatrixRec to be put in the correct space for this
     * shader. This is done by calling MatrixRec::apply().
     */
    virtual skvm::Color program(skvm::Builder*,
                                skvm::Coord device,
                                skvm::Coord local,
                                skvm::Color paint,
                                const MatrixRec&,
                                const SkColorInfo& dst,
                                skvm::Uniforms*,
                                SkArenaAlloc*) const = 0;

#if defined(SK_GRAPHITE)
    /**
        Add implementation details, for the specified backend, of this SkShader to the
        provided key.

        @param keyContext backend context for key creation
        @param builder    builder for creating the key for this SkShader
        @param gatherer   if non-null, storage for this shader's data
    */
    virtual void addToKey(const skgpu::graphite::KeyContext& keyContext,
                          skgpu::graphite::PaintParamsKeyBuilder* builder,
                          skgpu::graphite::PipelineDataGatherer* gatherer) const;
#endif

    // Combines a parent and child local matrix. Android Framework requires the reversed
    // (child-then-parent) concatenation order; see b/256873449.
    static SkMatrix ConcatLocalMatrices(const SkMatrix& parentLM, const SkMatrix& childLM) {
#if defined(SK_BUILD_FOR_ANDROID_FRAMEWORK)  // b/256873449
        return SkMatrix::Concat(childLM, parentLM);
#endif
        return SkMatrix::Concat(parentLM, childLM);
    }

protected:
    SkShaderBase();

    // Serializes this shader's data; the inverse of Deserialize() above.
    void flatten(SkWriteBuffer&) const override;

#ifdef SK_ENABLE_LEGACY_SHADERCONTEXT
    /**
     * Specialize creating a SkShader context using the supplied allocator.
     * @return pointer to context owned by the arena allocator.
     */
    virtual Context* onMakeContext(const ContextRec&, SkArenaAlloc*) const {
        return nullptr;
    }
#endif

    // Subclass hook for asLuminanceColor(); default reports no single luminance color.
    virtual bool onAsLuminanceColor(SkColor*) const {
        return false;
    }

protected:
    // skvm helper: emits ops that transform a coordinate by the given matrix
    // (uniforms hold the matrix values). NOTE(review): semantics from the name
    // and parameter types — confirm against the implementation.
    static skvm::Coord ApplyMatrix(skvm::Builder*, const SkMatrix&, skvm::Coord, skvm::Uniforms*);

    using INHERITED = SkShader;
};
// Downcasts a mutable SkShader* to the private SkShaderBase interface. The cast is
// unchecked; it relies on every concrete SkShader being derived from SkShaderBase.
inline SkShaderBase* as_SB(SkShader* shader) {
    return static_cast<SkShaderBase*>(shader);
}
478
// Const overload of as_SB(): downcasts a const SkShader* to the private SkShaderBase
// interface via an unchecked static_cast.
inline const SkShaderBase* as_SB(const SkShader* shader) {
    return static_cast<const SkShaderBase*>(shader);
}
482
as_SB(const sk_sp<SkShader> & shader)483 inline const SkShaderBase* as_SB(const sk_sp<SkShader>& shader) {
484 return static_cast<SkShaderBase*>(shader.get());
485 }
486
487 void SkRegisterColor4ShaderFlattenable();
488 void SkRegisterColorShaderFlattenable();
489 void SkRegisterComposeShaderFlattenable();
490 void SkRegisterCoordClampShaderFlattenable();
491 void SkRegisterEmptyShaderFlattenable();
492
493 #endif // SkShaderBase_DEFINED
494