/*
 * Copyright 2013 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrGeometryProcessor_DEFINED
#define GrGeometryProcessor_DEFINED

#include "src/gpu/GrColor.h"
#include "src/gpu/GrFragmentProcessor.h"
#include "src/gpu/GrProcessor.h"
#include "src/gpu/GrShaderCaps.h"
#include "src/gpu/GrShaderVar.h"
#include "src/gpu/Swizzle.h"
#include "src/gpu/glsl/GrGLSLProgramDataManager.h"
#include "src/gpu/glsl/GrGLSLUniformHandler.h"
#include "src/gpu/glsl/GrGLSLVarying.h"

#include <unordered_map>

class GrGLSLFPFragmentBuilder;
class GrGLSLVaryingHandler;
class GrGLSLUniformHandler;
class GrGLSLVertexBuilder;

/**
 * The GrGeometryProcessor represents some kind of geometric primitive. This includes the shape
 * of the primitive and the inherent color of the primitive. The GrGeometryProcessor is
 * responsible for providing a color and coverage input into the Ganesh rendering pipeline. Through
 * optimization, Ganesh may decide a different color, no color, and/or no coverage are required
 * from the GrGeometryProcessor, so the GrGeometryProcessor must be able to support this
 * functionality.
 *
 * There are two feedback loops between the GrFragmentProcessors, the GrXferProcessor, and the
 * GrGeometryProcessor. These loops run on the CPU to determine known properties of the final
 * color and coverage inputs to the GrXferProcessor in order to perform optimizations that
 * preserve correctness. The GrDrawOp seeds these loops with initial color and coverage in its
 * getProcessorAnalysisInputs implementation. These seed values are processed by the subsequent
 * stages of the rendering pipeline and the output is then fed back into the GrDrawOp in the
 * applyPipelineOptimizations call, where the op can use the information to inform decisions about
 * GrGeometryProcessor creation.
 *
 * Note that all derived classes should hide their constructors and provide a Make factory
 * function that takes an arena (except for tessellation-specific classes). This is because
 * geometry processors can be created in either the record-time or flush-time arenas, which
 * define their lifetimes (i.e., a DDL's lifetime in the first case and a single flush in
 * the second case).
 */
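//
// A minimal sketch of the arena-backed Make pattern described above. The subclass name, ClassID,
// fields, and constructor arguments below are hypothetical and are not declared in this header:
//
//     class HypotheticalGP : public GrGeometryProcessor {
//     public:
//         static GrGeometryProcessor* Make(SkArenaAlloc* arena, const SkPMColor4f& color) {
//             return arena->make([&](void* ptr) { return new (ptr) HypotheticalGP(color); });
//         }
//
//     private:
//         HypotheticalGP(const SkPMColor4f& color)
//                 : GrGeometryProcessor(kHypotheticalGP_ClassID), fColor(color) {}
//
//         SkPMColor4f fColor;
//     };
//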
class GrGeometryProcessor : public GrProcessor {
public:
    /**
     * Every GrGeometryProcessor must be capable of creating a subclass of ProgramImpl. The
     * ProgramImpl emits the shader code that implements the GrGeometryProcessor, is attached to
     * the generated backend API pipeline/program, and is used to extract uniform data from
     * GrGeometryProcessor instances.
     */
    class ProgramImpl;

    class TextureSampler;

    /** Describes a vertex or instance attribute. */
    class Attribute {
    public:
        static constexpr size_t AlignOffset(size_t offset) { return SkAlign4(offset); }

        constexpr Attribute() = default;
        /**
         * Makes an attribute whose offset will be implicitly determined by the types and ordering
         * of an array of attributes.
         */
        constexpr Attribute(const char* name,
                            GrVertexAttribType cpuType,
                            SkSLType gpuType)
                : fName(name), fCPUType(cpuType), fGPUType(gpuType) {
            SkASSERT(name && gpuType != SkSLType::kVoid);
        }
        /**
         * Makes an attribute with an explicit offset.
         */
        constexpr Attribute(const char* name,
                            GrVertexAttribType cpuType,
                            SkSLType gpuType,
                            size_t offset)
                : fName(name), fCPUType(cpuType), fGPUType(gpuType), fOffset(SkToU32(offset)) {
            SkASSERT(AlignOffset(offset) == offset);
            SkASSERT(name && gpuType != SkSLType::kVoid);
        }
        constexpr Attribute(const Attribute&) = default;

        Attribute& operator=(const Attribute&) = default;

        constexpr bool isInitialized() const { return fGPUType != SkSLType::kVoid; }

        constexpr const char* name() const { return fName; }
        constexpr GrVertexAttribType cpuType() const { return fCPUType; }
        constexpr SkSLType gpuType() const { return fGPUType; }
        /**
         * Returns the offset if attributes were specified with explicit offsets. Otherwise,
         * offsets (and total vertex stride) are implicitly determined from attribute order and
         * types.
         */
        std::optional<size_t> offset() const {
            if (fOffset != kImplicitOffset) {
                SkASSERT(AlignOffset(fOffset) == fOffset);
                return {fOffset};
            }
            return std::nullopt;
        }

        inline constexpr size_t size() const;

        GrShaderVar asShaderVar() const {
            return {fName, fGPUType, GrShaderVar::TypeModifier::In};
        }

    private:
        static constexpr uint32_t kImplicitOffset = 1;  // 1 is not valid because it isn't aligned.

        const char* fName = nullptr;
        GrVertexAttribType fCPUType = kFloat_GrVertexAttribType;
        SkSLType fGPUType = SkSLType::kVoid;
        uint32_t fOffset = kImplicitOffset;
    };
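
    // A minimal sketch of declaring attributes; the names, types, and offset below are
    // hypothetical:
    //
    //     // Offset determined implicitly by declaration order within an attribute array.
    //     static constexpr Attribute kPosition{"position",
    //                                          kFloat2_GrVertexAttribType,
    //                                          SkSLType::kFloat2};
    //     // Explicit offset; must be 4-byte aligned (see AlignOffset()).
    //     static constexpr Attribute kColor{"color",
    //                                       kUByte4_norm_GrVertexAttribType,
    //                                       SkSLType::kHalf4,
    //                                       /*offset=*/8};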

    /**
     * A set of attributes that can be iterated. The iterator hides two pieces of complexity:
     * 1) It skips uninitialized attributes.
     * 2) It always returns an attribute with a known offset.
     */
    class AttributeSet {
        class Iter {
        public:
            Iter() = default;
            Iter(const Iter& iter) = default;
            Iter& operator=(const Iter& iter) = default;

            Iter(const Attribute* attrs, int count) : fCurr(attrs), fRemaining(count) {
                this->skipUninitialized();
            }

            bool operator!=(const Iter& that) const { return fCurr != that.fCurr; }
            Attribute operator*() const;
            void operator++();

        private:
            void skipUninitialized();

            const Attribute* fCurr = nullptr;
            int fRemaining = 0;
            size_t fImplicitOffset = 0;
        };

    public:
        Iter begin() const;
        Iter end() const;

        int count() const { return fCount; }
        size_t stride() const { return fStride; }

        // Init with implicit offsets and stride. No attributes can have a predetermined offset.
        void initImplicit(const Attribute* attrs, int count);
        // Init with explicit offsets and stride. All attributes must be initialized and have
        // an explicit offset aligned to 4 bytes and with no attribute crossing stride boundaries.
        void initExplicit(const Attribute* attrs, int count, size_t stride);

        void addToKey(skgpu::KeyBuilder* b) const;

    private:
        const Attribute* fAttributes = nullptr;
        int fRawCount = 0;
        int fCount = 0;
        size_t fStride = 0;
    };

    GrGeometryProcessor(ClassID);

    int numTextureSamplers() const { return fTextureSamplerCnt; }
    const TextureSampler& textureSampler(int index) const;
    int numVertexAttributes() const { return fVertexAttributes.count(); }
    const AttributeSet& vertexAttributes() const { return fVertexAttributes; }
    int numInstanceAttributes() const { return fInstanceAttributes.count(); }
    const AttributeSet& instanceAttributes() const { return fInstanceAttributes; }

    bool hasVertexAttributes() const { return SkToBool(fVertexAttributes.count()); }
    bool hasInstanceAttributes() const { return SkToBool(fInstanceAttributes.count()); }

    /**
     * A common practice is to populate the vertex/instance's memory using an implicit array of
     * structs. In this case, it is best to assert that:
     *     stride == sizeof(struct)
     */
    size_t vertexStride() const { return fVertexAttributes.stride(); }
    size_t instanceStride() const { return fInstanceAttributes.stride(); }
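
    // A minimal sketch of the struct-per-vertex pattern described above; the struct and its
    // fields are hypothetical:
    //
    //     struct Vertex {
    //         SkPoint  fPosition;  // backs a kFloat2_GrVertexAttribType attribute
    //         uint32_t fColor;     // backs a kUByte4_norm_GrVertexAttribType attribute
    //     };
    //     SkASSERT(geomProc.vertexStride() == sizeof(Vertex));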

    bool willUseTessellationShaders() const {
        return fShaders & (kTessControl_GrShaderFlag | kTessEvaluation_GrShaderFlag);
    }

    /**
     * Computes a key for the transforms owned by an FP based on the shader code that will be
     * emitted by the primitive processor to implement them.
     */
    static uint32_t ComputeCoordTransformsKey(const GrFragmentProcessor& fp);

    inline static constexpr int kCoordTransformKeyBits = 4;

    /**
     * Adds a key on the skgpu::KeyBuilder that reflects any variety in the code that the
     * geometry processor subclass can emit.
     */
    virtual void addToKey(const GrShaderCaps&, skgpu::KeyBuilder*) const = 0;

    void getAttributeKey(skgpu::KeyBuilder* b) const;

    /**
     * Returns a new instance of the appropriate implementation class for the given
     * GrGeometryProcessor.
     */
    virtual std::unique_ptr<ProgramImpl> makeProgramImpl(const GrShaderCaps&) const = 0;

protected:
    // GPs that need to use either float or ubyte colors can just call this to get a correctly
    // configured Attribute struct
    static Attribute MakeColorAttribute(const char* name, bool wideColor) {
        return { name,
                 wideColor ? kFloat4_GrVertexAttribType : kUByte4_norm_GrVertexAttribType,
                 SkSLType::kHalf4 };
    }
    void setVertexAttributes(const Attribute* attrs, int attrCount, size_t stride) {
        fVertexAttributes.initExplicit(attrs, attrCount, stride);
    }
    void setInstanceAttributes(const Attribute* attrs, int attrCount, size_t stride) {
        SkASSERT(attrCount >= 0);
        fInstanceAttributes.initExplicit(attrs, attrCount, stride);
    }

    void setVertexAttributesWithImplicitOffsets(const Attribute* attrs, int attrCount) {
        fVertexAttributes.initImplicit(attrs, attrCount);
    }
    void setInstanceAttributesWithImplicitOffsets(const Attribute* attrs, int attrCount) {
        SkASSERT(attrCount >= 0);
        fInstanceAttributes.initImplicit(attrs, attrCount);
    }
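
    // A minimal sketch of registering attributes from a subclass constructor; the attribute
    // array, vertex layout, and stride below are hypothetical:
    //
    //     static constexpr Attribute kAttrs[] = {
    //             {"position", kFloat2_GrVertexAttribType,      SkSLType::kFloat2, 0},
    //             {"color",    kUByte4_norm_GrVertexAttribType, SkSLType::kHalf4,  8},
    //     };
    //     this->setVertexAttributes(kAttrs, std::size(kAttrs), /*stride=*/12);
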
    void setWillUseTessellationShaders() {
        fShaders |= kTessControl_GrShaderFlag | kTessEvaluation_GrShaderFlag;
    }
    void setTextureSamplerCnt(int cnt) {
        SkASSERT(cnt >= 0);
        fTextureSamplerCnt = cnt;
    }

    /**
     * Helper for implementing onTextureSampler(). E.g.:
     *     return IthTextureSampler(i, fMyFirstSampler, fMySecondSampler, fMyThirdSampler);
     */
    template <typename... Args>
    static const TextureSampler& IthTextureSampler(int i, const TextureSampler& samp0,
                                                   const Args&... samps) {
        return (0 == i) ? samp0 : IthTextureSampler(i - 1, samps...);
    }
    inline static const TextureSampler& IthTextureSampler(int i);

private:
    virtual const TextureSampler& onTextureSampler(int) const { return IthTextureSampler(0); }

    GrShaderFlags fShaders = kVertex_GrShaderFlag | kFragment_GrShaderFlag;

    AttributeSet fVertexAttributes;
    AttributeSet fInstanceAttributes;

    int fTextureSamplerCnt = 0;
    using INHERITED = GrProcessor;
};

//////////////////////////////////////////////////////////////////////////////

class GrGeometryProcessor::ProgramImpl {
public:
    using UniformHandle = GrGLSLProgramDataManager::UniformHandle;
    using SamplerHandle = GrGLSLUniformHandler::SamplerHandle;
    /**
     * Struct holding an optional varying that replaces an FP's input coords, plus a bool
     * indicating whether the FP should take a coord param as an argument. The latter may be false
     * if the coords are simply unused or if the GP has lifted their computation to a varying
     * emitted by the VS.
     */
    struct FPCoords { GrShaderVar coordsVarying; bool hasCoordsParam; };
    using FPCoordsMap = std::unordered_map<const GrFragmentProcessor*, FPCoords>;

    virtual ~ProgramImpl() = default;

    struct EmitArgs {
        EmitArgs(GrGLSLVertexBuilder* vertBuilder,
                 GrGLSLFPFragmentBuilder* fragBuilder,
                 GrGLSLVaryingHandler* varyingHandler,
                 GrGLSLUniformHandler* uniformHandler,
                 const GrShaderCaps* caps,
                 const GrGeometryProcessor& geomProc,
                 const char* outputColor,
                 const char* outputCoverage,
                 const SamplerHandle* texSamplers)
                : fVertBuilder(vertBuilder)
                , fFragBuilder(fragBuilder)
                , fVaryingHandler(varyingHandler)
                , fUniformHandler(uniformHandler)
                , fShaderCaps(caps)
                , fGeomProc(geomProc)
                , fOutputColor(outputColor)
                , fOutputCoverage(outputCoverage)
                , fTexSamplers(texSamplers) {}
        GrGLSLVertexBuilder* fVertBuilder;
        GrGLSLFPFragmentBuilder* fFragBuilder;
        GrGLSLVaryingHandler* fVaryingHandler;
        GrGLSLUniformHandler* fUniformHandler;
        const GrShaderCaps* fShaderCaps;
        const GrGeometryProcessor& fGeomProc;
        const char* fOutputColor;
        const char* fOutputCoverage;
        const SamplerHandle* fTexSamplers;
    };

    /**
     * Emits the code from this geometry processor into the shaders. For any FP in the pipeline
     * that has its input coords implemented by the GP as a varying, the varying will be
     * accessible in the returned map and should be used when the FP code is emitted. The FS
     * variable containing the GP's output local coords is also returned.
     **/
    std::tuple<FPCoordsMap, GrShaderVar> emitCode(EmitArgs&, const GrPipeline& pipeline);

    /**
     * Called after all effect emitCode() functions, to give the processor a chance to write out
     * additional transformation code now that all uniforms have been emitted.
     * It generates the final code for assigning transformed coordinates to the varyings recorded
     * in the call to collectTransforms(). This must happen after FP code emission so that it has
     * access to any uniforms the FPs registered for uniform sample matrix invocations.
     */
    void emitTransformCode(GrGLSLVertexBuilder* vb, GrGLSLUniformHandler* uniformHandler);

    /**
     * A ProgramImpl instance can be reused with any GrGeometryProcessor that produces the same
     * key. This function reads data from a GrGeometryProcessor and updates any uniform variables
     * required by the shaders created in emitCode(). The GrGeometryProcessor parameter is
     * guaranteed to be of the same type and to have an identical processor key as the
     * GrGeometryProcessor that created this ProgramImpl.
     */
    virtual void setData(const GrGLSLProgramDataManager&,
                         const GrShaderCaps&,
                         const GrGeometryProcessor&) = 0;

    // We use these methods as a temporary back door to inject OpenGL tessellation code. Once
    // tessellation is supported by SkSL we can remove these.
    virtual SkString getTessControlShaderGLSL(const GrGeometryProcessor&,
                                              const char* versionAndExtensionDecls,
                                              const GrGLSLUniformHandler&,
                                              const GrShaderCaps&) const {
        SK_ABORT("Not implemented.");
    }
    virtual SkString getTessEvaluationShaderGLSL(const GrGeometryProcessor&,
                                                 const char* versionAndExtensionDecls,
                                                 const GrGLSLUniformHandler&,
                                                 const GrShaderCaps&) const {
        SK_ABORT("Not implemented.");
    }

    // GPs that use writeOutputPosition and/or writeLocalCoord must incorporate the matrix type
    // into their key, and should use this function or one of the other related helpers.
    static uint32_t ComputeMatrixKey(const GrShaderCaps& caps, const SkMatrix& mat) {
        if (!caps.reducedShaderMode()) {
            if (mat.isIdentity()) {
                return 0b00;
            }
            if (mat.isScaleTranslate()) {
                return 0b01;
            }
        }
        if (!mat.hasPerspective()) {
            return 0b10;
        }
        return 0b11;
    }

    static uint32_t ComputeMatrixKeys(const GrShaderCaps& shaderCaps,
                                      const SkMatrix& viewMatrix,
                                      const SkMatrix& localMatrix) {
        return (ComputeMatrixKey(shaderCaps, viewMatrix) << kMatrixKeyBits) |
               ComputeMatrixKey(shaderCaps, localMatrix);
    }

    static uint32_t AddMatrixKeys(const GrShaderCaps& shaderCaps,
                                  uint32_t flags,
                                  const SkMatrix& viewMatrix,
                                  const SkMatrix& localMatrix) {
        // Shifting to make room for the matrix keys shouldn't lose bits
        SkASSERT(((flags << (2 * kMatrixKeyBits)) >> (2 * kMatrixKeyBits)) == flags);
        return (flags << (2 * kMatrixKeyBits)) |
               ComputeMatrixKeys(shaderCaps, viewMatrix, localMatrix);
    }
    inline static constexpr int kMatrixKeyBits = 2;

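    // A minimal sketch of folding matrix keys into a GP's key from its addToKey() override; the
    // flag bit and matrix fields below are hypothetical:
    //
    //     void addToKey(const GrShaderCaps& caps, skgpu::KeyBuilder* b) const override {
    //         uint32_t key = fUsesLocalCoords ? 0b1 : 0b0;
    //         key = ProgramImpl::AddMatrixKeys(caps, key, fViewMatrix, fLocalMatrix);
    //         b->add32(key);
    //     }
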
protected:
    void setupUniformColor(GrGLSLFPFragmentBuilder* fragBuilder,
                           GrGLSLUniformHandler* uniformHandler,
                           const char* outputName,
                           UniformHandle* colorUniform);

    // A helper for setting the matrix on a uniform handle initialized through
    // writeOutputPosition or writeLocalCoord. Automatically handles elided uniforms,
    // scale+translate matrices, and state tracking (if provided state pointer is non-null).
    static void SetTransform(const GrGLSLProgramDataManager&,
                             const GrShaderCaps&,
                             const UniformHandle& uniform,
                             const SkMatrix& matrix,
                             SkMatrix* state = nullptr);
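
    // A minimal sketch of a setData() override that uses SetTransform(); the subclass, its
    // accessor, and the cached-matrix field below are hypothetical:
    //
    //     void setData(const GrGLSLProgramDataManager& pdman,
    //                  const GrShaderCaps& shaderCaps,
    //                  const GrGeometryProcessor& geomProc) override {
    //         const auto& gp = geomProc.cast<HypotheticalGP>();
    //         SetTransform(pdman, shaderCaps, fViewMatrixUniform, gp.viewMatrix(), &fViewMatrix);
    //     }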

    struct GrGPArgs {
        // Used to specify the output variable used by the GP to store its device position. It can
        // either be a float2 or a float3 (in order to handle perspective). The subclass sets this
        // in its onEmitCode().
        GrShaderVar fPositionVar;
        // Used to specify the variable storing the draw's local coordinates. It can be either a
        // float2, float3, or void. It can only be void when no FP needs local coordinates. This
        // variable can be an attribute or local variable, but should not itself be a varying.
        // ProgramImpl automatically determines if this must be passed to a FS.
        GrShaderVar fLocalCoordVar;
        // The GP can specify the local coord var either in the VS or FS. When either is possible
        // the VS is preferable. It may allow derived coordinates to be interpolated from the VS
        // instead of computed in the FS per pixel.
        GrShaderType fLocalCoordShader = kVertex_GrShaderType;
    };

    // Helpers for adding code to write the transformed vertex position. The first simple version
    // just writes a variable named by 'posName' into the position output variable with the
    // assumption that the position is 2D. The second version transforms the input position by a
    // view matrix and the output variable is 2D or 3D depending on whether the view matrix is
    // perspective. Both versions declare the output position variable and will set
    // GrGPArgs::fPositionVar.
    static void WriteOutputPosition(GrGLSLVertexBuilder*, GrGPArgs*, const char* posName);
    static void WriteOutputPosition(GrGLSLVertexBuilder*,
                                    GrGLSLUniformHandler*,
                                    const GrShaderCaps&,
                                    GrGPArgs*,
                                    const char* posName,
                                    const SkMatrix& viewMatrix,
                                    UniformHandle* viewMatrixUniform);

    // Helper to transform an existing variable by a given local matrix (e.g. the inverse view
    // matrix). It will declare the transformed local coord variable and will set
    // GrGPArgs::fLocalCoordVar.
    static void WriteLocalCoord(GrGLSLVertexBuilder*,
                                GrGLSLUniformHandler*,
                                const GrShaderCaps&,
                                GrGPArgs*,
                                GrShaderVar localVar,
                                const SkMatrix& localMatrix,
                                UniformHandle* localMatrixUniform);
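
    // A minimal sketch of an onEmitCode() override that uses the helpers above; the subclass,
    // attribute accessors, and uniform-handle fields below are hypothetical:
    //
    //     void onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) override {
    //         const auto& gp = args.fGeomProc.cast<HypotheticalGP>();
    //         WriteOutputPosition(args.fVertBuilder, args.fUniformHandler, *args.fShaderCaps,
    //                             gpArgs, gp.inPosition().name(), gp.viewMatrix(),
    //                             &fViewMatrixUniform);
    //         WriteLocalCoord(args.fVertBuilder, args.fUniformHandler, *args.fShaderCaps,
    //                         gpArgs, gp.inPosition().asShaderVar(), gp.localMatrix(),
    //                         &fLocalMatrixUniform);
    //     }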

private:
    virtual void onEmitCode(EmitArgs&, GrGPArgs*) = 0;

    // Iterates over the FPs beginning with the passed iter to register additional varyings and
    // uniforms to support VS-promoted local coord evaluation for the FPs.
    //
    // This must happen before FP code emission so that the FPs can find the appropriate varying
    // handles they use in place of explicit coord sampling; it is automatically called after
    // onEmitCode() returns using the value stored in GrGPArgs::fLocalCoordVar and
    // GrGPArgs::fPositionVar.
    FPCoordsMap collectTransforms(GrGLSLVertexBuilder* vb,
                                  GrGLSLVaryingHandler* varyingHandler,
                                  GrGLSLUniformHandler* uniformHandler,
                                  GrShaderType localCoordsShader,
                                  const GrShaderVar& localCoordsVar,
                                  const GrShaderVar& positionVar,
                                  const GrPipeline& pipeline);
    struct TransformInfo {
        // The varying that conveys the coordinates to one or more FPs in the FS.
        GrGLSLVarying varying;
        // The coordinate to be transformed. varying is computed from this.
        GrShaderVar inputCoords;
        // Used to sort so that ancestor FP varyings are initialized before descendant FP varyings.
        int traversalOrder;
    };
    // Populated by collectTransforms() for use in emitTransformCode(). When we lift the
    // computation of an FP's input coord to a varying we propagate that varying up the FP tree
    // to the highest node that shares the same coordinates. This allows multiple FPs in a
    // subtree to share a varying.
    std::unordered_map<const GrFragmentProcessor*, TransformInfo> fTransformVaryingsMap;
};

///////////////////////////////////////////////////////////////////////////

/**
 * Used to capture the properties of the GrTextureProxies required/expected by a primitiveProcessor
 * along with an associated GrSamplerState. The actual proxies used are stored in either the
 * fixed or dynamic state arrays. TextureSamplers don't perform any coord manipulation to account
 * for texture origin.
 */
class GrGeometryProcessor::TextureSampler {
public:
    TextureSampler() = default;

    TextureSampler(GrSamplerState, const GrBackendFormat&, const skgpu::Swizzle&);

    TextureSampler(const TextureSampler&) = delete;
    TextureSampler& operator=(const TextureSampler&) = delete;

    void reset(GrSamplerState, const GrBackendFormat&, const skgpu::Swizzle&);

    const GrBackendFormat& backendFormat() const { return fBackendFormat; }
    GrTextureType textureType() const { return fBackendFormat.textureType(); }

    GrSamplerState samplerState() const { return fSamplerState; }
    const skgpu::Swizzle& swizzle() const { return fSwizzle; }

    bool isInitialized() const { return fIsInitialized; }

private:
    GrSamplerState fSamplerState;
    GrBackendFormat fBackendFormat;
    skgpu::Swizzle fSwizzle;
    bool fIsInitialized = false;
};

const GrGeometryProcessor::TextureSampler& GrGeometryProcessor::IthTextureSampler(int i) {
    SK_ABORT("Illegal texture sampler index");
    static const TextureSampler kBogus;
    return kBogus;
}

//////////////////////////////////////////////////////////////////////////////

/**
 * Returns the size of the attrib type in bytes.
 * This was moved from include/private/GrTypesPriv.h in service of Skia dependents that build
 * with C++11.
 */
static constexpr inline size_t GrVertexAttribTypeSize(GrVertexAttribType type) {
    switch (type) {
        case kFloat_GrVertexAttribType:
            return sizeof(float);
        case kFloat2_GrVertexAttribType:
            return 2 * sizeof(float);
        case kFloat3_GrVertexAttribType:
            return 3 * sizeof(float);
        case kFloat4_GrVertexAttribType:
            return 4 * sizeof(float);
        case kHalf_GrVertexAttribType:
            return sizeof(uint16_t);
        case kHalf2_GrVertexAttribType:
            return 2 * sizeof(uint16_t);
        case kHalf4_GrVertexAttribType:
            return 4 * sizeof(uint16_t);
        case kInt2_GrVertexAttribType:
            return 2 * sizeof(int32_t);
        case kInt3_GrVertexAttribType:
            return 3 * sizeof(int32_t);
        case kInt4_GrVertexAttribType:
            return 4 * sizeof(int32_t);
        case kByte_GrVertexAttribType:
            return 1 * sizeof(char);
        case kByte2_GrVertexAttribType:
            return 2 * sizeof(char);
        case kByte4_GrVertexAttribType:
            return 4 * sizeof(char);
        case kUByte_GrVertexAttribType:
            return 1 * sizeof(char);
        case kUByte2_GrVertexAttribType:
            return 2 * sizeof(char);
        case kUByte4_GrVertexAttribType:
            return 4 * sizeof(char);
        case kUByte_norm_GrVertexAttribType:
            return 1 * sizeof(char);
        case kUByte4_norm_GrVertexAttribType:
            return 4 * sizeof(char);
        case kShort2_GrVertexAttribType:
            return 2 * sizeof(int16_t);
        case kShort4_GrVertexAttribType:
            return 4 * sizeof(int16_t);
        case kUShort2_GrVertexAttribType:  // fall through
        case kUShort2_norm_GrVertexAttribType:
            return 2 * sizeof(uint16_t);
        case kInt_GrVertexAttribType:
            return sizeof(int32_t);
        case kUInt_GrVertexAttribType:
            return sizeof(uint32_t);
        case kUShort_norm_GrVertexAttribType:
            return sizeof(uint16_t);
        case kUShort4_norm_GrVertexAttribType:
            return 4 * sizeof(uint16_t);
    }
    // GCC fails because SK_ABORT evaluates to non constexpr. clang and cl.exe think this is
    // unreachable and don't complain.
#if defined(__clang__) || !defined(__GNUC__)
    SK_ABORT("Unsupported type conversion");
#endif
    return 0;
}

constexpr size_t GrGeometryProcessor::Attribute::size() const {
    return GrVertexAttribTypeSize(fCPUType);
}

#endif