1 /*
2 * Copyright 2014 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "src/gpu/GrGeometryProcessor.h"
9
10 #include "src/core/SkMatrixPriv.h"
11 #include "src/gpu/GrPipeline.h"
12 #include "src/gpu/KeyBuilder.h"
13 #include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
14 #include "src/gpu/glsl/GrGLSLProgramBuilder.h"
15 #include "src/gpu/glsl/GrGLSLUniformHandler.h"
16 #include "src/gpu/glsl/GrGLSLVarying.h"
17
18 #include <queue>
19
GrGeometryProcessor(ClassID classID)20 GrGeometryProcessor::GrGeometryProcessor(ClassID classID) : GrProcessor(classID) {}
21
textureSampler(int i) const22 const GrGeometryProcessor::TextureSampler& GrGeometryProcessor::textureSampler(int i) const {
23 SkASSERT(i >= 0 && i < this->numTextureSamplers());
24 return this->onTextureSampler(i);
25 }
26
ComputeCoordTransformsKey(const GrFragmentProcessor & fp)27 uint32_t GrGeometryProcessor::ComputeCoordTransformsKey(const GrFragmentProcessor& fp) {
28 // This is highly coupled with the code in ProgramImpl::collectTransforms().
29 uint32_t key = static_cast<uint32_t>(fp.sampleUsage().kind()) << 1;
30 // This needs to be updated if GP starts specializing varyings on additional matrix types.
31 if (fp.sampleUsage().hasPerspective()) {
32 key |= 0b1;
33 }
34 return key;
35 }
36
// Appends the vertex- and instance-attribute layouts to the program key so that
// geometry processors with different attribute sets produce different keys.
void GrGeometryProcessor::getAttributeKey(skgpu::KeyBuilder* b) const {
    b->appendComment("vertex attributes");
    fVertexAttributes.addToKey(b);
    b->appendComment("instance attributes");
    fInstanceAttributes.addToKey(b);
}
43
44 ///////////////////////////////////////////////////////////////////////////////////////////////////
45
clamp_filter(GrTextureType type,GrSamplerState::Filter requestedFilter)46 static inline GrSamplerState::Filter clamp_filter(GrTextureType type,
47 GrSamplerState::Filter requestedFilter) {
48 if (GrTextureTypeHasRestrictedSampling(type)) {
49 return std::min(requestedFilter, GrSamplerState::Filter::kLinear);
50 }
51 return requestedFilter;
52 }
53
// Convenience constructor: forwards to reset() so construction and
// re-initialization share a single implementation.
GrGeometryProcessor::TextureSampler::TextureSampler(GrSamplerState samplerState,
                                                    const GrBackendFormat& backendFormat,
                                                    const skgpu::Swizzle& swizzle) {
    this->reset(samplerState, backendFormat, swizzle);
}
59
// (Re)initializes the sampler, clamping the requested filter mode to what the
// backend format's texture type supports, and marks the sampler initialized.
void GrGeometryProcessor::TextureSampler::reset(GrSamplerState samplerState,
                                                const GrBackendFormat& backendFormat,
                                                const skgpu::Swizzle& swizzle) {
    fSamplerState = samplerState;
    // Overwrite the filter with a version clamped for this texture type.
    fSamplerState.setFilterMode(clamp_filter(backendFormat.textureType(), samplerState.filter()));
    fBackendFormat = backendFormat;
    fSwizzle = swizzle;
    fIsInitialized = true;
}
69
70 //////////////////////////////////////////////////////////////////////////////
71
72 using ProgramImpl = GrGeometryProcessor::ProgramImpl;
73
// Emits the geometry processor's shader code. Calls the subclass's onEmitCode()
// to fill in GrGPArgs, lifts fragment-processor coord transforms into the
// vertex shader via collectTransforms(), and (for non-tessellation GPs) writes
// the final normalized device position. Returns the per-FP coords map and the
// GP's local-coord variable.
std::tuple<ProgramImpl::FPCoordsMap, GrShaderVar>
ProgramImpl::emitCode(EmitArgs& args, const GrPipeline& pipeline) {
    GrGPArgs gpArgs;
    this->onEmitCode(args, &gpArgs);

    GrShaderVar positionVar = gpArgs.fPositionVar;
    // skia:12198 -- hide the position from transform collection when
    // tessellation is in use (see tessellation note below).
    if (args.fGeomProc.willUseTessellationShaders()) {
        positionVar = {};
    }
    FPCoordsMap transformMap = this->collectTransforms(args.fVertBuilder,
                                                       args.fVaryingHandler,
                                                       args.fUniformHandler,
                                                       gpArgs.fLocalCoordShader,
                                                       gpArgs.fLocalCoordVar,
                                                       positionVar,
                                                       pipeline);

    // Tessellation shaders are temporarily responsible for integrating their own code strings
    // while we work out full support.
    if (!args.fGeomProc.willUseTessellationShaders()) {
        GrGLSLVertexBuilder* vBuilder = args.fVertBuilder;
        // Emit the vertex position to the hardware in the normalized window coordinates it expects.
        SkASSERT(SkSLType::kFloat2 == gpArgs.fPositionVar.getType() ||
                 SkSLType::kFloat3 == gpArgs.fPositionVar.getType());
        vBuilder->emitNormalizedSkPosition(gpArgs.fPositionVar.c_str(),
                                           gpArgs.fPositionVar.getType());
        if (SkSLType::kFloat2 == gpArgs.fPositionVar.getType()) {
            // A float2 position cannot introduce perspective into varyings.
            args.fVaryingHandler->setNoPerspective();
        }
    }
    return {transformMap, gpArgs.fLocalCoordVar};
}
107
// Walks the pipeline's fragment-processor trees and, for each FP whose coords
// are a chain of uniform matrices applied to the GP's local coords (or device
// position), creates a vertex-shader varying carrying the transformed coords.
// Populates fTransformVaryingsMap (consumed later by emitTransformCode()) and
// returns a map from FP to the fragment-shader variable holding its coords
// plus whether the FP still needs an explicit coords parameter.
ProgramImpl::FPCoordsMap ProgramImpl::collectTransforms(GrGLSLVertexBuilder* vb,
                                                        GrGLSLVaryingHandler* varyingHandler,
                                                        GrGLSLUniformHandler* uniformHandler,
                                                        GrShaderType localCoordsShader,
                                                        const GrShaderVar& localCoordsVar,
                                                        const GrShaderVar& positionVar,
                                                        const GrPipeline& pipeline) {
    // kVoid means "not provided" for either variable.
    SkASSERT(localCoordsVar.getType() == SkSLType::kFloat2 ||
             localCoordsVar.getType() == SkSLType::kFloat3 ||
             localCoordsVar.getType() == SkSLType::kVoid);
    SkASSERT(positionVar.getType() == SkSLType::kFloat2 ||
             positionVar.getType() == SkSLType::kFloat3 ||
             positionVar.getType() == SkSLType::kVoid);

    enum class BaseCoord { kNone, kLocal, kPosition };

    // Lazily creates (at most once) a varying carrying the untransformed local
    // coords from the vertex shader, returning its fragment-shader variable.
    // If the GP already computes local coords in the fragment shader, that
    // variable is used directly.
    auto baseLocalCoordFSVar = [&, baseLocalCoordVarying = GrGLSLVarying()]() mutable {
        if (localCoordsShader == kFragment_GrShaderType) {
            return localCoordsVar;
        }
        SkASSERT(localCoordsShader == kVertex_GrShaderType);
        SkASSERT(SkSLTypeIsFloatType(localCoordsVar.getType()));
        if (baseLocalCoordVarying.type() == SkSLType::kVoid) {
            // Initialize to the GP provided coordinate
            baseLocalCoordVarying = GrGLSLVarying(localCoordsVar.getType());
            varyingHandler->addVarying("LocalCoord", &baseLocalCoordVarying);
            vb->codeAppendf("%s = %s;\n",
                            baseLocalCoordVarying.vsOut(),
                            localCoordsVar.getName().c_str());
        }
        return baseLocalCoordVarying.fsInVar();
    };

    bool canUsePosition = positionVar.getType() != SkSLType::kVoid;

    FPCoordsMap result;
    // Performs a pre-order traversal of FP hierarchy rooted at fp and identifies FPs that are
    // sampled with a series of matrices applied to local coords. For each such FP a varying is
    // added to the varying handler and added to 'result'.
    auto liftTransforms = [&, traversalIndex = 0](
                                  auto& self,
                                  const GrFragmentProcessor& fp,
                                  bool hasPerspective,
                                  const GrFragmentProcessor* lastMatrixFP = nullptr,
                                  int lastMatrixTraversalIndex = -1,
                                  BaseCoord baseCoord = BaseCoord::kLocal) mutable -> void {
        ++traversalIndex;
        if (localCoordsShader == kVertex_GrShaderType) {
            // Update the matrix-chain state based on how this FP is sampled.
            switch (fp.sampleUsage().kind()) {
                case SkSL::SampleUsage::Kind::kNone:
                    // This should only happen at the root. Otherwise how did this FP get added?
                    SkASSERT(!fp.parent());
                    break;
                case SkSL::SampleUsage::Kind::kPassThrough:
                    break;
                case SkSL::SampleUsage::Kind::kUniformMatrix:
                    // Update tracking of last matrix and matrix props.
                    hasPerspective |= fp.sampleUsage().hasPerspective();
                    lastMatrixFP = &fp;
                    lastMatrixTraversalIndex = traversalIndex;
                    break;
                case SkSL::SampleUsage::Kind::kFragCoord:
                    // Restart the chain from device-space position.
                    hasPerspective = positionVar.getType() == SkSLType::kFloat3;
                    lastMatrixFP = nullptr;
                    lastMatrixTraversalIndex = -1;
                    baseCoord = BaseCoord::kPosition;
                    break;
                case SkSL::SampleUsage::Kind::kExplicit:
                    // Explicit coords break the chain entirely.
                    baseCoord = BaseCoord::kNone;
                    break;
            }
        } else {
            // If the GP doesn't provide an interpolatable local coord then there is no hope to
            // lift.
            baseCoord = BaseCoord::kNone;
        }

        // Note: operator[] default-constructs the entry on first visit.
        auto& [varyingFSVar, hasCoordsParam] = result[&fp];
        hasCoordsParam = fp.usesSampleCoordsDirectly();

        // We add a varying if we're in a chain of matrices multiplied by local or device coords.
        // If the coord is the untransformed local coord we add a varying. We don't if it is
        // untransformed device coords since it doesn't save us anything over "sk_FragCoord.xy". Of
        // course, if the FP doesn't directly use its coords then we don't add a varying.
        if (fp.usesSampleCoordsDirectly() &&
            (baseCoord == BaseCoord::kLocal ||
             (baseCoord == BaseCoord::kPosition && lastMatrixFP && canUsePosition))) {
            // Associate the varying with the highest possible node in the FP tree that shares the
            // same coordinates so that multiple FPs in a subtree can share. If there are no matrix
            // sample nodes on the way up the tree then directly use the local coord.
            if (!lastMatrixFP) {
                varyingFSVar = baseLocalCoordFSVar();
            } else {
                // If there is an already a varying that incorporates all matrices from the root to
                // lastMatrixFP just use it. Otherwise, we add it.
                auto& [varying, inputCoords, varyingIdx] = fTransformVaryingsMap[lastMatrixFP];
                if (varying.type() == SkSLType::kVoid) {
                    varying = GrGLSLVarying(hasPerspective ? SkSLType::kFloat3 : SkSLType::kFloat2);
                    SkString strVaryingName = SkStringPrintf("TransformedCoords_%d",
                                                             lastMatrixTraversalIndex);
                    varyingHandler->addVarying(strVaryingName.c_str(), &varying);
                    inputCoords = baseCoord == BaseCoord::kLocal ? localCoordsVar : positionVar;
                    varyingIdx = lastMatrixTraversalIndex;
                }
                SkASSERT(varyingIdx == lastMatrixTraversalIndex);
                // The FP will use the varying in the fragment shader as its coords.
                varyingFSVar = varying.fsInVar();
            }
            hasCoordsParam = false;
        }

        // Recurse into children, then decide whether this FP must still receive
        // a coords parameter on behalf of a descendant.
        for (int c = 0; c < fp.numChildProcessors(); ++c) {
            if (auto* child = fp.childProcessor(c)) {
                self(self,
                     *child,
                     hasPerspective,
                     lastMatrixFP,
                     lastMatrixTraversalIndex,
                     baseCoord);
                // If we have a varying then we never need a param. Otherwise, if one of our
                // children takes a non-explicit coord then we'll need our coord.
                hasCoordsParam |= varyingFSVar.getType() == SkSLType::kVoid &&
                                  !child->sampleUsage().isExplicit() &&
                                  !child->sampleUsage().isFragCoord() &&
                                  result[child].hasCoordsParam;
            }
        }
    };

    bool hasPerspective = SkSLTypeVecLength(localCoordsVar.getType()) == 3;
    for (int i = 0; i < pipeline.numFragmentProcessors(); ++i) {
        liftTransforms(liftTransforms, pipeline.getFragmentProcessor(i), hasPerspective);
    }
    return result;
}
243
emitTransformCode(GrGLSLVertexBuilder * vb,GrGLSLUniformHandler * uniformHandler)244 void ProgramImpl::emitTransformCode(GrGLSLVertexBuilder* vb, GrGLSLUniformHandler* uniformHandler) {
245 // Because descendant varyings may be computed using the varyings of ancestor FPs we make
246 // sure to visit the varyings according to FP pre-order traversal by dumping them into a
247 // priority queue.
248 using FPAndInfo = std::tuple<const GrFragmentProcessor*, TransformInfo>;
249 auto compare = [](const FPAndInfo& a, const FPAndInfo& b) {
250 return std::get<1>(a).traversalOrder > std::get<1>(b).traversalOrder;
251 };
252 std::priority_queue<FPAndInfo, std::vector<FPAndInfo>, decltype(compare)> pq(compare);
253 std::for_each(fTransformVaryingsMap.begin(), fTransformVaryingsMap.end(), [&pq](auto entry) {
254 pq.push(entry);
255 });
256 for (; !pq.empty(); pq.pop()) {
257 const auto& [fp, info] = pq.top();
258 // If we recorded a transform info, its sample matrix must be uniform
259 SkASSERT(fp->sampleUsage().isUniformMatrix());
260 GrShaderVar uniform = uniformHandler->liftUniformToVertexShader(
261 *fp->parent(), SkString(SkSL::SampleUsage::MatrixUniformName()));
262 // Start with this matrix and accumulate additional matrices as we walk up the FP tree
263 // to either the base coords or an ancestor FP that has an associated varying.
264 SkString transformExpression = uniform.getName();
265
266 // If we hit an ancestor with a varying on our walk up then save off the varying as the
267 // input to our accumulated transformExpression. Start off assuming we'll reach the root.
268 GrShaderVar inputCoords = info.inputCoords;
269
270 for (const auto* base = fp->parent(); base; base = base->parent()) {
271 if (auto iter = fTransformVaryingsMap.find(base); iter != fTransformVaryingsMap.end()) {
272 // Can stop here, as this varying already holds all transforms from higher FPs
273 // We'll apply the residual transformExpression we've accumulated up from our
274 // starting FP to this varying.
275 inputCoords = iter->second.varying.vsOutVar();
276 break;
277 } else if (base->sampleUsage().isUniformMatrix()) {
278 // Accumulate any matrices along the path to either the original local/device coords
279 // or a parent varying. Getting here means this FP was sampled with a uniform matrix
280 // but all uses of coords below here in the FP hierarchy are beneath additional
281 // matrix samples and thus this node wasn't assigned a varying.
282 GrShaderVar parentUniform = uniformHandler->liftUniformToVertexShader(
283 *base->parent(), SkString(SkSL::SampleUsage::MatrixUniformName()));
284 transformExpression.appendf(" * %s", parentUniform.getName().c_str());
285 } else if (base->sampleUsage().isFragCoord()) {
286 // Our chain of matrices starts here and is based on the device space position.
287 break;
288 } else {
289 // This intermediate FP is just a pass through and doesn't need to be built
290 // in to the expression, but we must visit its parents in case they add transforms.
291 SkASSERT(base->sampleUsage().isPassThrough() || !base->sampleUsage().isSampled());
292 }
293 }
294
295 SkString inputStr;
296 if (inputCoords.getType() == SkSLType::kFloat2) {
297 inputStr = SkStringPrintf("%s.xy1", inputCoords.getName().c_str());
298 } else {
299 SkASSERT(inputCoords.getType() == SkSLType::kFloat3);
300 inputStr = inputCoords.getName();
301 }
302
303 vb->codeAppend("{\n");
304 if (info.varying.type() == SkSLType::kFloat2) {
305 if (vb->getProgramBuilder()->shaderCaps()->nonsquareMatrixSupport()) {
306 vb->codeAppendf("%s = float3x2(%s) * %s",
307 info.varying.vsOut(),
308 transformExpression.c_str(),
309 inputStr.c_str());
310 } else {
311 vb->codeAppendf("%s = (%s * %s).xy",
312 info.varying.vsOut(),
313 transformExpression.c_str(),
314 inputStr.c_str());
315 }
316 } else {
317 SkASSERT(info.varying.type() == SkSLType::kFloat3);
318 vb->codeAppendf("%s = %s * %s",
319 info.varying.vsOut(),
320 transformExpression.c_str(),
321 inputStr.c_str());
322 }
323 vb->codeAppend(";\n");
324 vb->codeAppend("}\n");
325 }
326 // We don't need this map anymore.
327 fTransformVaryingsMap.clear();
328 }
329
// Adds a half4 "Color" fragment uniform, assigns it to 'outputName' in the
// fragment shader, and returns its handle through 'colorUniform'.
void ProgramImpl::setupUniformColor(GrGLSLFPFragmentBuilder* fragBuilder,
                                    GrGLSLUniformHandler* uniformHandler,
                                    const char* outputName,
                                    UniformHandle* colorUniform) {
    SkASSERT(colorUniform);
    const char* stagedLocalVarName;
    *colorUniform = uniformHandler->addUniform(nullptr,
                                               kFragment_GrShaderFlag,
                                               SkSLType::kHalf4,
                                               "Color",
                                               &stagedLocalVarName);
    fragBuilder->codeAppendf("%s = %s;", outputName, stagedLocalVarName);
    // Workaround for drivers that need the uniform "obfuscated": re-express the
    // value through max() so the compiler cannot constant-fold it away.
    if (fragBuilder->getProgramBuilder()->shaderCaps()->mustObfuscateUniformColor()) {
        fragBuilder->codeAppendf("%s = max(%s, half4(0));", outputName, outputName);
    }
}
346
// Uploads 'matrix' to 'uniform', skipping the upload when the uniform is
// invalid or 'state' already holds an equal matrix. 'state' (optional) caches
// the last-uploaded matrix for that redundancy check. Scale/translate matrices
// are packed into a float4 (sx, tx, sy, ty); reduced-shader mode always uses
// the full 3x3 form so keys stay uniform across matrix types.
void ProgramImpl::SetTransform(const GrGLSLProgramDataManager& pdman,
                               const GrShaderCaps& shaderCaps,
                               const UniformHandle& uniform,
                               const SkMatrix& matrix,
                               SkMatrix* state) {
    if (!uniform.isValid() || (state && SkMatrixPriv::CheapEqual(*state, matrix))) {
        // No update needed
        return;
    }
    if (state) {
        *state = matrix;
    }
    if (matrix.isScaleTranslate() && !shaderCaps.reducedShaderMode()) {
        // ComputeMatrixKey and writeX() assume the uniform is a float4 (can't assert since nothing
        // is exposed on a handle, but should be caught lower down).
        float values[4] = {matrix.getScaleX(), matrix.getTranslateX(),
                           matrix.getScaleY(), matrix.getTranslateY()};
        pdman.set4fv(uniform, 1, values);
    } else {
        pdman.setSkMatrix(uniform, matrix);
    }
}
369
write_passthrough_vertex_position(GrGLSLVertexBuilder * vertBuilder,const GrShaderVar & inPos,GrShaderVar * outPos)370 static void write_passthrough_vertex_position(GrGLSLVertexBuilder* vertBuilder,
371 const GrShaderVar& inPos,
372 GrShaderVar* outPos) {
373 SkASSERT(inPos.getType() == SkSLType::kFloat3 || inPos.getType() == SkSLType::kFloat2);
374 SkString outName = vertBuilder->newTmpVarName(inPos.getName().c_str());
375 outPos->set(inPos.getType(), outName.c_str());
376 vertBuilder->codeAppendf("float%d %s = %s;",
377 SkSLTypeVecLength(inPos.getType()),
378 outName.c_str(),
379 inPos.getName().c_str());
380 }
381
write_vertex_position(GrGLSLVertexBuilder * vertBuilder,GrGLSLUniformHandler * uniformHandler,const GrShaderCaps & shaderCaps,const GrShaderVar & inPos,const SkMatrix & matrix,const char * matrixName,GrShaderVar * outPos,ProgramImpl::UniformHandle * matrixUniform)382 static void write_vertex_position(GrGLSLVertexBuilder* vertBuilder,
383 GrGLSLUniformHandler* uniformHandler,
384 const GrShaderCaps& shaderCaps,
385 const GrShaderVar& inPos,
386 const SkMatrix& matrix,
387 const char* matrixName,
388 GrShaderVar* outPos,
389 ProgramImpl::UniformHandle* matrixUniform) {
390 SkASSERT(inPos.getType() == SkSLType::kFloat3 || inPos.getType() == SkSLType::kFloat2);
391 SkString outName = vertBuilder->newTmpVarName(inPos.getName().c_str());
392
393 if (matrix.isIdentity() && !shaderCaps.reducedShaderMode()) {
394 write_passthrough_vertex_position(vertBuilder, inPos, outPos);
395 return;
396 }
397 SkASSERT(matrixUniform);
398
399 bool useCompactTransform = matrix.isScaleTranslate() && !shaderCaps.reducedShaderMode();
400 const char* mangledMatrixName;
401 *matrixUniform = uniformHandler->addUniform(nullptr,
402 kVertex_GrShaderFlag,
403 useCompactTransform ? SkSLType::kFloat4
404 : SkSLType::kFloat3x3,
405 matrixName,
406 &mangledMatrixName);
407
408 if (inPos.getType() == SkSLType::kFloat3) {
409 // A float3 stays a float3 whether or not the matrix adds perspective
410 if (useCompactTransform) {
411 vertBuilder->codeAppendf("float3 %s = %s.xz1 * %s + %s.yw0;\n",
412 outName.c_str(),
413 mangledMatrixName,
414 inPos.getName().c_str(),
415 mangledMatrixName);
416 } else {
417 vertBuilder->codeAppendf("float3 %s = %s * %s;\n",
418 outName.c_str(),
419 mangledMatrixName,
420 inPos.getName().c_str());
421 }
422 outPos->set(SkSLType::kFloat3, outName.c_str());
423 return;
424 }
425 if (matrix.hasPerspective()) {
426 // A float2 is promoted to a float3 if we add perspective via the matrix
427 SkASSERT(!useCompactTransform);
428 vertBuilder->codeAppendf("float3 %s = (%s * %s.xy1);",
429 outName.c_str(),
430 mangledMatrixName,
431 inPos.getName().c_str());
432 outPos->set(SkSLType::kFloat3, outName.c_str());
433 return;
434 }
435 if (useCompactTransform) {
436 vertBuilder->codeAppendf("float2 %s = %s.xz * %s + %s.yw;\n",
437 outName.c_str(),
438 mangledMatrixName,
439 inPos.getName().c_str(),
440 mangledMatrixName);
441 } else if (shaderCaps.nonsquareMatrixSupport()) {
442 vertBuilder->codeAppendf("float2 %s = float3x2(%s) * %s.xy1;\n",
443 outName.c_str(),
444 mangledMatrixName,
445 inPos.getName().c_str());
446 } else {
447 vertBuilder->codeAppendf("float2 %s = (%s * %s.xy1).xy;\n",
448 outName.c_str(),
449 mangledMatrixName,
450 inPos.getName().c_str());
451 }
452 outPos->set(SkSLType::kFloat2, outName.c_str());
453 }
454
// Writes the GP's output position as a direct copy of the float2 variable named
// 'posName' (no view matrix involved).
void ProgramImpl::WriteOutputPosition(GrGLSLVertexBuilder* vertBuilder,
                                      GrGPArgs* gpArgs,
                                      const char* posName) {
    // writeOutputPosition assumes the incoming pos name points to a float2 variable
    GrShaderVar inPos(posName, SkSLType::kFloat2);
    write_passthrough_vertex_position(vertBuilder, inPos, &gpArgs->fPositionVar);
}
462
// Writes the GP's output position as 'mat' applied to the float2 variable named
// 'posName'. The matrix is uploaded as the "viewMatrix" uniform, whose handle
// is returned via 'viewMatrixUniform' (invalid if the identity path is taken).
void ProgramImpl::WriteOutputPosition(GrGLSLVertexBuilder* vertBuilder,
                                      GrGLSLUniformHandler* uniformHandler,
                                      const GrShaderCaps& shaderCaps,
                                      GrGPArgs* gpArgs,
                                      const char* posName,
                                      const SkMatrix& mat,
                                      UniformHandle* viewMatrixUniform) {
    GrShaderVar inPos(posName, SkSLType::kFloat2);
    write_vertex_position(vertBuilder,
                          uniformHandler,
                          shaderCaps,
                          inPos,
                          mat,
                          "viewMatrix",
                          &gpArgs->fPositionVar,
                          viewMatrixUniform);
}
480
// Writes the GP's local coords as 'localMatrix' applied to 'localVar'. The
// matrix is uploaded as the "localMatrix" uniform, whose handle is returned via
// 'localMatrixUniform' (invalid if the identity path is taken).
void ProgramImpl::WriteLocalCoord(GrGLSLVertexBuilder* vertBuilder,
                                  GrGLSLUniformHandler* uniformHandler,
                                  const GrShaderCaps& shaderCaps,
                                  GrGPArgs* gpArgs,
                                  GrShaderVar localVar,
                                  const SkMatrix& localMatrix,
                                  UniformHandle* localMatrixUniform) {
    write_vertex_position(vertBuilder,
                          uniformHandler,
                          shaderCaps,
                          localVar,
                          localMatrix,
                          "localMatrix",
                          &gpArgs->fLocalCoordVar,
                          localMatrixUniform);
}
497
498 //////////////////////////////////////////////////////////////////////////////
499
500 using Attribute = GrGeometryProcessor::Attribute;
501 using AttributeSet = GrGeometryProcessor::AttributeSet;
502
operator *() const503 GrGeometryProcessor::Attribute AttributeSet::Iter::operator*() const {
504 if (fCurr->offset().has_value()) {
505 return *fCurr;
506 }
507 return Attribute(fCurr->name(), fCurr->cpuType(), fCurr->gpuType(), fImplicitOffset);
508 }
509
operator ++()510 void AttributeSet::Iter::operator++() {
511 if (fRemaining) {
512 fRemaining--;
513 fImplicitOffset += Attribute::AlignOffset(fCurr->size());
514 fCurr++;
515 this->skipUninitialized();
516 }
517 }
518
skipUninitialized()519 void AttributeSet::Iter::skipUninitialized() {
520 if (!fRemaining) {
521 fCurr = nullptr;
522 } else {
523 while (!fCurr->isInitialized()) {
524 ++fCurr;
525 }
526 }
527 }
528
initImplicit(const Attribute * attrs,int count)529 void AttributeSet::initImplicit(const Attribute* attrs, int count) {
530 fAttributes = attrs;
531 fRawCount = count;
532 fCount = 0;
533 fStride = 0;
534 for (int i = 0; i < count; ++i) {
535 if (attrs[i].isInitialized()) {
536 fCount++;
537 fStride += Attribute::AlignOffset(attrs[i].size());
538 }
539 }
540 }
541
initExplicit(const Attribute * attrs,int count,size_t stride)542 void AttributeSet::initExplicit(const Attribute* attrs, int count, size_t stride) {
543 fAttributes = attrs;
544 fRawCount = count;
545 fCount = count;
546 fStride = stride;
547 SkASSERT(Attribute::AlignOffset(fStride) == fStride);
548 for (int i = 0; i < count; ++i) {
549 SkASSERT(attrs[i].isInitialized());
550 SkASSERT(attrs[i].offset().has_value());
551 SkASSERT(Attribute::AlignOffset(*attrs[i].offset()) == *attrs[i].offset());
552 SkASSERT(*attrs[i].offset() + attrs[i].size() <= fStride);
553 }
554 }
555
// Serializes the attribute layout (stride, count, and per-slot type/offset)
// into the program key. Uninitialized slots are encoded with 0xff types and a
// -1 offset so that gaps still affect the key deterministically.
void AttributeSet::addToKey(skgpu::KeyBuilder* b) const {
    // NOTE(review): fRawCount appears to use its sign as a flag elsewhere
    // (SkAbs32 here, and initImplicit/initExplicit both store it non-negative)
    // -- confirm against the header before relying on the sign.
    int rawCount = SkAbs32(fRawCount);
    b->addBits(16, SkToU16(this->stride()), "stride");
    b->addBits(16, rawCount, "attribute count");
    size_t implicitOffset = 0;
    for (int i = 0; i < rawCount; ++i) {
        const Attribute& attr = fAttributes[i];
        b->appendComment(attr.isInitialized() ? attr.name() : "unusedAttr");
        // Both type enums must fit in the 8 bits allotted per field below.
        static_assert(kGrVertexAttribTypeCount < (1 << 8), "");
        static_assert(kSkSLTypeCount < (1 << 8), "");
        b->addBits(8, attr.isInitialized() ? attr.cpuType() : 0xff, "attrType");
        b->addBits(8, attr.isInitialized() ? static_cast<int>(attr.gpuType()) : 0xff,
                   "attrGpuType");
        int16_t offset = -1;
        if (attr.isInitialized()) {
            if (attr.offset().has_value()) {
                offset = *attr.offset();
            } else {
                // Implicit offsets accumulate aligned sizes, mirroring Iter.
                offset = implicitOffset;
                implicitOffset += Attribute::AlignOffset(attr.size());
            }
        }
        b->addBits(16, static_cast<uint16_t>(offset), "attrOffset");
    }
}
581
// Iteration bounds: begin() starts at the first initialized attribute (Iter's
// constructor handles skipping); end() is the default (null) sentinel.
AttributeSet::Iter AttributeSet::begin() const { return Iter(fAttributes, fCount); }
AttributeSet::Iter AttributeSet::end() const { return Iter(); }
584