1 /*
2 * Copyright 2022 Google LLC
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "src/gpu/graphite/dawn/DawnGraphicsPipeline.h"
9
10 #include "include/gpu/graphite/TextureInfo.h"
11 #include "src/gpu/PipelineUtils.h"
12 #include "src/gpu/Swizzle.h"
13 #include "src/gpu/graphite/Attribute.h"
14 #include "src/gpu/graphite/ContextUtils.h"
15 #include "src/gpu/graphite/GraphicsPipelineDesc.h"
16 #include "src/gpu/graphite/Log.h"
17 #include "src/gpu/graphite/RenderPassDesc.h"
18 #include "src/gpu/graphite/RendererProvider.h"
19 #include "src/gpu/graphite/UniformManager.h"
20 #include "src/gpu/graphite/dawn/DawnCaps.h"
21 #include "src/gpu/graphite/dawn/DawnErrorChecker.h"
22 #include "src/gpu/graphite/dawn/DawnGraphiteUtilsPriv.h"
23 #include "src/gpu/graphite/dawn/DawnResourceProvider.h"
24 #include "src/gpu/graphite/dawn/DawnSharedContext.h"
25 #include "src/gpu/graphite/dawn/DawnUtilsPriv.h"
26 #include "src/sksl/SkSLProgramSettings.h"
27 #include "src/sksl/SkSLUtil.h"
28 #include "src/sksl/ir/SkSLProgram.h"
29
30 #include <vector>
31
32 namespace skgpu::graphite {
33
34 namespace {
35
attribute_type_to_dawn(VertexAttribType type)36 inline wgpu::VertexFormat attribute_type_to_dawn(VertexAttribType type) {
37 switch (type) {
38 case VertexAttribType::kFloat:
39 return wgpu::VertexFormat::Float32;
40 case VertexAttribType::kFloat2:
41 return wgpu::VertexFormat::Float32x2;
42 case VertexAttribType::kFloat3:
43 return wgpu::VertexFormat::Float32x3;
44 case VertexAttribType::kFloat4:
45 return wgpu::VertexFormat::Float32x4;
46 case VertexAttribType::kHalf2:
47 return wgpu::VertexFormat::Float16x2;
48 case VertexAttribType::kHalf4:
49 return wgpu::VertexFormat::Float16x4;
50 case VertexAttribType::kInt2:
51 return wgpu::VertexFormat::Sint32x2;
52 case VertexAttribType::kInt3:
53 return wgpu::VertexFormat::Sint32x3;
54 case VertexAttribType::kInt4:
55 return wgpu::VertexFormat::Sint32x4;
56 case VertexAttribType::kByte2:
57 return wgpu::VertexFormat::Sint8x2;
58 case VertexAttribType::kByte4:
59 return wgpu::VertexFormat::Sint8x4;
60 case VertexAttribType::kUByte2:
61 return wgpu::VertexFormat::Uint8x2;
62 case VertexAttribType::kUByte4:
63 return wgpu::VertexFormat::Uint8x4;
64 case VertexAttribType::kUByte4_norm:
65 return wgpu::VertexFormat::Unorm8x4;
66 case VertexAttribType::kShort2:
67 return wgpu::VertexFormat::Sint16x2;
68 case VertexAttribType::kShort4:
69 return wgpu::VertexFormat::Sint16x4;
70 case VertexAttribType::kUShort2:
71 return wgpu::VertexFormat::Uint16x2;
72 case VertexAttribType::kUShort2_norm:
73 return wgpu::VertexFormat::Unorm16x2;
74 case VertexAttribType::kInt:
75 return wgpu::VertexFormat::Sint32;
76 case VertexAttribType::kUInt:
77 return wgpu::VertexFormat::Uint32;
78 case VertexAttribType::kUShort4_norm:
79 return wgpu::VertexFormat::Unorm16x4;
80 case VertexAttribType::kHalf:
81 case VertexAttribType::kByte:
82 case VertexAttribType::kUByte:
83 case VertexAttribType::kUByte_norm:
84 case VertexAttribType::kUShort_norm:
85 // Not supported.
86 break;
87 }
88 SkUNREACHABLE;
89 }
90
compare_op_to_dawn(CompareOp op)91 wgpu::CompareFunction compare_op_to_dawn(CompareOp op) {
92 switch (op) {
93 case CompareOp::kAlways:
94 return wgpu::CompareFunction::Always;
95 case CompareOp::kNever:
96 return wgpu::CompareFunction::Never;
97 case CompareOp::kGreater:
98 return wgpu::CompareFunction::Greater;
99 case CompareOp::kGEqual:
100 return wgpu::CompareFunction::GreaterEqual;
101 case CompareOp::kLess:
102 return wgpu::CompareFunction::Less;
103 case CompareOp::kLEqual:
104 return wgpu::CompareFunction::LessEqual;
105 case CompareOp::kEqual:
106 return wgpu::CompareFunction::Equal;
107 case CompareOp::kNotEqual:
108 return wgpu::CompareFunction::NotEqual;
109 }
110 SkUNREACHABLE;
111 }
112
stencil_op_to_dawn(StencilOp op)113 wgpu::StencilOperation stencil_op_to_dawn(StencilOp op) {
114 switch (op) {
115 case StencilOp::kKeep:
116 return wgpu::StencilOperation::Keep;
117 case StencilOp::kZero:
118 return wgpu::StencilOperation::Zero;
119 case StencilOp::kReplace:
120 return wgpu::StencilOperation::Replace;
121 case StencilOp::kInvert:
122 return wgpu::StencilOperation::Invert;
123 case StencilOp::kIncWrap:
124 return wgpu::StencilOperation::IncrementWrap;
125 case StencilOp::kDecWrap:
126 return wgpu::StencilOperation::DecrementWrap;
127 case StencilOp::kIncClamp:
128 return wgpu::StencilOperation::IncrementClamp;
129 case StencilOp::kDecClamp:
130 return wgpu::StencilOperation::DecrementClamp;
131 }
132 SkUNREACHABLE;
133 }
134
stencil_face_to_dawn(DepthStencilSettings::Face face)135 wgpu::StencilFaceState stencil_face_to_dawn(DepthStencilSettings::Face face) {
136 wgpu::StencilFaceState state;
137 state.compare = compare_op_to_dawn(face.fCompareOp);
138 state.failOp = stencil_op_to_dawn(face.fStencilFailOp);
139 state.depthFailOp = stencil_op_to_dawn(face.fDepthFailOp);
140 state.passOp = stencil_op_to_dawn(face.fDepthStencilPassOp);
141 return state;
142 }
143
create_vertex_attributes(SkSpan<const Attribute> attrs,int shaderLocationOffset,std::vector<wgpu::VertexAttribute> * out)144 size_t create_vertex_attributes(SkSpan<const Attribute> attrs,
145 int shaderLocationOffset,
146 std::vector<wgpu::VertexAttribute>* out) {
147 SkASSERT(out && out->empty());
148 out->resize(attrs.size());
149 size_t vertexAttributeOffset = 0;
150 int attributeIndex = 0;
151 for (const auto& attr : attrs) {
152 wgpu::VertexAttribute& vertexAttribute = (*out)[attributeIndex];
153 vertexAttribute.format = attribute_type_to_dawn(attr.cpuType());
154 vertexAttribute.offset = vertexAttributeOffset;
155 vertexAttribute.shaderLocation = shaderLocationOffset + attributeIndex;
156 vertexAttributeOffset += attr.sizeAlign4();
157 attributeIndex++;
158 }
159 return vertexAttributeOffset;
160 }
161
162 // TODO: share this w/ Ganesh dawn backend?
// Converts a Graphite blend coefficient to the WGPU blend factor. Dual-source
// coefficients (kS2C et al.) require dual-source-blending support; without it
// (and always on wasm, where the caps aren't consulted) they collapse to Zero.
// TODO: share this w/ Ganesh dawn backend?
static wgpu::BlendFactor blend_coeff_to_dawn_blend(const DawnCaps& caps, skgpu::BlendCoeff coeff) {
#if defined(__EMSCRIPTEN__)
#define VALUE_IF_DSB_OR_ZERO(VALUE) wgpu::BlendFactor::Zero
#else
#define VALUE_IF_DSB_OR_ZERO(VALUE) \
    ((caps.shaderCaps()->fDualSourceBlendingSupport) ? (VALUE) : wgpu::BlendFactor::Zero)
#endif
    switch (coeff) {
        case skgpu::BlendCoeff::kZero:
            return wgpu::BlendFactor::Zero;
        case skgpu::BlendCoeff::kOne:
            return wgpu::BlendFactor::One;
        case skgpu::BlendCoeff::kSC:
            return wgpu::BlendFactor::Src;
        case skgpu::BlendCoeff::kISC:
            return wgpu::BlendFactor::OneMinusSrc;
        case skgpu::BlendCoeff::kDC:
            return wgpu::BlendFactor::Dst;
        case skgpu::BlendCoeff::kIDC:
            return wgpu::BlendFactor::OneMinusDst;
        case skgpu::BlendCoeff::kSA:
            return wgpu::BlendFactor::SrcAlpha;
        case skgpu::BlendCoeff::kISA:
            return wgpu::BlendFactor::OneMinusSrcAlpha;
        case skgpu::BlendCoeff::kDA:
            return wgpu::BlendFactor::DstAlpha;
        case skgpu::BlendCoeff::kIDA:
            return wgpu::BlendFactor::OneMinusDstAlpha;
        case skgpu::BlendCoeff::kConstC:
            return wgpu::BlendFactor::Constant;
        case skgpu::BlendCoeff::kIConstC:
            return wgpu::BlendFactor::OneMinusConstant;
        // Dual-source coefficients: valid only when the caps report support.
        case skgpu::BlendCoeff::kS2C:
            return VALUE_IF_DSB_OR_ZERO(wgpu::BlendFactor::Src1);
        case skgpu::BlendCoeff::kIS2C:
            return VALUE_IF_DSB_OR_ZERO(wgpu::BlendFactor::OneMinusSrc1);
        case skgpu::BlendCoeff::kS2A:
            return VALUE_IF_DSB_OR_ZERO(wgpu::BlendFactor::Src1Alpha);
        case skgpu::BlendCoeff::kIS2A:
            return VALUE_IF_DSB_OR_ZERO(wgpu::BlendFactor::OneMinusSrc1Alpha);
        case skgpu::BlendCoeff::kIllegal:
            return wgpu::BlendFactor::Zero;
    }
    SkUNREACHABLE;
#undef VALUE_IF_DSB_OR_ZERO
}
209
blend_coeff_to_dawn_blend_for_alpha(const DawnCaps & caps,skgpu::BlendCoeff coeff)210 static wgpu::BlendFactor blend_coeff_to_dawn_blend_for_alpha(const DawnCaps& caps,
211 skgpu::BlendCoeff coeff) {
212 switch (coeff) {
213 // Force all srcColor used in alpha slot to alpha version.
214 case skgpu::BlendCoeff::kSC:
215 return wgpu::BlendFactor::SrcAlpha;
216 case skgpu::BlendCoeff::kISC:
217 return wgpu::BlendFactor::OneMinusSrcAlpha;
218 case skgpu::BlendCoeff::kDC:
219 return wgpu::BlendFactor::DstAlpha;
220 case skgpu::BlendCoeff::kIDC:
221 return wgpu::BlendFactor::OneMinusDstAlpha;
222 default:
223 return blend_coeff_to_dawn_blend(caps, coeff);
224 }
225 }
226
227 // TODO: share this w/ Ganesh Metal backend?
blend_equation_to_dawn_blend_op(skgpu::BlendEquation equation)228 static wgpu::BlendOperation blend_equation_to_dawn_blend_op(skgpu::BlendEquation equation) {
229 static const wgpu::BlendOperation gTable[] = {
230 wgpu::BlendOperation::Add, // skgpu::BlendEquation::kAdd
231 wgpu::BlendOperation::Subtract, // skgpu::BlendEquation::kSubtract
232 wgpu::BlendOperation::ReverseSubtract, // skgpu::BlendEquation::kReverseSubtract
233 };
234 static_assert(std::size(gTable) == (int)skgpu::BlendEquation::kFirstAdvanced);
235 static_assert(0 == (int)skgpu::BlendEquation::kAdd);
236 static_assert(1 == (int)skgpu::BlendEquation::kSubtract);
237 static_assert(2 == (int)skgpu::BlendEquation::kReverseSubtract);
238
239 SkASSERT((unsigned)equation < skgpu::kBlendEquationCnt);
240 return gTable[(int)equation];
241 }
242
// Shared state for pipeline creation. fRenderPipeline is the finished pipeline
// (or null if creation failed); fFinished flips to true once creation has
// completed, successfully or not.
struct AsyncPipelineCreationBase {
    wgpu::RenderPipeline fRenderPipeline;
    bool fFinished = false;
};
247
248 } // anonymous namespace
249
#if defined(__EMSCRIPTEN__)
// For wasm, we don't use async compilation.
struct DawnGraphicsPipeline::AsyncPipelineCreation : public AsyncPipelineCreationBase {};
#else
struct DawnGraphicsPipeline::AsyncPipelineCreation : public AsyncPipelineCreationBase {
    // Handle for the pending CreateRenderPipelineAsync call; waited on in
    // dawnRenderPipeline() if the pipeline is needed before creation finishes.
    wgpu::Future fFuture;
};
#endif
258
259 // static
// Builds a complete wgpu::RenderPipeline for the given render step + paint +
// render pass: generates fragment/vertex SkSL, converts it to WGSL, assembles
// the full render pipeline descriptor (blend, color target, depth/stencil,
// bind-group layouts, vertex/instance buffer layouts, primitive and
// multisample state), then creates the pipeline — asynchronously when the caps
// allow. Returns null on any shader compilation or layout creation failure.
sk_sp<DawnGraphicsPipeline> DawnGraphicsPipeline::Make(const DawnSharedContext* sharedContext,
                                                       DawnResourceProvider* resourceProvider,
                                                       const RuntimeEffectDictionary* runtimeDict,
                                                       const GraphicsPipelineDesc& pipelineDesc,
                                                       const RenderPassDesc& renderPassDesc) {
    const DawnCaps& caps = *static_cast<const DawnCaps*>(sharedContext->caps());
    const auto& device = sharedContext->device();

    SkSL::Program::Interface vsInterface, fsInterface;
    SkSL::ProgramSettings settings;

    settings.fForceNoRTFlip = true;

    ShaderErrorHandler* errorHandler = caps.shaderErrorHandler();

    const RenderStep* step = sharedContext->rendererProvider()->lookup(pipelineDesc.renderStepID());
    const bool useStorageBuffers = caps.storageBufferPreferred();

    std::string vsCode, fsCode;
    wgpu::ShaderModule fsModule, vsModule;

    // Some steps just render depth buffer but not color buffer, so the fragment
    // shader is null.
    UniquePaintParamsID paintID = pipelineDesc.paintParamsID();
    FragSkSLInfo fsSkSLInfo = BuildFragmentSkSL(&caps,
                                                sharedContext->shaderCodeDictionary(),
                                                runtimeDict,
                                                step,
                                                paintID,
                                                useStorageBuffers,
                                                renderPassDesc.fWriteSwizzle);
    std::string& fsSkSL = fsSkSLInfo.fSkSL;
    const BlendInfo& blendInfo = fsSkSLInfo.fBlendInfo;
    const bool localCoordsNeeded = fsSkSLInfo.fRequiresLocalCoords;
    const int numTexturesAndSamplers = fsSkSLInfo.fNumTexturesAndSamplers;

    // Compile the fragment shader (SkSL -> WGSL -> shader module), if any.
    bool hasFragmentSkSL = !fsSkSL.empty();
    if (hasFragmentSkSL) {
        if (!skgpu::SkSLToWGSL(caps.shaderCaps(),
                               fsSkSL,
                               SkSL::ProgramKind::kGraphiteFragment,
                               settings,
                               &fsCode,
                               &fsInterface,
                               errorHandler)) {
            return {};
        }
        if (!DawnCompileWGSLShaderModule(sharedContext, fsSkSLInfo.fLabel.c_str(), fsCode,
                                         &fsModule, errorHandler)) {
            return {};
        }
    }

    // Compile the vertex shader; always required.
    VertSkSLInfo vsSkSLInfo = BuildVertexSkSL(caps.resourceBindingRequirements(),
                                              step,
                                              useStorageBuffers,
                                              localCoordsNeeded);
    const std::string& vsSkSL = vsSkSLInfo.fSkSL;
    if (!skgpu::SkSLToWGSL(caps.shaderCaps(),
                           vsSkSL,
                           SkSL::ProgramKind::kGraphiteVertex,
                           settings,
                           &vsCode,
                           &vsInterface,
                           errorHandler)) {
        return {};
    }
    if (!DawnCompileWGSLShaderModule(sharedContext, vsSkSLInfo.fLabel.c_str(), vsCode,
                                     &vsModule, errorHandler)) {
        return {};
    }

    std::string pipelineLabel =
            GetPipelineLabel(sharedContext->shaderCodeDictionary(), renderPassDesc, step, paintID);
    wgpu::RenderPipelineDescriptor descriptor;
    // Always set the label for pipelines, dawn may need it for tracing.
    descriptor.label = pipelineLabel.c_str();

    // Fragment state
    skgpu::BlendEquation equation = blendInfo.fEquation;
    skgpu::BlendCoeff srcCoeff = blendInfo.fSrcBlend;
    skgpu::BlendCoeff dstCoeff = blendInfo.fDstBlend;
    bool blendOn = !skgpu::BlendShouldDisable(equation, srcCoeff, dstCoeff);

    // 'blend' is referenced by 'colorTarget' below, so it must stay alive
    // until the pipeline is created.
    wgpu::BlendState blend;
    if (blendOn) {
        blend.color.operation = blend_equation_to_dawn_blend_op(equation);
        blend.color.srcFactor = blend_coeff_to_dawn_blend(caps, srcCoeff);
        blend.color.dstFactor = blend_coeff_to_dawn_blend(caps, dstCoeff);
        blend.alpha.operation = blend_equation_to_dawn_blend_op(equation);
        blend.alpha.srcFactor = blend_coeff_to_dawn_blend_for_alpha(caps, srcCoeff);
        blend.alpha.dstFactor = blend_coeff_to_dawn_blend_for_alpha(caps, dstCoeff);
    }

    wgpu::ColorTargetState colorTarget;
    colorTarget.format =
            renderPassDesc.fColorAttachment.fTextureInfo.dawnTextureSpec().getViewFormat();
    colorTarget.blend = blendOn ? &blend : nullptr;
    // Disable color writes entirely for depth-only steps or non-writing blends.
    colorTarget.writeMask = blendInfo.fWritesColor && hasFragmentSkSL ? wgpu::ColorWriteMask::All
                                                                      : wgpu::ColorWriteMask::None;

#if !defined(__EMSCRIPTEN__)
    const bool loadMsaaFromResolve =
            renderPassDesc.fColorResolveAttachment.fTextureInfo.isValid() &&
            renderPassDesc.fColorResolveAttachment.fLoadOp == LoadOp::kLoad;
    // Special case: a render pass loading resolve texture requires additional settings for the
    // pipeline to make it compatible.
    wgpu::ColorTargetStateExpandResolveTextureDawn pipelineMSAALoadResolveTextureDesc;
    if (loadMsaaFromResolve && sharedContext->dawnCaps()->resolveTextureLoadOp().has_value()) {
        SkASSERT(device.HasFeature(wgpu::FeatureName::DawnLoadResolveTexture));
        colorTarget.nextInChain = &pipelineMSAALoadResolveTextureDesc;
        pipelineMSAALoadResolveTextureDesc.enabled = true;
    }
#endif

    wgpu::FragmentState fragment;
    // Dawn doesn't allow having a color attachment but without fragment shader, so have to use a
    // noop fragment shader, if fragment shader is null.
    fragment.module = hasFragmentSkSL ? std::move(fsModule) : sharedContext->noopFragment();
    fragment.entryPoint = "main";
    fragment.targetCount = 1;
    fragment.targets = &colorTarget;
    descriptor.fragment = &fragment;

    // Depth stencil state
    const auto& depthStencilSettings = step->depthStencilSettings();
    SkASSERT(depthStencilSettings.fDepthTestEnabled ||
             depthStencilSettings.fDepthCompareOp == CompareOp::kAlways);
    wgpu::DepthStencilState depthStencil;
    if (renderPassDesc.fDepthStencilAttachment.fTextureInfo.isValid()) {
        wgpu::TextureFormat dsFormat =
                renderPassDesc.fDepthStencilAttachment.fTextureInfo.dawnTextureSpec()
                        .getViewFormat();
        depthStencil.format =
                DawnFormatIsDepthOrStencil(dsFormat) ? dsFormat : wgpu::TextureFormat::Undefined;
        if (depthStencilSettings.fDepthTestEnabled) {
            depthStencil.depthWriteEnabled = depthStencilSettings.fDepthWriteEnabled;
        }
        depthStencil.depthCompare = compare_op_to_dawn(depthStencilSettings.fDepthCompareOp);

        // Dawn validation fails if the stencil state is non-default and the
        // format doesn't have the stencil aspect.
        if (DawnFormatIsStencil(dsFormat) && depthStencilSettings.fStencilTestEnabled) {
            depthStencil.stencilFront = stencil_face_to_dawn(depthStencilSettings.fFrontStencil);
            depthStencil.stencilBack = stencil_face_to_dawn(depthStencilSettings.fBackStencil);
            // Graphite only has one set of read/write masks; the front face's
            // masks are applied to both faces here.
            depthStencil.stencilReadMask = depthStencilSettings.fFrontStencil.fReadMask;
            depthStencil.stencilWriteMask = depthStencilSettings.fFrontStencil.fWriteMask;
        }

        descriptor.depthStencil = &depthStencil;
    }

    // Pipeline layout
    BindGroupLayouts groupLayouts;
    {
        // Group 0: uniform buffers (shared, cached on the resource provider).
        groupLayouts[0] = resourceProvider->getOrCreateUniformBuffersBindGroupLayout();
        if (!groupLayouts[0]) {
            return {};
        }

        // Group 1: fragment textures and samplers, laid out as alternating
        // sampler/texture pairs (sampler at binding 2i, texture at 2i+1).
        bool hasFragmentSamplers = hasFragmentSkSL && numTexturesAndSamplers > 0;
        if (hasFragmentSamplers) {
            if (numTexturesAndSamplers == 2) {
                // Common case: single texture + sampler.
                groupLayouts[1] =
                        resourceProvider->getOrCreateSingleTextureSamplerBindGroupLayout();
            } else {
                std::vector<wgpu::BindGroupLayoutEntry> entries(numTexturesAndSamplers);
                for (int i = 0; i < numTexturesAndSamplers;) {
                    entries[i].binding = static_cast<uint32_t>(i);
                    entries[i].visibility = wgpu::ShaderStage::Fragment;
                    entries[i].sampler.type = wgpu::SamplerBindingType::Filtering;
                    ++i;
                    entries[i].binding = i;
                    entries[i].visibility = wgpu::ShaderStage::Fragment;
                    entries[i].texture.sampleType = wgpu::TextureSampleType::Float;
                    entries[i].texture.viewDimension = wgpu::TextureViewDimension::e2D;
                    entries[i].texture.multisampled = false;
                    ++i;
                }

                wgpu::BindGroupLayoutDescriptor groupLayoutDesc;
                if (sharedContext->caps()->setBackendLabels()) {
                    groupLayoutDesc.label = vsSkSLInfo.fLabel.c_str();
                }
                groupLayoutDesc.entryCount = entries.size();
                groupLayoutDesc.entries = entries.data();
                groupLayouts[1] = device.CreateBindGroupLayout(&groupLayoutDesc);
            }
            if (!groupLayouts[1]) {
                return {};
            }
        }

        wgpu::PipelineLayoutDescriptor layoutDesc;
        if (sharedContext->caps()->setBackendLabels()) {
            layoutDesc.label = fsSkSLInfo.fLabel.c_str();
        }
        // Omit the texture/sampler group when there are no fragment samplers.
        layoutDesc.bindGroupLayoutCount =
                hasFragmentSamplers ? groupLayouts.size() : groupLayouts.size() - 1;
        layoutDesc.bindGroupLayouts = groupLayouts.data();
        auto layout = device.CreatePipelineLayout(&layoutDesc);
        if (!layout) {
            return {};
        }
        descriptor.layout = std::move(layout);
    }

    // Vertex state
    std::array<wgpu::VertexBufferLayout, kNumVertexBuffers> vertexBufferLayouts;
    // Vertex buffer layout
    std::vector<wgpu::VertexAttribute> vertexAttributes;
    {
        auto arrayStride = create_vertex_attributes(step->vertexAttributes(),
                                                    0,
                                                    &vertexAttributes);
        auto& layout = vertexBufferLayouts[kVertexBufferIndex];
        if (arrayStride) {
            layout.arrayStride = arrayStride;
            layout.stepMode = wgpu::VertexStepMode::Vertex;
            layout.attributeCount = vertexAttributes.size();
            layout.attributes = vertexAttributes.data();
        } else {
            // No per-vertex attributes: mark the slot unused.
            layout.arrayStride = 0;
            layout.stepMode = wgpu::VertexStepMode::VertexBufferNotUsed;
            layout.attributeCount = 0;
            layout.attributes = nullptr;
        }
    }

    // Instance buffer layout; shader locations continue after the vertex attributes.
    std::vector<wgpu::VertexAttribute> instanceAttributes;
    {
        auto arrayStride = create_vertex_attributes(step->instanceAttributes(),
                                                    step->vertexAttributes().size(),
                                                    &instanceAttributes);
        auto& layout = vertexBufferLayouts[kInstanceBufferIndex];
        if (arrayStride) {
            layout.arrayStride = arrayStride;
            layout.stepMode = wgpu::VertexStepMode::Instance;
            layout.attributeCount = instanceAttributes.size();
            layout.attributes = instanceAttributes.data();
        } else {
            layout.arrayStride = 0;
            layout.stepMode = wgpu::VertexStepMode::VertexBufferNotUsed;
            layout.attributeCount = 0;
            layout.attributes = nullptr;
        }
    }

    auto& vertex = descriptor.vertex;
    vertex.module = std::move(vsModule);
    vertex.entryPoint = "main";
    vertex.constantCount = 0;
    vertex.constants = nullptr;
    vertex.bufferCount = vertexBufferLayouts.size();
    vertex.buffers = vertexBufferLayouts.data();

    // Other state
    descriptor.primitive.frontFace = wgpu::FrontFace::CCW;
    descriptor.primitive.cullMode = wgpu::CullMode::None;
    switch (step->primitiveType()) {
        case PrimitiveType::kTriangles:
            descriptor.primitive.topology = wgpu::PrimitiveTopology::TriangleList;
            break;
        case PrimitiveType::kTriangleStrip:
            descriptor.primitive.topology = wgpu::PrimitiveTopology::TriangleStrip;
            descriptor.primitive.stripIndexFormat = wgpu::IndexFormat::Uint16;
            break;
        case PrimitiveType::kPoints:
            descriptor.primitive.topology = wgpu::PrimitiveTopology::PointList;
            break;
    }

    // Multisampled state
    descriptor.multisample.count = renderPassDesc.fSampleCount;
    descriptor.multisample.mask = 0xFFFFFFFF;
    descriptor.multisample.alphaToCoverageEnabled = false;

    auto asyncCreation = std::make_unique<AsyncPipelineCreation>();

    if (caps.useAsyncPipelineCreation()) {
#if defined(__EMSCRIPTEN__)
        // We shouldn't use CreateRenderPipelineAsync in wasm.
        SKGPU_LOG_F("CreateRenderPipelineAsync shouldn't be used in WASM");
#else
        // NOTE: the callback captures a raw pointer into 'asyncCreation'. This is
        // safe because dawnRenderPipeline() waits on fFuture before the owning
        // DawnGraphicsPipeline (and hence asyncCreation) can be destroyed.
        asyncCreation->fFuture = device.CreateRenderPipelineAsync(
                &descriptor,
                wgpu::CallbackMode::WaitAnyOnly,
                [asyncCreationPtr = asyncCreation.get()](wgpu::CreatePipelineAsyncStatus status,
                                                         wgpu::RenderPipeline pipeline,
                                                         char const* message) {
                    if (status != wgpu::CreatePipelineAsyncStatus::Success) {
                        SKGPU_LOG_E("Failed to create render pipeline (%d): %s",
                                    static_cast<int>(status),
                                    message);
                        // invalidate AsyncPipelineCreation pointer to signal that this pipeline has
                        // failed.
                        asyncCreationPtr->fRenderPipeline = nullptr;
                    } else {
                        asyncCreationPtr->fRenderPipeline = std::move(pipeline);
                    }

                    asyncCreationPtr->fFinished = true;
                });
#endif
    } else {
        // Synchronous creation; optionally wrap in an error scope so a failed
        // creation yields a null pipeline rather than a device error.
        std::optional<DawnErrorChecker> errorChecker;
        if (sharedContext->dawnCaps()->allowScopedErrorChecks()) {
            errorChecker.emplace(sharedContext);
        }
        asyncCreation->fRenderPipeline = device.CreateRenderPipeline(&descriptor);
        asyncCreation->fFinished = true;

        if (errorChecker.has_value() && errorChecker->popErrorScopes() != DawnErrorType::kNoError) {
            asyncCreation->fRenderPipeline = nullptr;
        }
    }
#if defined(GRAPHITE_TEST_UTILS)
    // Test builds keep the generated SkSL/WGSL for inspection.
    GraphicsPipeline::PipelineInfo pipelineInfo = {pipelineDesc.renderStepID(),
                                                   pipelineDesc.paintParamsID(),
                                                   std::move(vsSkSL),
                                                   std::move(fsSkSL),
                                                   std::move(vsCode),
                                                   std::move(fsCode)};
    GraphicsPipeline::PipelineInfo* pipelineInfoPtr = &pipelineInfo;
#else
    GraphicsPipeline::PipelineInfo* pipelineInfoPtr = nullptr;
#endif

    return sk_sp<DawnGraphicsPipeline>(
            new DawnGraphicsPipeline(sharedContext,
                                     pipelineInfoPtr,
                                     std::move(asyncCreation),
                                     std::move(groupLayouts),
                                     step->primitiveType(),
                                     depthStencilSettings.fStencilReferenceValue,
                                     /*hasStepUniforms=*/!step->uniforms().empty(),
                                     /*hasPaintUniforms=*/fsSkSLInfo.fNumPaintUniforms > 0,
                                     /*hasGradientbuffer=*/fsSkSLInfo.fHasGradientBuffer,
                                     numTexturesAndSamplers));
}
602
// Private constructor; use Make(). Takes ownership of the (possibly still
// pending) pipeline creation state and the bind group layouts. 'pipelineInfo'
// may be null outside of test builds.
DawnGraphicsPipeline::DawnGraphicsPipeline(const skgpu::graphite::SharedContext* sharedContext,
                                           PipelineInfo* pipelineInfo,
                                           std::unique_ptr<AsyncPipelineCreation> asyncCreationInfo,
                                           BindGroupLayouts groupLayouts,
                                           PrimitiveType primitiveType,
                                           uint32_t refValue,
                                           bool hasStepUniforms,
                                           bool hasPaintUniforms,
                                           bool hasGradientBuffer,
                                           int numFragmentTexturesAndSamplers)
        : GraphicsPipeline(sharedContext, pipelineInfo)
        , fAsyncPipelineCreation(std::move(asyncCreationInfo))
        , fGroupLayouts(std::move(groupLayouts))
        , fPrimitiveType(primitiveType)
        , fStencilReferenceValue(refValue)
        , fHasStepUniforms(hasStepUniforms)
        , fHasPaintUniforms(hasPaintUniforms)
        , fHasGradientBuffer(hasGradientBuffer)
        , fNumFragmentTexturesAndSamplers(numFragmentTexturesAndSamplers) {}
622
DawnGraphicsPipeline::~DawnGraphicsPipeline() {
    // freeGpuData() waits for any in-flight async creation before releasing.
    this->freeGpuData();
}
626
// Releases the pipeline and its creation state. Must not run while an async
// creation callback could still write into fAsyncPipelineCreation.
void DawnGraphicsPipeline::freeGpuData() {
    // Wait for async creation to finish before we can destroy this object.
    // dawnRenderPipeline() blocks on the pending future (if any).
    (void)this->dawnRenderPipeline();
    fAsyncPipelineCreation = nullptr;
}
632
// Returns the underlying wgpu::RenderPipeline, blocking on the pending async
// creation if it hasn't finished yet. Returns a null pipeline reference if the
// creation state was already released or creation failed.
const wgpu::RenderPipeline& DawnGraphicsPipeline::dawnRenderPipeline() const {
    if (!fAsyncPipelineCreation) {
        // Already freed (or never created): hand back a shared null pipeline.
        static const wgpu::RenderPipeline kNullPipeline = nullptr;
        return kNullPipeline;
    }
    if (fAsyncPipelineCreation->fFinished) {
        return fAsyncPipelineCreation->fRenderPipeline;
    }
#if defined(__EMSCRIPTEN__)
    // We shouldn't use CreateRenderPipelineAsync in wasm.
    SKGPU_LOG_F("CreateRenderPipelineAsync shouldn't be used in WASM");
#else
    // Block until the async creation's future resolves; the callback set in
    // Make() fills in fRenderPipeline and fFinished.
    wgpu::FutureWaitInfo waitInfo{};
    waitInfo.future = fAsyncPipelineCreation->fFuture;
    const auto& instance = static_cast<const DawnSharedContext*>(sharedContext())
                                   ->device()
                                   .GetAdapter()
                                   .GetInstance();

    [[maybe_unused]] auto status =
            instance.WaitAny(1, &waitInfo, /*timeoutNS=*/std::numeric_limits<uint64_t>::max());
    SkASSERT(status == wgpu::WaitStatus::Success);
    SkASSERT(waitInfo.completed);
#endif

    return fAsyncPipelineCreation->fRenderPipeline;
}
660
661 } // namespace skgpu::graphite
662