/*
 * Copyright 2023 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/graphite/dawn/DawnComputePipeline.h"

#include "src/gpu/PipelineUtils.h"
#include "src/gpu/graphite/Caps.h"
#include "src/gpu/graphite/ComputePipelineDesc.h"
#include "src/gpu/graphite/ContextUtils.h"
#include "src/gpu/graphite/dawn/DawnAsyncWait.h"
#include "src/gpu/graphite/dawn/DawnErrorChecker.h"
#include "src/gpu/graphite/dawn/DawnGraphiteUtilsPriv.h"
#include "src/gpu/graphite/dawn/DawnSharedContext.h"
#include "src/gpu/graphite/dawn/DawnUtilsPriv.h"
#include "src/sksl/SkSLProgramSettings.h"

namespace skgpu::graphite {
namespace {

struct ShaderInfo {
    wgpu::ShaderModule fModule;
    std::string fEntryPoint;

    bool isValid() const { return static_cast<bool>(fModule); }
};

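// Compiles the ComputeStep's compute shader into a WGSL shader module, either directly from the
// step's native WGSL source or by translating the generated SkSL to WGSL. Returns an invalid
// ShaderInfo if compilation fails.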
static ShaderInfo compile_shader_module(const DawnSharedContext* sharedContext,
                                        const ComputePipelineDesc& pipelineDesc) {
    SkASSERT(sharedContext);

    ShaderInfo info;

    const Caps* caps = sharedContext->caps();
    const ComputeStep* step = pipelineDesc.computeStep();
    ShaderErrorHandler* errorHandler = caps->shaderErrorHandler();

    if (step->supportsNativeShader()) {
        auto nativeShader = step->nativeShaderSource(ComputeStep::NativeShaderFormat::kWGSL);
        if (!DawnCompileWGSLShaderModule(sharedContext,
                                         step->name(),
                                         std::string(nativeShader.fSource),
                                         &info.fModule,
                                         errorHandler)) {
            return {};
        }
        info.fEntryPoint = std::move(nativeShader.fEntryPoint);
    } else {
        std::string wgsl;
        SkSL::Program::Interface interface;
        SkSL::ProgramSettings settings;

        std::string sksl = BuildComputeSkSL(caps, step);
        if (skgpu::SkSLToWGSL(caps->shaderCaps(),
                              sksl,
                              SkSL::ProgramKind::kCompute,
                              settings,
                              &wgsl,
                              &interface,
                              errorHandler)) {
            if (!DawnCompileWGSLShaderModule(sharedContext, step->name(), wgsl,
                                             &info.fModule, errorHandler)) {
                return {};
            }
            info.fEntryPoint = "main";
        }
    }

    return info;
}

}  // namespace

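// Builds a DawnComputePipeline for the given ComputeStep: compiles its shader, creates a single
// bind group layout describing the step's resources, and constructs the Dawn compute pipeline.
// Returns nullptr if any of these steps fails.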
sk_sp<DawnComputePipeline> DawnComputePipeline::Make(const DawnSharedContext* sharedContext,
                                                     const ComputePipelineDesc& pipelineDesc) {
    auto [shaderModule, entryPointName] = compile_shader_module(sharedContext, pipelineDesc);
    if (!shaderModule) {
        return nullptr;
    }

    const ComputeStep* step = pipelineDesc.computeStep();

    // ComputeStep resources are listed in the order in which they must be declared in the shader.
    // That order is then used for index assignment under an "indexed by order" policy whose exact
    // semantics are backend-specific; on Dawn, binding indices are simply assigned in increasing
    // declaration order.
    //
    // All resources get assigned to a single bind group at index 0.
    SkASSERT(!sharedContext->caps()->resourceBindingRequirements().fDistinctIndexRanges);
    std::vector<wgpu::BindGroupLayoutEntry> bindGroupLayoutEntries;
    auto resources = step->resources();

    // Sampled textures count as 2 resources (1 texture and 1 sampler). All other types count as 1.
    size_t resourceCount = 0;
    for (const ComputeStep::ResourceDesc& r : resources) {
        resourceCount++;
        if (r.fType == ComputeStep::ResourceType::kSampledTexture) {
            resourceCount++;
        }
    }

    bindGroupLayoutEntries.reserve(resourceCount);
    int declarationIndex = 0;
    for (const ComputeStep::ResourceDesc& r : resources) {
        bindGroupLayoutEntries.emplace_back();
        uint32_t bindingIndex = bindGroupLayoutEntries.size() - 1;

        wgpu::BindGroupLayoutEntry& entry = bindGroupLayoutEntries.back();
        entry.binding = bindingIndex;
        entry.visibility = wgpu::ShaderStage::Compute;
        switch (r.fType) {
            case ComputeStep::ResourceType::kUniformBuffer:
                entry.buffer.type = wgpu::BufferBindingType::Uniform;
                break;
            case ComputeStep::ResourceType::kStorageBuffer:
            case ComputeStep::ResourceType::kIndirectBuffer:
                entry.buffer.type = wgpu::BufferBindingType::Storage;
                break;
            case ComputeStep::ResourceType::kReadOnlyStorageBuffer:
                entry.buffer.type = wgpu::BufferBindingType::ReadOnlyStorage;
                break;
            case ComputeStep::ResourceType::kReadOnlyTexture:
                entry.texture.sampleType = wgpu::TextureSampleType::Float;
                entry.texture.viewDimension = wgpu::TextureViewDimension::e2D;
                break;
            case ComputeStep::ResourceType::kWriteOnlyStorageTexture: {
                entry.storageTexture.access = wgpu::StorageTextureAccess::WriteOnly;
                entry.storageTexture.viewDimension = wgpu::TextureViewDimension::e2D;

                auto [_, colorType] = step->calculateTextureParameters(declarationIndex, r);
                auto textureInfo = sharedContext->caps()->getDefaultStorageTextureInfo(colorType);
                entry.storageTexture.format = textureInfo.dawnTextureSpec().getViewFormat();
                break;
            }
            case ComputeStep::ResourceType::kSampledTexture: {
                entry.sampler.type = wgpu::SamplerBindingType::Filtering;

                // Add an additional entry for the texture.
                bindGroupLayoutEntries.emplace_back();
                wgpu::BindGroupLayoutEntry& texEntry = bindGroupLayoutEntries.back();
                texEntry.binding = bindingIndex + 1;
                texEntry.visibility = wgpu::ShaderStage::Compute;
                texEntry.texture.sampleType = wgpu::TextureSampleType::Float;
                texEntry.texture.viewDimension = wgpu::TextureViewDimension::e2D;
                break;
            }
        }
        declarationIndex++;
    }

    const wgpu::Device& device = sharedContext->device();

    // All resources of a ComputeStep currently get assigned to a single bind group at index 0.
    wgpu::BindGroupLayoutDescriptor bindGroupLayoutDesc;
    bindGroupLayoutDesc.entryCount = bindGroupLayoutEntries.size();
    bindGroupLayoutDesc.entries = bindGroupLayoutEntries.data();
    wgpu::BindGroupLayout bindGroupLayout = device.CreateBindGroupLayout(&bindGroupLayoutDesc);
    if (!bindGroupLayout) {
        return nullptr;
    }

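    // Wrap the single bind group layout in a pipeline layout; the pipeline only uses bind group 0.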
    wgpu::PipelineLayoutDescriptor pipelineLayoutDesc;
    if (sharedContext->caps()->setBackendLabels()) {
        pipelineLayoutDesc.label = step->name();
    }
    pipelineLayoutDesc.bindGroupLayoutCount = 1;
    pipelineLayoutDesc.bindGroupLayouts = &bindGroupLayout;
    wgpu::PipelineLayout layout = device.CreatePipelineLayout(&pipelineLayoutDesc);
    if (!layout) {
        return nullptr;
    }

    wgpu::ComputePipelineDescriptor descriptor;
    // Always set the label for pipelines; Dawn may need it for tracing.
    descriptor.label = step->name();
    descriptor.compute.module = std::move(shaderModule);
    descriptor.compute.entryPoint = entryPointName.c_str();
    descriptor.layout = std::move(layout);

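    // When scoped error checks are allowed, create the pipeline inside a DawnErrorChecker scope so
    // that a validation failure can be detected and reported by returning a null pipeline.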
    std::optional<DawnErrorChecker> errorChecker;
    if (sharedContext->dawnCaps()->allowScopedErrorChecks()) {
        errorChecker.emplace(sharedContext);
    }
    wgpu::ComputePipeline pipeline = device.CreateComputePipeline(&descriptor);
    SkASSERT(pipeline);
    if (errorChecker.has_value() && errorChecker->popErrorScopes() != DawnErrorType::kNoError) {
        return nullptr;
    }

    return sk_sp<DawnComputePipeline>(new DawnComputePipeline(
            sharedContext, std::move(pipeline), std::move(bindGroupLayout)));
}

DawnComputePipeline::DawnComputePipeline(const SharedContext* sharedContext,
                                         wgpu::ComputePipeline pso,
                                         wgpu::BindGroupLayout groupLayout)
        : ComputePipeline(sharedContext)
        , fPipeline(std::move(pso))
        , fGroupLayout(std::move(groupLayout)) {}

void DawnComputePipeline::freeGpuData() { fPipeline = nullptr; }

}  // namespace skgpu::graphite