1 /*
2  * Copyright 2019 Google Inc.
3  *
4  * Use of this source code is governed by a BSD-style license that can be
5  * found in the LICENSE file.
6  */
7 
8 #include "src/gpu/ganesh/dawn/GrDawnGpu.h"
9 
10 #include "include/core/SkColorSpace.h"
11 #include "include/gpu/GrBackendSemaphore.h"
12 #include "include/gpu/GrBackendSurface.h"
13 #include "include/gpu/GrContextOptions.h"
14 #include "include/gpu/GrDirectContext.h"
15 #include "include/private/SkSLProgramKind.h"
16 #include "src/core/SkConvertPixels.h"
17 #include "src/gpu/ganesh/GrDataUtils.h"
18 #include "src/gpu/ganesh/GrDirectContextPriv.h"
19 #include "src/gpu/ganesh/GrGeometryProcessor.h"
20 #include "src/gpu/ganesh/GrGpuResourceCacheAccess.h"
21 #include "src/gpu/ganesh/GrPipeline.h"
22 #include "src/gpu/ganesh/GrRenderTarget.h"
23 #include "src/gpu/ganesh/GrSemaphore.h"
24 #include "src/gpu/ganesh/GrStencilSettings.h"
25 #include "src/gpu/ganesh/GrTexture.h"
26 #include "src/gpu/ganesh/GrThreadSafePipelineBuilder.h"
27 #include "src/gpu/ganesh/dawn/GrDawnAsyncWait.h"
28 #include "src/gpu/ganesh/dawn/GrDawnAttachment.h"
29 #include "src/gpu/ganesh/dawn/GrDawnBuffer.h"
30 #include "src/gpu/ganesh/dawn/GrDawnCaps.h"
31 #include "src/gpu/ganesh/dawn/GrDawnOpsRenderPass.h"
32 #include "src/gpu/ganesh/dawn/GrDawnProgramBuilder.h"
33 #include "src/gpu/ganesh/dawn/GrDawnRenderTarget.h"
34 #include "src/gpu/ganesh/dawn/GrDawnTexture.h"
35 #include "src/gpu/ganesh/dawn/GrDawnUtil.h"
36 #include "src/sksl/SkSLProgramSettings.h"
37 
38 #include "src/base/SkAutoMalloc.h"
39 #include "src/core/SkMipmap.h"
40 #include "src/sksl/SkSLCompiler.h"
41 
42 #if !defined(SK_BUILD_FOR_WIN)
43 #include <unistd.h>
44 #endif // !defined(SK_BUILD_FOR_WIN)
45 
46 static const int kMaxRenderPipelineEntries = 1024;
47 
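// Helpers that map Ganesh sampler-state enums onto their Dawn (WebGPU) equivalents;
// unsupported values assert in debug builds and fall back to a safe default.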
48 static wgpu::FilterMode to_dawn_filter_mode(GrSamplerState::Filter filter) {
49     switch (filter) {
50         case GrSamplerState::Filter::kNearest:
51             return wgpu::FilterMode::Nearest;
52         case GrSamplerState::Filter::kLinear:
53             return wgpu::FilterMode::Linear;
54         default:
55             SkASSERT(!"unsupported filter mode");
56             return wgpu::FilterMode::Nearest;
57     }
58 }
59 
60 static wgpu::FilterMode to_dawn_mipmap_mode(GrSamplerState::MipmapMode mode) {
61     switch (mode) {
62         case GrSamplerState::MipmapMode::kNone:
63             // Fall-through (Dawn does not have an equivalent for "None")
64         case GrSamplerState::MipmapMode::kNearest:
65             return wgpu::FilterMode::Nearest;
66         case GrSamplerState::MipmapMode::kLinear:
67             return wgpu::FilterMode::Linear;
68         default:
69             SkASSERT(!"unsupported filter mode");
70             return wgpu::FilterMode::Nearest;
71     }
72 }
73 
74 static wgpu::AddressMode to_dawn_address_mode(GrSamplerState::WrapMode wrapMode) {
75     switch (wrapMode) {
76         case GrSamplerState::WrapMode::kClamp:
77             return wgpu::AddressMode::ClampToEdge;
78         case GrSamplerState::WrapMode::kRepeat:
79             return wgpu::AddressMode::Repeat;
80         case GrSamplerState::WrapMode::kMirrorRepeat:
81             return wgpu::AddressMode::MirrorRepeat;
82         case GrSamplerState::WrapMode::kClampToBorder:
83             SkASSERT(!"unsupported address mode");
84     }
85     SkASSERT(!"unsupported address mode");
86     return wgpu::AddressMode::ClampToEdge;
87 }
88 
89 sk_sp<GrGpu> GrDawnGpu::Make(const wgpu::Device& device,
90                              const GrContextOptions& options, GrDirectContext* direct) {
91     if (!device) {
92         return nullptr;
93     }
94 
95     return sk_sp<GrGpu>(new GrDawnGpu(direct, options, device));
96 }
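// Usage sketch (not part of this file, and assuming the SK_DAWN build of Skia):
// clients normally reach this factory through GrDirectContext rather than calling
// it directly, e.g.
//
//     sk_sp<GrDirectContext> ctx = GrDirectContext::MakeDawn(device, options);
//
// where `device` is an existing wgpu::Device and `options` a GrContextOptions.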
97 
98 ////////////////////////////////////////////////////////////////////////////////
99 
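// PendingMapAsyncRequests counts outstanding GrDawnBuffer::mapAsync callbacks so that
// shutdown can block, via a GrDawnAsyncWait, until every submitted staging buffer has
// been handed back; addOne()/completeOne() bracket each request.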
100 GrDawnGpu::PendingMapAsyncRequests::PendingMapAsyncRequests(const wgpu::Device& device)
101         : wait_(device) {}
102 
103 void GrDawnGpu::PendingMapAsyncRequests::addOne() {
104     if (fCount == 0) {
105         wait_.reset();
106     }
107     fCount++;
108 }
109 
110 void GrDawnGpu::PendingMapAsyncRequests::completeOne() {
111     if (fCount == 1) {
112         wait_.signal();
113     }
114     if (fCount > 0) {
115         fCount--;
116     }
117 }
118 
119 void GrDawnGpu::PendingMapAsyncRequests::waitUntilDone() const {
120     if (fCount == 0) {
121         return;
122     }
123     wait_.busyWait();
124     SkASSERT(fCount == 0);
125 }
126 
127 GrDawnGpu::GrDawnGpu(GrDirectContext* direct,
128                      const GrContextOptions& options,
129                      const wgpu::Device& device)
130         : INHERITED(direct)
131         , fDevice(device)
132         , fQueue(device.GetQueue())
133         , fUniformRingBuffer(this, wgpu::BufferUsage::Uniform)
134         , fStagingBufferManager(this)
135         , fPendingMapAsyncRequests(device)
136         , fRenderPipelineCache(kMaxRenderPipelineEntries)
137         , fFinishCallbacks(this) {
138     this->initCapsAndCompiler(sk_make_sp<GrDawnCaps>(options));
139     device.SetUncapturedErrorCallback(
140             [](WGPUErrorType type, char const* message, void*) {
141                 SkDebugf("GrDawnGpu: ERROR type %u, msg: %s", type, message);
142             },
143             nullptr);
144 }
145 
146 GrDawnGpu::~GrDawnGpu() { this->finishOutstandingGpuWork(); }
147 
148 void GrDawnGpu::disconnect(DisconnectType type) {
149     if (DisconnectType::kCleanup == type) {
150         this->finishOutstandingGpuWork();
151     }
152     fStagingBufferManager.reset();
153     fQueue = nullptr;
154     fDevice = nullptr;
155     INHERITED::disconnect(type);
156 }
157 
158 GrThreadSafePipelineBuilder* GrDawnGpu::pipelineBuilder() {
159     return nullptr;
160 }
161 
162 sk_sp<GrThreadSafePipelineBuilder> GrDawnGpu::refPipelineBuilder() {
163     return nullptr;
164 }
165 
166 ///////////////////////////////////////////////////////////////////////////////
167 
168 GrOpsRenderPass* GrDawnGpu::onGetOpsRenderPass(
169         GrRenderTarget* rt,
170         bool /*useMSAASurface*/,
171         GrAttachment*,
172         GrSurfaceOrigin origin,
173         const SkIRect& bounds,
174         const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
175         const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo,
176         const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
177         GrXferBarrierFlags renderPassXferBarriers) {
178     fOpsRenderPass.reset(new GrDawnOpsRenderPass(this, rt, origin, colorInfo, stencilInfo));
179     return fOpsRenderPass.get();
180 }
181 
182 ///////////////////////////////////////////////////////////////////////////////
183 sk_sp<GrGpuBuffer> GrDawnGpu::onCreateBuffer(size_t size,
184                                              GrGpuBufferType type,
185                                              GrAccessPattern accessPattern) {
186     return GrDawnBuffer::Make(this, size, type, accessPattern,
187                               /*label=*/"DawnGpu_GetOpsRenderPass");
188 }
189 
190 ////////////////////////////////////////////////////////////////////////////////
191 bool GrDawnGpu::onWritePixels(GrSurface* surface,
192                               SkIRect rect,
193                               GrColorType surfaceColorType,
194                               GrColorType srcColorType,
195                               const GrMipLevel texels[],
196                               int mipLevelCount,
197                               bool prepForTexSampling) {
198     GrDawnTexture* texture = static_cast<GrDawnTexture*>(surface->asTexture());
199     if (!texture) {
200         return false;
201     }
202     this->uploadTextureData(srcColorType, texels, mipLevelCount, rect, texture->texture());
203     if (mipLevelCount < texture->maxMipmapLevel() + 1) {
204         texture->markMipmapsDirty();
205     }
206     return true;
207 }
208 
209 bool GrDawnGpu::onTransferFromBufferToBuffer(sk_sp<GrGpuBuffer> src,
210                                              size_t srcOffset,
211                                              sk_sp<GrGpuBuffer> dst,
212                                              size_t dstOffset,
213                                              size_t size) {
214     // skbug.com/13453
215     SkASSERT(!"unimplemented");
216     return false;
217 }
218 
219 bool GrDawnGpu::onTransferPixelsTo(GrTexture* texture,
220                                    SkIRect rect,
221                                    GrColorType textureColorType,
222                                    GrColorType bufferColorType,
223                                    sk_sp<GrGpuBuffer> transferBuffer,
224                                    size_t bufferOffset,
225                                    size_t rowBytes) {
226     // skbug.com/13453
227     SkASSERT(!"unimplemented");
228     return false;
229 }
230 
231 bool GrDawnGpu::onTransferPixelsFrom(GrSurface* surface,
232                                      SkIRect rect,
233                                      GrColorType surfaceColorType,
234                                      GrColorType bufferColorType,
235                                      sk_sp<GrGpuBuffer> transferBuffer,
236                                      size_t offset) {
237     // skbug.com/13453
238     SkASSERT(!"unimplemented");
239     return false;
240 }
241 
242 ////////////////////////////////////////////////////////////////////////////////
243 sk_sp<GrTexture> GrDawnGpu::onCreateTexture(SkISize dimensions,
244                                             const GrBackendFormat& backendFormat,
245                                             GrRenderable renderable,
246                                             int renderTargetSampleCnt,
247                                             skgpu::Budgeted budgeted,
248                                             GrProtected,
249                                             int mipLevelCount,
250                                             uint32_t levelClearMask,
251                                             std::string_view label) {
252     if (levelClearMask) {
253         return nullptr;
254     }
255 
256     wgpu::TextureFormat format;
257     if (!backendFormat.asDawnFormat(&format)) {
258         return nullptr;
259     }
260 
261     GrMipmapStatus mipmapStatus =
262         mipLevelCount > 1 ? GrMipmapStatus::kDirty : GrMipmapStatus::kNotAllocated;
263 
264     return GrDawnTexture::Make(this, dimensions, format, renderable, renderTargetSampleCnt,
265                                budgeted, mipLevelCount, mipmapStatus, label);
266 }
267 
268 sk_sp<GrTexture> GrDawnGpu::onCreateCompressedTexture(SkISize dimensions,
269                                                       const GrBackendFormat&,
270                                                       skgpu::Budgeted,
271                                                       GrMipmapped,
272                                                       GrProtected,
273                                                       const void* data,
274                                                       size_t dataSize) {
275     SkASSERT(!"unimplemented");
276     return nullptr;
277 }
278 
279 sk_sp<GrTexture> GrDawnGpu::onWrapBackendTexture(const GrBackendTexture& backendTex,
280                                                  GrWrapOwnership ownership,
281                                                  GrWrapCacheable cacheable,
282                                                  GrIOType ioType) {
283     GrDawnTextureInfo info;
284     if (!backendTex.getDawnTextureInfo(&info)) {
285         return nullptr;
286     }
287 
288     SkISize dimensions = { backendTex.width(), backendTex.height() };
289     return GrDawnTexture::MakeWrapped(this, dimensions, GrRenderable::kNo, 1, cacheable, ioType,
290                                       info, backendTex.getLabel());
291 }
292 
293 sk_sp<GrTexture> GrDawnGpu::onWrapCompressedBackendTexture(const GrBackendTexture& backendTex,
294                                                            GrWrapOwnership ownership,
295                                                            GrWrapCacheable cacheable) {
296     return nullptr;
297 }
298 
299 sk_sp<GrTexture> GrDawnGpu::onWrapRenderableBackendTexture(const GrBackendTexture& tex,
300                                                            int sampleCnt,
301                                                            GrWrapOwnership,
302                                                            GrWrapCacheable cacheable) {
303     GrDawnTextureInfo info;
304     if (!tex.getDawnTextureInfo(&info) || !info.fTexture) {
305         return nullptr;
306     }
307 
308     SkISize dimensions = { tex.width(), tex.height() };
309     sampleCnt = this->caps()->getRenderTargetSampleCount(sampleCnt, tex.getBackendFormat());
310     if (sampleCnt < 1) {
311         return nullptr;
312     }
313 
314     sk_sp<GrTexture> result = GrDawnTexture::MakeWrapped(this, dimensions, GrRenderable::kYes,
315                                                          sampleCnt, cacheable, kRW_GrIOType, info,
316                                                          tex.getLabel());
317     result->markMipmapsDirty();
318     return result;
319 }
320 
321 sk_sp<GrRenderTarget> GrDawnGpu::onWrapBackendRenderTarget(const GrBackendRenderTarget& rt) {
322     GrDawnRenderTargetInfo info;
323     if (!rt.getDawnRenderTargetInfo(&info) || !info.fTextureView) {
324         return nullptr;
325     }
326 
327     SkISize dimensions = { rt.width(), rt.height() };
328     int sampleCnt = 1;
329     return GrDawnRenderTarget::MakeWrapped(
330             this, dimensions, sampleCnt, info, /*label=*/"DawnGpu_WrapBackendRenderTarget");
331 }
332 
333 sk_sp<GrAttachment> GrDawnGpu::makeStencilAttachment(const GrBackendFormat& /*colorFormat*/,
334                                                      SkISize dimensions, int numStencilSamples) {
335     fStats.incStencilAttachmentCreates();
336     return GrDawnAttachment::MakeStencil(this, dimensions, numStencilSamples);
337 }
338 
339 GrBackendTexture GrDawnGpu::onCreateBackendTexture(SkISize dimensions,
340                                                    const GrBackendFormat& backendFormat,
341                                                    GrRenderable renderable,
342                                                    GrMipmapped mipmapped,
343                                                    GrProtected isProtected,
344                                                    std::string_view label) {
345     wgpu::TextureFormat format;
346     if (!backendFormat.asDawnFormat(&format)) {
347         return GrBackendTexture();
348     }
349 
350     wgpu::TextureDescriptor desc;
351     desc.usage = wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::CopySrc |
352                  wgpu::TextureUsage::CopyDst;
353 
354     if (GrRenderable::kYes == renderable) {
355         desc.usage |= wgpu::TextureUsage::RenderAttachment;
356     }
357 
358     int numMipLevels = 1;
359     if (mipmapped == GrMipmapped::kYes) {
360         numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1;
361     }
362 
363     desc.size.width = dimensions.width();
364     desc.size.height = dimensions.height();
365     desc.size.depthOrArrayLayers = 1;
366     desc.format = format;
367     desc.mipLevelCount = numMipLevels;
368 
369     wgpu::Texture tex = this->device().CreateTexture(&desc);
370 
371     GrDawnTextureInfo info;
372     info.fTexture = tex;
373     info.fFormat = desc.format;
374     info.fLevelCount = desc.mipLevelCount;
375     return GrBackendTexture(dimensions.width(), dimensions.height(), info);
376 }
377 
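// Uploads the given mip levels one at a time: each level is staged through a mapped
// staging-buffer slice whose rows are padded with GrDawnRoundRowBytes (to satisfy
// Dawn's bytesPerRow alignment) and then transferred with CopyBufferToTexture.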
378 void GrDawnGpu::uploadTextureData(GrColorType srcColorType, const GrMipLevel texels[],
379                                   int mipLevelCount, const SkIRect& rect,
380                                   wgpu::Texture texture) {
381     uint32_t x = rect.x();
382     uint32_t y = rect.y();
383     uint32_t width = rect.width();
384     uint32_t height = rect.height();
385 
386     for (int i = 0; i < mipLevelCount; i++) {
387         const void* src = texels[i].fPixels;
388         size_t srcRowBytes = texels[i].fRowBytes;
389         SkColorType colorType = GrColorTypeToSkColorType(srcColorType);
390         size_t trimRowBytes = width * SkColorTypeBytesPerPixel(colorType);
391         size_t dstRowBytes = GrDawnRoundRowBytes(trimRowBytes);
392         size_t size = dstRowBytes * height;
393         GrStagingBufferManager::Slice slice =
394                 this->stagingBufferManager()->allocateStagingBufferSlice(size);
395         SkRectMemcpy(slice.fOffsetMapPtr, dstRowBytes, src, srcRowBytes, trimRowBytes, height);
396 
397         wgpu::ImageCopyBuffer srcBuffer = {};
398         srcBuffer.buffer = static_cast<GrDawnBuffer*>(slice.fBuffer)->get();
399         srcBuffer.layout.offset = slice.fOffset;
400         srcBuffer.layout.bytesPerRow = dstRowBytes;
401         srcBuffer.layout.rowsPerImage = height;
402 
403         wgpu::ImageCopyTexture dstTexture;
404         dstTexture.texture = texture;
405         dstTexture.mipLevel = i;
406         dstTexture.origin = {x, y, 0};
407 
408         wgpu::Extent3D copySize = {width, height, 1};
409         this->getCopyEncoder().CopyBufferToTexture(&srcBuffer, &dstTexture, &copySize);
410         x /= 2;
411         y /= 2;
412         width = std::max(1u, width / 2);
413         height = std::max(1u, height / 2);
414     }
415 }
416 
417 bool GrDawnGpu::onClearBackendTexture(const GrBackendTexture& backendTexture,
418                                       sk_sp<skgpu::RefCntedCallback> finishedCallback,
419                                       std::array<float, 4> color) {
420     GrDawnTextureInfo info;
421     SkAssertResult(backendTexture.getDawnTextureInfo(&info));
422 
423     GrColorType colorType;
424     if (!GrDawnFormatToGrColorType(info.fFormat, &colorType)) {
425         return false;
426     }
427 
428     size_t bpp = GrDawnBytesPerBlock(info.fFormat);
429     size_t baseLayerSize = bpp * backendTexture.width() * backendTexture.height();
430     SkAutoMalloc defaultStorage(baseLayerSize);
431     GrImageInfo imageInfo(colorType, kUnpremul_SkAlphaType, nullptr, backendTexture.dimensions());
432     GrClearImage(imageInfo, defaultStorage.get(), bpp * backendTexture.width(), color);
433 
434     wgpu::Device device = this->device();
435     wgpu::CommandEncoder copyEncoder = this->getCopyEncoder();
436     int w = backendTexture.width(), h = backendTexture.height();
437     for (uint32_t i = 0; i < info.fLevelCount; i++) {
438         size_t origRowBytes = bpp * w;
439         size_t rowBytes = GrDawnRoundRowBytes(origRowBytes);
440         size_t size = rowBytes * h;
441         GrStagingBufferManager::Slice stagingBuffer =
442                 this->stagingBufferManager()->allocateStagingBufferSlice(size);
443         if (rowBytes == origRowBytes) {
444             memcpy(stagingBuffer.fOffsetMapPtr, defaultStorage.get(), size);
445         } else {
446             const char* src = static_cast<const char*>(defaultStorage.get());
447             char* dst = static_cast<char*>(stagingBuffer.fOffsetMapPtr);
448             for (int row = 0; row < h; row++) {
449                 memcpy(dst, src, origRowBytes);
450                 dst += rowBytes;
451                 src += origRowBytes;
452             }
453         }
454         wgpu::ImageCopyBuffer srcBuffer = {};
455         srcBuffer.buffer = static_cast<GrDawnBuffer*>(stagingBuffer.fBuffer)->get();
456         srcBuffer.layout.offset = stagingBuffer.fOffset;
457         srcBuffer.layout.bytesPerRow = rowBytes;
458         srcBuffer.layout.rowsPerImage = h;
459         wgpu::ImageCopyTexture dstTexture;
460         dstTexture.texture = info.fTexture;
461         dstTexture.mipLevel = i;
462         dstTexture.origin = {0, 0, 0};
463         wgpu::Extent3D copySize = {(uint32_t)w, (uint32_t)h, 1};
464         copyEncoder.CopyBufferToTexture(&srcBuffer, &dstTexture, &copySize);
465         w = std::max(1, w / 2);
466         h = std::max(1, h / 2);
467     }
468     return true;
469 }
470 
471 GrBackendTexture GrDawnGpu::onCreateCompressedBackendTexture(
472         SkISize dimensions, const GrBackendFormat&, GrMipmapped, GrProtected) {
473     return {};
474 }
475 
476 bool GrDawnGpu::onUpdateCompressedBackendTexture(const GrBackendTexture&,
477                                                  sk_sp<skgpu::RefCntedCallback> finishedCallback,
478                                                  const void* data,
479                                                  size_t size) {
480     return false;
481 }
482 
483 void GrDawnGpu::deleteBackendTexture(const GrBackendTexture& tex) {
484     GrDawnTextureInfo info;
485     if (tex.getDawnTextureInfo(&info)) {
486         info.fTexture = nullptr;
487     }
488 }
489 
490 bool GrDawnGpu::compile(const GrProgramDesc&, const GrProgramInfo&) {
491     return false;
492 }
493 
494 #if GR_TEST_UTILS
495 bool GrDawnGpu::isTestingOnlyBackendTexture(const GrBackendTexture& tex) const {
496     GrDawnTextureInfo info;
497     if (!tex.getDawnTextureInfo(&info)) {
498         return false;
499     }
500 
501     return info.fTexture.Get();
502 }
503 
504 GrBackendRenderTarget GrDawnGpu::createTestingOnlyBackendRenderTarget(SkISize dimensions,
505                                                                       GrColorType colorType,
506                                                                       int sampleCnt,
507                                                                       GrProtected isProtected) {
508     if (dimensions.width()  > this->caps()->maxTextureSize() ||
509         dimensions.height() > this->caps()->maxTextureSize()) {
510         return {};
511     }
512 
513     // We don't support MSAA in this backend yet.
514     if (sampleCnt != 1) {
515         return {};
516     }
517 
518     if (isProtected == GrProtected::kYes) {
519         return {};
520     }
521 
522     wgpu::TextureFormat format;
523     if (!GrColorTypeToDawnFormat(colorType, &format)) {
524         return {};
525     }
526 
527     wgpu::TextureDescriptor desc;
528     desc.usage =
529         wgpu::TextureUsage::CopySrc |
530         wgpu::TextureUsage::RenderAttachment;
531 
532     desc.size.width = dimensions.width();
533     desc.size.height = dimensions.height();
534     desc.size.depthOrArrayLayers = 1;
535     desc.format = format;
536 
537     wgpu::Texture tex = this->device().CreateTexture(&desc);
538 
539     GrDawnRenderTargetInfo info;
540     info.fTextureView = tex.CreateView();
541     info.fFormat = desc.format;
542     info.fLevelCount = desc.mipLevelCount;
543 
544     return GrBackendRenderTarget(dimensions.width(), dimensions.height(), 1, 0, info);
545 }
546 
547 void GrDawnGpu::deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget& rt) {
548     GrDawnRenderTargetInfo info;
549     if (rt.getDawnRenderTargetInfo(&info)) {
550         info.fTextureView = nullptr;
551     }
552 }
553 
554 #endif
555 
556 void GrDawnGpu::addFinishedProc(GrGpuFinishedProc finishedProc,
557                                 GrGpuFinishedContext finishedContext) {
558     fFinishCallbacks.add(finishedProc, finishedContext);
559 }
560 
561 void GrDawnGpu::takeOwnershipOfBuffer(sk_sp<GrGpuBuffer> buffer) {
562     fSubmittedStagingBuffers.push_back(std::move(buffer));
563 }
564 
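// Flushes pending copy work, submits every recorded command buffer, and arms a single
// OnSubmittedWorkDone callback; when syncCpu is true, a fence signaled by that callback
// is busy-waited on before the finished-proc callbacks run.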
565 bool GrDawnGpu::onSubmitToGpu(bool syncCpu) {
566     this->flushCopyEncoder();
567 
568     if (!fCommandBuffers.empty()) {
569         fQueue.Submit(fCommandBuffers.size(), &fCommandBuffers.front());
570         fCommandBuffers.clear();
571     }
572 
573     // Schedule the queue done callback if it hasn't been scheduled already and if we just submitted
574     // a new batch of recorded commands. If a callback was already registered in a prior call to
575     // onSubmitToGpu then it will include the commands we just submitted.
576     if (!fSubmittedWorkDoneCallbackPending) {
577         auto callback = [](WGPUQueueWorkDoneStatus status, void* userData) {
578             static_cast<GrDawnGpu*>(userData)->onSubmittedWorkDone(status);
579         };
580         fDevice.GetQueue().OnSubmittedWorkDone(0u, callback, this);
581         fSubmittedWorkDoneCallbackPending = true;
582     }
583 
584     this->mapPendingStagingBuffers();
585     if (syncCpu) {
586         // If no callback was scheduled then there is no pending work and we don't need to spin on a
587         // fence.
588         if (fSubmittedWorkDoneCallbackPending) {
589             GrDawnAsyncWait* fence = this->createFence();
590             fence->busyWait();
591             this->destroyFence(fence);
592         }
593         fFinishCallbacks.callAll(true);
594     }
595 
596     return true;
597 }
598 
599 void GrDawnGpu::onSubmittedWorkDone(WGPUQueueWorkDoneStatus status) {
600     fSubmittedWorkDoneCallbackPending = false;
601     fQueueFences.foreach([](GrDawnAsyncWait* fence) {
602         fence->signal();
603     });
604 }
605 
606 void GrDawnGpu::mapPendingStagingBuffers() {
607     // Request to asynchronously map the submitted staging buffers. Dawn will ensure that these
608     // buffers are not mapped until the pending submitted queue work is done at which point they
609     // are free for re-use.
610     for (unsigned i = 0; i < fSubmittedStagingBuffers.size(); i++) {
611         fPendingMapAsyncRequests.addOne();
612         sk_sp<GrGpuBuffer> buffer = std::move(fSubmittedStagingBuffers[i]);
613         static_cast<GrDawnBuffer*>(buffer.get())
614                 ->mapAsync(
615                         // We capture `buffer` into the callback which ensures that it stays alive
616                         // until mapAsync completes.
617                         [this, buffer = std::move(buffer)](bool success) {
618                             fPendingMapAsyncRequests.completeOne();
619                             if (!success) {
620                                 SkDebugf(
621                                         "Failed to map staging buffer before making it available "
622                                         "again");
623                             }
624                             // When this callback returns, the captured `buffer` will be dropped and
625                             // returned back to its backing resource pool.
626                         });
627     }
628     fSubmittedStagingBuffers.clear();
629 }
630 
631 GrDawnAsyncWait* GrDawnGpu::createFence() {
632     auto* fence = new GrDawnAsyncWait(fDevice);
633     fQueueFences.add(fence);
634     return fence;
635 }
636 
637 void GrDawnGpu::destroyFence(GrDawnAsyncWait* fence) {
638     fQueueFences.remove(fence);
639     delete fence;
640 }
641 
642 static wgpu::Texture get_dawn_texture_from_surface(GrSurface* src) {
643     if (auto t = static_cast<GrDawnTexture*>(src->asTexture())) {
644         return t->texture();
645     } else {
646         return nullptr;
647     }
648 }
649 
650 bool GrDawnGpu::onCopySurface(GrSurface* dst, const SkIRect& dstRect,
651                               GrSurface* src, const SkIRect& srcRect,
652                               GrSamplerState::Filter) {
653     wgpu::Texture srcTexture = get_dawn_texture_from_surface(src);
654     wgpu::Texture dstTexture = get_dawn_texture_from_surface(dst);
655     if (!srcTexture || !dstTexture) {
656         return false;
657     }
658     if (srcRect.size() != dstRect.size()) {
659         return false;
660     }
661 
662     uint32_t width = srcRect.width(), height = srcRect.height();
663 
664     wgpu::ImageCopyTexture srcTextureView, dstTextureView;
665     srcTextureView.texture = srcTexture;
666     srcTextureView.origin = {(uint32_t) srcRect.x(), (uint32_t) srcRect.y(), 0};
667     dstTextureView.texture = dstTexture;
668     dstTextureView.origin = {(uint32_t) dstRect.x(), (uint32_t) dstRect.y(), 0};
669 
670     wgpu::Extent3D copySize = {width, height, 1};
671     this->getCopyEncoder().CopyTextureToTexture(&srcTextureView, &dstTextureView, &copySize);
672     return true;
673 }
674 
675 bool GrDawnGpu::onReadPixels(GrSurface* surface,
676                              SkIRect rect,
677                              GrColorType surfaceColorType,
678                              GrColorType dstColorType,
679                              void* buffer,
680                              size_t rowBytes) {
681     wgpu::Texture tex = get_dawn_texture_from_surface(surface);
682 
683     if (!tex || 0 == rowBytes) {
684         return false;
685     }
686     size_t origRowBytes = rowBytes;
687     int origSizeInBytes = origRowBytes*rect.height();
688     rowBytes = GrDawnRoundRowBytes(rowBytes);
689     int sizeInBytes = rowBytes*rect.height();
690 
691     sk_sp<GrDawnBuffer> dawnBuffer = GrDawnBuffer::Make(this,
692                                                         sizeInBytes,
693                                                         GrGpuBufferType::kXferGpuToCpu,
694                                                         kStatic_GrAccessPattern,
695                                                         "onReadPixels");
696     if (!dawnBuffer) {
697         SkDebugf("onReadPixels: failed to create GPU buffer");
698         return false;
699     }
700 
701     wgpu::ImageCopyTexture srcTexture;
702     srcTexture.texture = tex;
703     srcTexture.origin = {(uint32_t) rect.left(), (uint32_t) rect.top(), 0};
704 
705     wgpu::ImageCopyBuffer dstBuffer = {};
706     dstBuffer.buffer = dawnBuffer->get();
707     dstBuffer.layout.offset = 0;
708     dstBuffer.layout.bytesPerRow = rowBytes;
709     dstBuffer.layout.rowsPerImage = rect.height();
710 
711     wgpu::Extent3D copySize = {(uint32_t) rect.width(), (uint32_t) rect.height(), 1};
712     this->getCopyEncoder().CopyTextureToBuffer(&srcTexture, &dstBuffer, &copySize);
713     this->submitToGpu(true);
714 
715     const void* readPixelsPtr = dawnBuffer->map();
716     if (!readPixelsPtr) {
717         SkDebugf("onReadPixels: failed to map GPU buffer");
718         return false;
719     }
720 
721     if (rowBytes == origRowBytes) {
722         memcpy(buffer, readPixelsPtr, origSizeInBytes);
723     } else {
724         const char* src = static_cast<const char*>(readPixelsPtr);
725         char* dst = static_cast<char*>(buffer);
726         for (int row = 0; row < rect.height(); row++) {
727             memcpy(dst, src, origRowBytes);
728             dst += origRowBytes;
729             src += rowBytes;
730         }
731     }
732 
733     dawnBuffer->unmap();
734     return true;
735 }
736 
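// Regenerates mip levels with a small internal render pipeline: each pass draws a
// bilinear downsample of the previous level into a half-sized temporary texture and
// then copies the result back into the next mip level of the source texture.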
737 bool GrDawnGpu::onRegenerateMipMapLevels(GrTexture* tex) {
738     this->flushCopyEncoder();
739     GrDawnTexture* src = static_cast<GrDawnTexture*>(tex);
740     int srcWidth = tex->width();
741     int srcHeight = tex->height();
742 
743     // SkMipmap doesn't include the base level in the level count so we have to add 1
744     uint32_t levelCount = SkMipmap::ComputeLevelCount(tex->width(), tex->height()) + 1;
745 
746     // Create a temporary texture for mipmap generation, then copy to source.
747     // We have to do this even for renderable textures, since GrDawnRenderTarget currently only
748     // contains a view, not a texture.
749     wgpu::TextureDescriptor texDesc;
750     texDesc.usage = wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::CopySrc |
751                     wgpu::TextureUsage::RenderAttachment;
752     texDesc.size.width = (tex->width() + 1) / 2;
753     texDesc.size.height = (tex->height() + 1) / 2;
754     texDesc.size.depthOrArrayLayers = 1;
755     texDesc.mipLevelCount = levelCount - 1;
756     texDesc.format = src->format();
757     wgpu::Texture dstTexture = fDevice.CreateTexture(&texDesc);
758 
759     const char* vs =
760         "layout(spirv, location = 0) out float2 texCoord;"
761         "float2 positions[4] = float2[4](float2(-1.0, 1.0),"
762                                         "float2(1.0, 1.0),"
763                                         "float2(-1.0, -1.0),"
764                                         "float2(1.0, -1.0));"
765         "float2 texCoords[4] = float2[4](float2(0.0, 0.0),"
766                                         "float2(1.0, 0.0),"
767                                         "float2(0.0, 1.0),"
768                                         "float2(1.0, 1.0));"
769         "void main() {"
770             "sk_Position = float4(positions[sk_VertexID], 0.0, 1.0);"
771             "texCoord = texCoords[sk_VertexID];"
772         "}";
773     std::string vsSPIRV = this->SkSLToSPIRV(vs,
774                                             SkSL::ProgramKind::kVertex,
775                                             /*rtFlipOffset*/ 0,
776                                             nullptr);
777 
778     const char* fs =
779         "layout(spirv, set = 0, binding = 0) uniform sampler samp;"
780         "layout(spirv, set = 0, binding = 1) uniform texture2D tex;"
781         "layout(location = 0) in float2 texCoord;"
782         "void main() {"
783             "sk_FragColor = sample(makeSampler2D(tex, samp), texCoord);"
784         "}";
785     std::string fsSPIRV = this->SkSLToSPIRV(fs,
786                                             SkSL::ProgramKind::kFragment,
787                                             /*rtFlipOffset=*/ 0,
788                                             nullptr);
789 
790     wgpu::VertexState vertexState;
791     vertexState.module = this->createShaderModule(vsSPIRV);
792     vertexState.entryPoint = "main";
793     vertexState.bufferCount = 0;
794 
795     wgpu::ColorTargetState colorTargetState;
796     colorTargetState.format = static_cast<GrDawnTexture*>(tex)->format();
797 
798     wgpu::FragmentState fragmentState;
799     fragmentState.module = this->createShaderModule(fsSPIRV);
800     fragmentState.entryPoint = "main";
801     fragmentState.targetCount = 1;
802     fragmentState.targets = &colorTargetState;
803 
804     wgpu::RenderPipelineDescriptor renderPipelineDesc;
805     renderPipelineDesc.vertex = vertexState;
806     renderPipelineDesc.primitive.topology = wgpu::PrimitiveTopology::TriangleStrip;
807     renderPipelineDesc.primitive.stripIndexFormat = wgpu::IndexFormat::Uint16;
808     renderPipelineDesc.fragment = &fragmentState;
809     wgpu::RenderPipeline pipeline = fDevice.CreateRenderPipeline(&renderPipelineDesc);
810 
811     wgpu::BindGroupLayout bgl = pipeline.GetBindGroupLayout(0);
812     wgpu::TextureViewDescriptor srcViewDesc;
813     srcViewDesc.mipLevelCount = 1;
814     wgpu::TextureView srcView = src->texture().CreateView(&srcViewDesc);
815     wgpu::SamplerDescriptor samplerDesc;
816     samplerDesc.minFilter = wgpu::FilterMode::Linear;
817     wgpu::Sampler sampler = fDevice.CreateSampler(&samplerDesc);
818     wgpu::CommandEncoder commandEncoder = fDevice.CreateCommandEncoder();
819     for (uint32_t mipLevel = 0; mipLevel < texDesc.mipLevelCount; mipLevel++) {
820         int dstWidth = std::max(1, srcWidth / 2);
821         int dstHeight = std::max(1, srcHeight / 2);
822         wgpu::TextureViewDescriptor dstViewDesc;
823         dstViewDesc.format = static_cast<GrDawnTexture*>(tex)->format();
824         dstViewDesc.dimension = wgpu::TextureViewDimension::e2D;
825         dstViewDesc.baseMipLevel = mipLevel;
826         dstViewDesc.mipLevelCount = 1;
827         wgpu::TextureView dstView = dstTexture.CreateView(&dstViewDesc);
828         wgpu::BindGroupEntry bge[2];
829         bge[0].binding = 0;
830         bge[0].sampler = sampler;
831         bge[1].binding = 1;
832         bge[1].textureView = srcView;
833         wgpu::BindGroupDescriptor bgDesc;
834         bgDesc.layout = bgl;
835         bgDesc.entryCount = 2;
836         bgDesc.entries = bge;
837         wgpu::BindGroup bindGroup = fDevice.CreateBindGroup(&bgDesc);
838         wgpu::RenderPassColorAttachment colorAttachment;
839         colorAttachment.view = dstView;
840         colorAttachment.clearValue = {0.0f, 0.0f, 0.0f, 0.0f};
841         colorAttachment.loadOp = wgpu::LoadOp::Load;
842         colorAttachment.storeOp = wgpu::StoreOp::Store;
843         wgpu::RenderPassColorAttachment* colorAttachments = { &colorAttachment };
844         wgpu::RenderPassDescriptor renderPassDesc;
845         renderPassDesc.colorAttachmentCount = 1;
846         renderPassDesc.colorAttachments = colorAttachments;
847         wgpu::RenderPassEncoder rpe = commandEncoder.BeginRenderPass(&renderPassDesc);
848         rpe.SetPipeline(pipeline);
849         rpe.SetBindGroup(0, bindGroup);
850         rpe.Draw(4, 1, 0, 0);
851         rpe.End();
852 
853         wgpu::Extent3D copySize = {(uint32_t)dstWidth, (uint32_t)dstHeight, 1};
854         wgpu::ImageCopyTexture srcCopyView;
855         srcCopyView.texture = dstTexture;
856         srcCopyView.mipLevel = mipLevel;
857         wgpu::ImageCopyTexture dstCopyView;
858         dstCopyView.mipLevel = mipLevel + 1;
859         dstCopyView.texture = src->texture();
860         commandEncoder.CopyTextureToTexture(&srcCopyView, &dstCopyView, &copySize);
861 
862         srcHeight = dstHeight;
863         srcWidth = dstWidth;
864         srcView = dstView;
865     }
866     fCommandBuffers.push_back(commandEncoder.Finish());
867     return true;
868 }
869 
870 void GrDawnGpu::submit(GrOpsRenderPass* renderPass) {
871     this->flushCopyEncoder();
872     static_cast<GrDawnOpsRenderPass*>(renderPass)->submit();
873 }
874 
875 GrFence SK_WARN_UNUSED_RESULT GrDawnGpu::insertFence() {
876     return reinterpret_cast<GrFence>(this->createFence());
877 }
878 
879 bool GrDawnGpu::waitFence(GrFence fence) {
880     return reinterpret_cast<const GrDawnAsyncWait*>(fence)->yieldAndCheck();
881 }
882 
883 void GrDawnGpu::deleteFence(GrFence fence) {
884     this->destroyFence(reinterpret_cast<GrDawnAsyncWait*>(fence));
885 }
886 
887 std::unique_ptr<GrSemaphore> SK_WARN_UNUSED_RESULT GrDawnGpu::makeSemaphore(bool isOwned) {
888     SkASSERT(!"unimplemented");
889     return nullptr;
890 }
891 
892 std::unique_ptr<GrSemaphore> GrDawnGpu::wrapBackendSemaphore(const GrBackendSemaphore& /* sema */,
893                                                              GrSemaphoreWrapType /* wrapType */,
894                                                              GrWrapOwnership /* ownership */) {
895     SkASSERT(!"unimplemented");
896     return nullptr;
897 }
898 
899 void GrDawnGpu::insertSemaphore(GrSemaphore* semaphore) {
900     SkASSERT(!"unimplemented");
901 }
902 
903 void GrDawnGpu::waitSemaphore(GrSemaphore* semaphore) {
904     SkASSERT(!"unimplemented");
905 }
906 
907 void GrDawnGpu::checkFinishProcs() {
908     fFinishCallbacks.check();
909 }
910 
911 void GrDawnGpu::finishOutstandingGpuWork() {
912     // If a callback is pending then any fence added here is guaranteed to get signaled when the
913     // callback eventually runs.
914     if (fSubmittedWorkDoneCallbackPending) {
915         GrDawnAsyncWait* fence = this->createFence();
916         fence->busyWait();
917         this->destroyFence(fence);
918     }
919 
920     // Make sure all pending mapAsync requests on staging buffers are complete before shutting down.
921     fPendingMapAsyncRequests.waitUntilDone();
922 }
923 
924 std::unique_ptr<GrSemaphore> GrDawnGpu::prepareTextureForCrossContextUsage(GrTexture* texture) {
925     SkASSERT(!"unimplemented");
926     return nullptr;
927 }
928 
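// Programs are cached by GrProgramDesc (bounded by kMaxRenderPipelineEntries); on a
// cache miss a new GrDawnProgram is built with GrDawnProgramBuilder and inserted.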
929 sk_sp<GrDawnProgram> GrDawnGpu::getOrCreateRenderPipeline(
930         GrRenderTarget* rt,
931         const GrProgramInfo& programInfo) {
932     GrProgramDesc desc = this->caps()->makeDesc(rt, programInfo);
933     if (!desc.isValid()) {
934         return nullptr;
935     }
936 
937     if (sk_sp<GrDawnProgram>* program = fRenderPipelineCache.find(desc)) {
938         return *program;
939     }
940 
941     wgpu::TextureFormat colorFormat;
942     SkAssertResult(programInfo.backendFormat().asDawnFormat(&colorFormat));
943 
944     wgpu::TextureFormat stencilFormat = wgpu::TextureFormat::Depth24PlusStencil8;
945     bool hasDepthStencil = rt->getStencilAttachment() != nullptr;
946 
947     sk_sp<GrDawnProgram> program = GrDawnProgramBuilder::Build(
948         this, rt, programInfo, colorFormat,
949         hasDepthStencil, stencilFormat, &desc);
950     fRenderPipelineCache.insert(desc, program);
951     return program;
952 }
953 
954 wgpu::Sampler GrDawnGpu::getOrCreateSampler(GrSamplerState samplerState) {
955     auto i = fSamplers.find(samplerState);
956     if (i != fSamplers.end()) {
957         return i->second;
958     }
959     wgpu::SamplerDescriptor desc;
960     desc.addressModeU = to_dawn_address_mode(samplerState.wrapModeX());
961     desc.addressModeV = to_dawn_address_mode(samplerState.wrapModeY());
962     desc.addressModeW = wgpu::AddressMode::ClampToEdge;
963     desc.maxAnisotropy = samplerState.maxAniso();
964     if (samplerState.isAniso()) {
965         // WebGPU requires these to be linear when maxAnisotropy is > 1.
966         desc.magFilter = desc.minFilter = desc.mipmapFilter = wgpu::FilterMode::Linear;
967     } else {
968         desc.magFilter = desc.minFilter = to_dawn_filter_mode(samplerState.filter());
969         desc.mipmapFilter = to_dawn_mipmap_mode(samplerState.mipmapMode());
970     }
971     wgpu::Sampler sampler = device().CreateSampler(&desc);
972     fSamplers.insert(std::pair<GrSamplerState, wgpu::Sampler>(samplerState, sampler));
973     return sampler;
974 }
975 
976 GrDawnRingBuffer::Slice GrDawnGpu::allocateUniformRingBufferSlice(int size) {
977     return fUniformRingBuffer.allocate(size);
978 }
979 
980 void GrDawnGpu::appendCommandBuffer(wgpu::CommandBuffer commandBuffer) {
981     if (commandBuffer) {
982         fCommandBuffers.push_back(commandBuffer);
983     }
984 }
985 
986 wgpu::CommandEncoder GrDawnGpu::getCopyEncoder() {
987     if (!fCopyEncoder) {
988         fCopyEncoder = fDevice.CreateCommandEncoder();
989     }
990     return fCopyEncoder;
991 }
992 
993 void GrDawnGpu::flushCopyEncoder() {
994     if (fCopyEncoder) {
995         fCommandBuffers.push_back(fCopyEncoder.Finish());
996         fCopyEncoder = nullptr;
997     }
998 }
999 
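// Compiles an SkSL string to SPIR-V with the shared SkSL compiler, reporting any
// failures through the context's shader error handler and returning "" on error.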
1000 std::string GrDawnGpu::SkSLToSPIRV(const char* shaderString,
1001                                    SkSL::ProgramKind kind,
1002                                    uint32_t rtFlipOffset,
1003                                    SkSL::Program::Inputs* inputs) {
1004     auto errorHandler = this->getContext()->priv().getShaderErrorHandler();
1005     SkSL::ProgramSettings settings;
1006     settings.fRTFlipOffset = rtFlipOffset;
1007     settings.fRTFlipBinding = 0;
1008     settings.fRTFlipSet = 0;
1009     std::unique_ptr<SkSL::Program> program = this->shaderCompiler()->convertProgram(
1010         kind,
1011         shaderString,
1012         settings);
1013     if (!program) {
1014         errorHandler->compileError(shaderString, this->shaderCompiler()->errorText().c_str());
1015         return "";
1016     }
1017     if (inputs) {
1018         *inputs = program->fInputs;
1019     }
1020     std::string code;
1021     if (!this->shaderCompiler()->toSPIRV(*program, &code)) {
1022         errorHandler->compileError(shaderString, this->shaderCompiler()->errorText().c_str());
1023         return "";
1024     }
1025     return code;
1026 }
1027 
1028 wgpu::ShaderModule GrDawnGpu::createShaderModule(const std::string& spirvSource) {
1029     wgpu::ShaderModuleSPIRVDescriptor desc;
1030     desc.codeSize = spirvSource.size() / 4;
1031     desc.code = reinterpret_cast<const uint32_t*>(spirvSource.c_str());
1032 
1033     wgpu::ShaderModuleDescriptor smDesc;
1034     smDesc.nextInChain = &desc;
1035 
1036     return fDevice.CreateShaderModule(&smDesc);
1037 }
1038