1 /*
2 * Copyright 2019 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "src/gpu/dawn/GrDawnGpu.h"
9
10 #include "include/gpu/GrBackendSemaphore.h"
11 #include "include/gpu/GrBackendSurface.h"
12 #include "include/gpu/GrContextOptions.h"
13 #include "include/gpu/GrDirectContext.h"
14 #include "src/core/SkConvertPixels.h"
15 #include "src/gpu/GrDataUtils.h"
16 #include "src/gpu/GrDirectContextPriv.h"
17 #include "src/gpu/GrGeometryProcessor.h"
18 #include "src/gpu/GrGpuResourceCacheAccess.h"
19 #include "src/gpu/GrPipeline.h"
20 #include "src/gpu/GrRenderTarget.h"
21 #include "src/gpu/GrSemaphore.h"
22 #include "src/gpu/GrStencilSettings.h"
23 #include "src/gpu/GrTexture.h"
24 #include "src/gpu/GrThreadSafePipelineBuilder.h"
25 #include "src/gpu/dawn/GrDawnAttachment.h"
26 #include "src/gpu/dawn/GrDawnBuffer.h"
27 #include "src/gpu/dawn/GrDawnCaps.h"
28 #include "src/gpu/dawn/GrDawnOpsRenderPass.h"
29 #include "src/gpu/dawn/GrDawnProgramBuilder.h"
30 #include "src/gpu/dawn/GrDawnRenderTarget.h"
31 #include "src/gpu/dawn/GrDawnTexture.h"
32 #include "src/gpu/dawn/GrDawnUtil.h"
33
34 #include "src/core/SkAutoMalloc.h"
35 #include "src/core/SkMipmap.h"
36 #include "src/sksl/SkSLCompiler.h"
37
38 #if !defined(SK_BUILD_FOR_WIN)
39 #include <unistd.h>
40 #endif // !defined(SK_BUILD_FOR_WIN)
41
// Capacity of the LRU cache of compiled render pipelines (fRenderPipelineCache).
static const int kMaxRenderPipelineEntries = 1024;
43
44 namespace {
45
46 class Fence {
47 public:
Fence(const wgpu::Device & device)48 Fence(const wgpu::Device& device)
49 : fDevice(device), fCalled(false) {
50 device.GetQueue().OnSubmittedWorkDone(0, callback, this);
51 }
52
callback(WGPUQueueWorkDoneStatus status,void * userData)53 static void callback(WGPUQueueWorkDoneStatus status, void* userData) {
54 Fence* fence = static_cast<Fence*>(userData);
55 fence->fCalled = true;
56 }
57
check()58 bool check() {
59 fDevice.Tick();
60 return fCalled;
61 }
62
63 private:
64 wgpu::Device fDevice;
65 bool fCalled;
66 };
67
68 }
69
to_dawn_filter_mode(GrSamplerState::Filter filter)70 static wgpu::FilterMode to_dawn_filter_mode(GrSamplerState::Filter filter) {
71 switch (filter) {
72 case GrSamplerState::Filter::kNearest:
73 return wgpu::FilterMode::Nearest;
74 case GrSamplerState::Filter::kLinear:
75 return wgpu::FilterMode::Linear;
76 default:
77 SkASSERT(!"unsupported filter mode");
78 return wgpu::FilterMode::Nearest;
79 }
80 }
81
to_dawn_mipmap_mode(GrSamplerState::MipmapMode mode)82 static wgpu::FilterMode to_dawn_mipmap_mode(GrSamplerState::MipmapMode mode) {
83 switch (mode) {
84 case GrSamplerState::MipmapMode::kNone:
85 // Fall-through (Dawn does not have an equivalent for "None")
86 case GrSamplerState::MipmapMode::kNearest:
87 return wgpu::FilterMode::Nearest;
88 case GrSamplerState::MipmapMode::kLinear:
89 return wgpu::FilterMode::Linear;
90 default:
91 SkASSERT(!"unsupported filter mode");
92 return wgpu::FilterMode::Nearest;
93 }
94 }
95
to_dawn_address_mode(GrSamplerState::WrapMode wrapMode)96 static wgpu::AddressMode to_dawn_address_mode(GrSamplerState::WrapMode wrapMode) {
97 switch (wrapMode) {
98 case GrSamplerState::WrapMode::kClamp:
99 return wgpu::AddressMode::ClampToEdge;
100 case GrSamplerState::WrapMode::kRepeat:
101 return wgpu::AddressMode::Repeat;
102 case GrSamplerState::WrapMode::kMirrorRepeat:
103 return wgpu::AddressMode::MirrorRepeat;
104 case GrSamplerState::WrapMode::kClampToBorder:
105 SkASSERT(!"unsupported address mode");
106 }
107 SkASSERT(!"unsupported address mode");
108 return wgpu::AddressMode::ClampToEdge;
109 }
110
// Factory: creates a GrDawnGpu wrapping |device|, or null if the device is
// invalid.
sk_sp<GrGpu> GrDawnGpu::Make(const wgpu::Device& device,
                             const GrContextOptions& options, GrDirectContext* direct) {
    if (!device) {
        return nullptr;
    }

    return sk_sp<GrGpu>(new GrDawnGpu(direct, options, device));
}
119
120 ////////////////////////////////////////////////////////////////////////////////
121
// Caches the device and its queue, and sets up the uniform ring buffer, the
// staging-buffer manager, and a bounded cache of compiled render pipelines.
GrDawnGpu::GrDawnGpu(GrDirectContext* direct, const GrContextOptions& options,
                     const wgpu::Device& device)
        : INHERITED(direct)
        , fDevice(device)
        , fQueue(device.GetQueue())
        , fUniformRingBuffer(this, wgpu::BufferUsage::Uniform)
        , fStagingBufferManager(this)
        , fRenderPipelineCache(kMaxRenderPipelineEntries)
        , fFinishCallbacks(this) {
    this->initCapsAndCompiler(sk_make_sp<GrDawnCaps>(options));
}
133
// Waits for all outstanding GPU work before tearing down.
GrDawnGpu::~GrDawnGpu() { this->finishOutstandingGpuWork(); }
135
disconnect(DisconnectType type)136 void GrDawnGpu::disconnect(DisconnectType type) {
137 if (DisconnectType::kCleanup == type) {
138 this->finishOutstandingGpuWork();
139 }
140 fStagingBufferManager.reset();
141 fQueue = nullptr;
142 fDevice = nullptr;
143 INHERITED::disconnect(type);
144 }
145
// The Dawn backend does not provide a thread-safe pipeline builder.
GrThreadSafePipelineBuilder* GrDawnGpu::pipelineBuilder() {
    return nullptr;
}
149
// See pipelineBuilder(): no thread-safe pipeline builder exists for Dawn.
sk_sp<GrThreadSafePipelineBuilder> GrDawnGpu::refPipelineBuilder() {
    return nullptr;
}
153
154 ///////////////////////////////////////////////////////////////////////////////
155
// Begins an ops render pass targeting |rt|. Only the origin and the color /
// stencil load-store info are forwarded; the MSAA flag, explicit attachment,
// bounds, sampled proxies, and transfer barriers are currently unused here.
GrOpsRenderPass* GrDawnGpu::onGetOpsRenderPass(
        GrRenderTarget* rt,
        bool /*useMSAASurface*/,
        GrAttachment*,
        GrSurfaceOrigin origin,
        const SkIRect& bounds,
        const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
        const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo,
        const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
        GrXferBarrierFlags renderPassXferBarriers) {
    fOpsRenderPass.reset(new GrDawnOpsRenderPass(this, rt, origin, colorInfo, stencilInfo));
    return fOpsRenderPass.get();
}
169
170 ///////////////////////////////////////////////////////////////////////////////
onCreateBuffer(size_t size,GrGpuBufferType type,GrAccessPattern accessPattern,const void * data)171 sk_sp<GrGpuBuffer> GrDawnGpu::onCreateBuffer(size_t size, GrGpuBufferType type,
172 GrAccessPattern accessPattern, const void* data) {
173 sk_sp<GrGpuBuffer> b(new GrDawnBuffer(this, size, type, accessPattern));
174 if (data && b) {
175 b->updateData(data, size);
176 }
177 return b;
178 }
179
180 ////////////////////////////////////////////////////////////////////////////////
onWritePixels(GrSurface * surface,SkIRect rect,GrColorType surfaceColorType,GrColorType srcColorType,const GrMipLevel texels[],int mipLevelCount,bool prepForTexSampling)181 bool GrDawnGpu::onWritePixels(GrSurface* surface,
182 SkIRect rect,
183 GrColorType surfaceColorType,
184 GrColorType srcColorType,
185 const GrMipLevel texels[],
186 int mipLevelCount,
187 bool prepForTexSampling) {
188 GrDawnTexture* texture = static_cast<GrDawnTexture*>(surface->asTexture());
189 if (!texture) {
190 return false;
191 }
192 this->uploadTextureData(srcColorType, texels, mipLevelCount, rect, texture->texture());
193 if (mipLevelCount < texture->maxMipmapLevel() + 1) {
194 texture->markMipmapsDirty();
195 }
196 return true;
197 }
198
// Buffer-to-texture transfers are not implemented in the Dawn backend yet.
bool GrDawnGpu::onTransferPixelsTo(GrTexture* texture,
                                   SkIRect rect,
                                   GrColorType textureColorType,
                                   GrColorType bufferColorType,
                                   sk_sp<GrGpuBuffer> transferBuffer,
                                   size_t bufferOffset,
                                   size_t rowBytes) {
    SkASSERT(!"unimplemented");
    return false;
}
209
// Texture-to-buffer transfers are not implemented in the Dawn backend yet.
bool GrDawnGpu::onTransferPixelsFrom(GrSurface* surface,
                                     SkIRect rect,
                                     GrColorType surfaceColorType,
                                     GrColorType bufferColorType,
                                     sk_sp<GrGpuBuffer> transferBuffer,
                                     size_t offset) {
    SkASSERT(!"unimplemented");
    return false;
}
219
220 ////////////////////////////////////////////////////////////////////////////////
onCreateTexture(SkISize dimensions,const GrBackendFormat & backendFormat,GrRenderable renderable,int renderTargetSampleCnt,SkBudgeted budgeted,GrProtected,int mipLevelCount,uint32_t levelClearMask)221 sk_sp<GrTexture> GrDawnGpu::onCreateTexture(SkISize dimensions,
222 const GrBackendFormat& backendFormat,
223 GrRenderable renderable,
224 int renderTargetSampleCnt,
225 SkBudgeted budgeted,
226 GrProtected,
227 int mipLevelCount,
228 uint32_t levelClearMask) {
229 if (levelClearMask) {
230 return nullptr;
231 }
232
233 wgpu::TextureFormat format;
234 if (!backendFormat.asDawnFormat(&format)) {
235 return nullptr;
236 }
237
238 GrMipmapStatus mipmapStatus =
239 mipLevelCount > 1 ? GrMipmapStatus::kDirty : GrMipmapStatus::kNotAllocated;
240
241 return GrDawnTexture::Make(this, dimensions, format, renderable, renderTargetSampleCnt,
242 budgeted, mipLevelCount, mipmapStatus);
243 }
244
// Compressed textures are not implemented in the Dawn backend yet.
sk_sp<GrTexture> GrDawnGpu::onCreateCompressedTexture(SkISize dimensions, const GrBackendFormat&,
                                                      SkBudgeted, GrMipmapped, GrProtected,
                                                      const void* data, size_t dataSize) {
    SkASSERT(!"unimplemented");
    return nullptr;
}
251
onWrapBackendTexture(const GrBackendTexture & backendTex,GrWrapOwnership ownership,GrWrapCacheable cacheable,GrIOType ioType)252 sk_sp<GrTexture> GrDawnGpu::onWrapBackendTexture(const GrBackendTexture& backendTex,
253 GrWrapOwnership ownership,
254 GrWrapCacheable cacheable,
255 GrIOType ioType) {
256 GrDawnTextureInfo info;
257 if (!backendTex.getDawnTextureInfo(&info)) {
258 return nullptr;
259 }
260
261 SkISize dimensions = { backendTex.width(), backendTex.height() };
262 return GrDawnTexture::MakeWrapped(this, dimensions, GrRenderable::kNo, 1, cacheable, ioType,
263 info);
264 }
265
// Wrapping compressed backend textures is not supported in the Dawn backend.
sk_sp<GrTexture> GrDawnGpu::onWrapCompressedBackendTexture(const GrBackendTexture& backendTex,
                                                           GrWrapOwnership ownership,
                                                           GrWrapCacheable cacheable) {
    return nullptr;
}
271
onWrapRenderableBackendTexture(const GrBackendTexture & tex,int sampleCnt,GrWrapOwnership,GrWrapCacheable cacheable)272 sk_sp<GrTexture> GrDawnGpu::onWrapRenderableBackendTexture(const GrBackendTexture& tex,
273 int sampleCnt,
274 GrWrapOwnership,
275 GrWrapCacheable cacheable) {
276 GrDawnTextureInfo info;
277 if (!tex.getDawnTextureInfo(&info) || !info.fTexture) {
278 return nullptr;
279 }
280
281 SkISize dimensions = { tex.width(), tex.height() };
282 sampleCnt = this->caps()->getRenderTargetSampleCount(sampleCnt, tex.getBackendFormat());
283 if (sampleCnt < 1) {
284 return nullptr;
285 }
286
287 sk_sp<GrTexture> result = GrDawnTexture::MakeWrapped(this, dimensions, GrRenderable::kYes,
288 sampleCnt, cacheable, kRW_GrIOType, info);
289 result->markMipmapsDirty();
290 return result;
291 }
292
onWrapBackendRenderTarget(const GrBackendRenderTarget & rt)293 sk_sp<GrRenderTarget> GrDawnGpu::onWrapBackendRenderTarget(const GrBackendRenderTarget& rt) {
294 GrDawnRenderTargetInfo info;
295 if (!rt.getDawnRenderTargetInfo(&info) || !info.fTextureView) {
296 return nullptr;
297 }
298
299 SkISize dimensions = { rt.width(), rt.height() };
300 int sampleCnt = 1;
301 return GrDawnRenderTarget::MakeWrapped(this, dimensions, sampleCnt, info);
302 }
303
// Creates a stencil attachment for a render target; the color format is
// irrelevant for Dawn stencil allocation and is ignored.
sk_sp<GrAttachment> GrDawnGpu::makeStencilAttachment(const GrBackendFormat& /*colorFormat*/,
                                                     SkISize dimensions, int numStencilSamples) {
    fStats.incStencilAttachmentCreates();
    return GrDawnAttachment::MakeStencil(this, dimensions, numStencilSamples);
}
309
onCreateBackendTexture(SkISize dimensions,const GrBackendFormat & backendFormat,GrRenderable renderable,GrMipmapped mipMapped,GrProtected isProtected)310 GrBackendTexture GrDawnGpu::onCreateBackendTexture(SkISize dimensions,
311 const GrBackendFormat& backendFormat,
312 GrRenderable renderable,
313 GrMipmapped mipMapped,
314 GrProtected isProtected) {
315 wgpu::TextureFormat format;
316 if (!backendFormat.asDawnFormat(&format)) {
317 return GrBackendTexture();
318 }
319
320 wgpu::TextureDescriptor desc;
321 desc.usage = wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::CopySrc |
322 wgpu::TextureUsage::CopyDst;
323
324 if (GrRenderable::kYes == renderable) {
325 desc.usage |= wgpu::TextureUsage::RenderAttachment;
326 }
327
328 int numMipLevels = 1;
329 if (mipMapped == GrMipmapped::kYes) {
330 numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1;
331 }
332
333 desc.size.width = dimensions.width();
334 desc.size.height = dimensions.height();
335 desc.size.depthOrArrayLayers = 1;
336 desc.format = format;
337 desc.mipLevelCount = numMipLevels;
338
339 wgpu::Texture tex = this->device().CreateTexture(&desc);
340
341 GrDawnTextureInfo info;
342 info.fTexture = tex;
343 info.fFormat = desc.format;
344 info.fLevelCount = desc.mipLevelCount;
345 return GrBackendTexture(dimensions.width(), dimensions.height(), info);
346 }
347
// Copies |mipLevelCount| levels of CPU pixel data into |texture| via staging
// buffers. The destination window starts at |rect| for the base level and is
// halved (origin and extent) for each successive level.
void GrDawnGpu::uploadTextureData(GrColorType srcColorType, const GrMipLevel texels[],
                                  int mipLevelCount, const SkIRect& rect,
                                  wgpu::Texture texture) {
    uint32_t x = rect.x();
    uint32_t y = rect.y();
    uint32_t width = rect.width();
    uint32_t height = rect.height();

    for (int i = 0; i < mipLevelCount; i++) {
        const void* src = texels[i].fPixels;
        size_t srcRowBytes = texels[i].fRowBytes;
        SkColorType colorType = GrColorTypeToSkColorType(srcColorType);
        size_t trimRowBytes = width * SkColorTypeBytesPerPixel(colorType);
        // Dawn requires row pitches rounded up; rows are repacked into the
        // staging slice at the padded pitch.
        size_t dstRowBytes = GrDawnRoundRowBytes(trimRowBytes);
        size_t size = dstRowBytes * height;
        GrStagingBufferManager::Slice slice =
                this->stagingBufferManager()->allocateStagingBufferSlice(size);
        SkRectMemcpy(slice.fOffsetMapPtr, dstRowBytes, src, srcRowBytes, trimRowBytes, height);

        wgpu::ImageCopyBuffer srcBuffer = {};
        srcBuffer.buffer = static_cast<GrDawnBuffer*>(slice.fBuffer)->get();
        srcBuffer.layout.offset = slice.fOffset;
        srcBuffer.layout.bytesPerRow = dstRowBytes;
        srcBuffer.layout.rowsPerImage = height;

        wgpu::ImageCopyTexture dstTexture;
        dstTexture.texture = texture;
        dstTexture.mipLevel = i;
        dstTexture.origin = {x, y, 0};

        wgpu::Extent3D copySize = {width, height, 1};
        this->getCopyEncoder().CopyBufferToTexture(&srcBuffer, &dstTexture, &copySize);
        // Shrink the window for the next mip level; extents never drop below 1.
        x /= 2;
        y /= 2;
        width = std::max(1u, width / 2);
        height = std::max(1u, height / 2);
    }
}
386
// Fills every mip level of |backendTexture| with |color|. A single base-level
// image is built on the CPU, then copied (with Dawn's required row padding)
// into each level via staging buffers. |finishedCallback| is unused here.
bool GrDawnGpu::onClearBackendTexture(const GrBackendTexture& backendTexture,
                                      sk_sp<GrRefCntedCallback> finishedCallback,
                                      std::array<float, 4> color) {
    GrDawnTextureInfo info;
    SkAssertResult(backendTexture.getDawnTextureInfo(&info));

    GrColorType colorType;
    if (!GrDawnFormatToGrColorType(info.fFormat, &colorType)) {
        return false;
    }

    // Build one base-sized image of the clear color; smaller levels reuse its
    // top-left corner since every pixel holds the same value.
    size_t bpp = GrDawnBytesPerBlock(info.fFormat);
    size_t baseLayerSize = bpp * backendTexture.width() * backendTexture.height();
    SkAutoMalloc defaultStorage(baseLayerSize);
    GrImageInfo imageInfo(colorType, kUnpremul_SkAlphaType, nullptr, backendTexture.dimensions());
    GrClearImage(imageInfo, defaultStorage.get(), bpp * backendTexture.width(), color);

    wgpu::Device device = this->device();
    wgpu::CommandEncoder copyEncoder = this->getCopyEncoder();
    int w = backendTexture.width(), h = backendTexture.height();
    for (uint32_t i = 0; i < info.fLevelCount; i++) {
        size_t origRowBytes = bpp * w;
        // Dawn requires padded row pitches for buffer-to-texture copies.
        size_t rowBytes = GrDawnRoundRowBytes(origRowBytes);
        size_t size = rowBytes * h;
        GrStagingBufferManager::Slice stagingBuffer =
                this->stagingBufferManager()->allocateStagingBufferSlice(size);
        if (rowBytes == origRowBytes) {
            // No padding needed: one bulk copy.
            memcpy(stagingBuffer.fOffsetMapPtr, defaultStorage.get(), size);
        } else {
            // Repack row by row at the padded pitch.
            const char* src = static_cast<const char*>(defaultStorage.get());
            char* dst = static_cast<char*>(stagingBuffer.fOffsetMapPtr);
            for (int row = 0; row < h; row++) {
                memcpy(dst, src, origRowBytes);
                dst += rowBytes;
                src += origRowBytes;
            }
        }
        wgpu::ImageCopyBuffer srcBuffer = {};
        srcBuffer.buffer = static_cast<GrDawnBuffer*>(stagingBuffer.fBuffer)->get();
        srcBuffer.layout.offset = stagingBuffer.fOffset;
        srcBuffer.layout.bytesPerRow = rowBytes;
        srcBuffer.layout.rowsPerImage = h;
        wgpu::ImageCopyTexture dstTexture;
        dstTexture.texture = info.fTexture;
        dstTexture.mipLevel = i;
        dstTexture.origin = {0, 0, 0};
        wgpu::Extent3D copySize = {(uint32_t)w, (uint32_t)h, 1};
        copyEncoder.CopyBufferToTexture(&srcBuffer, &dstTexture, &copySize);
        // Next level is half-size in each dimension, clamped at 1.
        w = std::max(1, w / 2);
        h = std::max(1, h / 2);
    }
    return true;
}
440
// Compressed backend textures are not supported in the Dawn backend.
GrBackendTexture GrDawnGpu::onCreateCompressedBackendTexture(
        SkISize dimensions, const GrBackendFormat&, GrMipmapped, GrProtected) {
    return {};
}
445
// Updating compressed backend textures is not supported in the Dawn backend.
bool GrDawnGpu::onUpdateCompressedBackendTexture(const GrBackendTexture&,
                                                 sk_sp<GrRefCntedCallback> finishedCallback,
                                                 const void* data,
                                                 size_t size) {
    return false;
}
452
// Releases the Dawn texture reference held in the backend-texture info.
// NOTE(review): this clears only the local GrDawnTextureInfo copy; presumably
// dropping this ref is what frees the texture — confirm GrDawnTextureInfo's
// ownership semantics.
void GrDawnGpu::deleteBackendTexture(const GrBackendTexture& tex) {
    GrDawnTextureInfo info;
    if (tex.getDawnTextureInfo(&info)) {
        info.fTexture = nullptr;
    }
}
459
// Ahead-of-time pipeline compilation is not supported; always reports failure.
bool GrDawnGpu::compile(const GrProgramDesc&, const GrProgramInfo&) {
    return false;
}
463
464 #if GR_TEST_UTILS
isTestingOnlyBackendTexture(const GrBackendTexture & tex) const465 bool GrDawnGpu::isTestingOnlyBackendTexture(const GrBackendTexture& tex) const {
466 GrDawnTextureInfo info;
467 if (!tex.getDawnTextureInfo(&info)) {
468 return false;
469 }
470
471 return info.fTexture.Get();
472 }
473
createTestingOnlyBackendRenderTarget(SkISize dimensions,GrColorType colorType,int sampleCnt,GrProtected isProtected)474 GrBackendRenderTarget GrDawnGpu::createTestingOnlyBackendRenderTarget(SkISize dimensions,
475 GrColorType colorType,
476 int sampleCnt,
477 GrProtected isProtected) {
478 if (dimensions.width() > this->caps()->maxTextureSize() ||
479 dimensions.height() > this->caps()->maxTextureSize()) {
480 return {};
481 }
482
483 // We don't support MSAA in this backend yet.
484 if (sampleCnt != 1) {
485 return {};
486 }
487
488 if (isProtected == GrProtected::kYes) {
489 return {};
490 }
491
492 wgpu::TextureFormat format;
493 if (!GrColorTypeToDawnFormat(colorType, &format)) {
494 return {};
495 }
496
497 wgpu::TextureDescriptor desc;
498 desc.usage =
499 wgpu::TextureUsage::CopySrc |
500 wgpu::TextureUsage::RenderAttachment;
501
502 desc.size.width = dimensions.width();
503 desc.size.height = dimensions.height();
504 desc.size.depthOrArrayLayers = 1;
505 desc.format = format;
506
507 wgpu::Texture tex = this->device().CreateTexture(&desc);
508
509 GrDawnRenderTargetInfo info;
510 info.fTextureView = tex.CreateView();
511 info.fFormat = desc.format;
512 info.fLevelCount = desc.mipLevelCount;
513
514 return GrBackendRenderTarget(dimensions.width(), dimensions.height(), 1, 0, info);
515 }
516
// Releases the texture view held in the backend-render-target info.
// NOTE(review): like deleteBackendTexture, this clears a local copy of the
// info — confirm the view's ref ownership lives in GrDawnRenderTargetInfo.
void GrDawnGpu::deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget& rt) {
    GrDawnRenderTargetInfo info;
    if (rt.getDawnRenderTargetInfo(&info)) {
        info.fTextureView = nullptr;
    }
}
523
524 #endif
525
// Registers a callback to be invoked once all currently-submitted GPU work
// finishes; dispatch happens via fFinishCallbacks.
void GrDawnGpu::addFinishedProc(GrGpuFinishedProc finishedProc,
                                GrGpuFinishedContext finishedContext) {
    fFinishCallbacks.add(finishedProc, finishedContext);
}
530
// Recycles staging buffers whose async map has completed (a mapped buffer
// means the GPU is done with the submission that used it).
void GrDawnGpu::checkForCompletedStagingBuffers() {
    // We expect all the buffer maps to trigger in order of submission so we bail after the first
    // non finished map since we always push new busy buffers to the back of our list.
    while (!fBusyStagingBuffers.empty() && fBusyStagingBuffers.front()->isMapped()) {
        fBusyStagingBuffers.pop_front();
    }
}
538
waitOnAllBusyStagingBuffers()539 void GrDawnGpu::waitOnAllBusyStagingBuffers() {
540 while (!fBusyStagingBuffers.empty()) {
541 fDevice.Tick();
542 this->checkForCompletedStagingBuffers();
543 }
544 }
545
// Keeps |buffer| alive until the current submission completes by parking it
// on the submitted-staging-buffer list.
void GrDawnGpu::takeOwnershipOfBuffer(sk_sp<GrGpuBuffer> buffer) {
    fSubmittedStagingBuffers.push_back(std::move(buffer));
}
549
550
// Queue-work-done trampoline used by onSubmitToGpu's syncCpu path: flags
// completion into the caller's bool regardless of the reported status.
static void callback(WGPUQueueWorkDoneStatus status, void* userData) {
    *static_cast<bool*>(userData) = true;
}
554
// Flushes pending copy work, submits all recorded command buffers, and kicks
// off async mapping of the staging buffers used by this submission. When
// |syncCpu| is true, blocks until the queue reports completion and then fires
// all registered finished-callbacks.
bool GrDawnGpu::onSubmitToGpu(bool syncCpu) {
    this->flushCopyEncoder();
    if (!fCommandBuffers.empty()) {
        fQueue.Submit(fCommandBuffers.size(), &fCommandBuffers.front());
        fCommandBuffers.clear();
    }

    this->moveStagingBuffersToBusyAndMapAsync();
    if (syncCpu) {
        // Busy-wait: Tick() pumps Dawn until the work-done callback fires.
        bool called = false;
        fDevice.GetQueue().OnSubmittedWorkDone(0, callback, &called);
        while (!called) {
            fDevice.Tick();
        }
        fFinishCallbacks.callAll(true);
    }

    this->checkForCompletedStagingBuffers();

    return true;
}
576
get_dawn_texture_from_surface(GrSurface * src)577 static wgpu::Texture get_dawn_texture_from_surface(GrSurface* src) {
578 if (auto t = static_cast<GrDawnTexture*>(src->asTexture())) {
579 return t->texture();
580 } else {
581 return nullptr;
582 }
583 }
584
onCopySurface(GrSurface * dst,GrSurface * src,const SkIRect & srcRect,const SkIPoint & dstPoint)585 bool GrDawnGpu::onCopySurface(GrSurface* dst,
586 GrSurface* src,
587 const SkIRect& srcRect,
588 const SkIPoint& dstPoint) {
589 wgpu::Texture srcTexture = get_dawn_texture_from_surface(src);
590 wgpu::Texture dstTexture = get_dawn_texture_from_surface(dst);
591 if (!srcTexture || !dstTexture) {
592 return false;
593 }
594
595 uint32_t width = srcRect.width(), height = srcRect.height();
596
597 wgpu::ImageCopyTexture srcTextureView, dstTextureView;
598 srcTextureView.texture = srcTexture;
599 srcTextureView.origin = {(uint32_t) srcRect.x(), (uint32_t) srcRect.y(), 0};
600 dstTextureView.texture = dstTexture;
601 dstTextureView.origin = {(uint32_t) dstPoint.x(), (uint32_t) dstPoint.y(), 0};
602
603 wgpu::Extent3D copySize = {width, height, 1};
604 this->getCopyEncoder().CopyTextureToTexture(&srcTextureView, &dstTextureView, ©Size);
605 return true;
606 }
607
// Buffer-map trampoline used by onReadPixels: flags completion into the
// caller's bool regardless of the reported status.
static void callback(WGPUBufferMapAsyncStatus status, void* userdata) {
    *static_cast<bool*>(userdata) = true;
}
611
// Synchronous GPU->CPU readback: copies |rect| of |surface| into a mappable
// buffer, submits and blocks until the queue finishes, maps the buffer, and
// repacks rows into |buffer| at the caller's |rowBytes| pitch.
bool GrDawnGpu::onReadPixels(GrSurface* surface,
                             SkIRect rect,
                             GrColorType surfaceColorType,
                             GrColorType dstColorType,
                             void* buffer,
                             size_t rowBytes) {
    wgpu::Texture tex = get_dawn_texture_from_surface(surface);

    if (!tex || 0 == rowBytes) {
        return false;
    }
    // Dawn needs padded row pitches for texture-to-buffer copies; remember the
    // caller's pitch so rows can be repacked after mapping.
    size_t origRowBytes = rowBytes;
    int origSizeInBytes = origRowBytes*rect.height();
    rowBytes = GrDawnRoundRowBytes(rowBytes);
    int sizeInBytes = rowBytes*rect.height();

    wgpu::BufferDescriptor desc;
    desc.usage = wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::MapRead;
    desc.size = sizeInBytes;

    wgpu::Buffer buf = device().CreateBuffer(&desc);

    wgpu::ImageCopyTexture srcTexture;
    srcTexture.texture = tex;
    srcTexture.origin = {(uint32_t) rect.left(), (uint32_t) rect.top(), 0};

    wgpu::ImageCopyBuffer dstBuffer = {};
    dstBuffer.buffer = buf;
    dstBuffer.layout.offset = 0;
    dstBuffer.layout.bytesPerRow = rowBytes;
    dstBuffer.layout.rowsPerImage = rect.height();

    wgpu::Extent3D copySize = {(uint32_t) rect.width(), (uint32_t) rect.height(), 1};
    this->getCopyEncoder().CopyTextureToBuffer(&srcTexture, &dstBuffer, &copySize);
    // Force the copy to execute before mapping; this stalls the CPU.
    this->submitToGpu(true);

    bool mapped = false;
    // NOTE(review): size 0 here presumably means "map the whole buffer" in the
    // Dawn version this targets — confirm against the Dawn API in use.
    buf.MapAsync(wgpu::MapMode::Read, 0, 0, callback, &mapped);
    while (!mapped) {
        device().Tick();
    }
    const void* readPixelsPtr = buf.GetConstMappedRange();

    if (rowBytes == origRowBytes) {
        // No padding: single bulk copy.
        memcpy(buffer, readPixelsPtr, origSizeInBytes);
    } else {
        // Strip the per-row padding while copying out.
        const char* src = static_cast<const char*>(readPixelsPtr);
        char* dst = static_cast<char*>(buffer);
        for (int row = 0; row < rect.height(); row++) {
            memcpy(dst, src, origRowBytes);
            dst += origRowBytes;
            src += rowBytes;
        }
    }
    buf.Unmap();
    return true;
}
669
// Regenerates all mip levels of |tex| on the GPU: each level is rendered by
// sampling the previous one (bilinear fullscreen quad) into a temporary
// texture, then copied back into the corresponding level of the source.
bool GrDawnGpu::onRegenerateMipMapLevels(GrTexture* tex) {
    this->flushCopyEncoder();
    GrDawnTexture* src = static_cast<GrDawnTexture*>(tex);
    int srcWidth = tex->width();
    int srcHeight = tex->height();

    // SkMipmap doesn't include the base level in the level count so we have to add 1
    uint32_t levelCount = SkMipmap::ComputeLevelCount(tex->width(), tex->height()) + 1;

    // Create a temporary texture for mipmap generation, then copy to source.
    // We have to do this even for renderable textures, since GrDawnRenderTarget currently only
    // contains a view, not a texture.
    wgpu::TextureDescriptor texDesc;
    texDesc.usage = wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::CopySrc |
                    wgpu::TextureUsage::RenderAttachment;
    texDesc.size.width = (tex->width() + 1) / 2;
    texDesc.size.height = (tex->height() + 1) / 2;
    texDesc.size.depthOrArrayLayers = 1;
    texDesc.mipLevelCount = levelCount - 1;   // temp texture holds levels 1..N of the source
    texDesc.format = src->format();
    wgpu::Texture dstTexture = fDevice.CreateTexture(&texDesc);

    // Fullscreen triangle-strip quad; positions/texcoords indexed by vertex id.
    const char* vs =
        "layout(location = 0) out float2 texCoord;\n"
        "float2 positions[4] = float2[4](float2(-1.0, 1.0),\n"
        "float2(1.0, 1.0),\n"
        "float2(-1.0, -1.0),\n"
        "float2(1.0, -1.0));\n"
        "float2 texCoords[4] = float2[4](float2(0.0, 0.0),\n"
        "float2(1.0, 0.0),\n"
        "float2(0.0, 1.0),\n"
        "float2(1.0, 1.0));\n"
        "void main() {\n"
        "    sk_Position = float4(positions[sk_VertexID], 0.0, 1.0);\n"
        "    texCoord = texCoords[sk_VertexID];\n"
        "}\n";
    SkSL::String vsSPIRV = this->SkSLToSPIRV(vs,
                                             SkSL::ProgramKind::kVertex,
                                             /*rtFlipOffset*/ 0,
                                             nullptr);

    // Samples the previous mip level with the linear sampler bound below.
    const char* fs =
        "layout(set = 0, binding = 0) uniform sampler samp;\n"
        "layout(set = 0, binding = 1) uniform texture2D tex;\n"
        "layout(location = 0) in float2 texCoord;\n"
        "void main() {\n"
        "    sk_FragColor = sample(makeSampler2D(tex, samp), texCoord);\n"
        "}\n";
    SkSL::String fsSPIRV = this->SkSLToSPIRV(fs,
                                             SkSL::ProgramKind::kFragment,
                                             /*rtFlipOffset=*/ 0,
                                             nullptr);

    wgpu::VertexState vertexState;
    vertexState.module = this->createShaderModule(vsSPIRV);
    vertexState.entryPoint = "main";
    vertexState.bufferCount = 0;

    wgpu::ColorTargetState colorTargetState;
    colorTargetState.format = static_cast<GrDawnTexture*>(tex)->format();

    wgpu::FragmentState fragmentState;
    fragmentState.module = this->createShaderModule(fsSPIRV);
    fragmentState.entryPoint = "main";
    fragmentState.targetCount = 1;
    fragmentState.targets = &colorTargetState;

    wgpu::RenderPipelineDescriptor renderPipelineDesc;
    renderPipelineDesc.vertex = vertexState;
    renderPipelineDesc.primitive.topology = wgpu::PrimitiveTopology::TriangleStrip;
    renderPipelineDesc.primitive.stripIndexFormat = wgpu::IndexFormat::Uint16;
    renderPipelineDesc.fragment = &fragmentState;
    wgpu::RenderPipeline pipeline = fDevice.CreateRenderPipeline(&renderPipelineDesc);

    wgpu::BindGroupLayout bgl = pipeline.GetBindGroupLayout(0);
    // First source view is the base level of the original texture.
    wgpu::TextureViewDescriptor srcViewDesc;
    srcViewDesc.mipLevelCount = 1;
    wgpu::TextureView srcView = src->texture().CreateView(&srcViewDesc);
    wgpu::SamplerDescriptor samplerDesc;
    samplerDesc.minFilter = wgpu::FilterMode::Linear;
    wgpu::Sampler sampler = fDevice.CreateSampler(&samplerDesc);
    wgpu::CommandEncoder commandEncoder = fDevice.CreateCommandEncoder();
    for (uint32_t mipLevel = 0; mipLevel < texDesc.mipLevelCount; mipLevel++) {
        int dstWidth = std::max(1, srcWidth / 2);
        int dstHeight = std::max(1, srcHeight / 2);
        wgpu::TextureViewDescriptor dstViewDesc;
        dstViewDesc.format = static_cast<GrDawnTexture*>(tex)->format();
        dstViewDesc.dimension = wgpu::TextureViewDimension::e2D;
        dstViewDesc.baseMipLevel = mipLevel;
        dstViewDesc.mipLevelCount = 1;
        wgpu::TextureView dstView = dstTexture.CreateView(&dstViewDesc);
        wgpu::BindGroupEntry bge[2];
        bge[0].binding = 0;
        bge[0].sampler = sampler;
        bge[1].binding = 1;
        bge[1].textureView = srcView;
        wgpu::BindGroupDescriptor bgDesc;
        bgDesc.layout = bgl;
        bgDesc.entryCount = 2;
        bgDesc.entries = bge;
        wgpu::BindGroup bindGroup = fDevice.CreateBindGroup(&bgDesc);
        wgpu::RenderPassColorAttachment colorAttachment;
        colorAttachment.view = dstView;
        colorAttachment.clearColor = { 0.0f, 0.0f, 0.0f, 0.0f };
        colorAttachment.loadOp = wgpu::LoadOp::Load;
        colorAttachment.storeOp = wgpu::StoreOp::Store;
        wgpu::RenderPassColorAttachment* colorAttachments = { &colorAttachment };
        wgpu::RenderPassDescriptor renderPassDesc;
        renderPassDesc.colorAttachmentCount = 1;
        renderPassDesc.colorAttachments = colorAttachments;
        // Downsample: draw the quad sampling srcView into this temp level.
        wgpu::RenderPassEncoder rpe = commandEncoder.BeginRenderPass(&renderPassDesc);
        rpe.SetPipeline(pipeline);
        rpe.SetBindGroup(0, bindGroup);
        rpe.Draw(4, 1, 0, 0);
        rpe.EndPass();

        // Copy temp level |mipLevel| back into source level |mipLevel + 1|.
        wgpu::Extent3D copySize = {(uint32_t)dstWidth, (uint32_t)dstHeight, 1};
        wgpu::ImageCopyTexture srcCopyView;
        srcCopyView.texture = dstTexture;
        srcCopyView.mipLevel = mipLevel;
        wgpu::ImageCopyTexture dstCopyView;
        dstCopyView.mipLevel = mipLevel + 1;
        dstCopyView.texture = src->texture();
        commandEncoder.CopyTextureToTexture(&srcCopyView, &dstCopyView, &copySize);

        // The level just rendered becomes the sample source for the next one.
        srcHeight = dstHeight;
        srcWidth = dstWidth;
        srcView = dstView;
    }
    fCommandBuffers.push_back(commandEncoder.Finish());
    return true;
}
802
// Finalizes a render pass: flushes pending copy commands first so they are
// ordered before the pass, then records the pass's command buffer.
void GrDawnGpu::submit(GrOpsRenderPass* renderPass) {
    this->flushCopyEncoder();
    static_cast<GrDawnOpsRenderPass*>(renderPass)->submit();
}
807
// Returns a heap-allocated Fence (cast to the opaque GrFence handle) tracking
// completion of all work submitted so far; pair with waitFence()/deleteFence().
GrFence SK_WARN_UNUSED_RESULT GrDawnGpu::insertFence() {
    return reinterpret_cast<GrFence>(new Fence(fDevice));
}
811
// Non-blocking poll of a fence created by insertFence().
bool GrDawnGpu::waitFence(GrFence fence) {
    return reinterpret_cast<Fence*>(fence)->check();
}
815
// Frees a fence allocated by insertFence().
void GrDawnGpu::deleteFence(GrFence fence) const {
    delete reinterpret_cast<Fence*>(fence);
}
819
// Semaphores are not implemented in the Dawn backend.
std::unique_ptr<GrSemaphore> SK_WARN_UNUSED_RESULT GrDawnGpu::makeSemaphore(bool isOwned) {
    SkASSERT(!"unimplemented");
    return nullptr;
}
824
std::unique_ptr<GrSemaphore> GrDawnGpu::wrapBackendSemaphore(const GrBackendSemaphore& /* sema */,
                                                             GrSemaphoreWrapType /* wrapType */,
                                                             GrWrapOwnership /* ownership */) {
    // Wrapping backend semaphores is not implemented for the Dawn backend yet.
    SkASSERT(!"unimplemented");
    return nullptr;
}
831
void GrDawnGpu::insertSemaphore(GrSemaphore* semaphore) {
    // Semaphore signaling is not implemented for the Dawn backend yet.
    SkASSERT(!"unimplemented");
}
835
void GrDawnGpu::waitSemaphore(GrSemaphore* semaphore) {
    // Semaphore waiting is not implemented for the Dawn backend yet.
    SkASSERT(!"unimplemented");
}
839
void GrDawnGpu::checkFinishProcs() {
    // Invoke any finish callbacks whose associated GPU work has completed.
    fFinishCallbacks.check();
}
843
void GrDawnGpu::finishOutstandingGpuWork() {
    // Waiting for all busy staging buffers to map back implies the GPU has
    // finished the submissions that used them.
    this->waitOnAllBusyStagingBuffers();
}
847
std::unique_ptr<GrSemaphore> GrDawnGpu::prepareTextureForCrossContextUsage(GrTexture* texture) {
    // Cross-context texture sharing is not implemented for the Dawn backend yet.
    SkASSERT(!"unimplemented");
    return nullptr;
}
852
getOrCreateRenderPipeline(GrRenderTarget * rt,const GrProgramInfo & programInfo)853 sk_sp<GrDawnProgram> GrDawnGpu::getOrCreateRenderPipeline(
854 GrRenderTarget* rt,
855 const GrProgramInfo& programInfo) {
856 GrProgramDesc desc = this->caps()->makeDesc(rt, programInfo);
857 if (!desc.isValid()) {
858 return nullptr;
859 }
860
861 if (sk_sp<GrDawnProgram>* program = fRenderPipelineCache.find(desc)) {
862 return *program;
863 }
864
865 wgpu::TextureFormat colorFormat;
866 SkAssertResult(programInfo.backendFormat().asDawnFormat(&colorFormat));
867
868 wgpu::TextureFormat stencilFormat = wgpu::TextureFormat::Depth24PlusStencil8;
869 bool hasDepthStencil = rt->getStencilAttachment() != nullptr;
870
871 sk_sp<GrDawnProgram> program = GrDawnProgramBuilder::Build(
872 this, rt, programInfo, colorFormat,
873 hasDepthStencil, stencilFormat, &desc);
874 fRenderPipelineCache.insert(desc, program);
875 return program;
876 }
877
getOrCreateSampler(GrSamplerState samplerState)878 wgpu::Sampler GrDawnGpu::getOrCreateSampler(GrSamplerState samplerState) {
879 auto i = fSamplers.find(samplerState);
880 if (i != fSamplers.end()) {
881 return i->second;
882 }
883 wgpu::SamplerDescriptor desc;
884 desc.addressModeU = to_dawn_address_mode(samplerState.wrapModeX());
885 desc.addressModeV = to_dawn_address_mode(samplerState.wrapModeY());
886 desc.addressModeW = wgpu::AddressMode::ClampToEdge;
887 desc.magFilter = desc.minFilter = to_dawn_filter_mode(samplerState.filter());
888 desc.mipmapFilter = to_dawn_mipmap_mode(samplerState.mipmapMode());
889 wgpu::Sampler sampler = device().CreateSampler(&desc);
890 fSamplers.insert(std::pair<GrSamplerState, wgpu::Sampler>(samplerState, sampler));
891 return sampler;
892 }
893
GrDawnRingBuffer::Slice GrDawnGpu::allocateUniformRingBufferSlice(int size) {
    // Carve a slice of the requested size out of the shared uniform ring buffer.
    return fUniformRingBuffer.allocate(size);
}
897
appendCommandBuffer(wgpu::CommandBuffer commandBuffer)898 void GrDawnGpu::appendCommandBuffer(wgpu::CommandBuffer commandBuffer) {
899 if (commandBuffer) {
900 fCommandBuffers.push_back(commandBuffer);
901 }
902 }
903
getCopyEncoder()904 wgpu::CommandEncoder GrDawnGpu::getCopyEncoder() {
905 if (!fCopyEncoder) {
906 fCopyEncoder = fDevice.CreateCommandEncoder();
907 }
908 return fCopyEncoder;
909 }
910
flushCopyEncoder()911 void GrDawnGpu::flushCopyEncoder() {
912 if (fCopyEncoder) {
913 fCommandBuffers.push_back(fCopyEncoder.Finish());
914 fCopyEncoder = nullptr;
915 }
916 }
917
moveStagingBuffersToBusyAndMapAsync()918 void GrDawnGpu::moveStagingBuffersToBusyAndMapAsync() {
919 for (size_t i = 0; i < fSubmittedStagingBuffers.size(); ++i) {
920 GrDawnBuffer* buffer = static_cast<GrDawnBuffer*>(fSubmittedStagingBuffers[i].get());
921 buffer->mapWriteAsync();
922 fBusyStagingBuffers.push_back(std::move(fSubmittedStagingBuffers[i]));
923 }
924 fSubmittedStagingBuffers.clear();
925 }
926
SkSLToSPIRV(const char * shaderString,SkSL::ProgramKind kind,uint32_t rtFlipOffset,SkSL::Program::Inputs * inputs)927 SkSL::String GrDawnGpu::SkSLToSPIRV(const char* shaderString,
928 SkSL::ProgramKind kind,
929 uint32_t rtFlipOffset,
930 SkSL::Program::Inputs* inputs) {
931 auto errorHandler = this->getContext()->priv().getShaderErrorHandler();
932 SkSL::Program::Settings settings;
933 settings.fRTFlipOffset = rtFlipOffset;
934 settings.fRTFlipBinding = 0;
935 settings.fRTFlipSet = 0;
936 std::unique_ptr<SkSL::Program> program = this->shaderCompiler()->convertProgram(
937 kind,
938 shaderString,
939 settings);
940 if (!program) {
941 errorHandler->compileError(shaderString, this->shaderCompiler()->errorText().c_str());
942 return "";
943 }
944 if (inputs) {
945 *inputs = program->fInputs;
946 }
947 SkSL::String code;
948 if (!this->shaderCompiler()->toSPIRV(*program, &code)) {
949 errorHandler->compileError(shaderString, this->shaderCompiler()->errorText().c_str());
950 return "";
951 }
952 return code;
953 }
954
createShaderModule(const SkSL::String & spirvSource)955 wgpu::ShaderModule GrDawnGpu::createShaderModule(const SkSL::String& spirvSource) {
956 wgpu::ShaderModuleSPIRVDescriptor desc;
957 desc.codeSize = spirvSource.size() / 4;
958 desc.code = reinterpret_cast<const uint32_t*>(spirvSource.c_str());
959
960 wgpu::ShaderModuleDescriptor smDesc;
961 smDesc.nextInChain = &desc;
962
963 return fDevice.CreateShaderModule(&smDesc);
964 }
965