1 /*
2 * Copyright 2019 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "src/gpu/dawn/GrDawnGpu.h"
9
10 #include "include/gpu/GrBackendSemaphore.h"
11 #include "include/gpu/GrBackendSurface.h"
12 #include "include/gpu/GrContextOptions.h"
13 #include "include/gpu/GrDirectContext.h"
14 #include "src/core/SkConvertPixels.h"
15 #include "src/gpu/GrDataUtils.h"
16 #include "src/gpu/GrDirectContextPriv.h"
17 #include "src/gpu/GrGeometryProcessor.h"
18 #include "src/gpu/GrGpuResourceCacheAccess.h"
19 #include "src/gpu/GrPipeline.h"
20 #include "src/gpu/GrRenderTarget.h"
21 #include "src/gpu/GrSemaphore.h"
22 #include "src/gpu/GrStencilSettings.h"
23 #include "src/gpu/GrTexture.h"
24 #include "src/gpu/GrThreadSafePipelineBuilder.h"
25 #include "src/gpu/dawn/GrDawnAttachment.h"
26 #include "src/gpu/dawn/GrDawnBuffer.h"
27 #include "src/gpu/dawn/GrDawnCaps.h"
28 #include "src/gpu/dawn/GrDawnOpsRenderPass.h"
29 #include "src/gpu/dawn/GrDawnProgramBuilder.h"
30 #include "src/gpu/dawn/GrDawnRenderTarget.h"
31 #include "src/gpu/dawn/GrDawnTexture.h"
32 #include "src/gpu/dawn/GrDawnUtil.h"
33
34 #include "src/core/SkAutoMalloc.h"
35 #include "src/core/SkMipmap.h"
36 #include "src/sksl/SkSLCompiler.h"
37
38 #if !defined(SK_BUILD_FOR_WIN)
39 #include <unistd.h>
40 #endif // !defined(SK_BUILD_FOR_WIN)
41
// Capacity of the LRU cache of GrDawnPrograms (fRenderPipelineCache).
static constexpr int kMaxRenderPipelineEntries = 1024;
43
namespace {

// Polls for completion of work previously submitted to the Dawn queue.
// Construction registers an OnSubmittedWorkDone callback; check() ticks the
// device and reports whether that callback has fired yet.
class Fence {
public:
    Fence(const wgpu::Device& device, const wgpu::Fence& fence)
      : fDevice(device), fCalled(false) {
        // NOTE(review): the wgpu::Fence parameter is unused; completion is
        // tracked solely through the queue's OnSubmittedWorkDone callback.
        device.GetQueue().OnSubmittedWorkDone(0, callback, this);
    }

    // Invoked by Dawn when the queue's submitted work is done. The status is
    // ignored: any completion (success or error) marks this fence signaled.
    static void callback(WGPUQueueWorkDoneStatus status, void* userData) {
        Fence* fence = static_cast<Fence*>(userData);
        fence->fCalled = true;
    }

    // Returns true once the work-done callback has run. Ticks the device so
    // pending callbacks get a chance to fire.
    bool check() {
        fDevice.Tick();
        return fCalled;
    }

private:
    wgpu::Device fDevice;
    bool fCalled;
};

}  // namespace
69
to_dawn_filter_mode(GrSamplerState::Filter filter)70 static wgpu::FilterMode to_dawn_filter_mode(GrSamplerState::Filter filter) {
71 switch (filter) {
72 case GrSamplerState::Filter::kNearest:
73 return wgpu::FilterMode::Nearest;
74 case GrSamplerState::Filter::kLinear:
75 return wgpu::FilterMode::Linear;
76 default:
77 SkASSERT(!"unsupported filter mode");
78 return wgpu::FilterMode::Nearest;
79 }
80 }
81
to_dawn_mipmap_mode(GrSamplerState::MipmapMode mode)82 static wgpu::FilterMode to_dawn_mipmap_mode(GrSamplerState::MipmapMode mode) {
83 switch (mode) {
84 case GrSamplerState::MipmapMode::kNone:
85 // Fall-through (Dawn does not have an equivalent for "None")
86 case GrSamplerState::MipmapMode::kNearest:
87 return wgpu::FilterMode::Nearest;
88 case GrSamplerState::MipmapMode::kLinear:
89 return wgpu::FilterMode::Linear;
90 default:
91 SkASSERT(!"unsupported filter mode");
92 return wgpu::FilterMode::Nearest;
93 }
94 }
95
to_dawn_address_mode(GrSamplerState::WrapMode wrapMode)96 static wgpu::AddressMode to_dawn_address_mode(GrSamplerState::WrapMode wrapMode) {
97 switch (wrapMode) {
98 case GrSamplerState::WrapMode::kClamp:
99 return wgpu::AddressMode::ClampToEdge;
100 case GrSamplerState::WrapMode::kRepeat:
101 return wgpu::AddressMode::Repeat;
102 case GrSamplerState::WrapMode::kMirrorRepeat:
103 return wgpu::AddressMode::MirrorRepeat;
104 case GrSamplerState::WrapMode::kClampToBorder:
105 SkASSERT(!"unsupported address mode");
106 }
107 SkASSERT(!"unsupported address mode");
108 return wgpu::AddressMode::ClampToEdge;
109 }
110
Make(const wgpu::Device & device,const GrContextOptions & options,GrDirectContext * direct)111 sk_sp<GrGpu> GrDawnGpu::Make(const wgpu::Device& device,
112 const GrContextOptions& options, GrDirectContext* direct) {
113 if (!device) {
114 return nullptr;
115 }
116
117 return sk_sp<GrGpu>(new GrDawnGpu(direct, options, device));
118 }
119
120 ////////////////////////////////////////////////////////////////////////////////
121
// Constructs the Dawn GPU backend. Caches the device's default queue, sets up
// the uniform ring buffer and staging-buffer manager, and sizes the render
// pipeline LRU cache to kMaxRenderPipelineEntries.
GrDawnGpu::GrDawnGpu(GrDirectContext* direct, const GrContextOptions& options,
                     const wgpu::Device& device)
        : INHERITED(direct)
        , fDevice(device)
        , fQueue(device.GetQueue())
        , fUniformRingBuffer(this, wgpu::BufferUsage::Uniform)
        , fStagingBufferManager(this)
        , fRenderPipelineCache(kMaxRenderPipelineEntries)
        , fFinishCallbacks(this) {
    this->initCapsAndCompiler(sk_make_sp<GrDawnCaps>(options));
}
133
~GrDawnGpu()134 GrDawnGpu::~GrDawnGpu() { this->finishOutstandingGpuWork(); }
135
disconnect(DisconnectType type)136 void GrDawnGpu::disconnect(DisconnectType type) {
137 if (DisconnectType::kCleanup == type) {
138 this->finishOutstandingGpuWork();
139 }
140 fStagingBufferManager.reset();
141 fQueue = nullptr;
142 fDevice = nullptr;
143 INHERITED::disconnect(type);
144 }
145
// The Dawn backend does not provide a thread-safe pipeline builder; pipelines
// are cached per-GrDawnGpu in fRenderPipelineCache instead.
GrThreadSafePipelineBuilder* GrDawnGpu::pipelineBuilder() {
    return nullptr;
}
149
// See pipelineBuilder(): no shared pipeline builder exists for this backend.
sk_sp<GrThreadSafePipelineBuilder> GrDawnGpu::refPipelineBuilder() {
    return nullptr;
}
153
154 ///////////////////////////////////////////////////////////////////////////////
155
// Creates (and retains) the single in-flight ops render pass for |rt|.
// NOTE(review): bounds, sampledProxies, and renderPassXferBarriers are
// currently ignored by this backend; only origin and load/store info are
// forwarded to GrDawnOpsRenderPass.
GrOpsRenderPass* GrDawnGpu::onGetOpsRenderPass(
        GrRenderTarget* rt,
        bool /*useMSAASurface*/,
        GrAttachment*,
        GrSurfaceOrigin origin,
        const SkIRect& bounds,
        const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
        const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo,
        const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
        GrXferBarrierFlags renderPassXferBarriers) {
    fOpsRenderPass.reset(new GrDawnOpsRenderPass(this, rt, origin, colorInfo, stencilInfo));
    return fOpsRenderPass.get();
}
169
170 ///////////////////////////////////////////////////////////////////////////////
onCreateBuffer(size_t size,GrGpuBufferType type,GrAccessPattern accessPattern,const void * data)171 sk_sp<GrGpuBuffer> GrDawnGpu::onCreateBuffer(size_t size, GrGpuBufferType type,
172 GrAccessPattern accessPattern, const void* data) {
173 sk_sp<GrGpuBuffer> b(new GrDawnBuffer(this, size, type, accessPattern));
174 if (data && b) {
175 b->updateData(data, size);
176 }
177 return b;
178 }
179
180 ////////////////////////////////////////////////////////////////////////////////
onWritePixels(GrSurface * surface,int left,int top,int width,int height,GrColorType surfaceColorType,GrColorType srcColorType,const GrMipLevel texels[],int mipLevelCount,bool prepForTexSampling)181 bool GrDawnGpu::onWritePixels(GrSurface* surface, int left, int top, int width, int height,
182 GrColorType surfaceColorType, GrColorType srcColorType,
183 const GrMipLevel texels[], int mipLevelCount,
184 bool prepForTexSampling) {
185 GrDawnTexture* texture = static_cast<GrDawnTexture*>(surface->asTexture());
186 if (!texture) {
187 return false;
188 }
189 this->uploadTextureData(srcColorType, texels, mipLevelCount,
190 SkIRect::MakeXYWH(left, top, width, height), texture->texture());
191 if (mipLevelCount < texture->maxMipmapLevel() + 1) {
192 texture->markMipmapsDirty();
193 }
194 return true;
195 }
196
// Buffer-to-texture transfers are not implemented in the Dawn backend yet.
bool GrDawnGpu::onTransferPixelsTo(GrTexture* texture, int left, int top, int width, int height,
                                   GrColorType textureColorType, GrColorType bufferColorType,
                                   sk_sp<GrGpuBuffer> transferBuffer, size_t bufferOffset,
                                   size_t rowBytes) {
    SkASSERT(!"unimplemented");
    return false;
}
204
// Surface-to-buffer transfers are not implemented in the Dawn backend yet.
bool GrDawnGpu::onTransferPixelsFrom(GrSurface* surface, int left, int top, int width, int height,
                                     GrColorType surfaceColorType, GrColorType bufferColorType,
                                     sk_sp<GrGpuBuffer> transferBuffer, size_t offset) {
    SkASSERT(!"unimplemented");
    return false;
}
211
212 ////////////////////////////////////////////////////////////////////////////////
onCreateTexture(SkISize dimensions,const GrBackendFormat & backendFormat,GrRenderable renderable,int renderTargetSampleCnt,SkBudgeted budgeted,GrProtected,int mipLevelCount,uint32_t levelClearMask)213 sk_sp<GrTexture> GrDawnGpu::onCreateTexture(SkISize dimensions,
214 const GrBackendFormat& backendFormat,
215 GrRenderable renderable,
216 int renderTargetSampleCnt,
217 SkBudgeted budgeted,
218 GrProtected,
219 int mipLevelCount,
220 uint32_t levelClearMask) {
221 if (levelClearMask) {
222 return nullptr;
223 }
224
225 wgpu::TextureFormat format;
226 if (!backendFormat.asDawnFormat(&format)) {
227 return nullptr;
228 }
229
230 GrMipmapStatus mipmapStatus =
231 mipLevelCount > 1 ? GrMipmapStatus::kDirty : GrMipmapStatus::kNotAllocated;
232
233 return GrDawnTexture::Make(this, dimensions, format, renderable, renderTargetSampleCnt,
234 budgeted, mipLevelCount, mipmapStatus);
235 }
236
// Compressed textures are not implemented in the Dawn backend yet.
sk_sp<GrTexture> GrDawnGpu::onCreateCompressedTexture(SkISize dimensions, const GrBackendFormat&,
                                                      SkBudgeted, GrMipmapped, GrProtected,
                                                      const void* data, size_t dataSize) {
    SkASSERT(!"unimplemented");
    return nullptr;
}
243
onWrapBackendTexture(const GrBackendTexture & backendTex,GrWrapOwnership ownership,GrWrapCacheable cacheable,GrIOType ioType)244 sk_sp<GrTexture> GrDawnGpu::onWrapBackendTexture(const GrBackendTexture& backendTex,
245 GrWrapOwnership ownership,
246 GrWrapCacheable cacheable,
247 GrIOType ioType) {
248 GrDawnTextureInfo info;
249 if (!backendTex.getDawnTextureInfo(&info)) {
250 return nullptr;
251 }
252
253 SkISize dimensions = { backendTex.width(), backendTex.height() };
254 return GrDawnTexture::MakeWrapped(this, dimensions, GrRenderable::kNo, 1, cacheable, ioType,
255 info);
256 }
257
// Wrapping compressed backend textures is unsupported; always fails.
sk_sp<GrTexture> GrDawnGpu::onWrapCompressedBackendTexture(const GrBackendTexture& backendTex,
                                                           GrWrapOwnership ownership,
                                                           GrWrapCacheable cacheable) {
    return nullptr;
}
263
onWrapRenderableBackendTexture(const GrBackendTexture & tex,int sampleCnt,GrWrapOwnership,GrWrapCacheable cacheable)264 sk_sp<GrTexture> GrDawnGpu::onWrapRenderableBackendTexture(const GrBackendTexture& tex,
265 int sampleCnt,
266 GrWrapOwnership,
267 GrWrapCacheable cacheable) {
268 GrDawnTextureInfo info;
269 if (!tex.getDawnTextureInfo(&info) || !info.fTexture) {
270 return nullptr;
271 }
272
273 SkISize dimensions = { tex.width(), tex.height() };
274 sampleCnt = this->caps()->getRenderTargetSampleCount(sampleCnt, tex.getBackendFormat());
275 if (sampleCnt < 1) {
276 return nullptr;
277 }
278
279 sk_sp<GrTexture> result = GrDawnTexture::MakeWrapped(this, dimensions, GrRenderable::kYes,
280 sampleCnt, cacheable, kRW_GrIOType, info);
281 result->markMipmapsDirty();
282 return result;
283 }
284
onWrapBackendRenderTarget(const GrBackendRenderTarget & rt)285 sk_sp<GrRenderTarget> GrDawnGpu::onWrapBackendRenderTarget(const GrBackendRenderTarget& rt) {
286 GrDawnRenderTargetInfo info;
287 if (!rt.getDawnRenderTargetInfo(&info) || !info.fTextureView) {
288 return nullptr;
289 }
290
291 SkISize dimensions = { rt.width(), rt.height() };
292 int sampleCnt = 1;
293 return GrDawnRenderTarget::MakeWrapped(this, dimensions, sampleCnt, info);
294 }
295
// Creates a stencil attachment sized to |dimensions|; the color format is
// irrelevant for Dawn stencil buffers and is ignored.
sk_sp<GrAttachment> GrDawnGpu::makeStencilAttachment(const GrBackendFormat& /*colorFormat*/,
                                                     SkISize dimensions, int numStencilSamples) {
    fStats.incStencilAttachmentCreates();
    return GrDawnAttachment::MakeStencil(this, dimensions, numStencilSamples);
}
301
onCreateBackendTexture(SkISize dimensions,const GrBackendFormat & backendFormat,GrRenderable renderable,GrMipmapped mipMapped,GrProtected isProtected)302 GrBackendTexture GrDawnGpu::onCreateBackendTexture(SkISize dimensions,
303 const GrBackendFormat& backendFormat,
304 GrRenderable renderable,
305 GrMipmapped mipMapped,
306 GrProtected isProtected) {
307 wgpu::TextureFormat format;
308 if (!backendFormat.asDawnFormat(&format)) {
309 return GrBackendTexture();
310 }
311
312 wgpu::TextureDescriptor desc;
313 desc.usage =
314 wgpu::TextureUsage::Sampled |
315 wgpu::TextureUsage::CopySrc |
316 wgpu::TextureUsage::CopyDst;
317
318 if (GrRenderable::kYes == renderable) {
319 desc.usage |= wgpu::TextureUsage::RenderAttachment;
320 }
321
322 int numMipLevels = 1;
323 if (mipMapped == GrMipmapped::kYes) {
324 numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1;
325 }
326
327 desc.size.width = dimensions.width();
328 desc.size.height = dimensions.height();
329 desc.size.depthOrArrayLayers = 1;
330 desc.format = format;
331 desc.mipLevelCount = numMipLevels;
332
333 wgpu::Texture tex = this->device().CreateTexture(&desc);
334
335 GrDawnTextureInfo info;
336 info.fTexture = tex;
337 info.fFormat = desc.format;
338 info.fLevelCount = desc.mipLevelCount;
339 return GrBackendTexture(dimensions.width(), dimensions.height(), info);
340 }
341
uploadTextureData(GrColorType srcColorType,const GrMipLevel texels[],int mipLevelCount,const SkIRect & rect,wgpu::Texture texture)342 void GrDawnGpu::uploadTextureData(GrColorType srcColorType, const GrMipLevel texels[],
343 int mipLevelCount, const SkIRect& rect,
344 wgpu::Texture texture) {
345 uint32_t x = rect.x();
346 uint32_t y = rect.y();
347 uint32_t width = rect.width();
348 uint32_t height = rect.height();
349
350 for (int i = 0; i < mipLevelCount; i++) {
351 const void* src = texels[i].fPixels;
352 size_t srcRowBytes = texels[i].fRowBytes;
353 SkColorType colorType = GrColorTypeToSkColorType(srcColorType);
354 size_t trimRowBytes = width * SkColorTypeBytesPerPixel(colorType);
355 size_t dstRowBytes = GrDawnRoundRowBytes(trimRowBytes);
356 size_t size = dstRowBytes * height;
357 GrStagingBufferManager::Slice slice =
358 this->stagingBufferManager()->allocateStagingBufferSlice(size);
359 SkRectMemcpy(slice.fOffsetMapPtr, dstRowBytes, src, srcRowBytes, trimRowBytes, height);
360
361 wgpu::ImageCopyBuffer srcBuffer = {};
362 srcBuffer.buffer = static_cast<GrDawnBuffer*>(slice.fBuffer)->get();
363 srcBuffer.layout.offset = slice.fOffset;
364 srcBuffer.layout.bytesPerRow = dstRowBytes;
365 srcBuffer.layout.rowsPerImage = height;
366
367 wgpu::ImageCopyTexture dstTexture;
368 dstTexture.texture = texture;
369 dstTexture.mipLevel = i;
370 dstTexture.origin = {x, y, 0};
371
372 wgpu::Extent3D copySize = {width, height, 1};
373 this->getCopyEncoder().CopyBufferToTexture(&srcBuffer, &dstTexture, ©Size);
374 x /= 2;
375 y /= 2;
376 width = std::max(1u, width / 2);
377 height = std::max(1u, height / 2);
378 }
379 }
380
// Clears every mip level of |backendTexture| to |color|: fills a base-sized
// CPU image with the color once, then copies a (row-padded) prefix of it into
// each level via staging buffers.
// NOTE(review): |finishedCallback| is accepted but never signaled on this
// path — confirm whether callers rely on it.
bool GrDawnGpu::onClearBackendTexture(const GrBackendTexture& backendTexture,
                                      sk_sp<GrRefCntedCallback> finishedCallback,
                                      std::array<float, 4> color) {
    GrDawnTextureInfo info;
    SkAssertResult(backendTexture.getDawnTextureInfo(&info));

    GrColorType colorType;
    if (!GrDawnFormatToGrColorType(info.fFormat, &colorType)) {
        return false;
    }

    // Fill one base-level-sized buffer with the clear color. Smaller mip
    // levels reuse a prefix of this storage: every pixel holds the same
    // color, so any contiguous slice is still correctly colored.
    size_t bpp = GrDawnBytesPerBlock(info.fFormat);
    size_t baseLayerSize = bpp * backendTexture.width() * backendTexture.height();
    SkAutoMalloc defaultStorage(baseLayerSize);
    GrImageInfo imageInfo(colorType, kUnpremul_SkAlphaType, nullptr, backendTexture.dimensions());
    GrClearImage(imageInfo, defaultStorage.get(), bpp * backendTexture.width(), color);

    wgpu::Device device = this->device();
    wgpu::CommandEncoder copyEncoder = this->getCopyEncoder();
    int w = backendTexture.width(), h = backendTexture.height();
    for (uint32_t i = 0; i < info.fLevelCount; i++) {
        size_t origRowBytes = bpp * w;
        // Dawn requires bytesPerRow padded to its row alignment.
        size_t rowBytes = GrDawnRoundRowBytes(origRowBytes);
        size_t size = rowBytes * h;
        GrStagingBufferManager::Slice stagingBuffer =
                this->stagingBufferManager()->allocateStagingBufferSlice(size);
        if (rowBytes == origRowBytes) {
            // Tightly packed: one bulk copy suffices.
            memcpy(stagingBuffer.fOffsetMapPtr, defaultStorage.get(), size);
        } else {
            // Copy row by row, padding each destination row out to rowBytes.
            const char* src = static_cast<const char*>(defaultStorage.get());
            char* dst = static_cast<char*>(stagingBuffer.fOffsetMapPtr);
            for (int row = 0; row < h; row++) {
                memcpy(dst, src, origRowBytes);
                dst += rowBytes;
                src += origRowBytes;
            }
        }
        wgpu::ImageCopyBuffer srcBuffer = {};
        srcBuffer.buffer = static_cast<GrDawnBuffer*>(stagingBuffer.fBuffer)->get();
        srcBuffer.layout.offset = stagingBuffer.fOffset;
        srcBuffer.layout.bytesPerRow = rowBytes;
        srcBuffer.layout.rowsPerImage = h;
        wgpu::ImageCopyTexture dstTexture;
        dstTexture.texture = info.fTexture;
        dstTexture.mipLevel = i;
        dstTexture.origin = {0, 0, 0};
        wgpu::Extent3D copySize = {(uint32_t)w, (uint32_t)h, 1};
        copyEncoder.CopyBufferToTexture(&srcBuffer, &dstTexture, &copySize);
        // Next level is half the size, clamped to 1 texel per dimension.
        w = std::max(1, w / 2);
        h = std::max(1, h / 2);
    }
    return true;
}
434
// Compressed backend textures are unsupported; returns an invalid texture.
GrBackendTexture GrDawnGpu::onCreateCompressedBackendTexture(
        SkISize dimensions, const GrBackendFormat&, GrMipmapped, GrProtected) {
    return {};
}
439
// Updating compressed backend textures is unsupported; always fails.
bool GrDawnGpu::onUpdateCompressedBackendTexture(const GrBackendTexture&,
                                                 sk_sp<GrRefCntedCallback> finishedCallback,
                                                 const void* data,
                                                 size_t size) {
    return false;
}
446
// Releases the reference this GrBackendTexture's info holds on its Dawn
// texture. NOTE(review): only the local copy's wgpu::Texture ref is dropped
// here; actual destruction relies on Dawn's own reference counting — confirm
// this matches GrDawnTextureInfo's ownership model.
void GrDawnGpu::deleteBackendTexture(const GrBackendTexture& tex) {
    GrDawnTextureInfo info;
    if (tex.getDawnTextureInfo(&info)) {
        info.fTexture = nullptr;
    }
}
453
// Ahead-of-time pipeline compilation is not supported by this backend.
bool GrDawnGpu::compile(const GrProgramDesc&, const GrProgramInfo&) {
    return false;
}
457
458 #if GR_TEST_UTILS
isTestingOnlyBackendTexture(const GrBackendTexture & tex) const459 bool GrDawnGpu::isTestingOnlyBackendTexture(const GrBackendTexture& tex) const {
460 GrDawnTextureInfo info;
461 if (!tex.getDawnTextureInfo(&info)) {
462 return false;
463 }
464
465 return info.fTexture.Get();
466 }
467
createTestingOnlyBackendRenderTarget(SkISize dimensions,GrColorType colorType,int sampleCnt,GrProtected isProtected)468 GrBackendRenderTarget GrDawnGpu::createTestingOnlyBackendRenderTarget(SkISize dimensions,
469 GrColorType colorType,
470 int sampleCnt,
471 GrProtected isProtected) {
472 if (dimensions.width() > this->caps()->maxTextureSize() ||
473 dimensions.height() > this->caps()->maxTextureSize()) {
474 return {};
475 }
476
477 // We don't support MSAA in this backend yet.
478 if (sampleCnt != 1) {
479 return {};
480 }
481
482 if (isProtected == GrProtected::kYes) {
483 return {};
484 }
485
486 wgpu::TextureFormat format;
487 if (!GrColorTypeToDawnFormat(colorType, &format)) {
488 return {};
489 }
490
491 wgpu::TextureDescriptor desc;
492 desc.usage =
493 wgpu::TextureUsage::CopySrc |
494 wgpu::TextureUsage::RenderAttachment;
495
496 desc.size.width = dimensions.width();
497 desc.size.height = dimensions.height();
498 desc.size.depthOrArrayLayers = 1;
499 desc.format = format;
500
501 wgpu::Texture tex = this->device().CreateTexture(&desc);
502
503 GrDawnRenderTargetInfo info;
504 info.fTextureView = tex.CreateView();
505 info.fFormat = desc.format;
506 info.fLevelCount = desc.mipLevelCount;
507
508 return GrBackendRenderTarget(dimensions.width(), dimensions.height(), 1, 0, info);
509 }
510
// Test-only: releases the reference this GrBackendRenderTarget's info holds
// on its Dawn texture view (mirrors deleteBackendTexture).
void GrDawnGpu::deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget& rt) {
    GrDawnRenderTargetInfo info;
    if (rt.getDawnRenderTargetInfo(&info)) {
        info.fTextureView = nullptr;
    }
}
517
518 #endif
519
// Registers a callback to be invoked once outstanding GPU work finishes; the
// callbacks fire from onSubmitToGpu(syncCpu=true) or checkFinishProcs().
void GrDawnGpu::addFinishedProc(GrGpuFinishedProc finishedProc,
                                GrGpuFinishedContext finishedContext) {
    fFinishCallbacks.add(finishedProc, finishedContext);
}
524
checkForCompletedStagingBuffers()525 void GrDawnGpu::checkForCompletedStagingBuffers() {
526 // We expect all the buffer maps to trigger in order of submission so we bail after the first
527 // non finished map since we always push new busy buffers to the back of our list.
528 while (!fBusyStagingBuffers.empty() && fBusyStagingBuffers.front()->isMapped()) {
529 fBusyStagingBuffers.pop_front();
530 }
531 }
532
// Blocks until every busy staging buffer's async map completes, ticking the
// device so Dawn can deliver the map callbacks.
void GrDawnGpu::waitOnAllBusyStagingBuffers() {
    while (!fBusyStagingBuffers.empty()) {
        fDevice.Tick();
        this->checkForCompletedStagingBuffers();
    }
}
539
// Keeps |buffer| alive until the current submission completes; it will move
// to the busy list on the next submit.
void GrDawnGpu::takeOwnershipOfBuffer(sk_sp<GrGpuBuffer> buffer) {
    fSubmittedStagingBuffers.push_back(std::move(buffer));
}
543
544
// OnSubmittedWorkDone trampoline used by onSubmitToGpu's syncCpu path; sets
// the caller's bool flag regardless of |status|.
static void callback(WGPUQueueWorkDoneStatus status, void* userData) {
    *static_cast<bool*>(userData) = true;
}
548
// Flushes pending copies, submits all recorded command buffers, and kicks
// off async maps on this submission's staging buffers. When |syncCpu| is
// true, blocks until the queue's work completes, then fires all registered
// finish callbacks.
bool GrDawnGpu::onSubmitToGpu(bool syncCpu) {
    this->flushCopyEncoder();
    if (!fCommandBuffers.empty()) {
        fQueue.Submit(fCommandBuffers.size(), &fCommandBuffers.front());
        fCommandBuffers.clear();
    }

    this->moveStagingBuffersToBusyAndMapAsync();
    if (syncCpu) {
        bool called = false;
        fDevice.GetQueue().OnSubmittedWorkDone(0, callback, &called);
        while (!called) {
            // Pump Dawn so the work-done callback can fire.
            fDevice.Tick();
        }
        fFinishCallbacks.callAll(true);
    }

    // Reclaim any staging buffers whose maps already completed.
    this->checkForCompletedStagingBuffers();

    return true;
}
570
get_dawn_texture_from_surface(GrSurface * src)571 static wgpu::Texture get_dawn_texture_from_surface(GrSurface* src) {
572 if (auto t = static_cast<GrDawnTexture*>(src->asTexture())) {
573 return t->texture();
574 } else {
575 return nullptr;
576 }
577 }
578
onCopySurface(GrSurface * dst,GrSurface * src,const SkIRect & srcRect,const SkIPoint & dstPoint)579 bool GrDawnGpu::onCopySurface(GrSurface* dst,
580 GrSurface* src,
581 const SkIRect& srcRect,
582 const SkIPoint& dstPoint) {
583 wgpu::Texture srcTexture = get_dawn_texture_from_surface(src);
584 wgpu::Texture dstTexture = get_dawn_texture_from_surface(dst);
585 if (!srcTexture || !dstTexture) {
586 return false;
587 }
588
589 uint32_t width = srcRect.width(), height = srcRect.height();
590
591 wgpu::ImageCopyTexture srcTextureView, dstTextureView;
592 srcTextureView.texture = srcTexture;
593 srcTextureView.origin = {(uint32_t) srcRect.x(), (uint32_t) srcRect.y(), 0};
594 dstTextureView.texture = dstTexture;
595 dstTextureView.origin = {(uint32_t) dstPoint.x(), (uint32_t) dstPoint.y(), 0};
596
597 wgpu::Extent3D copySize = {width, height, 1};
598 this->getCopyEncoder().CopyTextureToTexture(&srcTextureView, &dstTextureView, ©Size);
599 return true;
600 }
601
// MapAsync trampoline used by onReadPixels; sets the caller's bool flag
// regardless of |status|.
static void callback(WGPUBufferMapAsyncStatus status, void* userdata) {
    *static_cast<bool*>(userdata) = true;
}
605
onReadPixels(GrSurface * surface,int left,int top,int width,int height,GrColorType surfaceColorType,GrColorType dstColorType,void * buffer,size_t rowBytes)606 bool GrDawnGpu::onReadPixels(GrSurface* surface, int left, int top, int width, int height,
607 GrColorType surfaceColorType, GrColorType dstColorType, void* buffer,
608 size_t rowBytes) {
609 wgpu::Texture tex = get_dawn_texture_from_surface(surface);
610
611 if (!tex || 0 == rowBytes) {
612 return false;
613 }
614 size_t origRowBytes = rowBytes;
615 int origSizeInBytes = origRowBytes * height;
616 rowBytes = GrDawnRoundRowBytes(rowBytes);
617 int sizeInBytes = rowBytes * height;
618
619 wgpu::BufferDescriptor desc;
620 desc.usage = wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::MapRead;
621 desc.size = sizeInBytes;
622
623 wgpu::Buffer buf = device().CreateBuffer(&desc);
624
625 wgpu::ImageCopyTexture srcTexture;
626 srcTexture.texture = tex;
627 srcTexture.origin = {(uint32_t) left, (uint32_t) top, 0};
628
629 wgpu::ImageCopyBuffer dstBuffer = {};
630 dstBuffer.buffer = buf;
631 dstBuffer.layout.offset = 0;
632 dstBuffer.layout.bytesPerRow = rowBytes;
633 dstBuffer.layout.rowsPerImage = height;
634
635 wgpu::Extent3D copySize = {(uint32_t) width, (uint32_t) height, 1};
636 this->getCopyEncoder().CopyTextureToBuffer(&srcTexture, &dstBuffer, ©Size);
637 this->submitToGpu(true);
638
639 bool mapped = false;
640 buf.MapAsync(wgpu::MapMode::Read, 0, 0, callback, &mapped);
641 while (!mapped) {
642 device().Tick();
643 }
644 const void* readPixelsPtr = buf.GetConstMappedRange();
645
646 if (rowBytes == origRowBytes) {
647 memcpy(buffer, readPixelsPtr, origSizeInBytes);
648 } else {
649 const char* src = static_cast<const char*>(readPixelsPtr);
650 char* dst = static_cast<char*>(buffer);
651 for (int row = 0; row < height; row++) {
652 memcpy(dst, src, origRowBytes);
653 dst += origRowBytes;
654 src += rowBytes;
655 }
656 }
657 buf.Unmap();
658 return true;
659 }
660
// Regenerates mip levels 1..N of |tex| on the GPU: renders each level of a
// temporary half-sized texture by sampling the previous level with a linear
// filter, then copies every rendered level back into |tex|.
bool GrDawnGpu::onRegenerateMipMapLevels(GrTexture* tex) {
    this->flushCopyEncoder();
    GrDawnTexture* src = static_cast<GrDawnTexture*>(tex);
    int srcWidth = tex->width();
    int srcHeight = tex->height();

    // SkMipmap doesn't include the base level in the level count so we have to add 1
    uint32_t levelCount = SkMipmap::ComputeLevelCount(tex->width(), tex->height()) + 1;

    // Create a temporary texture for mipmap generation, then copy to source.
    // We have to do this even for renderable textures, since GrDawnRenderTarget currently only
    // contains a view, not a texture.
    wgpu::TextureDescriptor texDesc;
    texDesc.usage = wgpu::TextureUsage::Sampled |
                    wgpu::TextureUsage::CopySrc |
                    wgpu::TextureUsage::RenderAttachment;
    // Temp texture starts at half size: its level i holds |tex|'s level i+1.
    texDesc.size.width = (tex->width() + 1) / 2;
    texDesc.size.height = (tex->height() + 1) / 2;
    texDesc.size.depthOrArrayLayers = 1;
    texDesc.mipLevelCount = levelCount - 1;
    texDesc.format = src->format();
    wgpu::Texture dstTexture = fDevice.CreateTexture(&texDesc);

    // Full-screen quad (triangle strip) with per-vertex texture coordinates.
    const char* vs =
        "layout(location = 0) out float2 texCoord;\n"
        "float2 positions[4] = float2[4](float2(-1.0, 1.0),\n"
        "float2(1.0, 1.0),\n"
        "float2(-1.0, -1.0),\n"
        "float2(1.0, -1.0));\n"
        "float2 texCoords[4] = float2[4](float2(0.0, 0.0),\n"
        "float2(1.0, 0.0),\n"
        "float2(0.0, 1.0),\n"
        "float2(1.0, 1.0));\n"
        "void main() {\n"
        "    sk_Position = float4(positions[sk_VertexID], 0.0, 1.0);\n"
        "    texCoord = texCoords[sk_VertexID];\n"
        "}\n";
    SkSL::String vsSPIRV =
        this->SkSLToSPIRV(vs, SkSL::ProgramKind::kVertex, false, 0, nullptr);

    // Fragment shader samples the previous mip level.
    const char* fs =
        "layout(set = 0, binding = 0) uniform sampler samp;\n"
        "layout(set = 0, binding = 1) uniform texture2D tex;\n"
        "layout(location = 0) in float2 texCoord;\n"
        "void main() {\n"
        "    sk_FragColor = sample(makeSampler2D(tex, samp), texCoord);\n"
        "}\n";
    SkSL::String fsSPIRV =
        this->SkSLToSPIRV(fs, SkSL::ProgramKind::kFragment, false, 0, nullptr);

    wgpu::VertexState vertexState;
    vertexState.module = this->createShaderModule(vsSPIRV);
    vertexState.entryPoint = "main";
    vertexState.bufferCount = 0;

    wgpu::ColorTargetState colorTargetState;
    colorTargetState.format = static_cast<GrDawnTexture*>(tex)->format();

    wgpu::FragmentState fragmentState;
    fragmentState.module = this->createShaderModule(fsSPIRV);
    fragmentState.entryPoint = "main";
    fragmentState.targetCount = 1;
    fragmentState.targets = &colorTargetState;

    wgpu::RenderPipelineDescriptor2 renderPipelineDesc;
    renderPipelineDesc.vertex = vertexState;
    renderPipelineDesc.primitive.topology = wgpu::PrimitiveTopology::TriangleStrip;
    renderPipelineDesc.primitive.stripIndexFormat = wgpu::IndexFormat::Uint16;
    renderPipelineDesc.fragment = &fragmentState;
    wgpu::RenderPipeline pipeline = fDevice.CreateRenderPipeline2(&renderPipelineDesc);

    wgpu::BindGroupLayout bgl = pipeline.GetBindGroupLayout(0);
    wgpu::TextureViewDescriptor srcViewDesc;
    srcViewDesc.mipLevelCount = 1;
    // First pass samples |tex|'s base level; later passes sample the
    // previously rendered temp level (srcView is reassigned in the loop).
    wgpu::TextureView srcView = src->texture().CreateView(&srcViewDesc);
    wgpu::SamplerDescriptor samplerDesc;
    samplerDesc.minFilter = wgpu::FilterMode::Linear;
    wgpu::Sampler sampler = fDevice.CreateSampler(&samplerDesc);
    wgpu::CommandEncoder commandEncoder = fDevice.CreateCommandEncoder();
    for (uint32_t mipLevel = 0; mipLevel < texDesc.mipLevelCount; mipLevel++) {
        int dstWidth = std::max(1, srcWidth / 2);
        int dstHeight = std::max(1, srcHeight / 2);
        wgpu::TextureViewDescriptor dstViewDesc;
        dstViewDesc.format = static_cast<GrDawnTexture*>(tex)->format();
        dstViewDesc.dimension = wgpu::TextureViewDimension::e2D;
        dstViewDesc.baseMipLevel = mipLevel;
        dstViewDesc.mipLevelCount = 1;
        wgpu::TextureView dstView = dstTexture.CreateView(&dstViewDesc);
        // Bind sampler (binding 0) and the previous level's view (binding 1).
        wgpu::BindGroupEntry bge[2];
        bge[0].binding = 0;
        bge[0].sampler = sampler;
        bge[1].binding = 1;
        bge[1].textureView = srcView;
        wgpu::BindGroupDescriptor bgDesc;
        bgDesc.layout = bgl;
        bgDesc.entryCount = 2;
        bgDesc.entries = bge;
        wgpu::BindGroup bindGroup = fDevice.CreateBindGroup(&bgDesc);
        wgpu::RenderPassColorAttachmentDescriptor colorAttachment;
        colorAttachment.attachment = dstView;
        colorAttachment.clearColor = { 0.0f, 0.0f, 0.0f, 0.0f };
        colorAttachment.loadOp = wgpu::LoadOp::Load;
        colorAttachment.storeOp = wgpu::StoreOp::Store;
        wgpu::RenderPassColorAttachmentDescriptor* colorAttachments = { &colorAttachment };
        wgpu::RenderPassDescriptor renderPassDesc;
        renderPassDesc.colorAttachmentCount = 1;
        renderPassDesc.colorAttachments = colorAttachments;
        wgpu::RenderPassEncoder rpe = commandEncoder.BeginRenderPass(&renderPassDesc);
        rpe.SetPipeline(pipeline);
        rpe.SetBindGroup(0, bindGroup);
        rpe.Draw(4, 1, 0, 0);
        rpe.EndPass();

        // Copy the freshly rendered temp level i into |tex|'s level i+1.
        wgpu::Extent3D copySize = {(uint32_t)dstWidth, (uint32_t)dstHeight, 1};
        wgpu::ImageCopyTexture srcCopyView;
        srcCopyView.texture = dstTexture;
        srcCopyView.mipLevel = mipLevel;
        wgpu::ImageCopyTexture dstCopyView;
        dstCopyView.mipLevel = mipLevel + 1;
        dstCopyView.texture = src->texture();
        commandEncoder.CopyTextureToTexture(&srcCopyView, &dstCopyView, &copySize);

        // The level just rendered becomes the next pass's sample source.
        srcHeight = dstHeight;
        srcWidth = dstWidth;
        srcView = dstView;
    }
    fCommandBuffers.push_back(commandEncoder.Finish());
    return true;
}
790
// Records the render pass's commands, flushing pending copies first so copy
// work is ordered before the draw work.
void GrDawnGpu::submit(GrOpsRenderPass* renderPass) {
    this->flushCopyEncoder();
    static_cast<GrDawnOpsRenderPass*>(renderPass)->submit();
}
795
// Creates a heap-allocated Fence tracking completion of work submitted so
// far; ownership transfers to the caller (released via deleteFence()).
GrFence SK_WARN_UNUSED_RESULT GrDawnGpu::insertFence() {
    wgpu::FenceDescriptor desc;
    wgpu::Fence fence = fQueue.CreateFence(&desc);
    return reinterpret_cast<GrFence>(new Fence(fDevice, fence));
}
801
// Non-blocking poll: returns true if the fence's work has completed.
bool GrDawnGpu::waitFence(GrFence fence) {
    return reinterpret_cast<Fence*>(fence)->check();
}
805
deleteFence(GrFence fence) const806 void GrDawnGpu::deleteFence(GrFence fence) const {
807 delete reinterpret_cast<Fence*>(fence);
808 }
809
// Semaphore creation is not supported by the Dawn backend yet; asserts in
// debug builds and signals failure to the caller by returning null.
std::unique_ptr<GrSemaphore> SK_WARN_UNUSED_RESULT GrDawnGpu::makeSemaphore(bool isOwned) {
    SkASSERT(!"unimplemented");
    return nullptr;
}
814
// Wrapping an externally created backend semaphore is not supported by the
// Dawn backend yet; asserts in debug builds and returns null.
std::unique_ptr<GrSemaphore> GrDawnGpu::wrapBackendSemaphore(
        const GrBackendSemaphore& semaphore,
        GrResourceProvider::SemaphoreWrapType wrapType,
        GrWrapOwnership ownership) {
    SkASSERT(!"unimplemented");
    return nullptr;
}
822
// Signaling a semaphore is not supported by the Dawn backend yet.
void GrDawnGpu::insertSemaphore(GrSemaphore* semaphore) {
    SkASSERT(!"unimplemented");
}
826
// Waiting on a semaphore is not supported by the Dawn backend yet.
void GrDawnGpu::waitSemaphore(GrSemaphore* semaphore) {
    SkASSERT(!"unimplemented");
}
830
// Runs any client "finished" callbacks whose associated GPU work has
// completed, by polling the finish-callback list.
void GrDawnGpu::checkFinishProcs() {
    fFinishCallbacks.check();
}
834
// Blocks until all busy staging buffers have been handed back (i.e. until
// their async map operations complete), as a proxy for outstanding GPU work.
void GrDawnGpu::finishOutstandingGpuWork() {
    this->waitOnAllBusyStagingBuffers();
}
838
// Cross-context texture sharing is not supported by the Dawn backend yet;
// asserts in debug builds and returns null.
std::unique_ptr<GrSemaphore> GrDawnGpu::prepareTextureForCrossContextUsage(GrTexture* texture) {
    SkASSERT(!"unimplemented");
    return nullptr;
}
843
getOrCreateRenderPipeline(GrRenderTarget * rt,const GrProgramInfo & programInfo)844 sk_sp<GrDawnProgram> GrDawnGpu::getOrCreateRenderPipeline(
845 GrRenderTarget* rt,
846 const GrProgramInfo& programInfo) {
847 GrProgramDesc desc = this->caps()->makeDesc(rt, programInfo);
848 if (!desc.isValid()) {
849 return nullptr;
850 }
851
852 if (sk_sp<GrDawnProgram>* program = fRenderPipelineCache.find(desc)) {
853 return *program;
854 }
855
856 wgpu::TextureFormat colorFormat;
857 SkAssertResult(programInfo.backendFormat().asDawnFormat(&colorFormat));
858
859 wgpu::TextureFormat stencilFormat = wgpu::TextureFormat::Depth24PlusStencil8;
860 bool hasDepthStencil = rt->getStencilAttachment() != nullptr;
861
862 sk_sp<GrDawnProgram> program = GrDawnProgramBuilder::Build(
863 this, rt, programInfo, colorFormat,
864 hasDepthStencil, stencilFormat, &desc);
865 fRenderPipelineCache.insert(desc, program);
866 return program;
867 }
868
getOrCreateSampler(GrSamplerState samplerState)869 wgpu::Sampler GrDawnGpu::getOrCreateSampler(GrSamplerState samplerState) {
870 auto i = fSamplers.find(samplerState);
871 if (i != fSamplers.end()) {
872 return i->second;
873 }
874 wgpu::SamplerDescriptor desc;
875 desc.addressModeU = to_dawn_address_mode(samplerState.wrapModeX());
876 desc.addressModeV = to_dawn_address_mode(samplerState.wrapModeY());
877 desc.addressModeW = wgpu::AddressMode::ClampToEdge;
878 desc.magFilter = desc.minFilter = to_dawn_filter_mode(samplerState.filter());
879 desc.mipmapFilter = to_dawn_mipmap_mode(samplerState.mipmapMode());
880 wgpu::Sampler sampler = device().CreateSampler(&desc);
881 fSamplers.insert(std::pair<GrSamplerState, wgpu::Sampler>(samplerState, sampler));
882 return sampler;
883 }
884
// Carves `size` bytes out of the shared uniform ring buffer and returns the
// resulting slice.
GrDawnRingBuffer::Slice GrDawnGpu::allocateUniformRingBufferSlice(int size) {
    return fUniformRingBuffer.allocate(size);
}
888
appendCommandBuffer(wgpu::CommandBuffer commandBuffer)889 void GrDawnGpu::appendCommandBuffer(wgpu::CommandBuffer commandBuffer) {
890 if (commandBuffer) {
891 fCommandBuffers.push_back(commandBuffer);
892 }
893 }
894
getCopyEncoder()895 wgpu::CommandEncoder GrDawnGpu::getCopyEncoder() {
896 if (!fCopyEncoder) {
897 fCopyEncoder = fDevice.CreateCommandEncoder();
898 }
899 return fCopyEncoder;
900 }
901
flushCopyEncoder()902 void GrDawnGpu::flushCopyEncoder() {
903 if (fCopyEncoder) {
904 fCommandBuffers.push_back(fCopyEncoder.Finish());
905 fCopyEncoder = nullptr;
906 }
907 }
908
moveStagingBuffersToBusyAndMapAsync()909 void GrDawnGpu::moveStagingBuffersToBusyAndMapAsync() {
910 for (size_t i = 0; i < fSubmittedStagingBuffers.size(); ++i) {
911 GrDawnBuffer* buffer = static_cast<GrDawnBuffer*>(fSubmittedStagingBuffers[i].get());
912 buffer->mapWriteAsync();
913 fBusyStagingBuffers.push_back(std::move(fSubmittedStagingBuffers[i]));
914 }
915 fSubmittedStagingBuffers.clear();
916 }
917
SkSLToSPIRV(const char * shaderString,SkSL::ProgramKind kind,bool flipY,uint32_t rtHeightOffset,SkSL::Program::Inputs * inputs)918 SkSL::String GrDawnGpu::SkSLToSPIRV(const char* shaderString, SkSL::ProgramKind kind, bool flipY,
919 uint32_t rtHeightOffset, SkSL::Program::Inputs* inputs) {
920 auto errorHandler = this->getContext()->priv().getShaderErrorHandler();
921 SkSL::Program::Settings settings;
922 settings.fFlipY = flipY;
923 settings.fRTHeightOffset = rtHeightOffset;
924 settings.fRTHeightBinding = 0;
925 settings.fRTHeightSet = 0;
926 std::unique_ptr<SkSL::Program> program = this->shaderCompiler()->convertProgram(
927 kind,
928 shaderString,
929 settings);
930 if (!program) {
931 errorHandler->compileError(shaderString, this->shaderCompiler()->errorText().c_str());
932 return "";
933 }
934 if (inputs) {
935 *inputs = program->fInputs;
936 }
937 SkSL::String code;
938 if (!this->shaderCompiler()->toSPIRV(*program, &code)) {
939 errorHandler->compileError(shaderString, this->shaderCompiler()->errorText().c_str());
940 return "";
941 }
942 return code;
943 }
944
createShaderModule(const SkSL::String & spirvSource)945 wgpu::ShaderModule GrDawnGpu::createShaderModule(const SkSL::String& spirvSource) {
946 wgpu::ShaderModuleSPIRVDescriptor desc;
947 desc.codeSize = spirvSource.size() / 4;
948 desc.code = reinterpret_cast<const uint32_t*>(spirvSource.c_str());
949
950 wgpu::ShaderModuleDescriptor smDesc;
951 smDesc.nextInChain = &desc;
952
953 return fDevice.CreateShaderModule(&smDesc);
954 }
955