1 /*
2 * Copyright 2019 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "src/gpu/dawn/GrDawnGpu.h"
9
10 #include "include/gpu/GrBackendSemaphore.h"
11 #include "include/gpu/GrBackendSurface.h"
12 #include "include/gpu/GrContextOptions.h"
13 #include "include/gpu/GrDirectContext.h"
14 #include "src/core/SkConvertPixels.h"
15 #include "src/gpu/GrDataUtils.h"
16 #include "src/gpu/GrDirectContextPriv.h"
17 #include "src/gpu/GrGeometryProcessor.h"
18 #include "src/gpu/GrGpuResourceCacheAccess.h"
19 #include "src/gpu/GrPipeline.h"
20 #include "src/gpu/GrRenderTarget.h"
21 #include "src/gpu/GrSemaphore.h"
22 #include "src/gpu/GrStencilSettings.h"
23 #include "src/gpu/GrTexture.h"
24 #include "src/gpu/GrThreadSafePipelineBuilder.h"
25 #include "src/gpu/dawn/GrDawnAttachment.h"
26 #include "src/gpu/dawn/GrDawnBuffer.h"
27 #include "src/gpu/dawn/GrDawnCaps.h"
28 #include "src/gpu/dawn/GrDawnOpsRenderPass.h"
29 #include "src/gpu/dawn/GrDawnProgramBuilder.h"
30 #include "src/gpu/dawn/GrDawnRenderTarget.h"
31 #include "src/gpu/dawn/GrDawnTexture.h"
32 #include "src/gpu/dawn/GrDawnUtil.h"
33
34 #include "src/core/SkAutoMalloc.h"
35 #include "src/core/SkMipmap.h"
36 #include "src/sksl/SkSLCompiler.h"
37
38 #if !defined(SK_BUILD_FOR_WIN)
39 #include <unistd.h>
40 #endif // !defined(SK_BUILD_FOR_WIN)
41
// Capacity of the LRU cache of compiled render pipelines (fRenderPipelineCache).
// constexpr (not just const) so it is a guaranteed compile-time constant.
static constexpr int kMaxRenderPipelineEntries = 1024;
43
44 namespace {
45
46 class Fence {
47 public:
Fence(const wgpu::Device & device)48 Fence(const wgpu::Device& device)
49 : fDevice(device), fCalled(false) {
50 device.GetQueue().OnSubmittedWorkDone(0, callback, this);
51 }
52
callback(WGPUQueueWorkDoneStatus status,void * userData)53 static void callback(WGPUQueueWorkDoneStatus status, void* userData) {
54 Fence* fence = static_cast<Fence*>(userData);
55 fence->fCalled = true;
56 }
57
check()58 bool check() {
59 fDevice.Tick();
60 return fCalled;
61 }
62
63 private:
64 wgpu::Device fDevice;
65 bool fCalled;
66 };
67
68 }
69
to_dawn_filter_mode(GrSamplerState::Filter filter)70 static wgpu::FilterMode to_dawn_filter_mode(GrSamplerState::Filter filter) {
71 switch (filter) {
72 case GrSamplerState::Filter::kNearest:
73 return wgpu::FilterMode::Nearest;
74 case GrSamplerState::Filter::kLinear:
75 return wgpu::FilterMode::Linear;
76 default:
77 SkASSERT(!"unsupported filter mode");
78 return wgpu::FilterMode::Nearest;
79 }
80 }
81
to_dawn_mipmap_mode(GrSamplerState::MipmapMode mode)82 static wgpu::FilterMode to_dawn_mipmap_mode(GrSamplerState::MipmapMode mode) {
83 switch (mode) {
84 case GrSamplerState::MipmapMode::kNone:
85 // Fall-through (Dawn does not have an equivalent for "None")
86 case GrSamplerState::MipmapMode::kNearest:
87 return wgpu::FilterMode::Nearest;
88 case GrSamplerState::MipmapMode::kLinear:
89 return wgpu::FilterMode::Linear;
90 default:
91 SkASSERT(!"unsupported filter mode");
92 return wgpu::FilterMode::Nearest;
93 }
94 }
95
to_dawn_address_mode(GrSamplerState::WrapMode wrapMode)96 static wgpu::AddressMode to_dawn_address_mode(GrSamplerState::WrapMode wrapMode) {
97 switch (wrapMode) {
98 case GrSamplerState::WrapMode::kClamp:
99 return wgpu::AddressMode::ClampToEdge;
100 case GrSamplerState::WrapMode::kRepeat:
101 return wgpu::AddressMode::Repeat;
102 case GrSamplerState::WrapMode::kMirrorRepeat:
103 return wgpu::AddressMode::MirrorRepeat;
104 case GrSamplerState::WrapMode::kClampToBorder:
105 SkASSERT(!"unsupported address mode");
106 }
107 SkASSERT(!"unsupported address mode");
108 return wgpu::AddressMode::ClampToEdge;
109 }
110
Make(const wgpu::Device & device,const GrContextOptions & options,GrDirectContext * direct)111 sk_sp<GrGpu> GrDawnGpu::Make(const wgpu::Device& device,
112 const GrContextOptions& options, GrDirectContext* direct) {
113 if (!device) {
114 return nullptr;
115 }
116
117 return sk_sp<GrGpu>(new GrDawnGpu(direct, options, device));
118 }
119
120 ////////////////////////////////////////////////////////////////////////////////
121
// Constructs the Dawn-backed GrGpu: caches the device and its default queue,
// sets up the uniform ring buffer, staging-buffer manager, render-pipeline
// LRU cache, and finish-callback list, then initializes caps + SkSL compiler.
GrDawnGpu::GrDawnGpu(GrDirectContext* direct, const GrContextOptions& options,
                     const wgpu::Device& device)
        : INHERITED(direct)
        , fDevice(device)
        , fQueue(device.GetQueue())
        , fUniformRingBuffer(this, wgpu::BufferUsage::Uniform)
        , fStagingBufferManager(this)
        , fRenderPipelineCache(kMaxRenderPipelineEntries)
        , fFinishCallbacks(this) {
    this->initCapsAndCompiler(sk_make_sp<GrDawnCaps>(options));
}
133
~GrDawnGpu()134 GrDawnGpu::~GrDawnGpu() { this->finishOutstandingGpuWork(); }
135
disconnect(DisconnectType type)136 void GrDawnGpu::disconnect(DisconnectType type) {
137 if (DisconnectType::kCleanup == type) {
138 this->finishOutstandingGpuWork();
139 }
140 fStagingBufferManager.reset();
141 fQueue = nullptr;
142 fDevice = nullptr;
143 INHERITED::disconnect(type);
144 }
145
// No shared thread-safe pipeline builder; pipelines are cached per-GrGpu in
// fRenderPipelineCache instead.
GrThreadSafePipelineBuilder* GrDawnGpu::pipelineBuilder() {
    return nullptr;
}
149
// See pipelineBuilder(): this backend has no shareable pipeline builder.
sk_sp<GrThreadSafePipelineBuilder> GrDawnGpu::refPipelineBuilder() {
    return nullptr;
}
153
154 ///////////////////////////////////////////////////////////////////////////////
155
// Returns the render pass used to record ops for 'rt'. The pass object is
// owned by this GrGpu (fOpsRenderPass) and replaced on each call. The MSAA,
// stencil-attachment, sampled-proxy, and barrier parameters are accepted but
// not used by this backend.
GrOpsRenderPass* GrDawnGpu::onGetOpsRenderPass(
        GrRenderTarget* rt,
        bool /*useMSAASurface*/,
        GrAttachment*,
        GrSurfaceOrigin origin,
        const SkIRect& bounds,
        const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
        const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo,
        const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
        GrXferBarrierFlags renderPassXferBarriers) {
    fOpsRenderPass.reset(new GrDawnOpsRenderPass(this, rt, origin, colorInfo, stencilInfo));
    return fOpsRenderPass.get();
}
169
170 ///////////////////////////////////////////////////////////////////////////////
onCreateBuffer(size_t size,GrGpuBufferType type,GrAccessPattern accessPattern,const void * data)171 sk_sp<GrGpuBuffer> GrDawnGpu::onCreateBuffer(size_t size, GrGpuBufferType type,
172 GrAccessPattern accessPattern, const void* data) {
173 sk_sp<GrGpuBuffer> b(new GrDawnBuffer(this, size, type, accessPattern));
174 if (data && b) {
175 b->updateData(data, size);
176 }
177 return b;
178 }
179
180 ////////////////////////////////////////////////////////////////////////////////
onWritePixels(GrSurface * surface,SkIRect rect,GrColorType surfaceColorType,GrColorType srcColorType,const GrMipLevel texels[],int mipLevelCount,bool prepForTexSampling)181 bool GrDawnGpu::onWritePixels(GrSurface* surface,
182 SkIRect rect,
183 GrColorType surfaceColorType,
184 GrColorType srcColorType,
185 const GrMipLevel texels[],
186 int mipLevelCount,
187 bool prepForTexSampling) {
188 GrDawnTexture* texture = static_cast<GrDawnTexture*>(surface->asTexture());
189 if (!texture) {
190 return false;
191 }
192 this->uploadTextureData(srcColorType, texels, mipLevelCount, rect, texture->texture());
193 if (mipLevelCount < texture->maxMipmapLevel() + 1) {
194 texture->markMipmapsDirty();
195 }
196 return true;
197 }
198
// Buffer-to-texture transfers are not implemented in the Dawn backend.
bool GrDawnGpu::onTransferPixelsTo(GrTexture* texture,
                                   SkIRect rect,
                                   GrColorType textureColorType,
                                   GrColorType bufferColorType,
                                   sk_sp<GrGpuBuffer> transferBuffer,
                                   size_t bufferOffset,
                                   size_t rowBytes) {
    SkASSERT(!"unimplemented");
    return false;
}
209
// Texture-to-buffer transfers are not implemented in the Dawn backend.
bool GrDawnGpu::onTransferPixelsFrom(GrSurface* surface,
                                     SkIRect rect,
                                     GrColorType surfaceColorType,
                                     GrColorType bufferColorType,
                                     sk_sp<GrGpuBuffer> transferBuffer,
                                     size_t offset) {
    SkASSERT(!"unimplemented");
    return false;
}
219
220 ////////////////////////////////////////////////////////////////////////////////
onCreateTexture(SkISize dimensions,const GrBackendFormat & backendFormat,GrRenderable renderable,int renderTargetSampleCnt,SkBudgeted budgeted,GrProtected,int mipLevelCount,uint32_t levelClearMask)221 sk_sp<GrTexture> GrDawnGpu::onCreateTexture(SkISize dimensions,
222 const GrBackendFormat& backendFormat,
223 GrRenderable renderable,
224 int renderTargetSampleCnt,
225 SkBudgeted budgeted,
226 GrProtected,
227 int mipLevelCount,
228 uint32_t levelClearMask) {
229 if (levelClearMask) {
230 return nullptr;
231 }
232
233 wgpu::TextureFormat format;
234 if (!backendFormat.asDawnFormat(&format)) {
235 return nullptr;
236 }
237
238 GrMipmapStatus mipmapStatus =
239 mipLevelCount > 1 ? GrMipmapStatus::kDirty : GrMipmapStatus::kNotAllocated;
240
241 return GrDawnTexture::Make(this, dimensions, format, renderable, renderTargetSampleCnt,
242 budgeted, mipLevelCount, mipmapStatus);
243 }
244
// Compressed textures are not implemented in the Dawn backend.
sk_sp<GrTexture> GrDawnGpu::onCreateCompressedTexture(SkISize dimensions, const GrBackendFormat&,
                                                      SkBudgeted, GrMipmapped, GrProtected,
                                                      const void* data, size_t dataSize) {
    SkASSERT(!"unimplemented");
    return nullptr;
}
251
// OpenHarmony native-buffer variant; also not implemented for Dawn.
sk_sp<GrTexture> GrDawnGpu::onCreateCompressedTexture(SkISize dimensions, const GrBackendFormat&,
                                                      SkBudgeted, GrMipmapped, GrProtected,
                                                      OH_NativeBuffer* nativeBuffer,
                                                      size_t bufferSize) {
    SkASSERT(!"unimplemented");
    return nullptr;
}
259
onWrapBackendTexture(const GrBackendTexture & backendTex,GrWrapOwnership ownership,GrWrapCacheable cacheable,GrIOType ioType)260 sk_sp<GrTexture> GrDawnGpu::onWrapBackendTexture(const GrBackendTexture& backendTex,
261 GrWrapOwnership ownership,
262 GrWrapCacheable cacheable,
263 GrIOType ioType) {
264 GrDawnTextureInfo info;
265 if (!backendTex.getDawnTextureInfo(&info)) {
266 return nullptr;
267 }
268
269 SkISize dimensions = { backendTex.width(), backendTex.height() };
270 return GrDawnTexture::MakeWrapped(this, dimensions, GrRenderable::kNo, 1, cacheable, ioType,
271 info);
272 }
273
// Wrapping compressed backend textures is not supported by the Dawn backend.
sk_sp<GrTexture> GrDawnGpu::onWrapCompressedBackendTexture(const GrBackendTexture& backendTex,
                                                           GrWrapOwnership ownership,
                                                           GrWrapCacheable cacheable) {
    return nullptr;
}
279
onWrapRenderableBackendTexture(const GrBackendTexture & tex,int sampleCnt,GrWrapOwnership,GrWrapCacheable cacheable)280 sk_sp<GrTexture> GrDawnGpu::onWrapRenderableBackendTexture(const GrBackendTexture& tex,
281 int sampleCnt,
282 GrWrapOwnership,
283 GrWrapCacheable cacheable) {
284 GrDawnTextureInfo info;
285 if (!tex.getDawnTextureInfo(&info) || !info.fTexture) {
286 return nullptr;
287 }
288
289 SkISize dimensions = { tex.width(), tex.height() };
290 sampleCnt = this->caps()->getRenderTargetSampleCount(sampleCnt, tex.getBackendFormat());
291 if (sampleCnt < 1) {
292 return nullptr;
293 }
294
295 sk_sp<GrTexture> result = GrDawnTexture::MakeWrapped(this, dimensions, GrRenderable::kYes,
296 sampleCnt, cacheable, kRW_GrIOType, info);
297 result->markMipmapsDirty();
298 return result;
299 }
300
onWrapBackendRenderTarget(const GrBackendRenderTarget & rt)301 sk_sp<GrRenderTarget> GrDawnGpu::onWrapBackendRenderTarget(const GrBackendRenderTarget& rt) {
302 GrDawnRenderTargetInfo info;
303 if (!rt.getDawnRenderTargetInfo(&info) || !info.fTextureView) {
304 return nullptr;
305 }
306
307 SkISize dimensions = { rt.width(), rt.height() };
308 int sampleCnt = 1;
309 return GrDawnRenderTarget::MakeWrapped(this, dimensions, sampleCnt, info);
310 }
311
// Creates a stencil attachment; the color format does not matter here.
sk_sp<GrAttachment> GrDawnGpu::makeStencilAttachment(const GrBackendFormat& /*colorFormat*/,
                                                     SkISize dimensions, int numStencilSamples) {
    fStats.incStencilAttachmentCreates();
    return GrDawnAttachment::MakeStencil(this, dimensions, numStencilSamples);
}
317
onCreateBackendTexture(SkISize dimensions,const GrBackendFormat & backendFormat,GrRenderable renderable,GrMipmapped mipMapped,GrProtected isProtected)318 GrBackendTexture GrDawnGpu::onCreateBackendTexture(SkISize dimensions,
319 const GrBackendFormat& backendFormat,
320 GrRenderable renderable,
321 GrMipmapped mipMapped,
322 GrProtected isProtected) {
323 wgpu::TextureFormat format;
324 if (!backendFormat.asDawnFormat(&format)) {
325 return GrBackendTexture();
326 }
327
328 wgpu::TextureDescriptor desc;
329 desc.usage = wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::CopySrc |
330 wgpu::TextureUsage::CopyDst;
331
332 if (GrRenderable::kYes == renderable) {
333 desc.usage |= wgpu::TextureUsage::RenderAttachment;
334 }
335
336 int numMipLevels = 1;
337 if (mipMapped == GrMipmapped::kYes) {
338 numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1;
339 }
340
341 desc.size.width = dimensions.width();
342 desc.size.height = dimensions.height();
343 desc.size.depthOrArrayLayers = 1;
344 desc.format = format;
345 desc.mipLevelCount = numMipLevels;
346
347 wgpu::Texture tex = this->device().CreateTexture(&desc);
348
349 GrDawnTextureInfo info;
350 info.fTexture = tex;
351 info.fFormat = desc.format;
352 info.fLevelCount = desc.mipLevelCount;
353 return GrBackendTexture(dimensions.width(), dimensions.height(), info);
354 }
355
// Uploads 'mipLevelCount' levels of pixel data into 'texture': level 0 covers
// 'rect', and both origin and extent are halved for each subsequent level
// (extent clamped to 1x1). Each level is staged through a CPU-visible buffer
// with rows padded via GrDawnRoundRowBytes, then copied on the shared copy
// encoder; the GPU work executes when that encoder is flushed and submitted.
void GrDawnGpu::uploadTextureData(GrColorType srcColorType, const GrMipLevel texels[],
                                  int mipLevelCount, const SkIRect& rect,
                                  wgpu::Texture texture) {
    uint32_t x = rect.x();
    uint32_t y = rect.y();
    uint32_t width = rect.width();
    uint32_t height = rect.height();

    for (int i = 0; i < mipLevelCount; i++) {
        const void* src = texels[i].fPixels;
        size_t srcRowBytes = texels[i].fRowBytes;
        SkColorType colorType = GrColorTypeToSkColorType(srcColorType);
        // Tight row size for this level, then padded to Dawn's row alignment.
        size_t trimRowBytes = width * SkColorTypeBytesPerPixel(colorType);
        size_t dstRowBytes = GrDawnRoundRowBytes(trimRowBytes);
        size_t size = dstRowBytes * height;
        GrStagingBufferManager::Slice slice =
                this->stagingBufferManager()->allocateStagingBufferSlice(size);
        // Repack rows from the caller's stride into the padded staging stride.
        SkRectMemcpy(slice.fOffsetMapPtr, dstRowBytes, src, srcRowBytes, trimRowBytes, height);

        wgpu::ImageCopyBuffer srcBuffer = {};
        srcBuffer.buffer = static_cast<GrDawnBuffer*>(slice.fBuffer)->get();
        srcBuffer.layout.offset = slice.fOffset;
        srcBuffer.layout.bytesPerRow = dstRowBytes;
        srcBuffer.layout.rowsPerImage = height;

        wgpu::ImageCopyTexture dstTexture;
        dstTexture.texture = texture;
        dstTexture.mipLevel = i;
        dstTexture.origin = {x, y, 0};

        wgpu::Extent3D copySize = {width, height, 1};
        this->getCopyEncoder().CopyBufferToTexture(&srcBuffer, &dstTexture, &copySize);
        // Step down to the next level's origin/extent.
        x /= 2;
        y /= 2;
        width = std::max(1u, width / 2);
        height = std::max(1u, height / 2);
    }
}
394
onClearBackendTexture(const GrBackendTexture & backendTexture,sk_sp<GrRefCntedCallback> finishedCallback,std::array<float,4> color)395 bool GrDawnGpu::onClearBackendTexture(const GrBackendTexture& backendTexture,
396 sk_sp<GrRefCntedCallback> finishedCallback,
397 std::array<float, 4> color) {
398 GrDawnTextureInfo info;
399 SkAssertResult(backendTexture.getDawnTextureInfo(&info));
400
401 GrColorType colorType;
402 if (!GrDawnFormatToGrColorType(info.fFormat, &colorType)) {
403 return false;
404 }
405
406 size_t bpp = GrDawnBytesPerBlock(info.fFormat);
407 size_t baseLayerSize = bpp * backendTexture.width() * backendTexture.height();
408 SkAutoMalloc defaultStorage(baseLayerSize);
409 GrImageInfo imageInfo(colorType, kUnpremul_SkAlphaType, nullptr, backendTexture.dimensions());
410 GrClearImage(imageInfo, defaultStorage.get(), bpp * backendTexture.width(), color);
411
412 wgpu::Device device = this->device();
413 wgpu::CommandEncoder copyEncoder = this->getCopyEncoder();
414 int w = backendTexture.width(), h = backendTexture.height();
415 for (uint32_t i = 0; i < info.fLevelCount; i++) {
416 size_t origRowBytes = bpp * w;
417 size_t rowBytes = GrDawnRoundRowBytes(origRowBytes);
418 size_t size = rowBytes * h;
419 GrStagingBufferManager::Slice stagingBuffer =
420 this->stagingBufferManager()->allocateStagingBufferSlice(size);
421 if (rowBytes == origRowBytes) {
422 memcpy(stagingBuffer.fOffsetMapPtr, defaultStorage.get(), size);
423 } else {
424 const char* src = static_cast<const char*>(defaultStorage.get());
425 char* dst = static_cast<char*>(stagingBuffer.fOffsetMapPtr);
426 for (int row = 0; row < h; row++) {
427 memcpy(dst, src, origRowBytes);
428 dst += rowBytes;
429 src += origRowBytes;
430 }
431 }
432 wgpu::ImageCopyBuffer srcBuffer = {};
433 srcBuffer.buffer = static_cast<GrDawnBuffer*>(stagingBuffer.fBuffer)->get();
434 srcBuffer.layout.offset = stagingBuffer.fOffset;
435 srcBuffer.layout.bytesPerRow = rowBytes;
436 srcBuffer.layout.rowsPerImage = h;
437 wgpu::ImageCopyTexture dstTexture;
438 dstTexture.texture = info.fTexture;
439 dstTexture.mipLevel = i;
440 dstTexture.origin = {0, 0, 0};
441 wgpu::Extent3D copySize = {(uint32_t)w, (uint32_t)h, 1};
442 copyEncoder.CopyBufferToTexture(&srcBuffer, &dstTexture, ©Size);
443 w = std::max(1, w / 2);
444 h = std::max(1, h / 2);
445 }
446 return true;
447 }
448
// Compressed backend textures are not supported; returns an invalid handle.
GrBackendTexture GrDawnGpu::onCreateCompressedBackendTexture(
        SkISize dimensions, const GrBackendFormat&, GrMipmapped, GrProtected) {
    return {};
}
453
// Updating compressed backend textures is not supported; always fails.
bool GrDawnGpu::onUpdateCompressedBackendTexture(const GrBackendTexture&,
                                                 sk_sp<GrRefCntedCallback> finishedCallback,
                                                 const void* data,
                                                 size_t size) {
    return false;
}
460
deleteBackendTexture(const GrBackendTexture & tex)461 void GrDawnGpu::deleteBackendTexture(const GrBackendTexture& tex) {
462 GrDawnTextureInfo info;
463 if (tex.getDawnTextureInfo(&info)) {
464 info.fTexture = nullptr;
465 }
466 }
467
// Ahead-of-time pipeline compilation is not supported; always returns false.
bool GrDawnGpu::compile(const GrProgramDesc&, const GrProgramInfo&) {
    return false;
}
471
472 #if GR_TEST_UTILS
isTestingOnlyBackendTexture(const GrBackendTexture & tex) const473 bool GrDawnGpu::isTestingOnlyBackendTexture(const GrBackendTexture& tex) const {
474 GrDawnTextureInfo info;
475 if (!tex.getDawnTextureInfo(&info)) {
476 return false;
477 }
478
479 return info.fTexture.Get();
480 }
481
createTestingOnlyBackendRenderTarget(SkISize dimensions,GrColorType colorType,int sampleCnt,GrProtected isProtected)482 GrBackendRenderTarget GrDawnGpu::createTestingOnlyBackendRenderTarget(SkISize dimensions,
483 GrColorType colorType,
484 int sampleCnt,
485 GrProtected isProtected) {
486 if (dimensions.width() > this->caps()->maxTextureSize() ||
487 dimensions.height() > this->caps()->maxTextureSize()) {
488 return {};
489 }
490
491 // We don't support MSAA in this backend yet.
492 if (sampleCnt != 1) {
493 return {};
494 }
495
496 if (isProtected == GrProtected::kYes) {
497 return {};
498 }
499
500 wgpu::TextureFormat format;
501 if (!GrColorTypeToDawnFormat(colorType, &format)) {
502 return {};
503 }
504
505 wgpu::TextureDescriptor desc;
506 desc.usage =
507 wgpu::TextureUsage::CopySrc |
508 wgpu::TextureUsage::RenderAttachment;
509
510 desc.size.width = dimensions.width();
511 desc.size.height = dimensions.height();
512 desc.size.depthOrArrayLayers = 1;
513 desc.format = format;
514
515 wgpu::Texture tex = this->device().CreateTexture(&desc);
516
517 GrDawnRenderTargetInfo info;
518 info.fTextureView = tex.CreateView();
519 info.fFormat = desc.format;
520 info.fLevelCount = desc.mipLevelCount;
521
522 return GrBackendRenderTarget(dimensions.width(), dimensions.height(), 1, 0, info);
523 }
524
deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget & rt)525 void GrDawnGpu::deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget& rt) {
526 GrDawnRenderTargetInfo info;
527 if (rt.getDawnRenderTargetInfo(&info)) {
528 info.fTextureView = nullptr;
529 }
530 }
531
532 #endif
533
// Registers a callback to fire once submitted GPU work completes; callbacks
// are invoked from onSubmitToGpu (syncCpu) or checkFinishProcs.
void GrDawnGpu::addFinishedProc(GrGpuFinishedProc finishedProc,
                                GrGpuFinishedContext finishedContext) {
    fFinishCallbacks.add(finishedProc, finishedContext);
}
538
// Retires busy staging buffers whose async re-map has completed.
void GrDawnGpu::checkForCompletedStagingBuffers() {
    // We expect all the buffer maps to trigger in order of submission so we bail after the first
    // non finished map since we always push new busy buffers to the back of our list.
    while (!fBusyStagingBuffers.empty() && fBusyStagingBuffers.front()->isMapped()) {
        fBusyStagingBuffers.pop_front();
    }
}
546
// Busy-waits (ticking the device so Dawn can deliver map callbacks) until
// every busy staging buffer has been re-mapped and retired.
void GrDawnGpu::waitOnAllBusyStagingBuffers() {
    while (!fBusyStagingBuffers.empty()) {
        fDevice.Tick();
        this->checkForCompletedStagingBuffers();
    }
}
553
// Keeps 'buffer' alive until the current batch of work is submitted.
void GrDawnGpu::takeOwnershipOfBuffer(sk_sp<GrGpuBuffer> buffer) {
    fSubmittedStagingBuffers.push_back(std::move(buffer));
}
557
558
callback(WGPUQueueWorkDoneStatus status,void * userData)559 static void callback(WGPUQueueWorkDoneStatus status, void* userData) {
560 *static_cast<bool*>(userData) = true;
561 }
562
onSubmitToGpu(bool syncCpu)563 bool GrDawnGpu::onSubmitToGpu(bool syncCpu) {
564 this->flushCopyEncoder();
565 if (!fCommandBuffers.empty()) {
566 fQueue.Submit(fCommandBuffers.size(), &fCommandBuffers.front());
567 fCommandBuffers.clear();
568 }
569
570 this->moveStagingBuffersToBusyAndMapAsync();
571 if (syncCpu) {
572 bool called = false;
573 fDevice.GetQueue().OnSubmittedWorkDone(0, callback, &called);
574 while (!called) {
575 fDevice.Tick();
576 }
577 fFinishCallbacks.callAll(true);
578 }
579
580 this->checkForCompletedStagingBuffers();
581
582 return true;
583 }
584
get_dawn_texture_from_surface(GrSurface * src)585 static wgpu::Texture get_dawn_texture_from_surface(GrSurface* src) {
586 if (auto t = static_cast<GrDawnTexture*>(src->asTexture())) {
587 return t->texture();
588 } else {
589 return nullptr;
590 }
591 }
592
onCopySurface(GrSurface * dst,GrSurface * src,const SkIRect & srcRect,const SkIPoint & dstPoint)593 bool GrDawnGpu::onCopySurface(GrSurface* dst,
594 GrSurface* src,
595 const SkIRect& srcRect,
596 const SkIPoint& dstPoint) {
597 wgpu::Texture srcTexture = get_dawn_texture_from_surface(src);
598 wgpu::Texture dstTexture = get_dawn_texture_from_surface(dst);
599 if (!srcTexture || !dstTexture) {
600 return false;
601 }
602
603 uint32_t width = srcRect.width(), height = srcRect.height();
604
605 wgpu::ImageCopyTexture srcTextureView, dstTextureView;
606 srcTextureView.texture = srcTexture;
607 srcTextureView.origin = {(uint32_t) srcRect.x(), (uint32_t) srcRect.y(), 0};
608 dstTextureView.texture = dstTexture;
609 dstTextureView.origin = {(uint32_t) dstPoint.x(), (uint32_t) dstPoint.y(), 0};
610
611 wgpu::Extent3D copySize = {width, height, 1};
612 this->getCopyEncoder().CopyTextureToTexture(&srcTextureView, &dstTextureView, ©Size);
613 return true;
614 }
615
callback(WGPUBufferMapAsyncStatus status,void * userdata)616 static void callback(WGPUBufferMapAsyncStatus status, void* userdata) {
617 *static_cast<bool*>(userdata) = true;
618 }
619
onReadPixels(GrSurface * surface,SkIRect rect,GrColorType surfaceColorType,GrColorType dstColorType,void * buffer,size_t rowBytes)620 bool GrDawnGpu::onReadPixels(GrSurface* surface,
621 SkIRect rect,
622 GrColorType surfaceColorType,
623 GrColorType dstColorType,
624 void* buffer,
625 size_t rowBytes) {
626 wgpu::Texture tex = get_dawn_texture_from_surface(surface);
627
628 if (!tex || 0 == rowBytes) {
629 return false;
630 }
631 size_t origRowBytes = rowBytes;
632 int origSizeInBytes = origRowBytes*rect.height();
633 rowBytes = GrDawnRoundRowBytes(rowBytes);
634 int sizeInBytes = rowBytes*rect.height();
635
636 wgpu::BufferDescriptor desc;
637 desc.usage = wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::MapRead;
638 desc.size = sizeInBytes;
639
640 wgpu::Buffer buf = device().CreateBuffer(&desc);
641
642 wgpu::ImageCopyTexture srcTexture;
643 srcTexture.texture = tex;
644 srcTexture.origin = {(uint32_t) rect.left(), (uint32_t) rect.top(), 0};
645
646 wgpu::ImageCopyBuffer dstBuffer = {};
647 dstBuffer.buffer = buf;
648 dstBuffer.layout.offset = 0;
649 dstBuffer.layout.bytesPerRow = rowBytes;
650 dstBuffer.layout.rowsPerImage = rect.height();
651
652 wgpu::Extent3D copySize = {(uint32_t) rect.width(), (uint32_t) rect.height(), 1};
653 this->getCopyEncoder().CopyTextureToBuffer(&srcTexture, &dstBuffer, ©Size);
654 this->submitToGpu(true);
655
656 bool mapped = false;
657 buf.MapAsync(wgpu::MapMode::Read, 0, 0, callback, &mapped);
658 while (!mapped) {
659 device().Tick();
660 }
661 const void* readPixelsPtr = buf.GetConstMappedRange();
662
663 if (rowBytes == origRowBytes) {
664 memcpy(buffer, readPixelsPtr, origSizeInBytes);
665 } else {
666 const char* src = static_cast<const char*>(readPixelsPtr);
667 char* dst = static_cast<char*>(buffer);
668 for (int row = 0; row < rect.height(); row++) {
669 memcpy(dst, src, origRowBytes);
670 dst += origRowBytes;
671 src += rowBytes;
672 }
673 }
674 buf.Unmap();
675 return true;
676 }
677
// Regenerates the mip chain of 'tex' on the GPU: each level is rendered from
// the level above it (fullscreen quad, linear-filtered sampling) into a
// temporary texture, then copied back into the matching level of 'tex'.
bool GrDawnGpu::onRegenerateMipMapLevels(GrTexture* tex) {
    this->flushCopyEncoder();
    GrDawnTexture* src = static_cast<GrDawnTexture*>(tex);
    int srcWidth = tex->width();
    int srcHeight = tex->height();

    // SkMipmap doesn't include the base level in the level count so we have to add 1
    uint32_t levelCount = SkMipmap::ComputeLevelCount(tex->width(), tex->height()) + 1;

    // Create a temporary texture for mipmap generation, then copy to source.
    // We have to do this even for renderable textures, since GrDawnRenderTarget currently only
    // contains a view, not a texture.
    wgpu::TextureDescriptor texDesc;
    texDesc.usage = wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::CopySrc |
                    wgpu::TextureUsage::RenderAttachment;
    // The temp texture is half-sized: its level N becomes level N+1 of 'tex'.
    texDesc.size.width = (tex->width() + 1) / 2;
    texDesc.size.height = (tex->height() + 1) / 2;
    texDesc.size.depthOrArrayLayers = 1;
    texDesc.mipLevelCount = levelCount - 1;
    texDesc.format = src->format();
    wgpu::Texture dstTexture = fDevice.CreateTexture(&texDesc);

    // Vertex shader: emits a 4-vertex triangle strip covering clip space,
    // with matching texture coordinates.
    const char* vs =
        "layout(location = 0) out float2 texCoord;\n"
        "float2 positions[4] = float2[4](float2(-1.0, 1.0),\n"
        "float2(1.0, 1.0),\n"
        "float2(-1.0, -1.0),\n"
        "float2(1.0, -1.0));\n"
        "float2 texCoords[4] = float2[4](float2(0.0, 0.0),\n"
        "float2(1.0, 0.0),\n"
        "float2(0.0, 1.0),\n"
        "float2(1.0, 1.0));\n"
        "void main() {\n"
        "    sk_Position = float4(positions[sk_VertexID], 0.0, 1.0);\n"
        "    texCoord = texCoords[sk_VertexID];\n"
        "}\n";
    SkSL::String vsSPIRV = this->SkSLToSPIRV(vs,
                                             SkSL::ProgramKind::kVertex,
                                             /*rtFlipOffset*/ 0,
                                             nullptr);

    // Fragment shader: samples the previous level through the bound
    // sampler/texture pair.
    const char* fs =
        "layout(set = 0, binding = 0) uniform sampler samp;\n"
        "layout(set = 0, binding = 1) uniform texture2D tex;\n"
        "layout(location = 0) in float2 texCoord;\n"
        "void main() {\n"
        "    sk_FragColor = sample(makeSampler2D(tex, samp), texCoord);\n"
        "}\n";
    SkSL::String fsSPIRV = this->SkSLToSPIRV(fs,
                                             SkSL::ProgramKind::kFragment,
                                             /*rtFlipOffset=*/ 0,
                                             nullptr);

    wgpu::VertexState vertexState;
    vertexState.module = this->createShaderModule(vsSPIRV);
    vertexState.entryPoint = "main";
    vertexState.bufferCount = 0;

    wgpu::ColorTargetState colorTargetState;
    colorTargetState.format = static_cast<GrDawnTexture*>(tex)->format();

    wgpu::FragmentState fragmentState;
    fragmentState.module = this->createShaderModule(fsSPIRV);
    fragmentState.entryPoint = "main";
    fragmentState.targetCount = 1;
    fragmentState.targets = &colorTargetState;

    wgpu::RenderPipelineDescriptor renderPipelineDesc;
    renderPipelineDesc.vertex = vertexState;
    renderPipelineDesc.primitive.topology = wgpu::PrimitiveTopology::TriangleStrip;
    renderPipelineDesc.primitive.stripIndexFormat = wgpu::IndexFormat::Uint16;
    renderPipelineDesc.fragment = &fragmentState;
    wgpu::RenderPipeline pipeline = fDevice.CreateRenderPipeline(&renderPipelineDesc);

    wgpu::BindGroupLayout bgl = pipeline.GetBindGroupLayout(0);
    // Initial source view: the base level of the original texture.
    wgpu::TextureViewDescriptor srcViewDesc;
    srcViewDesc.mipLevelCount = 1;
    wgpu::TextureView srcView = src->texture().CreateView(&srcViewDesc);
    // Linear min-filter gives the downsampling blend between levels.
    wgpu::SamplerDescriptor samplerDesc;
    samplerDesc.minFilter = wgpu::FilterMode::Linear;
    wgpu::Sampler sampler = fDevice.CreateSampler(&samplerDesc);
    wgpu::CommandEncoder commandEncoder = fDevice.CreateCommandEncoder();
    for (uint32_t mipLevel = 0; mipLevel < texDesc.mipLevelCount; mipLevel++) {
        int dstWidth = std::max(1, srcWidth / 2);
        int dstHeight = std::max(1, srcHeight / 2);
        // Render target for this iteration: level 'mipLevel' of the temp texture.
        wgpu::TextureViewDescriptor dstViewDesc;
        dstViewDesc.format = static_cast<GrDawnTexture*>(tex)->format();
        dstViewDesc.dimension = wgpu::TextureViewDimension::e2D;
        dstViewDesc.baseMipLevel = mipLevel;
        dstViewDesc.mipLevelCount = 1;
        wgpu::TextureView dstView = dstTexture.CreateView(&dstViewDesc);
        // Bind group: binding 0 = sampler, binding 1 = previous level's view.
        wgpu::BindGroupEntry bge[2];
        bge[0].binding = 0;
        bge[0].sampler = sampler;
        bge[1].binding = 1;
        bge[1].textureView = srcView;
        wgpu::BindGroupDescriptor bgDesc;
        bgDesc.layout = bgl;
        bgDesc.entryCount = 2;
        bgDesc.entries = bge;
        wgpu::BindGroup bindGroup = fDevice.CreateBindGroup(&bgDesc);
        wgpu::RenderPassColorAttachment colorAttachment;
        colorAttachment.view = dstView;
        colorAttachment.clearColor = { 0.0f, 0.0f, 0.0f, 0.0f };
        colorAttachment.loadOp = wgpu::LoadOp::Load;
        colorAttachment.storeOp = wgpu::StoreOp::Store;
        wgpu::RenderPassColorAttachment* colorAttachments = { &colorAttachment };
        wgpu::RenderPassDescriptor renderPassDesc;
        renderPassDesc.colorAttachmentCount = 1;
        renderPassDesc.colorAttachments = colorAttachments;
        wgpu::RenderPassEncoder rpe = commandEncoder.BeginRenderPass(&renderPassDesc);
        rpe.SetPipeline(pipeline);
        rpe.SetBindGroup(0, bindGroup);
        rpe.Draw(4, 1, 0, 0);  // 4-vertex fullscreen strip, 1 instance
        rpe.EndPass();

        // Copy the rendered temp level into level mipLevel+1 of the source.
        wgpu::Extent3D copySize = {(uint32_t)dstWidth, (uint32_t)dstHeight, 1};
        wgpu::ImageCopyTexture srcCopyView;
        srcCopyView.texture = dstTexture;
        srcCopyView.mipLevel = mipLevel;
        wgpu::ImageCopyTexture dstCopyView;
        dstCopyView.mipLevel = mipLevel + 1;
        dstCopyView.texture = src->texture();
        commandEncoder.CopyTextureToTexture(&srcCopyView, &dstCopyView, &copySize);

        // The level just rendered becomes the sample source next iteration.
        srcHeight = dstHeight;
        srcWidth = dstWidth;
        srcView = dstView;
    }
    fCommandBuffers.push_back(commandEncoder.Finish());
    return true;
}
810
// Finalizes a render pass: flushes pending copy work first so staged copies
// land before the pass's commands, then lets the pass append its commands.
void GrDawnGpu::submit(GrOpsRenderPass* renderPass) {
    this->flushCopyEncoder();
    static_cast<GrDawnOpsRenderPass*>(renderPass)->submit();
}
815
// Creates a fence tracking work submitted so far. The caller owns the fence
// and must release it with deleteFence().
GrFence SK_WARN_UNUSED_RESULT GrDawnGpu::insertFence() {
    return reinterpret_cast<GrFence>(new Fence(fDevice));
}
819
waitFence(GrFence fence)820 bool GrDawnGpu::waitFence(GrFence fence) {
821 return reinterpret_cast<Fence*>(fence)->check();
822 }
823
deleteFence(GrFence fence) const824 void GrDawnGpu::deleteFence(GrFence fence) const {
825 delete reinterpret_cast<Fence*>(fence);
826 }
827
// Semaphore creation is not implemented for the Dawn backend: asserts in
// debug builds and reports failure by returning null in release builds.
std::unique_ptr<GrSemaphore> SK_WARN_UNUSED_RESULT GrDawnGpu::makeSemaphore(bool isOwned) {
    SkASSERT(!"unimplemented");
    return nullptr;
}
832
// Wrapping an externally created backend semaphore is not implemented for the
// Dawn backend: asserts in debug builds, returns null in release builds.
std::unique_ptr<GrSemaphore> GrDawnGpu::wrapBackendSemaphore(const GrBackendSemaphore& /* sema */,
                                                             GrSemaphoreWrapType /* wrapType */,
                                                             GrWrapOwnership /* ownership */) {
    SkASSERT(!"unimplemented");
    return nullptr;
}
839
// Signaling a semaphore on the GPU queue is not implemented for the Dawn
// backend: debug-only assert, silent no-op in release builds.
void GrDawnGpu::insertSemaphore(GrSemaphore* semaphore) {
    SkASSERT(!"unimplemented");
}
843
// Making the GPU queue wait on a semaphore is not implemented for the Dawn
// backend: debug-only assert, silent no-op in release builds.
void GrDawnGpu::waitSemaphore(GrSemaphore* semaphore) {
    SkASSERT(!"unimplemented");
}
847
// Gives the registered finish callbacks a chance to fire; all readiness
// tracking is delegated to the callback list's own check().
void GrDawnGpu::checkFinishProcs() {
    fFinishCallbacks.check();
}
851
// Drains in-flight work by waiting on all busy staging buffers (the helper's
// name indicates a blocking wait — its definition is elsewhere in this file).
void GrDawnGpu::finishOutstandingGpuWork() {
    this->waitOnAllBusyStagingBuffers();
}
855
// Cross-context texture sharing is not implemented for the Dawn backend:
// asserts in debug builds, returns null (no semaphore) in release builds.
std::unique_ptr<GrSemaphore> GrDawnGpu::prepareTextureForCrossContextUsage(GrTexture* texture) {
    SkASSERT(!"unimplemented");
    return nullptr;
}
860
getOrCreateRenderPipeline(GrRenderTarget * rt,const GrProgramInfo & programInfo)861 sk_sp<GrDawnProgram> GrDawnGpu::getOrCreateRenderPipeline(
862 GrRenderTarget* rt,
863 const GrProgramInfo& programInfo) {
864 GrProgramDesc desc = this->caps()->makeDesc(rt, programInfo);
865 if (!desc.isValid()) {
866 return nullptr;
867 }
868
869 if (sk_sp<GrDawnProgram>* program = fRenderPipelineCache.find(desc)) {
870 return *program;
871 }
872
873 wgpu::TextureFormat colorFormat;
874 SkAssertResult(programInfo.backendFormat().asDawnFormat(&colorFormat));
875
876 wgpu::TextureFormat stencilFormat = wgpu::TextureFormat::Depth24PlusStencil8;
877 bool hasDepthStencil = rt->getStencilAttachment() != nullptr;
878
879 sk_sp<GrDawnProgram> program = GrDawnProgramBuilder::Build(
880 this, rt, programInfo, colorFormat,
881 hasDepthStencil, stencilFormat, &desc);
882 fRenderPipelineCache.insert(desc, program);
883 return program;
884 }
885
getOrCreateSampler(GrSamplerState samplerState)886 wgpu::Sampler GrDawnGpu::getOrCreateSampler(GrSamplerState samplerState) {
887 auto i = fSamplers.find(samplerState);
888 if (i != fSamplers.end()) {
889 return i->second;
890 }
891 wgpu::SamplerDescriptor desc;
892 desc.addressModeU = to_dawn_address_mode(samplerState.wrapModeX());
893 desc.addressModeV = to_dawn_address_mode(samplerState.wrapModeY());
894 desc.addressModeW = wgpu::AddressMode::ClampToEdge;
895 desc.magFilter = desc.minFilter = to_dawn_filter_mode(samplerState.filter());
896 desc.mipmapFilter = to_dawn_mipmap_mode(samplerState.mipmapMode());
897 wgpu::Sampler sampler = device().CreateSampler(&desc);
898 fSamplers.insert(std::pair<GrSamplerState, wgpu::Sampler>(samplerState, sampler));
899 return sampler;
900 }
901
// Carves a `size`-byte slice out of the shared uniform ring buffer.
GrDawnRingBuffer::Slice GrDawnGpu::allocateUniformRingBufferSlice(int size) {
    return fUniformRingBuffer.allocate(size);
}
905
appendCommandBuffer(wgpu::CommandBuffer commandBuffer)906 void GrDawnGpu::appendCommandBuffer(wgpu::CommandBuffer commandBuffer) {
907 if (commandBuffer) {
908 fCommandBuffers.push_back(commandBuffer);
909 }
910 }
911
getCopyEncoder()912 wgpu::CommandEncoder GrDawnGpu::getCopyEncoder() {
913 if (!fCopyEncoder) {
914 fCopyEncoder = fDevice.CreateCommandEncoder();
915 }
916 return fCopyEncoder;
917 }
918
flushCopyEncoder()919 void GrDawnGpu::flushCopyEncoder() {
920 if (fCopyEncoder) {
921 fCommandBuffers.push_back(fCopyEncoder.Finish());
922 fCopyEncoder = nullptr;
923 }
924 }
925
moveStagingBuffersToBusyAndMapAsync()926 void GrDawnGpu::moveStagingBuffersToBusyAndMapAsync() {
927 for (size_t i = 0; i < fSubmittedStagingBuffers.size(); ++i) {
928 GrDawnBuffer* buffer = static_cast<GrDawnBuffer*>(fSubmittedStagingBuffers[i].get());
929 buffer->mapWriteAsync();
930 fBusyStagingBuffers.push_back(std::move(fSubmittedStagingBuffers[i]));
931 }
932 fSubmittedStagingBuffers.clear();
933 }
934
SkSLToSPIRV(const char * shaderString,SkSL::ProgramKind kind,uint32_t rtFlipOffset,SkSL::Program::Inputs * inputs)935 SkSL::String GrDawnGpu::SkSLToSPIRV(const char* shaderString,
936 SkSL::ProgramKind kind,
937 uint32_t rtFlipOffset,
938 SkSL::Program::Inputs* inputs) {
939 auto errorHandler = this->getContext()->priv().getShaderErrorHandler();
940 SkSL::Program::Settings settings;
941 settings.fRTFlipOffset = rtFlipOffset;
942 settings.fRTFlipBinding = 0;
943 settings.fRTFlipSet = 0;
944 std::unique_ptr<SkSL::Program> program = this->shaderCompiler()->convertProgram(
945 kind,
946 shaderString,
947 settings);
948 if (!program) {
949 errorHandler->compileError(shaderString, this->shaderCompiler()->errorText().c_str());
950 return "";
951 }
952 if (inputs) {
953 *inputs = program->fInputs;
954 }
955 SkSL::String code;
956 if (!this->shaderCompiler()->toSPIRV(*program, &code)) {
957 errorHandler->compileError(shaderString, this->shaderCompiler()->errorText().c_str());
958 return "";
959 }
960 return code;
961 }
962
createShaderModule(const SkSL::String & spirvSource)963 wgpu::ShaderModule GrDawnGpu::createShaderModule(const SkSL::String& spirvSource) {
964 wgpu::ShaderModuleSPIRVDescriptor desc;
965 desc.codeSize = spirvSource.size() / 4;
966 desc.code = reinterpret_cast<const uint32_t*>(spirvSource.c_str());
967
968 wgpu::ShaderModuleDescriptor smDesc;
969 smDesc.nextInChain = &desc;
970
971 return fDevice.CreateShaderModule(&smDesc);
972 }
973