1 /*
2 * Copyright 2019 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "src/gpu/dawn/GrDawnGpu.h"
9
10 #include "include/gpu/GrBackendSemaphore.h"
11 #include "include/gpu/GrBackendSurface.h"
12 #include "include/gpu/GrContextOptions.h"
13 #include "src/gpu/GrGeometryProcessor.h"
14 #include "src/gpu/GrGpuResourceCacheAccess.h"
15 #include "src/gpu/GrMesh.h"
16 #include "src/gpu/GrPipeline.h"
17 #include "src/gpu/GrRenderTargetPriv.h"
18 #include "src/gpu/GrSemaphore.h"
19 #include "src/gpu/GrStencilSettings.h"
20 #include "src/gpu/GrTexturePriv.h"
21 #include "src/gpu/dawn/GrDawnBuffer.h"
22 #include "src/gpu/dawn/GrDawnCaps.h"
23 #include "src/gpu/dawn/GrDawnOpsRenderPass.h"
24 #include "src/gpu/dawn/GrDawnProgramBuilder.h"
25 #include "src/gpu/dawn/GrDawnRenderTarget.h"
26 #include "src/gpu/dawn/GrDawnStencilAttachment.h"
27 #include "src/gpu/dawn/GrDawnTexture.h"
28 #include "src/gpu/dawn/GrDawnUtil.h"
29
30 #include "src/core/SkAutoMalloc.h"
31 #include "src/core/SkMipMap.h"
32 #include "src/sksl/SkSLCompiler.h"
33
34 #if !defined(SK_BUILD_FOR_WIN)
35 #include <unistd.h>
36 #endif // !defined(SK_BUILD_FOR_WIN)
37
// Capacity of the render-pipeline LRU cache (fRenderPipelineCache).
// constexpr: compile-time constant with internal linkage.
static constexpr int kMaxRenderPipelineEntries = 1024;
39
to_dawn_filter_mode(GrSamplerState::Filter filter)40 static wgpu::FilterMode to_dawn_filter_mode(GrSamplerState::Filter filter) {
41 switch (filter) {
42 case GrSamplerState::Filter::kNearest:
43 return wgpu::FilterMode::Nearest;
44 case GrSamplerState::Filter::kBilerp:
45 case GrSamplerState::Filter::kMipMap:
46 return wgpu::FilterMode::Linear;
47 default:
48 SkASSERT(!"unsupported filter mode");
49 return wgpu::FilterMode::Nearest;
50 }
51 }
52
to_dawn_address_mode(GrSamplerState::WrapMode wrapMode)53 static wgpu::AddressMode to_dawn_address_mode(GrSamplerState::WrapMode wrapMode) {
54 switch (wrapMode) {
55 case GrSamplerState::WrapMode::kClamp:
56 return wgpu::AddressMode::ClampToEdge;
57 case GrSamplerState::WrapMode::kRepeat:
58 return wgpu::AddressMode::Repeat;
59 case GrSamplerState::WrapMode::kMirrorRepeat:
60 return wgpu::AddressMode::MirrorRepeat;
61 case GrSamplerState::WrapMode::kClampToBorder:
62 SkASSERT(!"unsupported address mode");
63 }
64 SkASSERT(!"unsupported address mode");
65 return wgpu::AddressMode::ClampToEdge;
66
67 }
68
Make(const wgpu::Device & device,const GrContextOptions & options,GrContext * context)69 sk_sp<GrGpu> GrDawnGpu::Make(const wgpu::Device& device,
70 const GrContextOptions& options, GrContext* context) {
71 if (!device) {
72 return nullptr;
73 }
74
75 return sk_sp<GrGpu>(new GrDawnGpu(context, options, device));
76 }
77
78 ////////////////////////////////////////////////////////////////////////////////
79
// Constructs the Dawn backend GrGpu: grabs the device's queue, creates the
// SkSL compiler used for shader translation, and sizes the render-pipeline
// cache at kMaxRenderPipelineEntries.
GrDawnGpu::GrDawnGpu(GrContext* context, const GrContextOptions& options,
                     const wgpu::Device& device)
        : INHERITED(context)
        , fDevice(device)
        , fQueue(device.CreateQueue())
        , fCompiler(new SkSL::Compiler())
        , fUniformRingBuffer(this, wgpu::BufferUsage::Uniform)
        , fRenderPipelineCache(kMaxRenderPipelineEntries)
        , fStagingManager(fDevice) {
    fCaps.reset(new GrDawnCaps(options));
}
91
// All members are RAII-managed; nothing to release explicitly.
GrDawnGpu::~GrDawnGpu() {
}
94
95
// Called when the owning context is abandoned or released. Not yet
// implemented for the Dawn backend.
void GrDawnGpu::disconnect(DisconnectType type) {
    SkASSERT(!"unimplemented");
}
99
100 ///////////////////////////////////////////////////////////////////////////////
101
// Returns the render pass used to record ops targeting 'rt'. A single pass
// object is reused; each call replaces the previous one. 'bounds' and
// 'sampledProxies' are currently unused by this backend.
GrOpsRenderPass* GrDawnGpu::getOpsRenderPass(
        GrRenderTarget* rt, GrSurfaceOrigin origin, const SkIRect& bounds,
        const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
        const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo,
        const SkTArray<GrSurfaceProxy*, true>& sampledProxies) {
    fOpsRenderPass.reset(new GrDawnOpsRenderPass(this, rt, origin, colorInfo, stencilInfo));
    return fOpsRenderPass.get();
}
110
111 ///////////////////////////////////////////////////////////////////////////////
onCreateBuffer(size_t size,GrGpuBufferType type,GrAccessPattern accessPattern,const void * data)112 sk_sp<GrGpuBuffer> GrDawnGpu::onCreateBuffer(size_t size, GrGpuBufferType type,
113 GrAccessPattern accessPattern, const void* data) {
114 sk_sp<GrGpuBuffer> b(new GrDawnBuffer(this, size, type, accessPattern));
115 if (data && b) {
116 b->updateData(data, size);
117 }
118 return b;
119 }
120
121 ////////////////////////////////////////////////////////////////////////////////
onWritePixels(GrSurface * surface,int left,int top,int width,int height,GrColorType surfaceColorType,GrColorType srcColorType,const GrMipLevel texels[],int mipLevelCount,bool prepForTexSampling)122 bool GrDawnGpu::onWritePixels(GrSurface* surface, int left, int top, int width, int height,
123 GrColorType surfaceColorType, GrColorType srcColorType,
124 const GrMipLevel texels[], int mipLevelCount,
125 bool prepForTexSampling) {
126 GrDawnTexture* texture = static_cast<GrDawnTexture*>(surface->asTexture());
127 if (!texture) {
128 return false;
129 }
130 texture->upload(srcColorType, texels, mipLevelCount,
131 SkIRect::MakeXYWH(left, top, width, height), this->getCopyEncoder());
132 return true;
133 }
134
// GPU buffer -> texture transfer. Not yet implemented for Dawn.
bool GrDawnGpu::onTransferPixelsTo(GrTexture* texture, int left, int top, int width, int height,
                                   GrColorType textureColorType, GrColorType bufferColorType,
                                   GrGpuBuffer* transferBuffer, size_t bufferOffset,
                                   size_t rowBytes) {
    SkASSERT(!"unimplemented");
    return false;
}
142
// Surface -> GPU buffer transfer. Not yet implemented for Dawn.
bool GrDawnGpu::onTransferPixelsFrom(GrSurface* surface, int left, int top, int width, int height,
                                     GrColorType surfaceColorType, GrColorType bufferColorType,
                                     GrGpuBuffer* transferBuffer, size_t offset) {
    SkASSERT(!"unimplemented");
    return false;
}
149
150 ////////////////////////////////////////////////////////////////////////////////
onCreateTexture(SkISize dimensions,const GrBackendFormat & backendFormat,GrRenderable renderable,int renderTargetSampleCnt,SkBudgeted budgeted,GrProtected,int mipLevelCount,uint32_t levelClearMask)151 sk_sp<GrTexture> GrDawnGpu::onCreateTexture(SkISize dimensions,
152 const GrBackendFormat& backendFormat,
153 GrRenderable renderable,
154 int renderTargetSampleCnt,
155 SkBudgeted budgeted,
156 GrProtected,
157 int mipLevelCount,
158 uint32_t levelClearMask) {
159 SkASSERT(!levelClearMask);
160 wgpu::TextureFormat format;
161 if (!backendFormat.asDawnFormat(&format)) {
162 return nullptr;
163 }
164
165 GrMipMapsStatus mipMapsStatus =
166 mipLevelCount > 1 ? GrMipMapsStatus::kDirty : GrMipMapsStatus::kNotAllocated;
167
168 return GrDawnTexture::Make(this, dimensions, format, renderable, renderTargetSampleCnt,
169 budgeted, mipLevelCount, mipMapsStatus);
170 }
171
// Compressed texture creation is not yet implemented for Dawn.
sk_sp<GrTexture> GrDawnGpu::onCreateCompressedTexture(SkISize dimensions, const GrBackendFormat&,
                                                      SkBudgeted, GrMipMapped, GrProtected,
                                                      const void* data, size_t dataSize) {
    SkASSERT(!"unimplemented");
    return nullptr;
}
178
onWrapBackendTexture(const GrBackendTexture & backendTex,GrColorType colorType,GrWrapOwnership ownership,GrWrapCacheable cacheable,GrIOType ioType)179 sk_sp<GrTexture> GrDawnGpu::onWrapBackendTexture(const GrBackendTexture& backendTex,
180 GrColorType colorType,
181 GrWrapOwnership ownership,
182 GrWrapCacheable cacheable,
183 GrIOType ioType) {
184 GrDawnTextureInfo info;
185 if (!backendTex.getDawnTextureInfo(&info)) {
186 return nullptr;
187 }
188
189 SkISize dimensions = { backendTex.width(), backendTex.height() };
190 GrMipMapsStatus status = GrMipMapsStatus::kNotAllocated;
191 return GrDawnTexture::MakeWrapped(this, dimensions, GrRenderable::kNo, 1, status, cacheable,
192 ioType, info);
193 }
194
// Wrapping compressed backend textures is unsupported; always fails.
sk_sp<GrTexture> GrDawnGpu::onWrapCompressedBackendTexture(const GrBackendTexture& backendTex,
                                                           GrWrapOwnership ownership,
                                                           GrWrapCacheable cacheable) {
    return nullptr;
}
200
201
onWrapRenderableBackendTexture(const GrBackendTexture & tex,int sampleCnt,GrColorType colorType,GrWrapOwnership,GrWrapCacheable cacheable)202 sk_sp<GrTexture> GrDawnGpu::onWrapRenderableBackendTexture(const GrBackendTexture& tex,
203 int sampleCnt, GrColorType colorType,
204 GrWrapOwnership,
205 GrWrapCacheable cacheable) {
206 GrDawnTextureInfo info;
207 if (!tex.getDawnTextureInfo(&info) || !info.fTexture) {
208 return nullptr;
209 }
210
211 SkISize dimensions = { tex.width(), tex.height() };
212 sampleCnt = this->caps()->getRenderTargetSampleCount(sampleCnt, tex.getBackendFormat());
213 if (sampleCnt < 1) {
214 return nullptr;
215 }
216
217 GrMipMapsStatus status = GrMipMapsStatus::kNotAllocated;
218 return GrDawnTexture::MakeWrapped(this, dimensions, GrRenderable::kYes, sampleCnt, status,
219 cacheable, kRW_GrIOType, info);
220 }
221
onWrapBackendRenderTarget(const GrBackendRenderTarget & rt,GrColorType colorType)222 sk_sp<GrRenderTarget> GrDawnGpu::onWrapBackendRenderTarget(const GrBackendRenderTarget& rt,
223 GrColorType colorType) {
224 GrDawnRenderTargetInfo info;
225 if (!rt.getDawnRenderTargetInfo(&info) || !info.fTextureView) {
226 return nullptr;
227 }
228
229 SkISize dimensions = { rt.width(), rt.height() };
230 int sampleCnt = 1;
231 return GrDawnRenderTarget::MakeWrapped(this, dimensions, sampleCnt, info);
232 }
233
onWrapBackendTextureAsRenderTarget(const GrBackendTexture & tex,int sampleCnt,GrColorType colorType)234 sk_sp<GrRenderTarget> GrDawnGpu::onWrapBackendTextureAsRenderTarget(const GrBackendTexture& tex,
235 int sampleCnt,
236 GrColorType colorType) {
237 GrDawnTextureInfo textureInfo;
238 if (!tex.getDawnTextureInfo(&textureInfo) || !textureInfo.fTexture) {
239 return nullptr;
240 }
241
242 SkISize dimensions = { tex.width(), tex.height() };
243 sampleCnt = this->caps()->getRenderTargetSampleCount(sampleCnt, tex.getBackendFormat());
244 if (sampleCnt < 1) {
245 return nullptr;
246 }
247
248 GrDawnRenderTargetInfo info(textureInfo);
249 return GrDawnRenderTarget::MakeWrapped(this, dimensions, sampleCnt, info);
250 }
251
createStencilAttachmentForRenderTarget(const GrRenderTarget * rt,int width,int height,int numStencilSamples)252 GrStencilAttachment* GrDawnGpu::createStencilAttachmentForRenderTarget(const GrRenderTarget* rt,
253 int width,
254 int height,
255 int numStencilSamples) {
256 GrDawnStencilAttachment* stencil(GrDawnStencilAttachment::Create(this,
257 width,
258 height,
259 numStencilSamples));
260 fStats.incStencilAttachmentCreates();
261 return stencil;
262 }
263
// Creates a client-owned backend texture (with optional mip chain), uploading
// either the caller's pixmap data or zeroes via staging buffers.
//
// NOTE(review): when 'data' supplies pixmaps, only pixmap(0) is read and the
// same base-level bytes are uploaded to *every* mip level at that level's
// dimensions, so smaller levels receive a truncated copy of the base data.
// Confirm whether per-level pixmap data should be consumed instead.
GrBackendTexture GrDawnGpu::onCreateBackendTexture(SkISize dimensions,
                                                   const GrBackendFormat& backendFormat,
                                                   GrRenderable renderable,
                                                   GrMipMapped mipMapped,
                                                   GrProtected isProtected,
                                                   const BackendTextureData* data) {
    wgpu::TextureFormat format;
    if (!backendFormat.asDawnFormat(&format)) {
        return GrBackendTexture();
    }

    // FIXME: Dawn doesn't support mipmapped render targets (yet).
    if (mipMapped == GrMipMapped::kYes && GrRenderable::kYes == renderable) {
        return GrBackendTexture();
    }

    wgpu::TextureDescriptor desc;
    desc.usage =
        wgpu::TextureUsage::Sampled |
        wgpu::TextureUsage::CopySrc |
        wgpu::TextureUsage::CopyDst;

    if (GrRenderable::kYes == renderable) {
        desc.usage |= wgpu::TextureUsage::OutputAttachment;
    }

    int numMipLevels = 1;
    if (mipMapped == GrMipMapped::kYes) {
        numMipLevels = SkMipMap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1;
    }

    desc.size.width = dimensions.width();
    desc.size.height = dimensions.height();
    desc.size.depth = 1;
    desc.format = format;
    desc.mipLevelCount = numMipLevels;

    wgpu::Texture tex = this->device().CreateTexture(&desc);

    size_t bpp = GrDawnBytesPerPixel(format);
    size_t baseLayerSize = bpp * dimensions.width() * dimensions.height();
    const void* pixels;
    SkAutoMalloc defaultStorage(baseLayerSize);
    if (data && data->type() == BackendTextureData::Type::kPixmaps) {
        pixels = data->pixmap(0).addr();
    } else {
        // No pixel data supplied: upload zeroes so the texture has defined
        // contents.
        pixels = defaultStorage.get();
        memset(defaultStorage.get(), 0, baseLayerSize);
    }
    wgpu::Device device = this->device();
    wgpu::CommandEncoder copyEncoder = fDevice.CreateCommandEncoder();
    int w = dimensions.width(), h = dimensions.height();
    for (uint32_t i = 0; i < desc.mipLevelCount; i++) {
        size_t origRowBytes = bpp * w;
        // Buffer-to-texture copies require a rounded row pitch.
        size_t rowBytes = GrDawnRoundRowBytes(origRowBytes);
        size_t size = rowBytes * h;
        GrDawnStagingBuffer* stagingBuffer = this->getStagingBuffer(size);
        if (rowBytes == origRowBytes) {
            memcpy(stagingBuffer->fData, pixels, size);
        } else {
            // Repack row by row to the padded pitch.
            const char* src = static_cast<const char*>(pixels);
            char* dst = static_cast<char*>(stagingBuffer->fData);
            for (int row = 0; row < h; row++) {
                memcpy(dst, src, origRowBytes);
                dst += rowBytes;
                src += origRowBytes;
            }
        }
        wgpu::Buffer buffer = stagingBuffer->fBuffer;
        buffer.Unmap();  // CPU writes are done; make the buffer usable by the copy
        stagingBuffer->fData = nullptr;
        wgpu::BufferCopyView srcBuffer;
        srcBuffer.buffer = buffer;
        srcBuffer.offset = 0;
        srcBuffer.rowPitch = rowBytes;
        srcBuffer.imageHeight = h;
        wgpu::TextureCopyView dstTexture;
        dstTexture.texture = tex;
        dstTexture.mipLevel = i;
        dstTexture.origin = {0, 0, 0};
        wgpu::Extent3D copySize = {(uint32_t) w, (uint32_t) h, 1};
        copyEncoder.CopyBufferToTexture(&srcBuffer, &dstTexture, &copySize);
        // Each successive mip level is half the size, clamped to 1.
        w = std::max(1, w / 2);
        h = std::max(1, h / 2);
    }
    wgpu::CommandBuffer cmdBuf = copyEncoder.Finish();
    fQueue.Submit(1, &cmdBuf);
    GrDawnTextureInfo info;
    info.fTexture = tex;
    info.fFormat = desc.format;
    info.fLevelCount = desc.mipLevelCount;
    return GrBackendTexture(dimensions.width(), dimensions.height(), info);
}
357
// Compressed backend texture creation is unsupported; returns an invalid
// (default-constructed) handle.
GrBackendTexture GrDawnGpu::onCreateCompressedBackendTexture(SkISize dimensions,
                                                             const GrBackendFormat&,
                                                             GrMipMapped,
                                                             GrProtected,
                                                             const BackendTextureData*) {
    return {};
}
365
// Releases this GrGpu's reference to a client backend texture.
// NOTE(review): 'info' is a local copy, so nulling fTexture only drops the
// ref obtained via getDawnTextureInfo(); presumably wgpu::Texture is
// ref-counted and the texture is destroyed when the last ref dies — confirm.
void GrDawnGpu::deleteBackendTexture(const GrBackendTexture& tex) {
    GrDawnTextureInfo info;
    if (tex.getDawnTextureInfo(&info)) {
        info.fTexture = nullptr;
    }
}
372
// Ahead-of-time pipeline compilation is not supported; reports failure.
bool GrDawnGpu::compile(const GrProgramDesc&, const GrProgramInfo&) {
    return false;
}
376
377 #if GR_TEST_UTILS
isTestingOnlyBackendTexture(const GrBackendTexture & tex) const378 bool GrDawnGpu::isTestingOnlyBackendTexture(const GrBackendTexture& tex) const {
379 GrDawnTextureInfo info;
380 if (!tex.getDawnTextureInfo(&info)) {
381 return false;
382 }
383
384 return info.fTexture.Get();
385 }
386
createTestingOnlyBackendRenderTarget(int width,int height,GrColorType colorType)387 GrBackendRenderTarget GrDawnGpu::createTestingOnlyBackendRenderTarget(int width, int height,
388 GrColorType colorType) {
389
390 if (width > this->caps()->maxTextureSize() || height > this->caps()->maxTextureSize()) {
391 return GrBackendRenderTarget();
392 }
393
394 wgpu::TextureFormat format;
395 if (!GrColorTypeToDawnFormat(colorType, &format)) {
396 return GrBackendRenderTarget();
397 }
398
399 wgpu::TextureDescriptor desc;
400 desc.usage =
401 wgpu::TextureUsage::CopySrc |
402 wgpu::TextureUsage::OutputAttachment;
403
404 desc.size.width = width;
405 desc.size.height = height;
406 desc.size.depth = 1;
407 desc.format = format;
408
409 wgpu::Texture tex = this->device().CreateTexture(&desc);
410
411 GrDawnRenderTargetInfo info;
412 info.fTextureView = tex.CreateView();
413 info.fFormat = desc.format;
414 info.fLevelCount = desc.mipLevelCount;
415 return GrBackendRenderTarget(width, height, 1, 0, info);
416 }
417
// Test-only: drops the local ref to the render target's texture view obtained
// from getDawnRenderTargetInfo().
void GrDawnGpu::deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget& rt) {
    GrDawnRenderTargetInfo info;
    if (rt.getDawnRenderTargetInfo(&info)) {
        info.fTextureView = nullptr;
    }
}
424
// Test-only: submits all pending work.
// NOTE(review): flush() submits and ticks the device but does not block until
// the GPU is idle — confirm this is sufficient "sync" for callers.
void GrDawnGpu::testingOnly_flushGpuAndSync() {
    this->flush();
}
428
429 #endif
430
// Submits all pending work: finishes the shared copy encoder, submits every
// accumulated command buffer in order, asks the staging manager to map its
// busy list, and ticks the device so pending callbacks can fire.
void GrDawnGpu::flush() {
    this->flushCopyEncoder();
    if (!fCommandBuffers.empty()) {
        fQueue.Submit(fCommandBuffers.size(), &fCommandBuffers.front());
        fCommandBuffers.clear();
    }
    fStagingManager.mapBusyList();
    fDevice.Tick();
}
440
// Submits all recorded work. Surface-access transitions, flush-info
// callbacks, and external-IO requests are not acted on by this backend yet.
bool GrDawnGpu::onFinishFlush(GrSurfaceProxy*[], int n, SkSurface::BackendSurfaceAccess access,
                              const GrFlushInfo& info, const GrPrepareForExternalIORequests&) {
    this->flush();
    return true;
}
446
get_dawn_texture_from_surface(GrSurface * src)447 static wgpu::Texture get_dawn_texture_from_surface(GrSurface* src) {
448 if (auto t = static_cast<GrDawnTexture*>(src->asTexture())) {
449 return t->texture();
450 } else {
451 return nullptr;
452 }
453 }
454
onCopySurface(GrSurface * dst,GrSurface * src,const SkIRect & srcRect,const SkIPoint & dstPoint)455 bool GrDawnGpu::onCopySurface(GrSurface* dst,
456 GrSurface* src,
457 const SkIRect& srcRect,
458 const SkIPoint& dstPoint) {
459 wgpu::Texture srcTexture = get_dawn_texture_from_surface(src);
460 wgpu::Texture dstTexture = get_dawn_texture_from_surface(dst);
461 if (!srcTexture || !dstTexture) {
462 return false;
463 }
464
465 uint32_t width = srcRect.width(), height = srcRect.height();
466
467 wgpu::TextureCopyView srcTextureView, dstTextureView;
468 srcTextureView.texture = srcTexture;
469 srcTextureView.origin = {(uint32_t) srcRect.x(), (uint32_t) srcRect.y(), 0};
470 dstTextureView.texture = dstTexture;
471 dstTextureView.origin = {(uint32_t) dstPoint.x(), (uint32_t) dstPoint.y(), 0};
472
473 wgpu::Extent3D copySize = {width, height, 1};
474 this->getCopyEncoder().CopyTextureToTexture(&srcTextureView, &dstTextureView, ©Size);
475 return true;
476 }
477
callback(WGPUBufferMapAsyncStatus status,const void * data,uint64_t dataLength,void * userdata)478 static void callback(WGPUBufferMapAsyncStatus status, const void* data, uint64_t dataLength,
479 void* userdata) {
480 (*reinterpret_cast<const void**>(userdata)) = data;
481 }
482
// Reads back a rectangle of pixels by copying the texture into a mappable
// buffer and synchronously waiting for the map to complete. Returns false
// only when rowBytes is 0.
// NOTE(review): the wait loop spins on device().Tick() until the map callback
// delivers a pointer; a failed map that reports a null pointer would spin
// forever — confirm error handling upstream.
bool GrDawnGpu::onReadPixels(GrSurface* surface, int left, int top, int width, int height,
                             GrColorType surfaceColorType, GrColorType dstColorType, void* buffer,
                             size_t rowBytes) {
    wgpu::Texture tex = get_dawn_texture_from_surface(surface);
    SkASSERT(tex);

    if (0 == rowBytes) {
        return false;
    }
    size_t origRowBytes = rowBytes;
    int origSizeInBytes = origRowBytes * height;
    // Texture-to-buffer copies require a rounded row pitch, so the readback
    // buffer may be larger than the caller's destination.
    rowBytes = GrDawnRoundRowBytes(rowBytes);
    int sizeInBytes = rowBytes * height;

    wgpu::BufferDescriptor desc;
    desc.usage = wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::MapRead;
    desc.size = sizeInBytes;

    wgpu::Buffer buf = device().CreateBuffer(&desc);

    wgpu::TextureCopyView srcTexture;
    srcTexture.texture = tex;
    srcTexture.origin = {(uint32_t) left, (uint32_t) top, 0};

    wgpu::BufferCopyView dstBuffer;
    dstBuffer.buffer = buf;
    dstBuffer.offset = 0;
    dstBuffer.rowPitch = rowBytes;
    dstBuffer.imageHeight = height;

    wgpu::Extent3D copySize = {(uint32_t) width, (uint32_t) height, 1};
    this->getCopyEncoder().CopyTextureToBuffer(&srcTexture, &dstBuffer, &copySize);
    // Submit the copy (and any other pending work) before mapping.
    flush();

    const void *readPixelsPtr = nullptr;
    buf.MapReadAsync(callback, &readPixelsPtr);
    while (!readPixelsPtr) {
        device().Tick();
    }

    if (rowBytes == origRowBytes) {
        memcpy(buffer, readPixelsPtr, origSizeInBytes);
    } else {
        // Strip the row padding while copying into the caller's buffer.
        const char* src = static_cast<const char*>(readPixelsPtr);
        char* dst = static_cast<char*>(buffer);
        for (int row = 0; row < height; row++) {
            memcpy(dst, src, origRowBytes);
            dst += origRowBytes;
            src += rowBytes;
        }
    }
    buf.Unmap();
    return true;
}
537
// Mipmap regeneration is not yet implemented for Dawn.
bool GrDawnGpu::onRegenerateMipMapLevels(GrTexture*) {
    SkASSERT(!"unimplemented");
    return false;
}
542
// Finalizes the given ops render pass. Pending copies are flushed first so
// they are queued ahead of the render pass commands.
void GrDawnGpu::submit(GrOpsRenderPass* renderPass) {
    this->flushCopyEncoder();
    static_cast<GrDawnOpsRenderPass*>(renderPass)->submit();
}
547
// Fences are not yet implemented for the Dawn backend.
GrFence SK_WARN_UNUSED_RESULT GrDawnGpu::insertFence() {
    SkASSERT(!"unimplemented");
    return GrFence();
}
552
// Fences are not yet implemented for the Dawn backend.
bool GrDawnGpu::waitFence(GrFence fence, uint64_t timeout) {
    SkASSERT(!"unimplemented");
    return false;
}
557
// Fences are not yet implemented for the Dawn backend.
void GrDawnGpu::deleteFence(GrFence fence) const {
    SkASSERT(!"unimplemented");
}
561
// Semaphores are not yet implemented for the Dawn backend.
std::unique_ptr<GrSemaphore> SK_WARN_UNUSED_RESULT GrDawnGpu::makeSemaphore(bool isOwned) {
    SkASSERT(!"unimplemented");
    return nullptr;
}
566
// Semaphores are not yet implemented for the Dawn backend.
std::unique_ptr<GrSemaphore> GrDawnGpu::wrapBackendSemaphore(
        const GrBackendSemaphore& semaphore,
        GrResourceProvider::SemaphoreWrapType wrapType,
        GrWrapOwnership ownership) {
    SkASSERT(!"unimplemented");
    return nullptr;
}
574
// Semaphores are not yet implemented for the Dawn backend.
void GrDawnGpu::insertSemaphore(GrSemaphore* semaphore) {
    SkASSERT(!"unimplemented");
}
578
// Semaphores are not yet implemented for the Dawn backend.
void GrDawnGpu::waitSemaphore(GrSemaphore* semaphore) {
    SkASSERT(!"unimplemented");
}
582
// Finish-proc polling is not yet implemented for the Dawn backend.
void GrDawnGpu::checkFinishProcs() {
    SkASSERT(!"unimplemented");
}
586
// Cross-context texture sharing is not yet implemented for the Dawn backend.
std::unique_ptr<GrSemaphore> GrDawnGpu::prepareTextureForCrossContextUsage(GrTexture* texture) {
    SkASSERT(!"unimplemented");
    return nullptr;
}
591
// Looks up (or builds and caches) the GrDawnProgram for this render target /
// program combination, keyed by the program desc. The cache holds at most
// kMaxRenderPipelineEntries entries.
sk_sp<GrDawnProgram> GrDawnGpu::getOrCreateRenderPipeline(
        GrRenderTarget* rt,
        const GrProgramInfo& programInfo) {

    GrProgramDesc desc = this->caps()->makeDesc(rt, programInfo);
    if (!desc.isValid()) {
        return nullptr;
    }

    // Cache hit: reuse the previously built program.
    if (sk_sp<GrDawnProgram>* program = fRenderPipelineCache.find(desc)) {
        return *program;
    }

    wgpu::TextureFormat colorFormat;
    SkAssertResult(programInfo.backendFormat().asDawnFormat(&colorFormat));

    // The only stencil format this backend ever attaches.
    wgpu::TextureFormat stencilFormat = wgpu::TextureFormat::Depth24PlusStencil8;
    bool hasDepthStencil = rt->renderTargetPriv().getStencilAttachment() != nullptr;

    sk_sp<GrDawnProgram> program = GrDawnProgramBuilder::Build(
            this, rt, programInfo, colorFormat,
            hasDepthStencil, stencilFormat, &desc);
    fRenderPipelineCache.insert(desc, program);
    return program;
}
617
getOrCreateSampler(GrSamplerState samplerState)618 wgpu::Sampler GrDawnGpu::getOrCreateSampler(GrSamplerState samplerState) {
619 auto i = fSamplers.find(samplerState);
620 if (i != fSamplers.end()) {
621 return i->second;
622 }
623 wgpu::SamplerDescriptor desc;
624 desc.addressModeU = to_dawn_address_mode(samplerState.wrapModeX());
625 desc.addressModeV = to_dawn_address_mode(samplerState.wrapModeY());
626 desc.addressModeW = wgpu::AddressMode::ClampToEdge;
627 desc.magFilter = desc.minFilter = to_dawn_filter_mode(samplerState.filter());
628 desc.mipmapFilter = wgpu::FilterMode::Linear;
629 desc.lodMinClamp = 0.0f;
630 desc.lodMaxClamp = 1000.0f;
631 desc.compare = wgpu::CompareFunction::Never;
632 wgpu::Sampler sampler = device().CreateSampler(&desc);
633 fSamplers.insert(std::pair<GrSamplerState, wgpu::Sampler>(samplerState, sampler));
634 return sampler;
635 }
636
// Carves a slice of 'size' bytes out of the shared uniform ring buffer.
GrDawnRingBuffer::Slice GrDawnGpu::allocateUniformRingBufferSlice(int size) {
    return fUniformRingBuffer.allocate(size);
}
640
// Obtains a staging buffer for 'size' bytes from the staging manager, which
// either reuses an existing buffer or creates a new one.
GrDawnStagingBuffer* GrDawnGpu::getStagingBuffer(size_t size) {
    return fStagingManager.findOrCreateStagingBuffer(size);
}
644
appendCommandBuffer(wgpu::CommandBuffer commandBuffer)645 void GrDawnGpu::appendCommandBuffer(wgpu::CommandBuffer commandBuffer) {
646 if (commandBuffer) {
647 fCommandBuffers.push_back(commandBuffer);
648 }
649 }
650
getCopyEncoder()651 wgpu::CommandEncoder GrDawnGpu::getCopyEncoder() {
652 if (!fCopyEncoder) {
653 fCopyEncoder = fDevice.CreateCommandEncoder();
654 }
655 return fCopyEncoder;
656 }
657
flushCopyEncoder()658 void GrDawnGpu::flushCopyEncoder() {
659 if (fCopyEncoder) {
660 fCommandBuffers.push_back(fCopyEncoder.Finish());
661 fCopyEncoder = nullptr;
662 }
663 }
664