1 /*
2 * Copyright 2015 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "src/gpu/ganesh/vk/GrVkGpu.h"
9
10 #include "include/core/SkImageInfo.h"
11 #ifdef SKIA_DFX_FOR_RECORD_VKIMAGE
12 #include "include/core/SkLog.h"
13 #endif
14 #include "include/core/SkPoint.h"
15 #include "include/core/SkRect.h"
16 #include "include/core/SkSamplingOptions.h"
17 #include "include/core/SkSize.h"
18 #include "include/core/SkSurface.h"
19 #include "include/core/SkTextureCompressionType.h"
20 #include "include/core/SkTypes.h"
21 #include "include/gpu/GpuTypes.h"
22 #include "include/gpu/MutableTextureState.h"
23 #include "include/gpu/ganesh/GrBackendSurface.h"
24 #include "include/gpu/ganesh/GrDirectContext.h"
25 #include "include/gpu/ganesh/vk/GrVkBackendSemaphore.h"
26 #include "include/gpu/ganesh/vk/GrVkBackendSurface.h"
27 #include "include/gpu/ganesh/vk/GrVkTypes.h"
28 #include "include/gpu/vk/VulkanBackendContext.h"
29 #include "include/gpu/vk/VulkanExtensions.h"
30 #include "include/gpu/vk/VulkanMemoryAllocator.h"
31 #include "include/gpu/vk/VulkanMutableTextureState.h"
32 #include "include/gpu/vk/VulkanTypes.h"
33 #include "include/private/base/SkDebug.h"
34 #include "include/private/base/SkTemplates.h"
35 #include "include/private/base/SkTo.h"
36 #include "include/private/gpu/vk/SkiaVulkan.h"
37 #include "src/base/SkRectMemcpy.h"
38 #include "src/core/SkCompressedDataUtils.h"
39 #include "src/core/SkMipmap.h"
40 #include "src/core/SkTraceEvent.h"
41 #include "src/base/SkUtils.h"
42 #include "src/gpu/DataUtils.h"
43 #include "src/gpu/RefCntedCallback.h"
44 #include "src/gpu/ganesh/GrBackendUtils.h"
45 #include "src/gpu/ganesh/GrBuffer.h"
46 #include "src/gpu/ganesh/GrCaps.h"
47 #include "src/gpu/ganesh/GrDataUtils.h"
48 #include "src/gpu/ganesh/GrDirectContextPriv.h"
49 #include "src/gpu/ganesh/GrGpuBuffer.h"
50 #include "src/gpu/ganesh/GrImageInfo.h"
51 #include "src/gpu/ganesh/GrPixmap.h"
52 #include "src/gpu/ganesh/GrProgramInfo.h"
53 #include "src/gpu/ganesh/GrRenderTarget.h"
54 #include "src/gpu/ganesh/GrResourceProvider.h"
55 #include "src/gpu/ganesh/GrSurface.h"
56 #include "src/gpu/ganesh/GrSurfaceProxy.h"
57 #include "src/gpu/ganesh/GrTexture.h"
58 #include "src/gpu/ganesh/GrThreadSafePipelineBuilder.h"
59 #include "src/gpu/ganesh/vk/GrVkBuffer.h"
60 #include "src/gpu/ganesh/vk/GrVkCommandBuffer.h"
61 #include "src/gpu/ganesh/vk/GrVkCommandPool.h"
62 #include "src/gpu/ganesh/vk/GrVkFramebuffer.h"
63 #include "src/gpu/ganesh/vk/GrVkImage.h"
64 #include "src/gpu/ganesh/vk/GrVkOpsRenderPass.h"
65 #include "src/gpu/ganesh/vk/GrVkRenderPass.h"
66 #include "src/gpu/ganesh/vk/GrVkRenderTarget.h"
67 #include "src/gpu/ganesh/vk/GrVkResourceProvider.h"
68 #include "src/gpu/ganesh/vk/GrVkSemaphore.h"
69 #include "src/gpu/ganesh/vk/GrVkTexture.h"
70 #include "src/gpu/ganesh/vk/GrVkTextureRenderTarget.h"
71 #include "src/gpu/ganesh/vk/GrVkUtil.h"
72 #include "src/gpu/vk/VulkanInterface.h"
73 #include "src/gpu/vk/VulkanMemory.h"
74 #include "src/gpu/vk/VulkanUtilsPriv.h"
75
76 #include <algorithm>
77 #include <cstring>
78 #include <functional>
79 #include <utility>
80
81 class GrAttachment;
82 class GrBackendSemaphore;
83 class GrManagedResource;
84 class GrProgramDesc;
85 class GrSemaphore;
86 struct GrContextOptions;
87
88 #if defined(SK_USE_VMA)
89 #include "src/gpu/vk/vulkanmemoryallocator/VulkanMemoryAllocatorPriv.h"
90 #endif
91
92 using namespace skia_private;
93
94 #define VK_CALL(X) GR_VK_CALL(this->vkInterface(), X)
95 #define VK_CALL_RET(RET, X) GR_VK_CALL_RESULT(this, RET, X)
96
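// Size of the header at the start of a raw .astc blob. The OH_NativeBuffer compressed-upload
// path below assumes ASTC payloads carry this 16-byte header and skips it when computing the
// per-level buffer offsets (see the OH_NativeBuffer variant of fill_in_compressed_regions).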
97 constexpr uint8_t ASTC_HEADER_SIZE = 16;
98
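// Minimal usage sketch (an assumption: clients normally reach this code through the Ganesh
// Vulkan factory rather than calling GrVkGpu::Make directly):
//
//   skgpu::VulkanBackendContext backendContext;
//   backendContext.fInstance       = instance;        // VkInstance (placeholder names)
//   backendContext.fPhysicalDevice = physicalDevice;  // VkPhysicalDevice
//   backendContext.fDevice         = device;          // VkDevice
//   backendContext.fQueue          = graphicsQueue;   // VkQueue
//   backendContext.fGetProc        = getProc;         // proc loader
//   sk_sp<GrDirectContext> ctx = GrDirectContexts::MakeVulkan(backendContext, options);
//
// That factory ends up in GrVkGpu::Make, which rejects a context missing any of the handles or
// the proc loader checked below.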
99 std::unique_ptr<GrGpu> GrVkGpu::Make(const skgpu::VulkanBackendContext& backendContext,
100 const GrContextOptions& options,
101 GrDirectContext* direct) {
102 if (backendContext.fInstance == VK_NULL_HANDLE ||
103 backendContext.fPhysicalDevice == VK_NULL_HANDLE ||
104 backendContext.fDevice == VK_NULL_HANDLE ||
105 backendContext.fQueue == VK_NULL_HANDLE) {
106 return nullptr;
107 }
108 if (!backendContext.fGetProc) {
109 return nullptr;
110 }
111
112 skgpu::VulkanExtensions ext;
113 const skgpu::VulkanExtensions* extensions = &ext;
114 if (backendContext.fVkExtensions) {
115 extensions = backendContext.fVkExtensions;
116 }
117
118 uint32_t instanceVersion = 0;
119 uint32_t physDevVersion = 0;
120 sk_sp<const skgpu::VulkanInterface> interface =
121 skgpu::MakeInterface(backendContext, extensions, &instanceVersion, &physDevVersion);
122 if (!interface) {
123 return nullptr;
124 }
125
126 sk_sp<GrVkCaps> caps;
127 if (backendContext.fDeviceFeatures2) {
128 caps.reset(new GrVkCaps(options,
129 interface.get(),
130 backendContext.fPhysicalDevice,
131 *backendContext.fDeviceFeatures2,
132 instanceVersion,
133 physDevVersion,
134 *extensions,
135 backendContext.fProtectedContext));
136 } else if (backendContext.fDeviceFeatures) {
137 VkPhysicalDeviceFeatures2 features2;
138 features2.pNext = nullptr;
139 features2.features = *backendContext.fDeviceFeatures;
140 caps.reset(new GrVkCaps(options,
141 interface.get(),
142 backendContext.fPhysicalDevice,
143 features2,
144 instanceVersion,
145 physDevVersion,
146 *extensions,
147 backendContext.fProtectedContext));
148 } else {
149 VkPhysicalDeviceFeatures2 features;
150 memset(&features, 0, sizeof(VkPhysicalDeviceFeatures2));
151 caps.reset(new GrVkCaps(options,
152 interface.get(),
153 backendContext.fPhysicalDevice,
154 features,
155 instanceVersion,
156 physDevVersion,
157 *extensions,
158 backendContext.fProtectedContext));
159 }
160
161 if (!caps) {
162 return nullptr;
163 }
164
165 sk_sp<skgpu::VulkanMemoryAllocator> memoryAllocator = backendContext.fMemoryAllocator;
166 #if defined(SK_USE_VMA)
167 if (!memoryAllocator) {
168 // We were not given a memory allocator at creation
169 memoryAllocator =
170 skgpu::VulkanMemoryAllocators::Make(backendContext,
171 skgpu::ThreadSafe::kNo,
172 options.fVulkanVMALargeHeapBlockSize);
173 }
174 #endif
175 if (!memoryAllocator) {
176 SkDEBUGFAIL("No supplied vulkan memory allocator and unable to create one internally.");
177 return nullptr;
178 }
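// A second allocator with a capped VMA block count (SkGetVmaBlockCountMax) is created for
// cached images so those pools don't accumulate large memory holes; this appears to be a
// platform addition on top of stock Skia.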
179 const size_t maxBlockCount = SkGetVmaBlockCountMax(); // limit memory holes in the VMA cache
180 sk_sp<skgpu::VulkanMemoryAllocator> memoryAllocatorCacheImage =
181 skgpu::VulkanMemoryAllocators::Make(backendContext,
182 skgpu::ThreadSafe::kNo,
183 options.fVulkanVMALargeHeapBlockSize, maxBlockCount);
184 if (!memoryAllocatorCacheImage) {
185 SkDEBUGFAIL("No supplied vulkan memory allocator for cache image and unable to create one internally.");
186 return nullptr;
187 }
188
189 std::unique_ptr<GrVkGpu> vkGpu(new GrVkGpu(direct,
190 backendContext,
191 std::move(caps),
192 interface,
193 instanceVersion,
194 physDevVersion,
195 std::move(memoryAllocator),
196 std::move(memoryAllocatorCacheImage)));
197 if (backendContext.fProtectedContext == GrProtected::kYes &&
198 !vkGpu->vkCaps().supportsProtectedContent()) {
199 return nullptr;
200 }
201 return vkGpu;
202 }
203
204 ////////////////////////////////////////////////////////////////////////////////
205
206 GrVkGpu::GrVkGpu(GrDirectContext* direct,
207 const skgpu::VulkanBackendContext& backendContext,
208 sk_sp<GrVkCaps> caps,
209 sk_sp<const skgpu::VulkanInterface> interface,
210 uint32_t instanceVersion,
211 uint32_t physicalDeviceVersion,
212 sk_sp<skgpu::VulkanMemoryAllocator> memoryAllocator,
213 sk_sp<skgpu::VulkanMemoryAllocator> memoryAllocatorCacheImage)
214 : INHERITED(direct)
215 , fInterface(std::move(interface))
216 , fMemoryAllocator(std::move(memoryAllocator))
217 , fMemoryAllocatorCacheImage(std::move(memoryAllocatorCacheImage))
218 , fVkCaps(std::move(caps))
219 , fPhysicalDevice(backendContext.fPhysicalDevice)
220 , fDevice(backendContext.fDevice)
221 , fQueue(backendContext.fQueue)
222 , fQueueIndex(backendContext.fGraphicsQueueIndex)
223 , fResourceProvider(this)
224 , fStagingBufferManager(this)
225 , fDisconnected(false)
226 , fProtectedContext(backendContext.fProtectedContext)
227 , fDeviceLostContext(backendContext.fDeviceLostContext)
228 , fDeviceLostProc(backendContext.fDeviceLostProc) {
229 SkASSERT(fMemoryAllocator);
230 SkASSERT(fMemoryAllocatorCacheImage);
231
232 this->initCaps(fVkCaps);
233
234 VK_CALL(GetPhysicalDeviceProperties(backendContext.fPhysicalDevice, &fPhysDevProps));
235 VK_CALL(GetPhysicalDeviceMemoryProperties(backendContext.fPhysicalDevice, &fPhysDevMemProps));
236
237 fResourceProvider.init();
238
239 fMainCmdPool = fResourceProvider.findOrCreateCommandPool();
240 if (fMainCmdPool) {
241 fMainCmdBuffer = fMainCmdPool->getPrimaryCommandBuffer();
242 SkASSERT(this->currentCommandBuffer());
243 this->currentCommandBuffer()->begin(this);
244 }
245 }
246
247 void GrVkGpu::destroyResources() {
248 if (fMainCmdPool) {
249 fMainCmdPool->getPrimaryCommandBuffer()->end(this, /*abandoningBuffer=*/true);
250 fMainCmdPool->close();
251 }
252
253 // wait for all commands to finish
254 this->finishOutstandingGpuWork();
255
256 if (fMainCmdPool) {
257 fMainCmdPool->unref();
258 fMainCmdPool = nullptr;
259 }
260
261 for (int i = 0; i < fSemaphoresToWaitOn.size(); ++i) {
262 fSemaphoresToWaitOn[i]->unref();
263 }
264 fSemaphoresToWaitOn.clear();
265
266 for (int i = 0; i < fSemaphoresToSignal.size(); ++i) {
267 fSemaphoresToSignal[i]->unref();
268 }
269 fSemaphoresToSignal.clear();
270
271 fStagingBufferManager.reset();
272
273 fMSAALoadManager.destroyResources(this);
274
275 // must call this just before we destroy the command pool and VkDevice
276 fResourceProvider.destroyResources();
277 }
278
279 GrVkGpu::~GrVkGpu() {
280 if (!fDisconnected) {
281 this->destroyResources();
282 }
283 // We don't delete the memory allocator until the very end of the GrVkGpu lifetime so that
284 // clients can continue to delete backend textures even after a context has been abandoned.
285 fMemoryAllocator.reset();
286 fMemoryAllocatorCacheImage.reset();
287 }
288
289
290 void GrVkGpu::disconnect(DisconnectType type) {
291 INHERITED::disconnect(type);
292 if (!fDisconnected) {
293 this->destroyResources();
294
295 fSemaphoresToWaitOn.clear();
296 fSemaphoresToSignal.clear();
297 fMainCmdBuffer = nullptr;
298 fDisconnected = true;
299 }
300 }
301
302 GrThreadSafePipelineBuilder* GrVkGpu::pipelineBuilder() {
303 return fResourceProvider.pipelineStateCache();
304 }
305
306 sk_sp<GrThreadSafePipelineBuilder> GrVkGpu::refPipelineBuilder() {
307 return fResourceProvider.refPipelineStateCache();
308 }
309
310 ///////////////////////////////////////////////////////////////////////////////
311
312 GrOpsRenderPass* GrVkGpu::onGetOpsRenderPass(
313 GrRenderTarget* rt,
314 bool useMSAASurface,
315 GrAttachment* stencil,
316 GrSurfaceOrigin origin,
317 const SkIRect& bounds,
318 const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
319 const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo,
320 const TArray<GrSurfaceProxy*, true>& sampledProxies,
321 GrXferBarrierFlags renderPassXferBarriers) {
322 if (!fCachedOpsRenderPass) {
323 fCachedOpsRenderPass = std::make_unique<GrVkOpsRenderPass>(this);
324 }
325
326 // For the given render target and requested render pass features we need to find a compatible
327 // framebuffer to use for the render pass. Technically it is the underlying VkRenderPass that
328 // is compatible, but that is part of the framebuffer that we get here.
329 GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt);
330
331 SkASSERT(!useMSAASurface ||
332 rt->numSamples() > 1 ||
333 (this->vkCaps().supportsDiscardableMSAAForDMSAA() &&
334 vkRT->resolveAttachment() &&
335 vkRT->resolveAttachment()->supportsInputAttachmentUsage()));
336
337 // Convert the GrXferBarrierFlags into render pass self-dependency flags
338 GrVkRenderPass::SelfDependencyFlags selfDepFlags = GrVkRenderPass::SelfDependencyFlags::kNone;
339 if (renderPassXferBarriers & GrXferBarrierFlags::kBlend) {
340 selfDepFlags |= GrVkRenderPass::SelfDependencyFlags::kForNonCoherentAdvBlend;
341 }
342 if (renderPassXferBarriers & GrXferBarrierFlags::kTexture) {
343 selfDepFlags |= GrVkRenderPass::SelfDependencyFlags::kForInputAttachment;
344 }
345
346 // Figure out if we need a resolve attachment for this render pass. A resolve attachment is
347 // needed if we are using msaa to draw with a discardable msaa attachment. If we are in this
348 // case we also need to update the color load/store ops since we don't want to ever load or
349 // store the msaa color attachment, but may need to for the resolve attachment.
350 GrOpsRenderPass::LoadAndStoreInfo localColorInfo = colorInfo;
351 bool withResolve = false;
352 GrVkRenderPass::LoadFromResolve loadFromResolve = GrVkRenderPass::LoadFromResolve::kNo;
353 GrOpsRenderPass::LoadAndStoreInfo resolveInfo{GrLoadOp::kLoad, GrStoreOp::kStore, {}};
354 if (useMSAASurface && this->vkCaps().renderTargetSupportsDiscardableMSAA(vkRT)) {
355 withResolve = true;
356 localColorInfo.fStoreOp = GrStoreOp::kDiscard;
357 if (colorInfo.fLoadOp == GrLoadOp::kLoad) {
358 loadFromResolve = GrVkRenderPass::LoadFromResolve::kLoad;
359 localColorInfo.fLoadOp = GrLoadOp::kDiscard;
360 } else {
361 resolveInfo.fLoadOp = GrLoadOp::kDiscard;
362 }
363 }
364
365 // Get the framebuffer to use for the render pass
366 sk_sp<GrVkFramebuffer> framebuffer;
367 if (vkRT->wrapsSecondaryCommandBuffer()) {
368 framebuffer = vkRT->externalFramebuffer();
369 } else {
370 auto fb = vkRT->getFramebuffer(withResolve, SkToBool(stencil), selfDepFlags,
371 loadFromResolve);
372 framebuffer = sk_ref_sp(fb);
373 }
374 if (!framebuffer) {
375 return nullptr;
376 }
377
378 if (!fCachedOpsRenderPass->set(rt, std::move(framebuffer), origin, bounds, localColorInfo,
379 stencilInfo, resolveInfo, selfDepFlags, loadFromResolve,
380 sampledProxies)) {
381 return nullptr;
382 }
383 return fCachedOpsRenderPass.get();
384 }
385
386 bool GrVkGpu::submitCommandBuffer(const GrSubmitInfo& submitInfo) {
387 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
388 if (!this->currentCommandBuffer()) {
389 return false;
390 }
391 SkASSERT(!fCachedOpsRenderPass || !fCachedOpsRenderPass->isActive());
392
393 if (!this->currentCommandBuffer()->hasWork() && submitInfo.fSync == GrSyncCpu::kNo &&
394 fSemaphoresToSignal.empty() && fSemaphoresToWaitOn.empty()) {
395 // We may have added finished procs during the flush call. Since there is no actual work
396 // we are not submitting the command buffer and may never come back around to submit it.
397 // Thus we call all current finished procs manually, since the work has technically
398 // finished.
399 this->currentCommandBuffer()->callFinishedProcs();
400 SkASSERT(fDrawables.empty());
401 fResourceProvider.checkCommandBuffers();
402 return true;
403 }
404
405 fMainCmdBuffer->end(this);
406 SkASSERT(fMainCmdPool);
407 fMainCmdPool->close();
408 bool didSubmit = fMainCmdBuffer->submitToQueue(this, fQueue, fSemaphoresToSignal,
409 fSemaphoresToWaitOn, submitInfo);
410
411 if (didSubmit && submitInfo.fSync == GrSyncCpu::kYes) {
412 fMainCmdBuffer->forceSync(this);
413 }
414
415 // We must delete any drawables that had to wait until submit to destroy.
416 fDrawables.clear();
417
418 // If we didn't submit the command buffer then we did not wait on any semaphores. We will
419 // continue to hold onto these semaphores and wait on them during the next command buffer
420 // submission.
421 if (didSubmit) {
422 for (int i = 0; i < fSemaphoresToWaitOn.size(); ++i) {
423 fSemaphoresToWaitOn[i]->unref();
424 }
425 fSemaphoresToWaitOn.clear();
426 }
427
428 // Even if we did not submit the command buffer, we drop all the signal semaphores since we will
429 // not try to recover the work that wasn't submitted and instead just drop it all. The client
430 // will be notified that the semaphores were not submitted so that they will not try to wait on
431 // them.
432 for (int i = 0; i < fSemaphoresToSignal.size(); ++i) {
433 fSemaphoresToSignal[i]->unref();
434 }
435 fSemaphoresToSignal.clear();
436
437 // Release old command pool and create a new one
438 fMainCmdPool->unref();
439 fMainCmdPool = fResourceProvider.findOrCreateCommandPool();
440 if (fMainCmdPool) {
441 fMainCmdBuffer = fMainCmdPool->getPrimaryCommandBuffer();
442 SkASSERT(fMainCmdBuffer);
443 fMainCmdBuffer->begin(this);
444 } else {
445 fMainCmdBuffer = nullptr;
446 }
447 // We must wait to call checkCommandBuffers until after we get a new command buffer. The
448 // checkCommandBuffers may trigger a releaseProc which may cause us to insert a barrier for a
449 // released GrVkImage. That barrier needs to be put into a new command buffer and not the old
450 // one that was just submitted.
451 fResourceProvider.checkCommandBuffers();
452 return didSubmit;
453 }
454
455 ///////////////////////////////////////////////////////////////////////////////
456 sk_sp<GrGpuBuffer> GrVkGpu::onCreateBuffer(size_t size,
457 GrGpuBufferType type,
458 GrAccessPattern accessPattern) {
459 #ifdef SK_DEBUG
460 switch (type) {
461 case GrGpuBufferType::kVertex:
462 case GrGpuBufferType::kIndex:
463 case GrGpuBufferType::kDrawIndirect:
464 SkASSERT(accessPattern == kDynamic_GrAccessPattern ||
465 accessPattern == kStatic_GrAccessPattern);
466 break;
467 case GrGpuBufferType::kXferCpuToGpu:
468 SkASSERT(accessPattern == kDynamic_GrAccessPattern);
469 break;
470 case GrGpuBufferType::kXferGpuToCpu:
471 SkASSERT(accessPattern == kDynamic_GrAccessPattern ||
472 accessPattern == kStream_GrAccessPattern);
473 break;
474 case GrGpuBufferType::kUniform:
475 SkASSERT(accessPattern == kDynamic_GrAccessPattern);
476 break;
477 }
478 #endif
479 return GrVkBuffer::Make(this, size, type, accessPattern);
480 }
481
482 bool GrVkGpu::onWritePixels(GrSurface* surface,
483 SkIRect rect,
484 GrColorType surfaceColorType,
485 GrColorType srcColorType,
486 const GrMipLevel texels[],
487 int mipLevelCount,
488 bool prepForTexSampling) {
489 GrVkTexture* texture = static_cast<GrVkTexture*>(surface->asTexture());
490 if (!texture) {
491 return false;
492 }
493 GrVkImage* texImage = texture->textureImage();
494
495 // Make sure we have at least the base level
496 if (!mipLevelCount || !texels[0].fPixels) {
497 return false;
498 }
499
500 SkASSERT(!skgpu::VkFormatIsCompressed(texImage->imageFormat()));
501 bool success = false;
502 bool linearTiling = texImage->isLinearTiled();
503 if (linearTiling) {
504 if (mipLevelCount > 1) {
505 SkDebugf("Can't upload mipmap data to linear tiled texture");
506 return false;
507 }
508 if (VK_IMAGE_LAYOUT_PREINITIALIZED != texImage->currentLayout()) {
509 // Need to change the layout to general in order to perform a host write
510 texImage->setImageLayout(this,
511 VK_IMAGE_LAYOUT_GENERAL,
512 VK_ACCESS_HOST_WRITE_BIT,
513 VK_PIPELINE_STAGE_HOST_BIT,
514 false);
515 GrSubmitInfo submitInfo;
516 submitInfo.fSync = GrSyncCpu::kYes;
517 if (!this->submitCommandBuffer(submitInfo)) {
518 return false;
519 }
520 }
521 success = this->uploadTexDataLinear(texImage,
522 rect,
523 srcColorType,
524 texels[0].fPixels,
525 texels[0].fRowBytes);
526 } else {
527 SkASSERT(mipLevelCount <= (int)texImage->mipLevels());
528 success = this->uploadTexDataOptimal(texImage,
529 rect,
530 srcColorType,
531 texels,
532 mipLevelCount);
533 if (1 == mipLevelCount) {
534 texture->markMipmapsDirty();
535 }
536 }
537
538 if (prepForTexSampling) {
539 texImage->setImageLayout(this,
540 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
541 VK_ACCESS_SHADER_READ_BIT,
542 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
543 false);
544 }
545
546 return success;
547 }
548
549 // When we update vertex/index buffers via transfers we assume that they may have been used
550 // previously in draws and will be used again in draws afterwards. So we put a barrier before and
551 // after. If we had a mechanism for gathering the buffers that will be used in a GrVkOpsRenderPass
552 // *before* we begin a subpass we could do this lazily and non-redundantly by tracking the "last
553 // usage" on the GrVkBuffer. Then Pass 1 draw, xfer, xfer, xfer, Pass 2 draw would insert just two
554 // barriers: one before the first xfer and one before Pass 2. Currently, we'd use six barriers.
555 // Pass false as "after" before the transfer and true after the transfer.
556 static void add_transfer_dst_buffer_mem_barrier(GrVkGpu* gpu,
557 GrVkBuffer* dst,
558 size_t offset,
559 size_t size,
560 bool after) {
561 if (dst->intendedType() != GrGpuBufferType::kIndex &&
562 dst->intendedType() != GrGpuBufferType::kVertex) {
563 return;
564 }
565
566 VkAccessFlags srcAccessMask = dst->intendedType() == GrGpuBufferType::kIndex
567 ? VK_ACCESS_INDEX_READ_BIT
568 : VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
569 VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
570
571 VkPipelineStageFlagBits srcPipelineStageFlags = VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
572 VkPipelineStageFlagBits dstPipelineStageFlags = VK_PIPELINE_STAGE_TRANSFER_BIT;
573
574 if (after) {
575 using std::swap;
576 swap(srcAccessMask, dstAccessMask );
577 swap(srcPipelineStageFlags, dstPipelineStageFlags);
578 }
579
580 VkBufferMemoryBarrier bufferMemoryBarrier = {
581 VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // sType
582 nullptr, // pNext
583 srcAccessMask, // srcAccessMask
584 dstAccessMask, // dstAccessMask
585 VK_QUEUE_FAMILY_IGNORED, // srcQueueFamilyIndex
586 VK_QUEUE_FAMILY_IGNORED, // dstQueueFamilyIndex
587 dst->vkBuffer(), // buffer
588 offset, // offset
589 size, // size
590 };
591
592 gpu->addBufferMemoryBarrier(srcPipelineStageFlags,
593 dstPipelineStageFlags,
594 /*byRegion=*/false,
595 &bufferMemoryBarrier);
596 }
597
598 bool GrVkGpu::onTransferFromBufferToBuffer(sk_sp<GrGpuBuffer> src,
599 size_t srcOffset,
600 sk_sp<GrGpuBuffer> dst,
601 size_t dstOffset,
602 size_t size) {
603 if (!this->currentCommandBuffer()) {
604 return false;
605 }
606
607 VkBufferCopy copyRegion;
608 copyRegion.srcOffset = srcOffset;
609 copyRegion.dstOffset = dstOffset;
610 copyRegion.size = size;
611
612 add_transfer_dst_buffer_mem_barrier(this,
613 static_cast<GrVkBuffer*>(dst.get()),
614 dstOffset,
615 size,
616 /*after=*/false);
617 this->currentCommandBuffer()->copyBuffer(this, std::move(src), dst, 1, &copyRegion);
618 add_transfer_dst_buffer_mem_barrier(this,
619 static_cast<GrVkBuffer*>(dst.get()),
620 dstOffset,
621 size,
622 /*after=*/true);
623
624 return true;
625 }
626
627 bool GrVkGpu::onTransferPixelsTo(GrTexture* texture,
628 SkIRect rect,
629 GrColorType surfaceColorType,
630 GrColorType bufferColorType,
631 sk_sp<GrGpuBuffer> transferBuffer,
632 size_t bufferOffset,
633 size_t rowBytes) {
634 if (!this->currentCommandBuffer()) {
635 return false;
636 }
637
638 size_t bpp = GrColorTypeBytesPerPixel(bufferColorType);
639 if (GrBackendFormatBytesPerPixel(texture->backendFormat()) != bpp) {
640 return false;
641 }
642
643 // Vulkan only supports offsets that are both 4-byte aligned and aligned to a pixel.
644 if ((bufferOffset & 0x3) || (bufferOffset % bpp)) {
645 return false;
646 }
647 GrVkTexture* tex = static_cast<GrVkTexture*>(texture);
648 if (!tex) {
649 return false;
650 }
651 GrVkImage* vkImage = tex->textureImage();
652 VkFormat format = vkImage->imageFormat();
653
654 // Can't transfer compressed data
655 SkASSERT(!skgpu::VkFormatIsCompressed(format));
656
657 if (!transferBuffer) {
658 return false;
659 }
660
661 if (bufferColorType != this->vkCaps().transferColorType(format, surfaceColorType)) {
662 return false;
663 }
664 SkASSERT(skgpu::VkFormatBytesPerBlock(format) == GrColorTypeBytesPerPixel(bufferColorType));
665
666 SkASSERT(SkIRect::MakeSize(texture->dimensions()).contains(rect));
667
668 // Set up copy region
669 VkBufferImageCopy region;
670 memset(&region, 0, sizeof(VkBufferImageCopy));
671 region.bufferOffset = bufferOffset;
672 region.bufferRowLength = (uint32_t)(rowBytes/bpp);
673 region.bufferImageHeight = 0;
674 region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
675 region.imageOffset = { rect.left(), rect.top(), 0 };
676 region.imageExtent = { (uint32_t)rect.width(), (uint32_t)rect.height(), 1 };
677
678 // Change layout of our target so it can be copied to
679 vkImage->setImageLayout(this,
680 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
681 VK_ACCESS_TRANSFER_WRITE_BIT,
682 VK_PIPELINE_STAGE_TRANSFER_BIT,
683 false);
684
685 const GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(transferBuffer.get());
686
687 // Copy the buffer to the image.
688 this->currentCommandBuffer()->copyBufferToImage(this,
689 vkBuffer->vkBuffer(),
690 vkImage,
691 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
692 1,
693 &region);
694 this->currentCommandBuffer()->addGrBuffer(std::move(transferBuffer));
695
696 tex->markMipmapsDirty();
697 return true;
698 }
699
700 bool GrVkGpu::onTransferPixelsFrom(GrSurface* surface,
701 SkIRect rect,
702 GrColorType surfaceColorType,
703 GrColorType bufferColorType,
704 sk_sp<GrGpuBuffer> transferBuffer,
705 size_t offset) {
706 if (!this->currentCommandBuffer()) {
707 return false;
708 }
709 SkASSERT(surface);
710 SkASSERT(transferBuffer);
711 if (fProtectedContext == GrProtected::kYes) {
712 return false;
713 }
714
715 GrVkImage* srcImage;
716 if (GrVkRenderTarget* rt = static_cast<GrVkRenderTarget*>(surface->asRenderTarget())) {
717 // Reading from render targets that wrap a secondary command buffer is not allowed since
718 // it would require us to know the VkImage, which we don't have, as well as need us to
719 // stop and start the VkRenderPass which we don't have access to.
720 if (rt->wrapsSecondaryCommandBuffer()) {
721 return false;
722 }
723 if (!rt->nonMSAAAttachment()) {
724 return false;
725 }
726 srcImage = rt->nonMSAAAttachment();
727 } else {
728 SkASSERT(surface->asTexture());
729 srcImage = static_cast<GrVkTexture*>(surface->asTexture())->textureImage();
730 }
731
732 VkFormat format = srcImage->imageFormat();
733 if (bufferColorType != this->vkCaps().transferColorType(format, surfaceColorType)) {
734 return false;
735 }
736 SkASSERT(skgpu::VkFormatBytesPerBlock(format) == GrColorTypeBytesPerPixel(bufferColorType));
737
738 // Set up copy region
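// bufferRowLength is in texels, so rect.width() (with bufferImageHeight == 0) means the
// readback is written tightly packed into the destination buffer.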
739 VkBufferImageCopy region;
740 memset(&region, 0, sizeof(VkBufferImageCopy));
741 region.bufferOffset = offset;
742 region.bufferRowLength = rect.width();
743 region.bufferImageHeight = 0;
744 region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
745 region.imageOffset = {rect.left(), rect.top(), 0};
746 region.imageExtent = {(uint32_t)rect.width(), (uint32_t)rect.height(), 1};
747
748 srcImage->setImageLayout(this,
749 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
750 VK_ACCESS_TRANSFER_READ_BIT,
751 VK_PIPELINE_STAGE_TRANSFER_BIT,
752 false);
753
754 this->currentCommandBuffer()->copyImageToBuffer(this, srcImage,
755 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
756 transferBuffer, 1, &region);
757
758 GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(transferBuffer.get());
759 // Make sure the copy to buffer has finished.
760 vkBuffer->addMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT,
761 VK_ACCESS_HOST_READ_BIT,
762 VK_PIPELINE_STAGE_TRANSFER_BIT,
763 VK_PIPELINE_STAGE_HOST_BIT,
764 false);
765 return true;
766 }
767
768 void GrVkGpu::resolveImage(GrSurface* dst, GrVkRenderTarget* src, const SkIRect& srcRect,
769 const SkIPoint& dstPoint) {
770 if (!this->currentCommandBuffer()) {
771 return;
772 }
773
774 SkASSERT(dst);
775 SkASSERT(src && src->colorAttachment() && src->colorAttachment()->numSamples() > 1);
776
777 VkImageResolve resolveInfo;
778 resolveInfo.srcSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
779 resolveInfo.srcOffset = {srcRect.fLeft, srcRect.fTop, 0};
780 resolveInfo.dstSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
781 resolveInfo.dstOffset = {dstPoint.fX, dstPoint.fY, 0};
782 resolveInfo.extent = {(uint32_t)srcRect.width(), (uint32_t)srcRect.height(), 1};
783
784 GrVkImage* dstImage;
785 GrRenderTarget* dstRT = dst->asRenderTarget();
786 GrTexture* dstTex = dst->asTexture();
787 if (dstTex) {
788 dstImage = static_cast<GrVkTexture*>(dstTex)->textureImage();
789 } else {
790 SkASSERT(dst->asRenderTarget());
791 dstImage = static_cast<GrVkRenderTarget*>(dstRT)->nonMSAAAttachment();
792 }
793 SkASSERT(dstImage);
794
795 dstImage->setImageLayout(this,
796 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
797 VK_ACCESS_TRANSFER_WRITE_BIT,
798 VK_PIPELINE_STAGE_TRANSFER_BIT,
799 false);
800
801 src->colorAttachment()->setImageLayout(this,
802 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
803 VK_ACCESS_TRANSFER_READ_BIT,
804 VK_PIPELINE_STAGE_TRANSFER_BIT,
805 false);
806 this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(src->colorAttachment()));
807 this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(dst));
808 this->currentCommandBuffer()->resolveImage(this, *src->colorAttachment(), *dstImage, 1,
809 &resolveInfo);
810 }
811
812 void GrVkGpu::onResolveRenderTarget(GrRenderTarget* target, const SkIRect& resolveRect) {
813 SkASSERT(target->numSamples() > 1);
814 GrVkRenderTarget* rt = static_cast<GrVkRenderTarget*>(target);
815 SkASSERT(rt->colorAttachmentView() && rt->resolveAttachmentView());
816
817 if (this->vkCaps().renderTargetSupportsDiscardableMSAA(rt)) {
818 // We would have resolved the RT during the render pass.
819 return;
820 }
821
822 this->resolveImage(target, rt, resolveRect,
823 SkIPoint::Make(resolveRect.x(), resolveRect.y()));
824 }
825
826 bool GrVkGpu::uploadTexDataLinear(GrVkImage* texImage,
827 SkIRect rect,
828 GrColorType dataColorType,
829 const void* data,
830 size_t rowBytes) {
831 SkASSERT(data);
832 SkASSERT(texImage->isLinearTiled());
833
834 SkASSERT(SkIRect::MakeSize(texImage->dimensions()).contains(rect));
835
836 size_t bpp = GrColorTypeBytesPerPixel(dataColorType);
837 size_t trimRowBytes = rect.width() * bpp;
838
839 SkASSERT(VK_IMAGE_LAYOUT_PREINITIALIZED == texImage->currentLayout() ||
840 VK_IMAGE_LAYOUT_GENERAL == texImage->currentLayout());
841 const VkImageSubresource subres = {
842 VK_IMAGE_ASPECT_COLOR_BIT,
843 0, // mipLevel
844 0, // arraySlice
845 };
846 VkSubresourceLayout layout;
847
848 const skgpu::VulkanInterface* interface = this->vkInterface();
849
850 GR_VK_CALL(interface, GetImageSubresourceLayout(fDevice,
851 texImage->image(),
852 &subres,
853 &layout));
854
855 const skgpu::VulkanAlloc& alloc = texImage->alloc();
856 if (VK_NULL_HANDLE == alloc.fMemory) {
857 return false;
858 }
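// Map just the rows covered by 'rect': start at its first texel and span rect.height() rows of
// the linearly tiled image's rowPitch.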
859 VkDeviceSize offset = rect.top()*layout.rowPitch + rect.left()*bpp;
860 VkDeviceSize size = rect.height()*layout.rowPitch;
861 SkASSERT(size + offset <= alloc.fSize);
862 auto checkResult = [this](VkResult result) {
863 return this->checkVkResult(result);
864 };
865 auto allocator = this->memoryAllocator();
866 void* mapPtr = skgpu::VulkanMemory::MapAlloc(allocator, alloc, checkResult);
867 if (!mapPtr) {
868 return false;
869 }
870 mapPtr = reinterpret_cast<char*>(mapPtr) + offset;
871
872 SkRectMemcpy(mapPtr,
873 static_cast<size_t>(layout.rowPitch),
874 data,
875 rowBytes,
876 trimRowBytes,
877 rect.height());
878
879 skgpu::VulkanMemory::FlushMappedAlloc(allocator, alloc, offset, size, checkResult);
880 skgpu::VulkanMemory::UnmapAlloc(allocator, alloc);
881
882 return true;
883 }
884
885 // This fills in the 'regions' vector in preparation for copying a buffer to an image.
886 // 'individualMipOffsets' is filled in as a side-effect.
887 static size_t fill_in_compressed_regions(GrStagingBufferManager* stagingBufferManager,
888 TArray<VkBufferImageCopy>* regions,
889 TArray<size_t>* individualMipOffsets,
890 GrStagingBufferManager::Slice* slice,
891 SkTextureCompressionType compression,
892 VkFormat vkFormat,
893 SkISize dimensions,
894 skgpu::Mipmapped mipmapped) {
895 SkASSERT(compression != SkTextureCompressionType::kNone);
896 int numMipLevels = 1;
897 if (mipmapped == skgpu::Mipmapped::kYes) {
898 numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1;
899 }
900
901 regions->reserve_exact(regions->size() + numMipLevels);
902 individualMipOffsets->reserve_exact(individualMipOffsets->size() + numMipLevels);
903
904 size_t bytesPerBlock = skgpu::VkFormatBytesPerBlock(vkFormat);
905
906 size_t bufferSize = SkCompressedDataSize(
907 compression, dimensions, individualMipOffsets, mipmapped == skgpu::Mipmapped::kYes);
908 SkASSERT(individualMipOffsets->size() == numMipLevels);
909
910 // Get a staging buffer slice to hold our mip data.
911 // Vulkan requires offsets in the buffer to be aligned to a multiple of the texel size and 4.
912 size_t alignment = bytesPerBlock;
913 switch (alignment & 0b11) {
914 case 0: break; // alignment is already a multiple of 4.
915 case 2: alignment *= 2; break; // alignment is a multiple of 2 but not 4.
916 default: alignment *= 4; break; // alignment is not a multiple of 2.
917 }
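// 'alignment' is now the smallest value that is a multiple of both bytesPerBlock and 4,
// e.g. 2 -> 4, 3 -> 12, 8 -> 8.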
918 *slice = stagingBufferManager->allocateStagingBufferSlice(bufferSize, alignment);
919 if (!slice->fBuffer) {
920 return 0;
921 }
922
923 for (int i = 0; i < numMipLevels; ++i) {
924 VkBufferImageCopy& region = regions->push_back();
925 memset(&region, 0, sizeof(VkBufferImageCopy));
926 region.bufferOffset = slice->fOffset + (*individualMipOffsets)[i];
927 SkISize revisedDimensions = skgpu::CompressedDimensions(compression, dimensions);
928 region.bufferRowLength = revisedDimensions.width();
929 region.bufferImageHeight = revisedDimensions.height();
930 region.imageSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, SkToU32(i), 0, 1};
931 region.imageOffset = {0, 0, 0};
932 region.imageExtent = {SkToU32(dimensions.width()),
933 SkToU32(dimensions.height()), 1};
934
935 dimensions = {std::max(1, dimensions.width() /2),
936 std::max(1, dimensions.height()/2)};
937 }
938
939 return bufferSize;
940 }
941
942 static size_t fill_in_compressed_regions(TArray<VkBufferImageCopy>* regions,
943 TArray<size_t>* individualMipOffsets,
944 SkTextureCompressionType compression,
945 SkISize dimensions,
946 skgpu::Mipmapped mipmapped) {
947 SkASSERT(regions);
948 SkASSERT(individualMipOffsets);
949 SkASSERT(compression != SkTextureCompressionType::kNone);
950
951 int mipmapLevelCount = 1;
952 if (mipmapped == skgpu::Mipmapped::kYes) {
953 mipmapLevelCount = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1;
954 }
955
956 regions->reserve_exact(regions->size() + mipmapLevelCount);
957 individualMipOffsets->reserve_exact(individualMipOffsets->size() + mipmapLevelCount);
958
959 size_t bufferSize = SkCompressedDataSize(compression,
960 dimensions,
961 individualMipOffsets,
962 mipmapped == skgpu::Mipmapped::kYes);
963 SkASSERT(individualMipOffsets->size() == mipmapLevelCount);
964
965 for (int i = 0; i < mipmapLevelCount; ++i) {
966 VkBufferImageCopy& region = regions->push_back();
967 region.bufferOffset = (*individualMipOffsets)[i];
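// ASTC payloads from OH_NativeBuffer are assumed to begin with the 16-byte .astc file header,
// so skip past it to reach the first compressed block.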
968 if (compression == SkTextureCompressionType::kASTC_RGBA8_4x4 ||
969 compression == SkTextureCompressionType::kASTC_RGBA8_6x6 ||
970 compression == SkTextureCompressionType::kASTC_RGBA8_8x8) {
971 region.bufferOffset += ASTC_HEADER_SIZE;
972 }
973 SkISize compressedDimensions = skgpu::CompressedDimensions(compression, dimensions);
974 region.bufferRowLength = compressedDimensions.width();
975 region.bufferImageHeight = compressedDimensions.height();
976 region.imageSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, SkToU32(i), 0, 1};
977 region.imageOffset = {0, 0, 0};
978 region.imageExtent.width = SkToU32(dimensions.width());
979 region.imageExtent.height = SkToU32(dimensions.height());
980 region.imageExtent.depth = 1;
981 dimensions = {std::max(1, dimensions.width() / 2),
982 std::max(1, dimensions.height() / 2)};
983 }
984
985 return bufferSize;
986 }
987
988 bool GrVkGpu::uploadTexDataOptimal(GrVkImage* texImage,
989 SkIRect rect,
990 GrColorType dataColorType,
991 const GrMipLevel texels[],
992 int mipLevelCount) {
993 if (!this->currentCommandBuffer()) {
994 return false;
995 }
996
997 SkASSERT(!texImage->isLinearTiled());
998 // The assumption is either that we have no mipmaps, or that our rect is the entire texture
999 SkASSERT(mipLevelCount == 1 || rect == SkIRect::MakeSize(texImage->dimensions()));
1000
1001 // We assume that if the texture has mip levels, we either upload to all the levels or just the
1002 // first.
1003 SkASSERT(mipLevelCount == 1 || mipLevelCount == (int)texImage->mipLevels());
1004
1005 SkASSERT(!rect.isEmpty());
1006
1007 SkASSERT(this->vkCaps().surfaceSupportsWritePixels(texImage));
1008
1009 SkASSERT(this->vkCaps().isVkFormatTexturable(texImage->imageFormat()));
1010 size_t bpp = GrColorTypeBytesPerPixel(dataColorType);
1011
1012 // texels is const.
1013 // But we may need to adjust the fPixels ptr based on the copyRect, or fRowBytes.
1014 // Because of this we need to make a non-const shallow copy of texels.
1015 AutoTArray<GrMipLevel> texelsShallowCopy(mipLevelCount);
1016 std::copy_n(texels, mipLevelCount, texelsShallowCopy.get());
1017
1018 TArray<size_t> individualMipOffsets;
1019 size_t combinedBufferSize;
1020 if (mipLevelCount > 1) {
1021 combinedBufferSize = GrComputeTightCombinedBufferSize(bpp,
1022 rect.size(),
1023 &individualMipOffsets,
1024 mipLevelCount);
1025 } else {
1026 SkASSERT(texelsShallowCopy[0].fPixels && texelsShallowCopy[0].fRowBytes);
1027 combinedBufferSize = rect.width()*rect.height()*bpp;
1028 individualMipOffsets.push_back(0);
1029 }
1030 SkASSERT(combinedBufferSize);
1031
1032 // Get a staging buffer slice to hold our mip data.
1033 // Vulkan requires offsets in the buffer to be aligned to a multiple of the texel size and 4.
1034 size_t alignment = bpp;
1035 switch (alignment & 0b11) {
1036 case 0: break; // alignment is already a multiple of 4.
1037 case 2: alignment *= 2; break; // alignment is a multiple of 2 but not 4.
1038 default: alignment *= 4; break; // alignment is not a multiple of 2.
1039 }
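// As above, 'alignment' is now the smallest multiple of both bpp and 4.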
1040 GrStagingBufferManager::Slice slice =
1041 fStagingBufferManager.allocateStagingBufferSlice(combinedBufferSize, alignment);
1042 if (!slice.fBuffer) {
1043 return false;
1044 }
1045
1046 int uploadLeft = rect.left();
1047 int uploadTop = rect.top();
1048
1049 char* buffer = (char*) slice.fOffsetMapPtr;
1050 TArray<VkBufferImageCopy> regions(mipLevelCount);
1051
1052 int currentWidth = rect.width();
1053 int currentHeight = rect.height();
1054 for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
1055 if (texelsShallowCopy[currentMipLevel].fPixels) {
1056 const size_t trimRowBytes = currentWidth * bpp;
1057 const size_t rowBytes = texelsShallowCopy[currentMipLevel].fRowBytes;
1058
1059 // copy data into the buffer, skipping the trailing bytes
1060 char* dst = buffer + individualMipOffsets[currentMipLevel];
1061 const char* src = (const char*)texelsShallowCopy[currentMipLevel].fPixels;
1062 SkRectMemcpy(dst, trimRowBytes, src, rowBytes, trimRowBytes, currentHeight);
1063
1064 VkBufferImageCopy& region = regions.push_back();
1065 memset(&region, 0, sizeof(VkBufferImageCopy));
1066 region.bufferOffset = slice.fOffset + individualMipOffsets[currentMipLevel];
1067 region.bufferRowLength = currentWidth;
1068 region.bufferImageHeight = currentHeight;
1069 region.imageSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, SkToU32(currentMipLevel), 0, 1};
1070 region.imageOffset = {uploadLeft, uploadTop, 0};
1071 region.imageExtent = {(uint32_t)currentWidth, (uint32_t)currentHeight, 1};
1072 }
1073
1074 currentWidth = std::max(1, currentWidth/2);
1075 currentHeight = std::max(1, currentHeight/2);
1076 }
1077
1078 // Change layout of our target so it can be copied to
1079 texImage->setImageLayout(this,
1080 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1081 VK_ACCESS_TRANSFER_WRITE_BIT,
1082 VK_PIPELINE_STAGE_TRANSFER_BIT,
1083 false);
1084
1085 // Copy the buffer to the image. This call takes the raw VkBuffer instead of a GrGpuBuffer
1086 // because we don't need the command buffer to ref the buffer here. The reason is that
1087 // the buffer is coming from the staging manager and the staging manager will make sure the
1088 // command buffer has a ref on the buffer. This avoids having to add and remove a ref for every
1089 // upload in the frame.
1090 GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(slice.fBuffer);
1091 this->currentCommandBuffer()->copyBufferToImage(this,
1092 vkBuffer->vkBuffer(),
1093 texImage,
1094 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1095 regions.size(),
1096 regions.begin());
1097 return true;
1098 }
1099
1100 // It's probably possible to roll this into uploadTexDataOptimal,
1101 // but for now it's easier to maintain as a separate entity.
1102 bool GrVkGpu::uploadTexDataCompressed(GrVkImage* uploadTexture,
1103 SkTextureCompressionType compression,
1104 VkFormat vkFormat,
1105 SkISize dimensions,
1106 skgpu::Mipmapped mipmapped,
1107 const void* data,
1108 size_t dataSize) {
1109 if (!this->currentCommandBuffer()) {
1110 return false;
1111 }
1112 SkASSERT(data);
1113 SkASSERT(!uploadTexture->isLinearTiled());
1114 // For now the assumption is that our rect is the entire texture.
1115 // Compressed textures are read-only so this should be a reasonable assumption.
1116 SkASSERT(dimensions.fWidth == uploadTexture->width() &&
1117 dimensions.fHeight == uploadTexture->height());
1118
1119 if (dimensions.fWidth == 0 || dimensions.fHeight == 0) {
1120 return false;
1121 }
1122
1123 SkASSERT(uploadTexture->imageFormat() == vkFormat);
1124 SkASSERT(this->vkCaps().isVkFormatTexturable(vkFormat));
1125
1126
1127 GrStagingBufferManager::Slice slice;
1128 TArray<VkBufferImageCopy> regions;
1129 TArray<size_t> individualMipOffsets;
1130 SkDEBUGCODE(size_t combinedBufferSize =) fill_in_compressed_regions(&fStagingBufferManager,
1131 ®ions,
1132 &individualMipOffsets,
1133 &slice,
1134 compression,
1135 vkFormat,
1136 dimensions,
1137 mipmapped);
1138 if (!slice.fBuffer) {
1139 return false;
1140 }
1141 SkASSERT(dataSize == combinedBufferSize);
1142
1143 {
1144 char* buffer = (char*)slice.fOffsetMapPtr;
1145 memcpy(buffer, data, dataSize);
1146 }
1147
1148 // Change layout of our target so it can be copied to
1149 uploadTexture->setImageLayout(this,
1150 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1151 VK_ACCESS_TRANSFER_WRITE_BIT,
1152 VK_PIPELINE_STAGE_TRANSFER_BIT,
1153 false);
1154
1155 // Copy the buffer to the image. This call takes the raw VkBuffer instead of a GrGpuBuffer
1156 // because we don't need the command buffer to ref the buffer here. The reason is that
1157 // the buffer is coming from the staging manager and the staging manager will make sure the
1158 // command buffer has a ref on the buffer. This avoids having to add and remove a ref for every
1159 // upload in the frame.
1160 GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(slice.fBuffer);
1161 this->currentCommandBuffer()->copyBufferToImage(this,
1162 vkBuffer->vkBuffer(),
1163 uploadTexture,
1164 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1165 regions.size(),
1166 regions.begin());
1167
1168 return true;
1169 }
1170
1171 bool GrVkGpu::uploadTexDataCompressed(GrVkImage* uploadTexture,
1172 SkTextureCompressionType compression, VkFormat vkFormat,
1173 SkISize dimensions, skgpu::Mipmapped mipMapped,
1174 OH_NativeBuffer* nativeBuffer, size_t bufferSize) {
1175 if (!this->currentCommandBuffer()) {
1176 return false;
1177 }
1178 SkASSERT(uploadTexture);
1179 SkASSERT(nativeBuffer);
1180 SkASSERT(!uploadTexture->isLinearTiled());
1181
1182 if (dimensions.width() == 0 || dimensions.height() == 0) {
1183 return false;
1184 }
1185 SkASSERT(dimensions.width() == uploadTexture->width() && dimensions.height() == uploadTexture->height());
1186
1187 SkASSERT(uploadTexture->imageFormat() == vkFormat);
1188 SkASSERT(this->vkCaps().isVkFormatTexturable(vkFormat));
1189
1190 TArray<VkBufferImageCopy> regions;
1191 TArray<size_t> individualMipOffsets;
1192 SkDEBUGCODE(size_t combinedBufferSize =) fill_in_compressed_regions(®ions, &individualMipOffsets,
1193 compression, dimensions, mipMapped);
1194 SkASSERT(bufferSize == combinedBufferSize);
1195
1196 // Import external memory.
1197 sk_sp<GrVkBuffer> vkBuffer = GrVkBuffer::MakeFromOHNativeBuffer(this, nativeBuffer, bufferSize,
1198 GrGpuBufferType::kXferCpuToGpu,
1199 kDynamic_GrAccessPattern);
1200
1201 if (vkBuffer == nullptr) {
1202 SkDebugf("Can't make VkBuffer from native buffer");
1203 return false;
1204 }
1205
1206 // Change image layout so it can be copied to.
1207 uploadTexture->setImageLayout(this, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1208 VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, false);
1209
1210 // Copy the buffer to the image.
1211 this->currentCommandBuffer()->copyBufferToImage(this, vkBuffer->vkBuffer(), uploadTexture,
1212 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1213 regions.size(), regions.begin());
1214 this->takeOwnershipOfBuffer(std::move(vkBuffer));
1215
1216 return true;
1217 }
1218
1219 ////////////////////////////////////////////////////////////////////////////////
1220 // TODO: make this take a skgpu::Mipmapped
1221 sk_sp<GrTexture> GrVkGpu::onCreateTexture(SkISize dimensions,
1222 const GrBackendFormat& format,
1223 GrRenderable renderable,
1224 int renderTargetSampleCnt,
1225 skgpu::Budgeted budgeted,
1226 GrProtected isProtected,
1227 int mipLevelCount,
1228 uint32_t levelClearMask,
1229 std::string_view label) {
1230 VkFormat pixelFormat;
1231 SkAssertResult(GrBackendFormats::AsVkFormat(format, &pixelFormat));
1232 SkASSERT(!skgpu::VkFormatIsCompressed(pixelFormat));
1233 SkASSERT(mipLevelCount > 0);
1234
1235 HITRACE_OHOS_NAME_FMT_ALWAYS("onCreateTexture width = %d, height = %d",
1236 dimensions.width(), dimensions.height());
1237 GrMipmapStatus mipmapStatus =
1238 mipLevelCount > 1 ? GrMipmapStatus::kDirty : GrMipmapStatus::kNotAllocated;
1239
1240 sk_sp<GrVkTexture> tex;
1241 if (renderable == GrRenderable::kYes) {
1242 tex = GrVkTextureRenderTarget::MakeNewTextureRenderTarget(
1243 this, budgeted, dimensions, pixelFormat, mipLevelCount, renderTargetSampleCnt,
1244 mipmapStatus, isProtected, label);
1245 } else {
1246 tex = GrVkTexture::MakeNewTexture(this, budgeted, dimensions, pixelFormat,
1247 mipLevelCount, isProtected, mipmapStatus, label);
1248 }
1249
1250 if (!tex) {
1251 return nullptr;
1252 }
1253
1254 if (levelClearMask) {
1255 if (!this->currentCommandBuffer()) {
1256 return nullptr;
1257 }
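// Coalesce runs of consecutive levels set in levelClearMask into as few
// VkImageSubresourceRanges as possible before issuing the clear.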
1258 STArray<1, VkImageSubresourceRange> ranges;
1259 bool inRange = false;
1260 GrVkImage* texImage = tex->textureImage();
1261 for (uint32_t i = 0; i < texImage->mipLevels(); ++i) {
1262 if (levelClearMask & (1U << i)) {
1263 if (inRange) {
1264 ranges.back().levelCount++;
1265 } else {
1266 auto& range = ranges.push_back();
1267 range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
1268 range.baseArrayLayer = 0;
1269 range.baseMipLevel = i;
1270 range.layerCount = 1;
1271 range.levelCount = 1;
1272 inRange = true;
1273 }
1274 } else if (inRange) {
1275 inRange = false;
1276 }
1277 }
1278 SkASSERT(!ranges.empty());
1279 static constexpr VkClearColorValue kZeroClearColor = {};
1280 texImage->setImageLayout(this, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1281 VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, false);
1282 this->currentCommandBuffer()->clearColorImage(this, texImage, &kZeroClearColor,
1283 ranges.size(), ranges.begin());
1284 }
1285 return tex;
1286 }
1287
1288 sk_sp<GrTexture> GrVkGpu::onCreateCompressedTexture(SkISize dimensions,
1289 const GrBackendFormat& format,
1290 skgpu::Budgeted budgeted,
1291 skgpu::Mipmapped mipmapped,
1292 GrProtected isProtected,
1293 const void* data,
1294 size_t dataSize) {
1295 VkFormat pixelFormat;
1296 SkAssertResult(GrBackendFormats::AsVkFormat(format, &pixelFormat));
1297 SkASSERT(skgpu::VkFormatIsCompressed(pixelFormat));
1298
1299 int numMipLevels = 1;
1300 if (mipmapped == skgpu::Mipmapped::kYes) {
1301 numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height())+1;
1302 }
1303
1304 GrMipmapStatus mipmapStatus = (mipmapped == skgpu::Mipmapped::kYes)
1305 ? GrMipmapStatus::kValid
1306 : GrMipmapStatus::kNotAllocated;
1307
1308 auto tex = GrVkTexture::MakeNewTexture(this,
1309 budgeted,
1310 dimensions,
1311 pixelFormat,
1312 numMipLevels,
1313 isProtected,
1314 mipmapStatus,
1315 /*label=*/"VkGpu_CreateCompressedTexture");
1316 if (!tex) {
1317 return nullptr;
1318 }
1319
1320 SkTextureCompressionType compression = GrBackendFormatToCompressionType(format);
1321 if (!this->uploadTexDataCompressed(tex->textureImage(), compression, pixelFormat,
1322 dimensions, mipmapped, data, dataSize)) {
1323 return nullptr;
1324 }
1325
1326 return tex;
1327 }
1328
1329 sk_sp<GrTexture> GrVkGpu::onCreateCompressedTexture(SkISize dimensions,
1330 const GrBackendFormat& format,
1331 skgpu::Budgeted budgeted,
1332 skgpu::Mipmapped mipMapped,
1333 GrProtected isProtected,
1334 OH_NativeBuffer* nativeBuffer,
1335 size_t bufferSize) {
1336 VkFormat pixelFormat;
1337 SkAssertResult(GrBackendFormats::AsVkFormat(format, &pixelFormat));
1338 SkASSERT(skgpu::VkFormatIsCompressed(pixelFormat));
1339
1340 int mipmapLevelCount = 1;
1341 GrMipmapStatus mipmapStatus = GrMipmapStatus::kNotAllocated;
1342 if (mipMapped == skgpu::Mipmapped::kYes) {
1343 mipmapLevelCount = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1;
1344 mipmapStatus = GrMipmapStatus::kValid;
1345 }
1346
1347 sk_sp<GrVkTexture> texture = GrVkTexture::MakeNewTexture(
1348 this, budgeted, dimensions, pixelFormat, mipmapLevelCount,
1349 isProtected, mipmapStatus, /*label=*/"VkGpu_CreateCompressedTextureFromOHNativeBuffer");
1350 if (!texture) {
1351 return nullptr;
1352 }
1353
1354 SkTextureCompressionType compression = GrBackendFormatToCompressionType(format);
1355 if (!this->uploadTexDataCompressed(texture->textureImage(), compression, pixelFormat,
1356 dimensions, mipMapped, nativeBuffer, bufferSize)) {
1357 return nullptr;
1358 }
1359
1360 return std::move(texture);
1361 }
1362
1363 ////////////////////////////////////////////////////////////////////////////////
1364
1365 bool GrVkGpu::updateBuffer(sk_sp<GrVkBuffer> buffer, const void* src,
1366 VkDeviceSize offset, VkDeviceSize size) {
1367 if (!this->currentCommandBuffer()) {
1368 return false;
1369 }
1370 add_transfer_dst_buffer_mem_barrier(this,
1371 static_cast<GrVkBuffer*>(buffer.get()),
1372 offset,
1373 size,
1374 /*after=*/false);
1375 this->currentCommandBuffer()->updateBuffer(this, buffer, offset, size, src);
1376 add_transfer_dst_buffer_mem_barrier(this,
1377 static_cast<GrVkBuffer*>(buffer.get()),
1378 offset,
1379 size,
1380 /*after=*/true);
1381
1382 return true;
1383 }
1384
1385 bool GrVkGpu::zeroBuffer(sk_sp<GrGpuBuffer> buffer) {
1386 if (!this->currentCommandBuffer()) {
1387 return false;
1388 }
1389
1390 add_transfer_dst_buffer_mem_barrier(this,
1391 static_cast<GrVkBuffer*>(buffer.get()),
1392 /*offset=*/0,
1393 buffer->size(),
1394 /*after=*/false);
1395 this->currentCommandBuffer()->fillBuffer(this,
1396 buffer,
1397 /*offset=*/0,
1398 buffer->size(),
1399 /*data=*/0);
1400 add_transfer_dst_buffer_mem_barrier(this,
1401 static_cast<GrVkBuffer*>(buffer.get()),
1402 /*offset=*/0,
1403 buffer->size(),
1404 /*after=*/true);
1405
1406 return true;
1407 }
1408
1409 ////////////////////////////////////////////////////////////////////////////////
1410
1411 static bool check_image_info(const GrVkCaps& caps,
1412 const GrVkImageInfo& info,
1413 bool needsAllocation,
1414 uint32_t graphicsQueueIndex) {
1415 if (VK_NULL_HANDLE == info.fImage) {
1416 return false;
1417 }
1418
1419 if (VK_NULL_HANDLE == info.fAlloc.fMemory && needsAllocation) {
1420 return false;
1421 }
1422
1423 if (info.fImageLayout == VK_IMAGE_LAYOUT_PRESENT_SRC_KHR && !caps.supportsSwapchain()) {
1424 return false;
1425 }
1426
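// An image that names a concrete queue family is only accepted if it is exclusive-mode and
// already owned by our graphics queue; otherwise the family must be IGNORED/EXTERNAL/FOREIGN.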
1427 if (info.fCurrentQueueFamily != VK_QUEUE_FAMILY_IGNORED &&
1428 info.fCurrentQueueFamily != VK_QUEUE_FAMILY_EXTERNAL &&
1429 info.fCurrentQueueFamily != VK_QUEUE_FAMILY_FOREIGN_EXT) {
1430 if (info.fSharingMode == VK_SHARING_MODE_EXCLUSIVE) {
1431 if (info.fCurrentQueueFamily != graphicsQueueIndex) {
1432 return false;
1433 }
1434 } else {
1435 return false;
1436 }
1437 }
1438
1439 if (info.fYcbcrConversionInfo.isValid()) {
1440 if (!caps.supportsYcbcrConversion()) {
1441 return false;
1442 }
1443 if (info.fYcbcrConversionInfo.fExternalFormat != 0) {
1444 return true;
1445 }
1446 }
1447
1448 // We currently require everything to be made with transfer bits set
1449 if (!SkToBool(info.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_SRC_BIT) ||
1450 !SkToBool(info.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_DST_BIT)) {
1451 return false;
1452 }
1453
1454 return true;
1455 }
1456
1457 static bool check_tex_image_info(const GrVkCaps& caps, const GrVkImageInfo& info) {
1458 // We don't support directly importing multisampled textures for sampling from shaders.
1459 if (info.fSampleCount != 1) {
1460 return false;
1461 }
1462
1463 if (info.fYcbcrConversionInfo.isValid() && info.fYcbcrConversionInfo.fExternalFormat != 0) {
1464 return true;
1465 }
1466 if (info.fImageTiling == VK_IMAGE_TILING_OPTIMAL) {
1467 if (!caps.isVkFormatTexturable(info.fFormat)) {
1468 return false;
1469 }
1470 } else if (info.fImageTiling == VK_IMAGE_TILING_LINEAR) {
1471 if (!caps.isVkFormatTexturableLinearly(info.fFormat)) {
1472 return false;
1473 }
1474 } else if (info.fImageTiling == VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT) {
1475 if (!caps.supportsDRMFormatModifiers()) {
1476 return false;
1477 }
        // To be technically correct we should query the vulkan support for VkFormat and
        // drmFormatModifier pairs to confirm the required feature support is there. However, we
        // currently don't have our caps and format tables set up to do this efficiently. So
        // instead we just rely on the client's passed in VkImageUsageFlags and assume they were
        // set up using valid features (checked below). In practice this should all be safe
        // because currently we are setting all drm format modifier textures to have a
        // GrTextureType::kExternal, so we really just need to be able to read these video
        // VkImages in a shader. The video decoder isn't going to give us VkImages that don't
        // support being sampled.
1487 } else {
1488 SkUNREACHABLE;
1489 }
1490
1491 // We currently require all textures to be made with sample support
1492 if (!SkToBool(info.fImageUsageFlags & VK_IMAGE_USAGE_SAMPLED_BIT)) {
1493 return false;
1494 }
1495
1496 return true;
1497 }
1498
static bool check_rt_image_info(const GrVkCaps& caps, const GrVkImageInfo& info, bool resolveOnly) {
1500 if (!caps.isFormatRenderable(info.fFormat, info.fSampleCount)) {
1501 return false;
1502 }
1503 if (!resolveOnly && !SkToBool(info.fImageUsageFlags & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT)) {
1504 return false;
1505 }
1506 return true;
1507 }
1508
sk_sp<GrTexture> GrVkGpu::onWrapBackendTexture(const GrBackendTexture& backendTex,
1510 GrWrapOwnership ownership,
1511 GrWrapCacheable cacheable,
1512 GrIOType ioType) {
1513 GrVkImageInfo imageInfo;
1514 if (!GrBackendTextures::GetVkImageInfo(backendTex, &imageInfo)) {
1515 return nullptr;
1516 }
1517
1518 if (!check_image_info(this->vkCaps(), imageInfo, kAdopt_GrWrapOwnership == ownership,
1519 this->queueIndex())) {
1520 return nullptr;
1521 }
1522
1523 if (!check_tex_image_info(this->vkCaps(), imageInfo)) {
1524 return nullptr;
1525 }
1526
1527 if (backendTex.isProtected() && (fProtectedContext == GrProtected::kNo)) {
1528 return nullptr;
1529 }
1530
1531 sk_sp<skgpu::MutableTextureState> mutableState = backendTex.getMutableState();
1532 SkASSERT(mutableState);
1533 return GrVkTexture::MakeWrappedTexture(this, backendTex.dimensions(), ownership, cacheable,
1534 ioType, imageInfo, std::move(mutableState));
1535 }
1536
sk_sp<GrTexture> GrVkGpu::onWrapCompressedBackendTexture(const GrBackendTexture& beTex,
1538 GrWrapOwnership ownership,
1539 GrWrapCacheable cacheable) {
1540 return this->onWrapBackendTexture(beTex, ownership, cacheable, kRead_GrIOType);
1541 }
1542
sk_sp<GrTexture> GrVkGpu::onWrapRenderableBackendTexture(const GrBackendTexture& backendTex,
1544 int sampleCnt,
1545 GrWrapOwnership ownership,
1546 GrWrapCacheable cacheable) {
1547 GrVkImageInfo imageInfo;
1548 if (!GrBackendTextures::GetVkImageInfo(backendTex, &imageInfo)) {
1549 return nullptr;
1550 }
1551
1552 if (!check_image_info(this->vkCaps(), imageInfo, kAdopt_GrWrapOwnership == ownership,
1553 this->queueIndex())) {
1554 return nullptr;
1555 }
1556
1557 if (!check_tex_image_info(this->vkCaps(), imageInfo)) {
1558 return nullptr;
1559 }
1560 // If sampleCnt is > 1 we will create an intermediate MSAA VkImage and then resolve into
1561 // the wrapped VkImage.
1562 bool resolveOnly = sampleCnt > 1;
1563 if (!check_rt_image_info(this->vkCaps(), imageInfo, resolveOnly)) {
1564 return nullptr;
1565 }
1566
1567 if (backendTex.isProtected() && (fProtectedContext == GrProtected::kNo)) {
1568 return nullptr;
1569 }
1570
1571 sampleCnt = this->vkCaps().getRenderTargetSampleCount(sampleCnt, imageInfo.fFormat);
1572
1573 sk_sp<skgpu::MutableTextureState> mutableState = backendTex.getMutableState();
1574 SkASSERT(mutableState);
1575
1576 return GrVkTextureRenderTarget::MakeWrappedTextureRenderTarget(this, backendTex.dimensions(),
1577 sampleCnt, ownership, cacheable,
1578 imageInfo,
1579 std::move(mutableState));
1580 }
1581
sk_sp<GrRenderTarget> GrVkGpu::onWrapBackendRenderTarget(const GrBackendRenderTarget& backendRT) {
1583 GrVkImageInfo info;
1584 if (!GrBackendRenderTargets::GetVkImageInfo(backendRT, &info)) {
1585 return nullptr;
1586 }
1587
1588 if (!check_image_info(this->vkCaps(), info, false, this->queueIndex())) {
1589 return nullptr;
1590 }
1591
1592 // We will always render directly to this VkImage.
1593 static bool kResolveOnly = false;
1594 if (!check_rt_image_info(this->vkCaps(), info, kResolveOnly)) {
1595 return nullptr;
1596 }
1597
1598 if (backendRT.isProtected() && (fProtectedContext == GrProtected::kNo)) {
1599 return nullptr;
1600 }
1601
1602 sk_sp<skgpu::MutableTextureState> mutableState = backendRT.getMutableState();
1603 SkASSERT(mutableState);
1604
1605 sk_sp<GrVkRenderTarget> tgt = GrVkRenderTarget::MakeWrappedRenderTarget(
1606 this, backendRT.dimensions(), backendRT.sampleCnt(), info, std::move(mutableState));
1607
1608 // We don't allow the client to supply a premade stencil buffer. We always create one if needed.
1609 SkASSERT(!backendRT.stencilBits());
1610 if (tgt) {
1611 SkASSERT(tgt->canAttemptStencilAttachment(tgt->numSamples() > 1));
1612 }
1613
1614 return tgt;
1615 }
1616
sk_sp<GrRenderTarget> GrVkGpu::onWrapVulkanSecondaryCBAsRenderTarget(
1618 const SkImageInfo& imageInfo, const GrVkDrawableInfo& vkInfo) {
1619 int maxSize = this->caps()->maxTextureSize();
1620 if (imageInfo.width() > maxSize || imageInfo.height() > maxSize) {
1621 return nullptr;
1622 }
1623
1624 GrBackendFormat backendFormat = GrBackendFormats::MakeVk(vkInfo.fFormat);
1625 if (!backendFormat.isValid()) {
1626 return nullptr;
1627 }
1628 int sampleCnt = this->vkCaps().getRenderTargetSampleCount(1, vkInfo.fFormat);
1629 if (!sampleCnt) {
1630 return nullptr;
1631 }
1632
1633 return GrVkRenderTarget::MakeSecondaryCBRenderTarget(this, imageInfo.dimensions(), vkInfo);
1634 }
1635
bool GrVkGpu::loadMSAAFromResolve(GrVkCommandBuffer* commandBuffer,
1637 const GrVkRenderPass& renderPass,
1638 GrAttachment* dst,
1639 GrVkImage* src,
1640 const SkIRect& srcRect) {
1641 return fMSAALoadManager.loadMSAAFromResolve(this, commandBuffer, renderPass, dst, src, srcRect);
1642 }
1643
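// Regenerates the full mip chain on the GPU: each level is produced from the previous one with
// vkCmdBlitImage, inserting a transfer-to-transfer barrier per level so level N is in
// TRANSFER_SRC_OPTIMAL before it is read to produce level N+1.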
bool GrVkGpu::onRegenerateMipMapLevels(GrTexture* tex) {
1645 if (!this->currentCommandBuffer()) {
1646 return false;
1647 }
1648 auto* vkTex = static_cast<GrVkTexture*>(tex)->textureImage();
1649 // don't do anything for linearly tiled textures (can't have mipmaps)
1650 if (vkTex->isLinearTiled()) {
1651 SkDebugf("Trying to create mipmap for linear tiled texture");
1652 return false;
1653 }
1654 SkASSERT(tex->textureType() == GrTextureType::k2D);
1655
1656 // determine if we can blit to and from this format
1657 const GrVkCaps& caps = this->vkCaps();
1658 if (!caps.formatCanBeDstofBlit(vkTex->imageFormat(), false) ||
1659 !caps.formatCanBeSrcofBlit(vkTex->imageFormat(), false) ||
1660 !caps.mipmapSupport()) {
1661 return false;
1662 }
1663
1664 int width = tex->width();
1665 int height = tex->height();
1666 VkImageBlit blitRegion;
1667 memset(&blitRegion, 0, sizeof(VkImageBlit));
1668
1669 // SkMipmap doesn't include the base level in the level count so we have to add 1
1670 uint32_t levelCount = SkMipmap::ComputeLevelCount(tex->width(), tex->height()) + 1;
1671 SkASSERT(levelCount == vkTex->mipLevels());
1672
1673 // change layout of the layers so we can write to them.
1674 vkTex->setImageLayout(this, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_ACCESS_TRANSFER_WRITE_BIT,
1675 VK_PIPELINE_STAGE_TRANSFER_BIT, false);
1676
1677 // setup memory barrier
1678 SkASSERT(GrVkFormatIsSupported(vkTex->imageFormat()));
1679 VkImageMemoryBarrier imageMemoryBarrier = {
1680 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // sType
1681 nullptr, // pNext
1682 VK_ACCESS_TRANSFER_WRITE_BIT, // srcAccessMask
1683 VK_ACCESS_TRANSFER_READ_BIT, // dstAccessMask
1684 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, // oldLayout
1685 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, // newLayout
1686 VK_QUEUE_FAMILY_IGNORED, // srcQueueFamilyIndex
1687 VK_QUEUE_FAMILY_IGNORED, // dstQueueFamilyIndex
1688 vkTex->image(), // image
1689 {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1} // subresourceRange
1690 };
1691
1692 // Blit the miplevels
1693 uint32_t mipLevel = 1;
1694 while (mipLevel < levelCount) {
1695 int prevWidth = width;
1696 int prevHeight = height;
1697 width = std::max(1, width / 2);
1698 height = std::max(1, height / 2);
1699
1700 imageMemoryBarrier.subresourceRange.baseMipLevel = mipLevel - 1;
1701 this->addImageMemoryBarrier(vkTex->resource(), VK_PIPELINE_STAGE_TRANSFER_BIT,
1702 VK_PIPELINE_STAGE_TRANSFER_BIT, false, &imageMemoryBarrier);
1703
1704 blitRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, mipLevel - 1, 0, 1 };
1705 blitRegion.srcOffsets[0] = { 0, 0, 0 };
1706 blitRegion.srcOffsets[1] = { prevWidth, prevHeight, 1 };
1707 blitRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, mipLevel, 0, 1 };
1708 blitRegion.dstOffsets[0] = { 0, 0, 0 };
1709 blitRegion.dstOffsets[1] = { width, height, 1 };
1710 this->currentCommandBuffer()->blitImage(this,
1711 vkTex->resource(),
1712 vkTex->image(),
1713 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
1714 vkTex->resource(),
1715 vkTex->image(),
1716 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1717 1,
1718 &blitRegion,
1719 VK_FILTER_LINEAR);
1720 ++mipLevel;
1721 }
1722 if (levelCount > 1) {
1723 // This barrier logically is not needed, but it changes the final level to the same layout
1724 // as all the others, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL. This makes tracking of the
1725 // layouts and future layout changes easier. The alternative here would be to track layout
        // and memory accesses per layer, which doesn't seem worth it.
1727 imageMemoryBarrier.subresourceRange.baseMipLevel = mipLevel - 1;
1728 this->addImageMemoryBarrier(vkTex->resource(), VK_PIPELINE_STAGE_TRANSFER_BIT,
1729 VK_PIPELINE_STAGE_TRANSFER_BIT, false, &imageMemoryBarrier);
1730 vkTex->updateImageLayout(VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
1731 }
1732 return true;
1733 }
1734
1735 ////////////////////////////////////////////////////////////////////////////////
1736
sk_sp<GrAttachment> GrVkGpu::makeStencilAttachment(const GrBackendFormat& /*colorFormat*/,
1738 SkISize dimensions, int numStencilSamples) {
1739 VkFormat sFmt = this->vkCaps().preferredStencilFormat();
1740
1741 fStats.incStencilAttachmentCreates();
1742 return GrVkImage::MakeStencil(this, dimensions, numStencilSamples, sFmt);
1743 }
1744
sk_sp<GrAttachment> GrVkGpu::makeMSAAAttachment(SkISize dimensions,
1746 const GrBackendFormat& format,
1747 int numSamples,
1748 GrProtected isProtected,
1749 GrMemoryless memoryless) {
1750 VkFormat pixelFormat;
1751 SkAssertResult(GrBackendFormats::AsVkFormat(format, &pixelFormat));
1752 SkASSERT(!skgpu::VkFormatIsCompressed(pixelFormat));
1753 SkASSERT(this->vkCaps().isFormatRenderable(pixelFormat, numSamples));
1754
1755 fStats.incMSAAAttachmentCreates();
1756 return GrVkImage::MakeMSAA(this, dimensions, numSamples, pixelFormat, isProtected, memoryless);
1757 }
1758
1759 ////////////////////////////////////////////////////////////////////////////////
1760
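// Copies the caller's uncompressed mip levels into a mapped staging buffer, tightly packing each
// level's rows at the offsets previously computed for the upload.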
bool copy_src_data(char* mapPtr,
1762 VkFormat vkFormat,
1763 const TArray<size_t>& individualMipOffsets,
1764 const GrPixmap srcData[],
1765 int numMipLevels) {
1766 SkASSERT(srcData && numMipLevels);
1767 SkASSERT(!skgpu::VkFormatIsCompressed(vkFormat));
1768 SkASSERT(individualMipOffsets.size() == numMipLevels);
1769 SkASSERT(mapPtr);
1770
1771 size_t bytesPerPixel = skgpu::VkFormatBytesPerBlock(vkFormat);
1772
1773 for (int level = 0; level < numMipLevels; ++level) {
1774 const size_t trimRB = srcData[level].info().width() * bytesPerPixel;
1775
1776 SkRectMemcpy(mapPtr + individualMipOffsets[level], trimRB,
1777 srcData[level].addr(), srcData[level].rowBytes(),
1778 trimRB, srcData[level].height());
1779 }
1780 return true;
1781 }
1782
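// Creates the VkImage (and its memory) backing a client-facing backend texture or render target.
// Every image gets both transfer usage bits; sampled, color-attachment, and input-attachment
// usage are added based on the requested texturable/renderable flags.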
bool GrVkGpu::createVkImageForBackendSurface(VkFormat vkFormat,
1784 SkISize dimensions,
1785 int sampleCnt,
1786 GrTexturable texturable,
1787 GrRenderable renderable,
1788 skgpu::Mipmapped mipmapped,
1789 GrVkImageInfo* info,
1790 GrProtected isProtected) {
1791 SkASSERT(texturable == GrTexturable::kYes || renderable == GrRenderable::kYes);
1792
1793 if (fProtectedContext != isProtected) {
1794 return false;
1795 }
1796
1797 if (texturable == GrTexturable::kYes && !fVkCaps->isVkFormatTexturable(vkFormat)) {
1798 return false;
1799 }
1800
1801 // MSAA images are only currently used by createTestingOnlyBackendRenderTarget.
1802 if (sampleCnt > 1 && (texturable == GrTexturable::kYes || renderable == GrRenderable::kNo)) {
1803 return false;
1804 }
1805
1806 if (renderable == GrRenderable::kYes) {
1807 sampleCnt = fVkCaps->getRenderTargetSampleCount(sampleCnt, vkFormat);
1808 if (!sampleCnt) {
1809 return false;
1810 }
1811 }
1812
1813
1814 int numMipLevels = 1;
1815 if (mipmapped == skgpu::Mipmapped::kYes) {
1816 numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1;
1817 }
1818
1819 VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
1820 VK_IMAGE_USAGE_TRANSFER_DST_BIT;
1821 if (texturable == GrTexturable::kYes) {
1822 usageFlags |= VK_IMAGE_USAGE_SAMPLED_BIT;
1823 }
1824 if (renderable == GrRenderable::kYes) {
1825 usageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
1826 // We always make our render targets support being used as input attachments
1827 usageFlags |= VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
1828 }
1829
1830 GrVkImage::ImageDesc imageDesc;
1831 imageDesc.fImageType = VK_IMAGE_TYPE_2D;
1832 imageDesc.fFormat = vkFormat;
1833 imageDesc.fWidth = dimensions.width();
1834 imageDesc.fHeight = dimensions.height();
1835 imageDesc.fLevels = numMipLevels;
1836 imageDesc.fSamples = sampleCnt;
1837 imageDesc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
1838 imageDesc.fUsageFlags = usageFlags;
1839 imageDesc.fMemProps = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
1840 imageDesc.fIsProtected = fProtectedContext;
1841
1842 if (!GrVkImage::InitImageInfo(this, imageDesc, info)) {
1843 SkDebugf("Failed to init image info\n");
1844 return false;
1845 }
1846
1847 return true;
1848 }
1849
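// Clears a backend texture by wrapping it as a borrowed GrVkTexture, transitioning it to
// TRANSFER_DST_OPTIMAL, issuing vkCmdClearColorImage over all mip levels, and finally leaving it
// in SHADER_READ_ONLY_OPTIMAL.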
bool GrVkGpu::onClearBackendTexture(const GrBackendTexture& backendTexture,
1851 sk_sp<skgpu::RefCntedCallback> finishedCallback,
1852 std::array<float, 4> color) {
1853 GrVkImageInfo info;
1854 SkAssertResult(GrBackendTextures::GetVkImageInfo(backendTexture, &info));
1855
1856 sk_sp<skgpu::MutableTextureState> mutableState = backendTexture.getMutableState();
1857 SkASSERT(mutableState);
1858 sk_sp<GrVkTexture> texture =
1859 GrVkTexture::MakeWrappedTexture(this, backendTexture.dimensions(),
1860 kBorrow_GrWrapOwnership, GrWrapCacheable::kNo,
1861 kRW_GrIOType, info, std::move(mutableState));
1862 if (!texture) {
1863 return false;
1864 }
1865 GrVkImage* texImage = texture->textureImage();
1866
1867 GrVkPrimaryCommandBuffer* cmdBuffer = this->currentCommandBuffer();
1868 if (!cmdBuffer) {
1869 return false;
1870 }
1871
1872 texImage->setImageLayout(this,
1873 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1874 VK_ACCESS_TRANSFER_WRITE_BIT,
1875 VK_PIPELINE_STAGE_TRANSFER_BIT,
1876 false);
1877
1878 // CmdClearColorImage doesn't work for compressed formats
1879 SkASSERT(!skgpu::VkFormatIsCompressed(info.fFormat));
1880
1881 VkClearColorValue vkColor;
1882 // If we ever support SINT or UINT formats this needs to be updated to use the int32 and
1883 // uint32 union members in those cases.
1884 vkColor.float32[0] = color[0];
1885 vkColor.float32[1] = color[1];
1886 vkColor.float32[2] = color[2];
1887 vkColor.float32[3] = color[3];
1888 VkImageSubresourceRange range;
1889 range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
1890 range.baseArrayLayer = 0;
1891 range.baseMipLevel = 0;
1892 range.layerCount = 1;
1893 range.levelCount = info.fLevelCount;
1894 cmdBuffer->clearColorImage(this, texImage, &vkColor, 1, &range);
1895
    // Change the image layout to shader read since, if we use this texture as a borrowed
    // texture within Ganesh, we require that it already be in the shader-read layout.
1898 texImage->setImageLayout(this, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
1899 VK_ACCESS_SHADER_READ_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
1900 false);
1901
1902 if (finishedCallback) {
1903 this->addFinishedCallback(std::move(finishedCallback));
1904 }
1905 return true;
1906 }
1907
GrBackendTexture GrVkGpu::onCreateBackendTexture(SkISize dimensions,
1909 const GrBackendFormat& format,
1910 GrRenderable renderable,
1911 skgpu::Mipmapped mipmapped,
1912 GrProtected isProtected,
1913 std::string_view label) {
1914 const GrVkCaps& caps = this->vkCaps();
1915
1916 if (fProtectedContext != isProtected) {
1917 return {};
1918 }
1919
1920 VkFormat vkFormat;
1921 if (!GrBackendFormats::AsVkFormat(format, &vkFormat)) {
1922 return {};
1923 }
1924
1925 // TODO: move the texturability check up to GrGpu::createBackendTexture and just assert here
1926 if (!caps.isVkFormatTexturable(vkFormat)) {
1927 return {};
1928 }
1929
1930 if (skgpu::VkFormatNeedsYcbcrSampler(vkFormat)) {
1931 return {};
1932 }
1933
1934 GrVkImageInfo info;
1935 if (!this->createVkImageForBackendSurface(vkFormat, dimensions, 1, GrTexturable::kYes,
1936 renderable, mipmapped, &info, isProtected)) {
1937 return {};
1938 }
1939
1940 return GrBackendTextures::MakeVk(dimensions.width(), dimensions.height(), info);
1941 }
1942
GrBackendTexture GrVkGpu::onCreateCompressedBackendTexture(SkISize dimensions,
1944 const GrBackendFormat& format,
1945 skgpu::Mipmapped mipmapped,
1946 GrProtected isProtected) {
1947 return this->onCreateBackendTexture(dimensions,
1948 format,
1949 GrRenderable::kNo,
1950 mipmapped,
1951 isProtected,
1952 /*label=*/"VkGpu_CreateCompressedBackendTexture");
1953 }
1954
bool GrVkGpu::onUpdateCompressedBackendTexture(const GrBackendTexture& backendTexture,
1956 sk_sp<skgpu::RefCntedCallback> finishedCallback,
1957 const void* data,
1958 size_t size) {
1959 GrVkImageInfo info;
1960 SkAssertResult(GrBackendTextures::GetVkImageInfo(backendTexture, &info));
1961
1962 sk_sp<skgpu::MutableTextureState> mutableState = backendTexture.getMutableState();
1963 SkASSERT(mutableState);
1964 sk_sp<GrVkTexture> texture = GrVkTexture::MakeWrappedTexture(this,
1965 backendTexture.dimensions(),
1966 kBorrow_GrWrapOwnership,
1967 GrWrapCacheable::kNo,
1968 kRW_GrIOType,
1969 info,
1970 std::move(mutableState));
1971 if (!texture) {
1972 return false;
1973 }
1974
1975 GrVkPrimaryCommandBuffer* cmdBuffer = this->currentCommandBuffer();
1976 if (!cmdBuffer) {
1977 return false;
1978 }
1979 GrVkImage* image = texture->textureImage();
1980 image->setImageLayout(this,
1981 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1982 VK_ACCESS_TRANSFER_WRITE_BIT,
1983 VK_PIPELINE_STAGE_TRANSFER_BIT,
1984 false);
1985
1986 SkTextureCompressionType compression =
1987 GrBackendFormatToCompressionType(backendTexture.getBackendFormat());
1988
1989 TArray<VkBufferImageCopy> regions;
1990 TArray<size_t> individualMipOffsets;
1991 GrStagingBufferManager::Slice slice;
1992
1993 fill_in_compressed_regions(&fStagingBufferManager,
                               &regions,
1995 &individualMipOffsets,
1996 &slice,
1997 compression,
1998 info.fFormat,
1999 backendTexture.dimensions(),
2000 backendTexture.fMipmapped);
2001
2002 if (!slice.fBuffer) {
2003 return false;
2004 }
2005
2006 memcpy(slice.fOffsetMapPtr, data, size);
2007
2008 cmdBuffer->addGrSurface(texture);
2009 // Copy the buffer to the image. This call takes the raw VkBuffer instead of a GrGpuBuffer
    // because we don't need the command buffer to ref the buffer here. The reason is that
2011 // the buffer is coming from the staging manager and the staging manager will make sure the
2012 // command buffer has a ref on the buffer. This avoids having to add and remove a ref for
2013 // every upload in the frame.
2014 cmdBuffer->copyBufferToImage(this,
2015 static_cast<GrVkBuffer*>(slice.fBuffer)->vkBuffer(),
2016 image,
2017 image->currentLayout(),
2018 regions.size(),
2019 regions.begin());
2020
    // Change the image layout to shader read since, if we use this texture as a borrowed
    // texture within Ganesh, we require that it already be in the shader-read layout.
2023 image->setImageLayout(this,
2024 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
2025 VK_ACCESS_SHADER_READ_BIT,
2026 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
2027 false);
2028
2029 if (finishedCallback) {
2030 this->addFinishedCallback(std::move(finishedCallback));
2031 }
2032 return true;
2033 }
2034
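// Applies a client-requested layout/queue-family transition to a wrapped image. An UNDEFINED new
// layout means "keep the current layout", and a transition where both the old and new queue
// families are special (external/foreign) is skipped since that combination is illegal.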
void set_layout_and_queue_from_mutable_state(GrVkGpu* gpu, GrVkImage* image,
2036 VkImageLayout newLayout,
2037 uint32_t newQueueFamilyIndex) {
2038 // Even though internally we use this helper for getting src access flags and stages they
2039 // can also be used for general dst flags since we don't know exactly what the client
2040 // plans on using the image for.
2041 if (newLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
2042 newLayout = image->currentLayout();
2043 }
2044 VkPipelineStageFlags dstStage = GrVkImage::LayoutToPipelineSrcStageFlags(newLayout);
2045 VkAccessFlags dstAccess = GrVkImage::LayoutToSrcAccessMask(newLayout);
2046
2047 uint32_t currentQueueFamilyIndex = image->currentQueueFamilyIndex();
2048 auto isSpecialQueue = [](uint32_t queueFamilyIndex) {
2049 return queueFamilyIndex == VK_QUEUE_FAMILY_EXTERNAL ||
2050 queueFamilyIndex == VK_QUEUE_FAMILY_FOREIGN_EXT;
2051 };
2052 if (isSpecialQueue(currentQueueFamilyIndex) && isSpecialQueue(newQueueFamilyIndex)) {
2053 // It is illegal to have both the new and old queue be special queue families (i.e. external
2054 // or foreign).
2055 return;
2056 }
2057
2058 image->setImageLayoutAndQueueIndex(gpu, newLayout, dstAccess, dstStage, false,
2059 newQueueFamilyIndex);
2060 }
2061
bool GrVkGpu::setBackendSurfaceState(GrVkImageInfo info,
2063 sk_sp<skgpu::MutableTextureState> currentState,
2064 SkISize dimensions,
2065 VkImageLayout newLayout,
2066 uint32_t newQueueFamilyIndex,
2067 skgpu::MutableTextureState* previousState,
2068 sk_sp<skgpu::RefCntedCallback> finishedCallback) {
2069 sk_sp<GrVkImage> texture = GrVkImage::MakeWrapped(this,
2070 dimensions,
2071 info,
2072 std::move(currentState),
2073 GrVkImage::UsageFlags::kColorAttachment,
2074 kBorrow_GrWrapOwnership,
2075 GrWrapCacheable::kNo,
2076 "VkGpu_SetBackendSurfaceState",
2077 /*forSecondaryCB=*/false);
2078 SkASSERT(texture);
2079 if (!texture) {
2080 return false;
2081 }
2082 if (previousState) {
2083 previousState->set(*texture->getMutableState());
2084 }
2085 set_layout_and_queue_from_mutable_state(this, texture.get(), newLayout, newQueueFamilyIndex);
2086 if (finishedCallback) {
2087 this->addFinishedCallback(std::move(finishedCallback));
2088 }
2089 return true;
2090 }
2091
bool GrVkGpu::setBackendTextureState(const GrBackendTexture& backendTeture,
2093 const skgpu::MutableTextureState& newState,
2094 skgpu::MutableTextureState* previousState,
2095 sk_sp<skgpu::RefCntedCallback> finishedCallback) {
2096 GrVkImageInfo info;
2097 SkAssertResult(GrBackendTextures::GetVkImageInfo(backendTeture, &info));
2098 sk_sp<skgpu::MutableTextureState> currentState = backendTeture.getMutableState();
2099 SkASSERT(currentState);
2100 SkASSERT(newState.isValid() && newState.backend() == skgpu::BackendApi::kVulkan);
2101 return this->setBackendSurfaceState(info, std::move(currentState), backendTeture.dimensions(),
2102 skgpu::MutableTextureStates::GetVkImageLayout(newState),
2103 skgpu::MutableTextureStates::GetVkQueueFamilyIndex(newState),
2104 previousState,
2105 std::move(finishedCallback));
2106 }
2107
bool GrVkGpu::setBackendRenderTargetState(const GrBackendRenderTarget& backendRenderTarget,
2109 const skgpu::MutableTextureState& newState,
2110 skgpu::MutableTextureState* previousState,
2111 sk_sp<skgpu::RefCntedCallback> finishedCallback) {
2112 GrVkImageInfo info;
2113 SkAssertResult(GrBackendRenderTargets::GetVkImageInfo(backendRenderTarget, &info));
2114 sk_sp<skgpu::MutableTextureState> currentState = backendRenderTarget.getMutableState();
2115 SkASSERT(currentState);
2116 SkASSERT(newState.backend() == skgpu::BackendApi::kVulkan);
2117 return this->setBackendSurfaceState(info, std::move(currentState),
2118 backendRenderTarget.dimensions(),
2119 skgpu::MutableTextureStates::GetVkImageLayout(newState),
2120 skgpu::MutableTextureStates::GetVkQueueFamilyIndex(newState),
2121 previousState, std::move(finishedCallback));
2122 }
2123
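// Issues a self-dependency barrier on the render target's color attachment so that non-coherent
// advanced blends or input-attachment reads see prior color attachment writes.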
void GrVkGpu::xferBarrier(GrRenderTarget* rt, GrXferBarrierType barrierType) {
2125 GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt);
2126 VkPipelineStageFlags dstStage;
2127 VkAccessFlags dstAccess;
2128 if (barrierType == kBlend_GrXferBarrierType) {
2129 dstStage = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
2130 dstAccess = VK_ACCESS_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT;
2131 } else {
2132 SkASSERT(barrierType == kTexture_GrXferBarrierType);
2133 dstStage = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
2134 dstAccess = VK_ACCESS_INPUT_ATTACHMENT_READ_BIT;
2135 }
2136 GrVkImage* image = vkRT->colorAttachment();
2137 VkImageMemoryBarrier barrier;
2138 barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
2139 barrier.pNext = nullptr;
2140 barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
2141 barrier.dstAccessMask = dstAccess;
2142 barrier.oldLayout = image->currentLayout();
2143 barrier.newLayout = barrier.oldLayout;
2144 barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
2145 barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
2146 barrier.image = image->image();
2147 barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, image->mipLevels(), 0, 1};
2148 this->addImageMemoryBarrier(image->resource(),
2149 VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
2150 dstStage, true, &barrier);
2151 }
2152
void GrVkGpu::deleteBackendTexture(const GrBackendTexture& tex) {
2154 SkASSERT(GrBackendApi::kVulkan == tex.fBackend);
2155
2156 GrVkImageInfo info;
2157 if (GrBackendTextures::GetVkImageInfo(tex, &info)) {
2158 GrVkImage::DestroyImageInfo(this, const_cast<GrVkImageInfo*>(&info));
2159 }
2160 }
2161
bool GrVkGpu::compile(const GrProgramDesc& desc, const GrProgramInfo& programInfo) {
2163 GrVkRenderPass::AttachmentsDescriptor attachmentsDescriptor;
2164 GrVkRenderPass::AttachmentFlags attachmentFlags;
2165 GrVkRenderTarget::ReconstructAttachmentsDescriptor(this->vkCaps(), programInfo,
2166 &attachmentsDescriptor, &attachmentFlags);
2167
2168 GrVkRenderPass::SelfDependencyFlags selfDepFlags = GrVkRenderPass::SelfDependencyFlags::kNone;
2169 if (programInfo.renderPassBarriers() & GrXferBarrierFlags::kBlend) {
2170 selfDepFlags |= GrVkRenderPass::SelfDependencyFlags::kForNonCoherentAdvBlend;
2171 }
2172 if (programInfo.renderPassBarriers() & GrXferBarrierFlags::kTexture) {
2173 selfDepFlags |= GrVkRenderPass::SelfDependencyFlags::kForInputAttachment;
2174 }
2175
2176 GrVkRenderPass::LoadFromResolve loadFromResolve = GrVkRenderPass::LoadFromResolve::kNo;
2177 if (this->vkCaps().programInfoWillUseDiscardableMSAA(programInfo) &&
2178 programInfo.colorLoadOp() == GrLoadOp::kLoad) {
2179 loadFromResolve = GrVkRenderPass::LoadFromResolve::kLoad;
2180 }
2181 sk_sp<const GrVkRenderPass> renderPass(this->resourceProvider().findCompatibleRenderPass(
2182 &attachmentsDescriptor, attachmentFlags, selfDepFlags, loadFromResolve));
2183 if (!renderPass) {
2184 return false;
2185 }
2186
2187 GrThreadSafePipelineBuilder::Stats::ProgramCacheResult stat;
2188
2189 auto pipelineState = this->resourceProvider().findOrCreateCompatiblePipelineState(
2190 desc,
2191 programInfo,
2192 renderPass->vkRenderPass(),
2193 &stat);
2194 if (!pipelineState) {
2195 return false;
2196 }
2197
2198 return stat != GrThreadSafePipelineBuilder::Stats::ProgramCacheResult::kHit;
2199 }
2200
2201 #if defined(GPU_TEST_UTILS)
bool GrVkGpu::isTestingOnlyBackendTexture(const GrBackendTexture& tex) const {
2203 SkASSERT(GrBackendApi::kVulkan == tex.fBackend);
2204
2205 GrVkImageInfo backend;
2206 if (!GrBackendTextures::GetVkImageInfo(tex, &backend)) {
2207 return false;
2208 }
2209
2210 if (backend.fImage && backend.fAlloc.fMemory) {
2211 VkMemoryRequirements req;
2212 memset(&req, 0, sizeof(req));
2213 GR_VK_CALL(this->vkInterface(), GetImageMemoryRequirements(fDevice,
2214 backend.fImage,
2215 &req));
2216 // TODO: find a better check
2217 // This will probably fail with a different driver
2218 return (req.size > 0) && (req.size <= 8192 * 8192);
2219 }
2220
2221 return false;
2222 }
2223
GrBackendRenderTarget GrVkGpu::createTestingOnlyBackendRenderTarget(SkISize dimensions,
2225 GrColorType ct,
2226 int sampleCnt,
2227 GrProtected isProtected) {
2228 if (dimensions.width() > this->caps()->maxRenderTargetSize() ||
2229 dimensions.height() > this->caps()->maxRenderTargetSize()) {
2230 return {};
2231 }
2232
2233 VkFormat vkFormat = this->vkCaps().getFormatFromColorType(ct);
2234
2235 GrVkImageInfo info;
2236 if (!this->createVkImageForBackendSurface(vkFormat,
2237 dimensions,
2238 sampleCnt,
2239 GrTexturable::kNo,
2240 GrRenderable::kYes,
2241 skgpu::Mipmapped::kNo,
2242 &info,
2243 isProtected)) {
2244 return {};
2245 }
2246 return GrBackendRenderTargets::MakeVk(dimensions.width(), dimensions.height(), info);
2247 }
2248
void GrVkGpu::deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget& rt) {
2250 SkASSERT(GrBackendApi::kVulkan == rt.fBackend);
2251
2252 GrVkImageInfo info;
2253 if (GrBackendRenderTargets::GetVkImageInfo(rt, &info)) {
2254 // something in the command buffer may still be using this, so force submit
2255 GrSubmitInfo submitInfo;
2256 submitInfo.fSync = GrSyncCpu::kYes;
2257 SkAssertResult(this->submitCommandBuffer(submitInfo));
2258 GrVkImage::DestroyImageInfo(this, const_cast<GrVkImageInfo*>(&info));
2259 }
2260 }
2261 #endif
2262
2263 ////////////////////////////////////////////////////////////////////////////////
2264
void GrVkGpu::addBufferMemoryBarrier(const GrManagedResource* resource,
2266 VkPipelineStageFlags srcStageMask,
2267 VkPipelineStageFlags dstStageMask,
2268 bool byRegion,
2269 VkBufferMemoryBarrier* barrier) const {
2270 if (!this->currentCommandBuffer()) {
2271 return;
2272 }
2273 SkASSERT(resource);
2274 this->currentCommandBuffer()->pipelineBarrier(this,
2275 resource,
2276 srcStageMask,
2277 dstStageMask,
2278 byRegion,
2279 GrVkCommandBuffer::kBufferMemory_BarrierType,
2280 barrier);
2281 }

void GrVkGpu::addBufferMemoryBarrier(VkPipelineStageFlags srcStageMask,
2283 VkPipelineStageFlags dstStageMask,
2284 bool byRegion,
2285 VkBufferMemoryBarrier* barrier) const {
2286 if (!this->currentCommandBuffer()) {
2287 return;
2288 }
    // We don't pass a resource to the command buffer here. The command buffer only uses it
2290 // to hold a ref, but every place where we add a buffer memory barrier we are doing some other
2291 // command with the buffer on the command buffer. Thus those other commands will already cause
2292 // the command buffer to be holding a ref to the buffer.
2293 this->currentCommandBuffer()->pipelineBarrier(this,
2294 /*resource=*/nullptr,
2295 srcStageMask,
2296 dstStageMask,
2297 byRegion,
2298 GrVkCommandBuffer::kBufferMemory_BarrierType,
2299 barrier);
2300 }
2301
void GrVkGpu::addImageMemoryBarrier(const GrManagedResource* resource,
2303 VkPipelineStageFlags srcStageMask,
2304 VkPipelineStageFlags dstStageMask,
2305 bool byRegion,
2306 VkImageMemoryBarrier* barrier) const {
2307 // If we are in the middle of destroying or abandoning the context we may hit a release proc
2308 // that triggers the destruction of a GrVkImage. This could cause us to try and transfer the
    // VkImage back to the original queue. In this state we don't submit any more work and we may not
2310 // have a current command buffer. Thus we won't do the queue transfer.
2311 if (!this->currentCommandBuffer()) {
2312 return;
2313 }
2314 SkASSERT(resource);
2315 this->currentCommandBuffer()->pipelineBarrier(this,
2316 resource,
2317 srcStageMask,
2318 dstStageMask,
2319 byRegion,
2320 GrVkCommandBuffer::kImageMemory_BarrierType,
2321 barrier);
2322 }
2323
void GrVkGpu::prepareSurfacesForBackendAccessAndStateUpdates(
2325 SkSpan<GrSurfaceProxy*> proxies,
2326 SkSurfaces::BackendSurfaceAccess access,
2327 const skgpu::MutableTextureState* newState) {
2328 // Submit the current command buffer to the Queue. Whether we inserted semaphores or not does
2329 // not effect what we do here.
    // not affect what we do here.
2331 // We currently don't support passing in new surface state for multiple proxies here. The
2332 // only time we have multiple proxies is if we are flushing a yuv SkImage which won't have
        // state updates anyway. Additionally, if we have a newState then we must not have any
2334 // BackendSurfaceAccess.
2335 SkASSERT(!newState || proxies.size() == 1);
2336 SkASSERT(!newState || access == SkSurfaces::BackendSurfaceAccess::kNoAccess);
2337 GrVkImage* image;
2338 for (GrSurfaceProxy* proxy : proxies) {
2339 SkASSERT(proxy->isInstantiated());
2340 if (GrTexture* tex = proxy->peekTexture()) {
2341 image = static_cast<GrVkTexture*>(tex)->textureImage();
2342 } else {
2343 GrRenderTarget* rt = proxy->peekRenderTarget();
2344 SkASSERT(rt);
2345 GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt);
2346 image = vkRT->externalAttachment();
2347 }
2348 if (newState) {
2349 VkImageLayout newLayout =
2350 skgpu::MutableTextureStates::GetVkImageLayout(newState);
2351 uint32_t newIndex =
2352 skgpu::MutableTextureStates::GetVkQueueFamilyIndex(newState);
2353 set_layout_and_queue_from_mutable_state(this, image, newLayout, newIndex);
2354 } else {
2355 SkASSERT(access == SkSurfaces::BackendSurfaceAccess::kPresent);
2356 image->prepareForPresent(this);
2357 }
2358 }
2359 }
2360 }
2361
void GrVkGpu::addFinishedCallback(sk_sp<skgpu::RefCntedCallback> finishedCallback) {
2363 SkASSERT(finishedCallback);
2364 fResourceProvider.addFinishedProcToActiveCommandBuffers(std::move(finishedCallback));
2365 }
2366
void GrVkGpu::takeOwnershipOfBuffer(sk_sp<GrGpuBuffer> buffer) {
2368 this->currentCommandBuffer()->addGrBuffer(std::move(buffer));
2369 }
2370
bool GrVkGpu::onSubmitToGpu(const GrSubmitInfo& info) {
2372 return this->submitCommandBuffer(info);
2373 }
2374
void GrVkGpu::finishOutstandingGpuWork() {
2376 VK_CALL(QueueWaitIdle(fQueue));
2377
2378 if (this->vkCaps().mustSyncCommandBuffersWithQueue()) {
2379 fResourceProvider.forceSyncAllCommandBuffers();
2380 }
2381 }
2382
void GrVkGpu::onReportSubmitHistograms() {
2384 #if SK_HISTOGRAMS_ENABLED
2385 uint64_t allocatedMemory = 0, usedMemory = 0;
2386 std::tie(allocatedMemory, usedMemory) = fMemoryAllocator->totalAllocatedAndUsedMemory();
2387 SkASSERT(usedMemory <= allocatedMemory);
2388 if (allocatedMemory > 0) {
2389 SK_HISTOGRAM_PERCENTAGE("VulkanMemoryAllocator.PercentUsed",
2390 (usedMemory * 100) / allocatedMemory);
2391 }
    // allocatedMemory is in bytes and needs to be reported in kilobytes. SK_HISTOGRAM_MEMORY_KB
2393 // supports samples up to around 500MB which should support the amounts of memory we allocate.
2394 SK_HISTOGRAM_MEMORY_KB("VulkanMemoryAllocator.AmountAllocated", allocatedMemory >> 10);
2395 #endif // SK_HISTOGRAMS_ENABLED
2396 }
2397
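// Surface-to-surface copy via vkCmdCopyImage. Both images are transitioned to the appropriate
// transfer layouts first; the debug-only caps check asserts that the format/sample-count pair is
// actually copyable this way.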
void GrVkGpu::copySurfaceAsCopyImage(GrSurface* dst,
2399 GrSurface* src,
2400 GrVkImage* dstImage,
2401 GrVkImage* srcImage,
2402 const SkIRect& srcRect,
2403 const SkIPoint& dstPoint) {
2404 if (!this->currentCommandBuffer()) {
2405 return;
2406 }
2407
2408 #ifdef SK_DEBUG
2409 int dstSampleCnt = dstImage->numSamples();
2410 int srcSampleCnt = srcImage->numSamples();
2411 bool dstHasYcbcr = dstImage->ycbcrConversionInfo().isValid();
2412 bool srcHasYcbcr = srcImage->ycbcrConversionInfo().isValid();
2413 VkFormat dstFormat = dstImage->imageFormat();
2414 VkFormat srcFormat;
2415 SkAssertResult(GrBackendFormats::AsVkFormat(dst->backendFormat(), &srcFormat));
2416 SkASSERT(this->vkCaps().canCopyImage(dstFormat, dstSampleCnt, dstHasYcbcr,
2417 srcFormat, srcSampleCnt, srcHasYcbcr));
2418 #endif
2419 if (src->isProtected() && !dst->isProtected()) {
2420 SkDebugf("Can't copy from protected memory to non-protected");
2421 return;
2422 }
2423
2424 // These flags are for flushing/invalidating caches and for the dst image it doesn't matter if
2425 // the cache is flushed since it is only being written to.
2426 dstImage->setImageLayout(this,
2427 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2428 VK_ACCESS_TRANSFER_WRITE_BIT,
2429 VK_PIPELINE_STAGE_TRANSFER_BIT,
2430 false);
2431
2432 srcImage->setImageLayout(this,
2433 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
2434 VK_ACCESS_TRANSFER_READ_BIT,
2435 VK_PIPELINE_STAGE_TRANSFER_BIT,
2436 false);
2437
2438 VkImageCopy copyRegion;
    memset(&copyRegion, 0, sizeof(VkImageCopy));
2440 copyRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
2441 copyRegion.srcOffset = { srcRect.fLeft, srcRect.fTop, 0 };
2442 copyRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
2443 copyRegion.dstOffset = { dstPoint.fX, dstPoint.fY, 0 };
2444 copyRegion.extent = { (uint32_t)srcRect.width(), (uint32_t)srcRect.height(), 1 };
2445
2446 this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(src));
2447 this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(dst));
2448 this->currentCommandBuffer()->copyImage(this,
2449 srcImage,
2450 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
2451 dstImage,
2452 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2453 1,
                                            &copyRegion);
2455
2456 SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
2457 srcRect.width(), srcRect.height());
2458 // The rect is already in device space so we pass in kTopLeft so no flip is done.
2459 this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect);
2460 }
2461
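// Surface-to-surface copy via vkCmdBlitImage, which additionally allows scaling between srcRect
// and dstRect with the requested nearest/linear filter.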
void GrVkGpu::copySurfaceAsBlit(GrSurface* dst,
2463 GrSurface* src,
2464 GrVkImage* dstImage,
2465 GrVkImage* srcImage,
2466 const SkIRect& srcRect,
2467 const SkIRect& dstRect,
2468 GrSamplerState::Filter filter) {
2469 if (!this->currentCommandBuffer()) {
2470 return;
2471 }
2472
2473 #ifdef SK_DEBUG
2474 int dstSampleCnt = dstImage->numSamples();
2475 int srcSampleCnt = srcImage->numSamples();
2476 bool dstHasYcbcr = dstImage->ycbcrConversionInfo().isValid();
2477 bool srcHasYcbcr = srcImage->ycbcrConversionInfo().isValid();
2478 VkFormat dstFormat = dstImage->imageFormat();
2479 VkFormat srcFormat;
2480 SkAssertResult(GrBackendFormats::AsVkFormat(dst->backendFormat(), &srcFormat));
2481 SkASSERT(this->vkCaps().canCopyAsBlit(dstFormat,
2482 dstSampleCnt,
2483 dstImage->isLinearTiled(),
2484 dstHasYcbcr,
2485 srcFormat,
2486 srcSampleCnt,
2487 srcImage->isLinearTiled(),
2488 srcHasYcbcr));
2489
2490 #endif
2491 if (src->isProtected() && !dst->isProtected()) {
2492 SkDebugf("Can't copy from protected memory to non-protected");
2493 return;
2494 }
2495
2496 dstImage->setImageLayout(this,
2497 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2498 VK_ACCESS_TRANSFER_WRITE_BIT,
2499 VK_PIPELINE_STAGE_TRANSFER_BIT,
2500 false);
2501
2502 srcImage->setImageLayout(this,
2503 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
2504 VK_ACCESS_TRANSFER_READ_BIT,
2505 VK_PIPELINE_STAGE_TRANSFER_BIT,
2506 false);
2507
2508 VkImageBlit blitRegion;
2509 memset(&blitRegion, 0, sizeof(VkImageBlit));
2510 blitRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
2511 blitRegion.srcOffsets[0] = { srcRect.fLeft, srcRect.fTop, 0 };
2512 blitRegion.srcOffsets[1] = { srcRect.fRight, srcRect.fBottom, 1 };
2513 blitRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
2514 blitRegion.dstOffsets[0] = { dstRect.fLeft, dstRect.fTop, 0 };
2515 blitRegion.dstOffsets[1] = { dstRect.fRight, dstRect.fBottom, 1 };
2516
2517 this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(src));
2518 this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(dst));
2519 this->currentCommandBuffer()->blitImage(this,
2520 *srcImage,
2521 *dstImage,
2522 1,
2523 &blitRegion,
2524 filter == GrSamplerState::Filter::kNearest ?
2525 VK_FILTER_NEAREST : VK_FILTER_LINEAR);
2526
2527 // The rect is already in device space so we pass in kTopLeft so no flip is done.
2528 this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect);
2529 }
2530
void GrVkGpu::copySurfaceAsResolve(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
2532 const SkIPoint& dstPoint) {
2533 if (src->isProtected() && !dst->isProtected()) {
2534 SkDebugf("Can't copy from protected memory to non-protected");
2535 return;
2536 }
2537 GrVkRenderTarget* srcRT = static_cast<GrVkRenderTarget*>(src->asRenderTarget());
2538 this->resolveImage(dst, srcRT, srcRect, dstPoint);
2539 SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
2540 srcRect.width(), srcRect.height());
2541 // The rect is already in device space so we pass in kTopLeft so no flip is done.
2542 this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect);
2543 }
2544
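// Picks the cheapest copy path: a resolve or vkCmdCopyImage when the rects match in size and the
// formats/sample counts allow it, otherwise a blit if the caps permit it, otherwise the copy
// fails.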
bool GrVkGpu::onCopySurface(GrSurface* dst, const SkIRect& dstRect,
2546 GrSurface* src, const SkIRect& srcRect,
2547 GrSamplerState::Filter filter) {
2548 #ifdef SK_DEBUG
2549 if (GrVkRenderTarget* srcRT = static_cast<GrVkRenderTarget*>(src->asRenderTarget())) {
2550 SkASSERT(!srcRT->wrapsSecondaryCommandBuffer());
2551 }
2552 if (GrVkRenderTarget* dstRT = static_cast<GrVkRenderTarget*>(dst->asRenderTarget())) {
2553 SkASSERT(!dstRT->wrapsSecondaryCommandBuffer());
2554 }
2555 #endif
2556 if (src->isProtected() && !dst->isProtected()) {
2557 SkDebugf("Can't copy from protected memory to non-protected");
2558 return false;
2559 }
2560
2561 GrVkImage* dstImage;
2562 GrVkImage* srcImage;
2563 GrRenderTarget* dstRT = dst->asRenderTarget();
2564 if (dstRT) {
2565 GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(dstRT);
2566 if (vkRT->wrapsSecondaryCommandBuffer()) {
2567 return false;
2568 }
2569 // This will technically return true for single sample rts that used DMSAA in which case we
2570 // don't have to pick the resolve attachment. But in that case the resolve and color
2571 // attachments will be the same anyways.
2572 if (this->vkCaps().renderTargetSupportsDiscardableMSAA(vkRT)) {
2573 dstImage = vkRT->resolveAttachment();
2574 } else {
2575 dstImage = vkRT->colorAttachment();
2576 }
2577 } else if (dst->asTexture()) {
2578 dstImage = static_cast<GrVkTexture*>(dst->asTexture())->textureImage();
2579 } else {
        // The surface is already a GrAttachment
2581 dstImage = static_cast<GrVkImage*>(dst);
2582 }
2583 GrRenderTarget* srcRT = src->asRenderTarget();
2584 if (srcRT) {
2585 GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(srcRT);
2586 // This will technically return true for single sample rts that used DMSAA in which case we
2587 // don't have to pick the resolve attachment. But in that case the resolve and color
2588 // attachments will be the same anyways.
2589 if (this->vkCaps().renderTargetSupportsDiscardableMSAA(vkRT)) {
2590 srcImage = vkRT->resolveAttachment();
2591 } else {
2592 srcImage = vkRT->colorAttachment();
2593 }
2594 } else if (src->asTexture()) {
2595 SkASSERT(src->asTexture());
2596 srcImage = static_cast<GrVkTexture*>(src->asTexture())->textureImage();
2597 } else {
        // The surface is already a GrAttachment
2599 srcImage = static_cast<GrVkImage*>(src);
2600 }
2601
2602 VkFormat dstFormat = dstImage->imageFormat();
2603 VkFormat srcFormat = srcImage->imageFormat();
2604
2605 int dstSampleCnt = dstImage->numSamples();
2606 int srcSampleCnt = srcImage->numSamples();
2607
2608 bool dstHasYcbcr = dstImage->ycbcrConversionInfo().isValid();
2609 bool srcHasYcbcr = srcImage->ycbcrConversionInfo().isValid();
2610
2611 if (srcRect.size() == dstRect.size()) {
2612 // Prefer resolves or copy-image commands when there is no scaling
2613 const SkIPoint dstPoint = dstRect.topLeft();
2614 if (this->vkCaps().canCopyAsResolve(dstFormat, dstSampleCnt, dstHasYcbcr,
2615 srcFormat, srcSampleCnt, srcHasYcbcr)) {
2616 this->copySurfaceAsResolve(dst, src, srcRect, dstPoint);
2617 return true;
2618 }
2619
2620 if (this->vkCaps().canCopyImage(dstFormat, dstSampleCnt, dstHasYcbcr,
2621 srcFormat, srcSampleCnt, srcHasYcbcr)) {
2622 this->copySurfaceAsCopyImage(dst, src, dstImage, srcImage, srcRect, dstPoint);
2623 return true;
2624 }
2625 }
2626
2627 if (this->vkCaps().canCopyAsBlit(dstFormat,
2628 dstSampleCnt,
2629 dstImage->isLinearTiled(),
2630 dstHasYcbcr,
2631 srcFormat,
2632 srcSampleCnt,
2633 srcImage->isLinearTiled(),
2634 srcHasYcbcr)) {
2635 this->copySurfaceAsBlit(dst, src, dstImage, srcImage, srcRect, dstRect, filter);
2636 return true;
2637 }
2638
2639 return false;
2640 }
2641
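// GPU-to-CPU readback: copies the image into a transfer buffer, submits the command buffer with a
// CPU sync so the copy has finished, then maps the buffer and copies rows out honoring the
// caller's rowBytes.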
bool GrVkGpu::onReadPixels(GrSurface* surface,
2643 SkIRect rect,
2644 GrColorType surfaceColorType,
2645 GrColorType dstColorType,
2646 void* buffer,
2647 size_t rowBytes) {
2648 if (surface->isProtected()) {
2649 return false;
2650 }
2651
2652 if (!this->currentCommandBuffer()) {
2653 return false;
2654 }
2655
2656 GrVkImage* image = nullptr;
2657 GrVkRenderTarget* rt = static_cast<GrVkRenderTarget*>(surface->asRenderTarget());
2658 if (rt) {
2659 // Reading from render targets that wrap a secondary command buffer is not allowed since
2660 // it would require us to know the VkImage, which we don't have, as well as need us to
2661 // stop and start the VkRenderPass which we don't have access to.
2662 if (rt->wrapsSecondaryCommandBuffer()) {
2663 return false;
2664 }
2665 image = rt->nonMSAAAttachment();
2666 } else {
2667 image = static_cast<GrVkTexture*>(surface->asTexture())->textureImage();
2668 }
2669
2670 if (!image) {
2671 return false;
2672 }
2673
2674 if (dstColorType == GrColorType::kUnknown ||
2675 dstColorType != this->vkCaps().transferColorType(image->imageFormat(), surfaceColorType)) {
2676 return false;
2677 }
2678
2679 // Change layout of our target so it can be used as copy
2680 image->setImageLayout(this,
2681 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
2682 VK_ACCESS_TRANSFER_READ_BIT,
2683 VK_PIPELINE_STAGE_TRANSFER_BIT,
2684 false);
2685
2686 size_t bpp = GrColorTypeBytesPerPixel(dstColorType);
2687 if (skgpu::VkFormatBytesPerBlock(image->imageFormat()) != bpp) {
2688 return false;
2689 }
2690 size_t tightRowBytes = bpp*rect.width();
2691
2692 VkBufferImageCopy region;
    memset(&region, 0, sizeof(VkBufferImageCopy));
2694 VkOffset3D offset = { rect.left(), rect.top(), 0 };
2695 region.imageOffset = offset;
2696 region.imageExtent = { (uint32_t)rect.width(), (uint32_t)rect.height(), 1 };
2697
2698 size_t transBufferRowBytes = bpp * region.imageExtent.width;
2699 size_t imageRows = region.imageExtent.height;
2700 GrResourceProvider* resourceProvider = this->getContext()->priv().resourceProvider();
2701 sk_sp<GrGpuBuffer> transferBuffer = resourceProvider->createBuffer(
2702 transBufferRowBytes * imageRows,
2703 GrGpuBufferType::kXferGpuToCpu,
2704 kDynamic_GrAccessPattern,
2705 GrResourceProvider::ZeroInit::kNo);
2706
2707 if (!transferBuffer) {
2708 return false;
2709 }
2710
2711 GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(transferBuffer.get());
2712
2713 // Copy the image to a buffer so we can map it to cpu memory
2714 region.bufferOffset = 0;
2715 region.bufferRowLength = 0; // Forces RowLength to be width. We handle the rowBytes below.
2716 region.bufferImageHeight = 0; // Forces height to be tightly packed. Only useful for 3d images.
2717 region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
2718
2719 this->currentCommandBuffer()->copyImageToBuffer(this,
2720 image,
2721 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
2722 transferBuffer,
2723 1,
                                                    &region);
2725
2726 // make sure the copy to buffer has finished
2727 vkBuffer->addMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT,
2728 VK_ACCESS_HOST_READ_BIT,
2729 VK_PIPELINE_STAGE_TRANSFER_BIT,
2730 VK_PIPELINE_STAGE_HOST_BIT,
2731 false);
2732
2733 // We need to submit the current command buffer to the Queue and make sure it finishes before
2734 // we can copy the data out of the buffer.
2735 GrSubmitInfo submitInfo;
2736 submitInfo.fSync = GrSyncCpu::kYes;
2737 if (!this->submitCommandBuffer(submitInfo)) {
2738 return false;
2739 }
2740 void* mappedMemory = transferBuffer->map();
2741 if (!mappedMemory) {
2742 return false;
2743 }
2744
2745 SkRectMemcpy(buffer, rowBytes, mappedMemory, transBufferRowBytes, tightRowBytes, rect.height());
2746
2747 transferBuffer->unmap();
2748 return true;
2749 }
2750
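// Begins a render pass on the current primary command buffer. Clear values are laid out to match
// the render pass attachments: color at index 0 and stencil at index 1, or at index 2 when a
// resolve attachment sits between them.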
bool GrVkGpu::beginRenderPass(const GrVkRenderPass* renderPass,
2752 sk_sp<const GrVkFramebuffer> framebuffer,
2753 const VkClearValue* colorClear,
2754 const GrSurface* target,
2755 const SkIRect& renderPassBounds,
2756 bool forSecondaryCB) {
2757 if (!this->currentCommandBuffer()) {
2758 return false;
2759 }
    SkASSERT(!framebuffer->isExternal());
2761
2762 #ifdef SK_DEBUG
2763 uint32_t index;
2764 bool result = renderPass->colorAttachmentIndex(&index);
2765 SkASSERT(result && 0 == index);
2766 result = renderPass->stencilAttachmentIndex(&index);
2767 if (result) {
2768 SkASSERT(1 == index);
2769 }
2770 #endif
2771 VkClearValue clears[3];
2772 int stencilIndex = renderPass->hasResolveAttachment() ? 2 : 1;
2773 clears[0].color = colorClear->color;
2774 clears[stencilIndex].depthStencil.depth = 0.0f;
2775 clears[stencilIndex].depthStencil.stencil = 0;
2776
2777 return this->currentCommandBuffer()->beginRenderPass(
2778 this, renderPass, std::move(framebuffer), clears, target, renderPassBounds, forSecondaryCB);
2779 }
2780
void GrVkGpu::endRenderPass(GrRenderTarget* target, GrSurfaceOrigin origin,
2782 const SkIRect& bounds) {
2783 // We had a command buffer when we started the render pass, we should have one now as well.
2784 SkASSERT(this->currentCommandBuffer());
2785 this->currentCommandBuffer()->endRenderPass(this);
2786 this->didWriteToSurface(target, origin, &bounds);
2787 }
2788
2789 #ifdef SKIA_DFX_FOR_RECORD_VKIMAGE
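// Part of the SKIA_DFX_FOR_RECORD_VKIMAGE diagnostics: queries VK_EXT_device_fault and logs the
// VkImage handle attached to the returned fault info.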
void GrVkGpu::dumpDeviceFaultInfo(const std::string& errorCategory) {
2791 VkDeviceFaultCountsEXT fc{};
2792 fc.sType = VK_STRUCTURE_TYPE_DEVICE_FAULT_COUNTS_EXT;
2793 fc.pNext = nullptr;
2794 GR_VK_CALL(this->vkInterface(), GetDeviceFaultInfo(fDevice, &fc, nullptr));
2795
2796 const uint32_t vendorBinarySize =
2797 std::min(uint32_t(fc.vendorBinarySize), std::numeric_limits<uint32_t>::max());
2798 std::vector<VkDeviceFaultAddressInfoEXT> addressInfos (fc.addressInfoCount);
2799 std::vector<VkDeviceFaultVendorInfoEXT> vendorInfos (fc.vendorInfoCount);
2800 std::vector<uint8_t> vendorBinaryData(vendorBinarySize);
2801 VkDeviceFaultInfoEXT fi{};
2802 VkDebugUtilsObjectNameInfoEXT nameInfo{};
2803 fi.sType = VK_STRUCTURE_TYPE_DEVICE_FAULT_INFO_EXT;
2804 fi.pNext = &nameInfo;
2805 fi.pAddressInfos = addressInfos.data();
2806 fi.pVendorInfos = vendorInfos.data();
2807 fi.pVendorBinaryData = vendorBinaryData.data();
2808 GR_VK_CALL(this->vkInterface(), GetDeviceFaultInfo(fDevice, &fc, &fi));
2809 if (!fi.pNext) {
2810 SK_LOGE("GrVkGpu::dumpDeviceFaultInfo %{public}s pNext is nullptr", errorCategory.c_str());
2811 return;
2812 }
2813 auto obj = static_cast<VkDebugUtilsObjectNameInfoEXT*>(fi.pNext);
2814 if (obj == nullptr) {
2815 SK_LOGE("GrVkGpu::dumpDeviceFaultInfo %{public}s obj is nullptr", errorCategory.c_str());
2816 return;
2817 }
2818 auto vkImage = obj->objectHandle;
2819 if (!vkImage) {
2820 SK_LOGE("GrVkGpu::dumpDeviceFaultInfo %{public}s vkimage is nullptr", errorCategory.c_str());
2821 return;
2822 }
2823 SK_LOGE("GrVkGpu::dumpDeviceFaultInfo %{public}s vkimage 0x%{public}llx", errorCategory.c_str(), vkImage);
2824 }
2825
void GrVkGpu::dumpVkImageDfx(const std::string& errorCategory) {
2827 if (!ParallelDebug::IsVkImageDfxEnabled()) {
2828 return;
2829 }
2830 dumpDeviceFaultInfo(errorCategory);
2831 auto context = getContext();
2832 if (context == nullptr) {
2833 SK_LOGE("GrVkGpu::dumpVkImageDfx %{public}s context nullptr", errorCategory.c_str());
2834 return;
2835 }
2836 std::stringstream dump;
2837 context->dumpAllResource(dump);
2838 std::string s;
2839 while (std::getline(dump, s, '\n')) {
2840 SK_LOGE("%{public}s", s.c_str());
2841 }
2842 }
2843 #endif
2844
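// Logs the error category and forwards it to the owning context's Vulkan-error handler.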
void GrVkGpu::reportVulkanError(const std::string& errorCategory) {
    auto context = getContext();
    if (context == nullptr) {
        SK_LOGE("GrVkGpu::reportVulkanError %{public}s context nullptr", errorCategory.c_str());
        return;
    }
    SK_LOGE("GrVkGpu::reportVulkanError report %{public}s", errorCategory.c_str());
    context->processVulkanError();
}

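// Maps a VkResult to success/failure for the rest of the backend: device-lost invokes the
// device-lost callback once and aborts, OOM results mark the GPU as out of memory, and the
// vendor-specific VK_HUAWEI_GPU_ERROR_RECOVER is reported but treated as recoverable.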
bool GrVkGpu::checkVkResult(VkResult result) {
    int32_t numResult = static_cast<int32_t>(result);
    switch (numResult) {
        case VK_SUCCESS:
            return true;
        case VK_ERROR_DEVICE_LOST:
            if (!fDeviceIsLost) {
                // Callback should only be invoked once, and device should be marked as lost first.
                fDeviceIsLost = true;
                skgpu::InvokeDeviceLostCallback(vkInterface(),
                                                device(),
                                                fDeviceLostContext,
                                                fDeviceLostProc,
                                                vkCaps().supportsDeviceFaultInfo());
            }
            reportVulkanError("VK_ERROR_DEVICE_LOST");
#ifdef SKIA_DFX_FOR_RECORD_VKIMAGE
            dumpVkImageDfx("VK_ERROR_DEVICE_LOST");
#endif
            abort();
            return false;
        case VK_ERROR_OUT_OF_DEVICE_MEMORY:
        case VK_ERROR_OUT_OF_HOST_MEMORY:
            this->setOOMed();
            return false;
        case VK_HUAWEI_GPU_ERROR_RECOVER:
            reportVulkanError("VK_HUAWEI_GPU_ERROR_RECOVER");
#ifdef SKIA_DFX_FOR_RECORD_VKIMAGE
            dumpVkImageDfx("VK_HUAWEI_GPU_ERROR_RECOVER");
#endif
            return true;
        default:
            return false;
    }
}

#ifdef SKIA_DFX_FOR_OHOS
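// OHOS DFX helpers: forward image/buffer allocation byte counts to the resource cache so its
// memory statistics account for Vulkan image and buffer allocations.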
void GrVkGpu::addAllocImageBytes(size_t bytes) {
    auto cache = getContext()->priv().getResourceCache();
    if (!cache) {
        return;
    }
    cache->addAllocImageBytes(bytes);
}

void GrVkGpu::removeAllocImageBytes(size_t bytes) {
    auto cache = getContext()->priv().getResourceCache();
    if (!cache) {
        return;
    }
    cache->removeAllocImageBytes(bytes);
}

void GrVkGpu::addAllocBufferBytes(size_t bytes) {
    auto cache = getContext()->priv().getResourceCache();
    if (!cache) {
        return;
    }
    cache->addAllocBufferBytes(bytes);
}

void GrVkGpu::removeAllocBufferBytes(size_t bytes) {
    auto cache = getContext()->priv().getResourceCache();
    if (!cache) {
        return;
    }
    cache->removeAllocBufferBytes(bytes);
}
#endif

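// Asks the cache-image memory allocator to run VMA defragmentation.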
void GrVkGpu::vmaDefragment() {
    fMemoryAllocatorCacheImage->vmaDefragment();
}

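// Appends VMA statistics for the cache-image allocator to 'out'; a null pointer is ignored.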
void GrVkGpu::dumpVmaStats(SkString* out) {
    if (out == nullptr) {
        return;
    }
    out->appendf("dumpVmaCacheStats:\n");
    fMemoryAllocatorCacheImage->dumpVmaStats(out, "\n");
}

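// Records the given secondary command buffer into the current primary command buffer, if any.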
void GrVkGpu::submitSecondaryCommandBuffer(std::unique_ptr<GrVkSecondaryCommandBuffer> buffer) {
    if (!this->currentCommandBuffer()) {
        return;
    }
    this->currentCommandBuffer()->executeCommands(this, std::move(buffer));
}

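// Submits and then resets the cached ops render pass; the caller must pass back the same render
// pass this GrVkGpu handed out.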
void GrVkGpu::submit(GrOpsRenderPass* renderPass) {
    SkASSERT(fCachedOpsRenderPass.get() == renderPass);

    fCachedOpsRenderPass->submit();
    fCachedOpsRenderPass->reset();
}

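// Creates a new GrVkSemaphore; 'isOwned' controls whether Skia owns (and eventually destroys) the
// underlying VkSemaphore.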
[[nodiscard]] std::unique_ptr<GrSemaphore> GrVkGpu::makeSemaphore(bool isOwned) {
    return GrVkSemaphore::Make(this, isOwned);
}

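// Wraps an externally created VkSemaphore for waiting or signaling, per 'wrapType' and
// 'ownership'.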
std::unique_ptr<GrSemaphore> GrVkGpu::wrapBackendSemaphore(const GrBackendSemaphore& semaphore,
                                                           GrSemaphoreWrapType wrapType,
                                                           GrWrapOwnership ownership) {
    return GrVkSemaphore::MakeWrapped(this, GrBackendSemaphores::GetVkSemaphore(semaphore),
                                      wrapType, ownership);
}

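// Queues the semaphore to be signaled by the next command-buffer submission (if it still needs a
// signal).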
void GrVkGpu::insertSemaphore(GrSemaphore* semaphore) {
    SkASSERT(semaphore);

    GrVkSemaphore* vkSem = static_cast<GrVkSemaphore*>(semaphore);

    GrVkSemaphore::Resource* resource = vkSem->getResource();
    if (resource->shouldSignal()) {
        resource->ref();
        fSemaphoresToSignal.push_back(resource);
    }
}

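// Queues the semaphore to be waited on by the next command-buffer submission (if it still needs a
// wait).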
void GrVkGpu::waitSemaphore(GrSemaphore* semaphore) {
    SkASSERT(semaphore);

    GrVkSemaphore* vkSem = static_cast<GrVkSemaphore*>(semaphore);

    GrVkSemaphore::Resource* resource = vkSem->getResource();
    if (resource->shouldWait()) {
        resource->ref();
        fSemaphoresToWaitOn.push_back(resource);
    }
}

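// Transitions the texture to a shader-readable layout and submits the pending work so another
// context can sample it; the layout transition acts as the barrier, so no semaphore is returned.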
std::unique_ptr<GrSemaphore> GrVkGpu::prepareTextureForCrossContextUsage(GrTexture* texture) {
    SkASSERT(texture);
    GrVkImage* vkTexture = static_cast<GrVkTexture*>(texture)->textureImage();
    vkTexture->setImageLayout(this,
                              VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                              VK_ACCESS_SHADER_READ_BIT,
                              VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                              false);
    // TODO: Should we have a way to notify the caller that this has failed? Currently, if the
    // submit fails (e.g. because of DEVICE_LOST), we simply fail the next use of the GPU.
    // Eventually we will abandon the whole GPU if this fails.
    this->submitToGpu();

    // The image layout change serves as a barrier, so no semaphore is needed.
    // If we ever decide we need to return a semaphore here, we need to make sure GrVkSemaphore is
    // thread safe so that only the first thread that tries to use the semaphore actually submits
    // it. This would additionally require thread safety in command buffer submissions to queues
    // in general.
    return nullptr;
}

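// Takes ownership of a client-provided draw handler so it stays alive until the recorded commands
// are submitted.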
void GrVkGpu::addDrawable(std::unique_ptr<SkDrawable::GpuDrawHandler> drawable) {
    fDrawables.emplace_back(std::move(drawable));
}

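// Writes the VkPipelineCache contents out through the context's persistent cache, when one is set.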
void GrVkGpu::storeVkPipelineCacheData() {
    if (this->getContext()->priv().getPersistentCache()) {
        this->resourceProvider().storePipelineCacheData();
    }
}
