1 /*
2  * Copyright 2015 Google Inc.
3  *
4  * Use of this source code is governed by a BSD-style license that can be
5  * found in the LICENSE file.
6  */
7 
8 #include "src/gpu/vk/GrVkGpu.h"
9 
10 #include "include/gpu/GrBackendSemaphore.h"
11 #include "include/gpu/GrBackendSurface.h"
12 #include "include/gpu/GrContextOptions.h"
13 #include "include/gpu/GrDirectContext.h"
14 #include "include/private/SkTo.h"
15 #include "src/core/SkCompressedDataUtils.h"
16 #include "src/core/SkConvertPixels.h"
17 #include "src/core/SkMipmap.h"
18 #include "src/core/SkTraceEvent.h"
19 #include "src/core/SkUtils.h"
20 #include "src/gpu/GrBackendUtils.h"
21 #include "src/gpu/GrDataUtils.h"
22 #include "src/gpu/GrDirectContextPriv.h"
23 #include "src/gpu/GrGeometryProcessor.h"
24 #include "src/gpu/GrGpuResourceCacheAccess.h"
25 #include "src/gpu/GrNativeRect.h"
26 #include "src/gpu/GrPipeline.h"
27 #include "src/gpu/GrRenderTarget.h"
28 #include "src/gpu/GrResourceProvider.h"
29 #include "src/gpu/GrTexture.h"
30 #include "src/gpu/GrThreadSafePipelineBuilder.h"
31 #include "src/gpu/SkGr.h"
32 #include "src/gpu/vk/GrVkAMDMemoryAllocator.h"
33 #include "src/gpu/vk/GrVkBuffer.h"
34 #include "src/gpu/vk/GrVkCommandBuffer.h"
35 #include "src/gpu/vk/GrVkCommandPool.h"
36 #include "src/gpu/vk/GrVkFramebuffer.h"
37 #include "src/gpu/vk/GrVkImage.h"
38 #include "src/gpu/vk/GrVkInterface.h"
39 #include "src/gpu/vk/GrVkMemory.h"
40 #include "src/gpu/vk/GrVkOpsRenderPass.h"
41 #include "src/gpu/vk/GrVkPipeline.h"
42 #include "src/gpu/vk/GrVkPipelineState.h"
43 #include "src/gpu/vk/GrVkRenderPass.h"
44 #include "src/gpu/vk/GrVkResourceProvider.h"
45 #include "src/gpu/vk/GrVkSemaphore.h"
46 #include "src/gpu/vk/GrVkTexture.h"
47 #include "src/gpu/vk/GrVkTextureRenderTarget.h"
48 #include "src/image/SkImage_Gpu.h"
49 #include "src/image/SkSurface_Gpu.h"
50 
51 #include "include/gpu/vk/GrVkExtensions.h"
52 #include "include/gpu/vk/GrVkTypes.h"
53 
54 #include <utility>
55 
56 #define VK_CALL(X) GR_VK_CALL(this->vkInterface(), X)
57 #define VK_CALL_RET(RET, X) GR_VK_CALL_RESULT(this, RET, X)
58 
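// An .astc file begins with a fixed 16-byte header (magic number, block dimensions, image
// dimensions); compressed uploads that receive ASTC file data skip past it when computing
// buffer offsets (see fill_in_compressed_regions below).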
59 constexpr uint8_t ASTC_HEADER_SIZE = 16;
60 
61 sk_sp<GrGpu> GrVkGpu::Make(const GrVkBackendContext& backendContext,
62                            const GrContextOptions& options, GrDirectContext* direct) {
63     if (backendContext.fInstance == VK_NULL_HANDLE ||
64         backendContext.fPhysicalDevice == VK_NULL_HANDLE ||
65         backendContext.fDevice == VK_NULL_HANDLE ||
66         backendContext.fQueue == VK_NULL_HANDLE) {
67         return nullptr;
68     }
69     if (!backendContext.fGetProc) {
70         return nullptr;
71     }
72 
73     PFN_vkEnumerateInstanceVersion localEnumerateInstanceVersion =
74             reinterpret_cast<PFN_vkEnumerateInstanceVersion>(
75                     backendContext.fGetProc("vkEnumerateInstanceVersion",
76                                             VK_NULL_HANDLE, VK_NULL_HANDLE));
77     uint32_t instanceVersion = 0;
78     if (!localEnumerateInstanceVersion) {
79         instanceVersion = VK_MAKE_VERSION(1, 0, 0);
80     } else {
81         VkResult err = localEnumerateInstanceVersion(&instanceVersion);
82         if (err) {
83             SkDebugf("Failed to enumerate instance version. Err: %d\n", err);
84             return nullptr;
85         }
86     }
87 
88     PFN_vkGetPhysicalDeviceProperties localGetPhysicalDeviceProperties =
89             reinterpret_cast<PFN_vkGetPhysicalDeviceProperties>(
90                     backendContext.fGetProc("vkGetPhysicalDeviceProperties",
91                                             backendContext.fInstance,
92                                             VK_NULL_HANDLE));
93 
94     if (!localGetPhysicalDeviceProperties) {
95         return nullptr;
96     }
97     VkPhysicalDeviceProperties physDeviceProperties;
98     localGetPhysicalDeviceProperties(backendContext.fPhysicalDevice, &physDeviceProperties);
99     uint32_t physDevVersion = physDeviceProperties.apiVersion;
100 
101     uint32_t apiVersion = backendContext.fMaxAPIVersion ? backendContext.fMaxAPIVersion
102                                                         : instanceVersion;
103 
104     instanceVersion = std::min(instanceVersion, apiVersion);
105     physDevVersion = std::min(physDevVersion, apiVersion);
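    // From here on, instanceVersion and physDevVersion are the versions Skia will actually
    // target: the reported versions clamped to fMaxAPIVersion (or to the instance version if
    // no maximum was supplied).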
106 
107     sk_sp<const GrVkInterface> interface;
108 
109     if (backendContext.fVkExtensions) {
110         interface.reset(new GrVkInterface(backendContext.fGetProc,
111                                           backendContext.fInstance,
112                                           backendContext.fDevice,
113                                           instanceVersion,
114                                           physDevVersion,
115                                           backendContext.fVkExtensions));
116         if (!interface->validate(instanceVersion, physDevVersion, backendContext.fVkExtensions)) {
117             return nullptr;
118         }
119     } else {
120         GrVkExtensions extensions;
121         // The only extension flag that may affect the Vulkan backend is the swapchain extension. We
122         // need to know if this is enabled to know if we can transition to a present layout when
123         // flushing a surface.
124         if (backendContext.fExtensions & kKHR_swapchain_GrVkExtensionFlag) {
125             const char* swapChainExtName = VK_KHR_SWAPCHAIN_EXTENSION_NAME;
126             extensions.init(backendContext.fGetProc, backendContext.fInstance,
127                             backendContext.fPhysicalDevice, 0, nullptr, 1, &swapChainExtName);
128         }
129         interface.reset(new GrVkInterface(backendContext.fGetProc,
130                                           backendContext.fInstance,
131                                           backendContext.fDevice,
132                                           instanceVersion,
133                                           physDevVersion,
134                                           &extensions));
135         if (!interface->validate(instanceVersion, physDevVersion, &extensions)) {
136             return nullptr;
137         }
138     }
139 
140     sk_sp<GrVkCaps> caps;
141     if (backendContext.fDeviceFeatures2) {
142         caps.reset(new GrVkCaps(options, interface.get(), backendContext.fPhysicalDevice,
143                                 *backendContext.fDeviceFeatures2, instanceVersion, physDevVersion,
144                                 *backendContext.fVkExtensions, backendContext.fProtectedContext));
145     } else if (backendContext.fDeviceFeatures) {
146         VkPhysicalDeviceFeatures2 features2;
147         features2.pNext = nullptr;
148         features2.features = *backendContext.fDeviceFeatures;
149         caps.reset(new GrVkCaps(options, interface.get(), backendContext.fPhysicalDevice,
150                                 features2, instanceVersion, physDevVersion,
151                                 *backendContext.fVkExtensions, backendContext.fProtectedContext));
152     } else {
153         VkPhysicalDeviceFeatures2 features;
154         memset(&features, 0, sizeof(VkPhysicalDeviceFeatures2));
155         features.pNext = nullptr;
156         if (backendContext.fFeatures & kGeometryShader_GrVkFeatureFlag) {
157             features.features.geometryShader = true;
158         }
159         if (backendContext.fFeatures & kDualSrcBlend_GrVkFeatureFlag) {
160             features.features.dualSrcBlend = true;
161         }
162         if (backendContext.fFeatures & kSampleRateShading_GrVkFeatureFlag) {
163             features.features.sampleRateShading = true;
164         }
165         GrVkExtensions extensions;
166         // The only extension flag that may affect the Vulkan backend is the swapchain extension. We
167         // need to know if this is enabled to know if we can transition to a present layout when
168         // flushing a surface.
169         if (backendContext.fExtensions & kKHR_swapchain_GrVkExtensionFlag) {
170             const char* swapChainExtName = VK_KHR_SWAPCHAIN_EXTENSION_NAME;
171             extensions.init(backendContext.fGetProc, backendContext.fInstance,
172                             backendContext.fPhysicalDevice, 0, nullptr, 1, &swapChainExtName);
173         }
174         caps.reset(new GrVkCaps(options, interface.get(), backendContext.fPhysicalDevice,
175                                 features, instanceVersion, physDevVersion, extensions,
176                                 backendContext.fProtectedContext));
177     }
178 
179     if (!caps) {
180         return nullptr;
181     }
182 
183     sk_sp<GrVkMemoryAllocator> memoryAllocator = backendContext.fMemoryAllocator;
184     if (!memoryAllocator) {
185         // We were not given a memory allocator at creation
186         memoryAllocator = GrVkAMDMemoryAllocator::Make(backendContext.fInstance,
187                                                        backendContext.fPhysicalDevice,
188                                                        backendContext.fDevice, physDevVersion,
189                                                        backendContext.fVkExtensions, interface,
190                                                        caps.get());
191     }
192     if (!memoryAllocator) {
193         SkDEBUGFAIL("No supplied vulkan memory allocator and unable to create one internally.");
194         return nullptr;
195     }
196     const size_t maxBlockCount = SkGetVmaBlockCountMax(); // limit memory holes for the VMA cache
197     sk_sp<GrVkMemoryAllocator> memoryAllocatorCacheImage =
198         GrVkAMDMemoryAllocator::Make(backendContext.fInstance,
199                                      backendContext.fPhysicalDevice,
200                                      backendContext.fDevice, physDevVersion,
201                                      backendContext.fVkExtensions, interface,
202                                      caps.get(), true, maxBlockCount);
203     if (!memoryAllocatorCacheImage) {
204         SkDEBUGFAIL("No supplied vulkan memory allocator for cache image and unable to create one internally.");
205         return nullptr;
206     }
207 
208     sk_sp<GrVkGpu> vkGpu(new GrVkGpu(direct, backendContext, std::move(caps), interface,
209                                      instanceVersion, physDevVersion,
210                                      std::move(memoryAllocator),
211                                      std::move(memoryAllocatorCacheImage)));
212     if (backendContext.fProtectedContext == GrProtected::kYes &&
213         !vkGpu->vkCaps().supportsProtectedMemory()) {
214         return nullptr;
215     }
216     return std::move(vkGpu);
217 }
218 
219 ////////////////////////////////////////////////////////////////////////////////
220 
221 GrVkGpu::GrVkGpu(GrDirectContext* direct, const GrVkBackendContext& backendContext,
222                  sk_sp<GrVkCaps> caps, sk_sp<const GrVkInterface> interface,
223                  uint32_t instanceVersion, uint32_t physicalDeviceVersion,
224                  sk_sp<GrVkMemoryAllocator> memoryAllocator,
225                  sk_sp<GrVkMemoryAllocator> memoryAllocatorCacheImage)
226         : INHERITED(direct)
227         , fInterface(std::move(interface))
228         , fMemoryAllocator(std::move(memoryAllocator))
229         , fMemoryAllocatorCacheImage(std::move(memoryAllocatorCacheImage))
230         , fVkCaps(std::move(caps))
231         , fPhysicalDevice(backendContext.fPhysicalDevice)
232         , fDevice(backendContext.fDevice)
233         , fQueue(backendContext.fQueue)
234         , fQueueIndex(backendContext.fGraphicsQueueIndex)
235         , fResourceProvider(this)
236         , fStagingBufferManager(this)
237         , fDisconnected(false)
238         , fProtectedContext(backendContext.fProtectedContext) {
239     SkASSERT(!backendContext.fOwnsInstanceAndDevice);
240     SkASSERT(fMemoryAllocator);
241     SkASSERT(fMemoryAllocatorCacheImage);
242 
243     this->initCapsAndCompiler(fVkCaps);
244 
245     VK_CALL(GetPhysicalDeviceProperties(backendContext.fPhysicalDevice, &fPhysDevProps));
246     VK_CALL(GetPhysicalDeviceMemoryProperties(backendContext.fPhysicalDevice, &fPhysDevMemProps));
247 
248     fResourceProvider.init();
249 
250     fMainCmdPool = fResourceProvider.findOrCreateCommandPool();
251     if (fMainCmdPool) {
252         fMainCmdBuffer = fMainCmdPool->getPrimaryCommandBuffer();
253         SkASSERT(this->currentCommandBuffer());
254         this->currentCommandBuffer()->begin(this);
255     }
256 }
257 
258 void GrVkGpu::destroyResources() {
259     if (fMainCmdPool) {
260         fMainCmdPool->getPrimaryCommandBuffer()->end(this, /*abandoningBuffer=*/true);
261         fMainCmdPool->close();
262     }
263 
264     // wait for all commands to finish
265     this->finishOutstandingGpuWork();
266 
267     if (fMainCmdPool) {
268         fMainCmdPool->unref();
269         fMainCmdPool = nullptr;
270     }
271 
272     for (int i = 0; i < fSemaphoresToWaitOn.count(); ++i) {
273         fSemaphoresToWaitOn[i]->unref();
274     }
275     fSemaphoresToWaitOn.reset();
276 
277     for (int i = 0; i < fSemaphoresToSignal.count(); ++i) {
278         fSemaphoresToSignal[i]->unref();
279     }
280     fSemaphoresToSignal.reset();
281 
282     fStagingBufferManager.reset();
283 
284     fMSAALoadManager.destroyResources(this);
285 
286     // must call this just before we destroy the command pool and VkDevice
287     fResourceProvider.destroyResources();
288 }
289 
290 GrVkGpu::~GrVkGpu() {
291     if (!fDisconnected) {
292         this->destroyResources();
293     }
294     // We don't delete the memory allocator until the very end of the GrVkGpu lifetime so that
295     // clients can continue to delete backend textures even after a context has been abandoned.
296     fMemoryAllocator.reset();
297     fMemoryAllocatorCacheImage.reset();
298 }
299 
300 
301 void GrVkGpu::disconnect(DisconnectType type) {
302     INHERITED::disconnect(type);
303     if (!fDisconnected) {
304         this->destroyResources();
305 
306         fSemaphoresToWaitOn.reset();
307         fSemaphoresToSignal.reset();
308         fMainCmdBuffer = nullptr;
309         fDisconnected = true;
310     }
311 }
312 
313 GrThreadSafePipelineBuilder* GrVkGpu::pipelineBuilder() {
314     return fResourceProvider.pipelineStateCache();
315 }
316 
317 sk_sp<GrThreadSafePipelineBuilder> GrVkGpu::refPipelineBuilder() {
318     return fResourceProvider.refPipelineStateCache();
319 }
320 
321 ///////////////////////////////////////////////////////////////////////////////
322 
323 GrOpsRenderPass* GrVkGpu::onGetOpsRenderPass(
324         GrRenderTarget* rt,
325         bool useMSAASurface,
326         GrAttachment* stencil,
327         GrSurfaceOrigin origin,
328         const SkIRect& bounds,
329         const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
330         const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo,
331         const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
332         GrXferBarrierFlags renderPassXferBarriers) {
333     if (!fCachedOpsRenderPass) {
334         fCachedOpsRenderPass = std::make_unique<GrVkOpsRenderPass>(this);
335     }
336 
337     // For the given render target and requested render pass features we need to find a compatible
338     // framebuffer to use for the render pass. Technically it is the underlying VkRenderPass that
339     // is compatible, but that is part of the framebuffer that we get here.
340     GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt);
341 
342     SkASSERT(!useMSAASurface ||
343              rt->numSamples() > 1 ||
344              (this->vkCaps().supportsDiscardableMSAAForDMSAA() &&
345               vkRT->resolveAttachment() &&
346               vkRT->resolveAttachment()->supportsInputAttachmentUsage()));
347 
348     // Convert the GrXferBarrierFlags into render pass self dependency flags
349     GrVkRenderPass::SelfDependencyFlags selfDepFlags = GrVkRenderPass::SelfDependencyFlags::kNone;
350     if (renderPassXferBarriers & GrXferBarrierFlags::kBlend) {
351         selfDepFlags |= GrVkRenderPass::SelfDependencyFlags::kForNonCoherentAdvBlend;
352     }
353     if (renderPassXferBarriers & GrXferBarrierFlags::kTexture) {
354         selfDepFlags |= GrVkRenderPass::SelfDependencyFlags::kForInputAttachment;
355     }
356 
357     // Figure out if we need a resolve attachment for this render pass. A resolve attachment is
358     // needed if we are drawing with a discardable msaa attachment. In that case we also need to
359     // update the color load/store ops, since we never want to load or store the msaa color
360     // attachment, but we may need to for the resolve attachment.
361     GrOpsRenderPass::LoadAndStoreInfo localColorInfo = colorInfo;
362     bool withResolve = false;
363     GrVkRenderPass::LoadFromResolve loadFromResolve = GrVkRenderPass::LoadFromResolve::kNo;
364     GrOpsRenderPass::LoadAndStoreInfo resolveInfo{GrLoadOp::kLoad, GrStoreOp::kStore, {}};
365     if (useMSAASurface && this->vkCaps().renderTargetSupportsDiscardableMSAA(vkRT)) {
366         withResolve = true;
367         localColorInfo.fStoreOp = GrStoreOp::kDiscard;
368         if (colorInfo.fLoadOp == GrLoadOp::kLoad) {
369             loadFromResolve = GrVkRenderPass::LoadFromResolve::kLoad;
370             localColorInfo.fLoadOp = GrLoadOp::kDiscard;
371         } else {
372             resolveInfo.fLoadOp = GrLoadOp::kDiscard;
373         }
374     }
375 
376     // Get the framebuffer to use for the render pass
377     sk_sp<GrVkFramebuffer> framebuffer;
378     if (vkRT->wrapsSecondaryCommandBuffer()) {
379         framebuffer = vkRT->externalFramebuffer();
380     } else {
381         auto fb = vkRT->getFramebuffer(withResolve, SkToBool(stencil), selfDepFlags,
382                                        loadFromResolve);
383         framebuffer = sk_ref_sp(fb);
384     }
385     if (!framebuffer) {
386         return nullptr;
387     }
388 
389     if (!fCachedOpsRenderPass->set(rt, std::move(framebuffer), origin, bounds, localColorInfo,
390                                    stencilInfo, resolveInfo, selfDepFlags, loadFromResolve,
391                                    sampledProxies)) {
392         return nullptr;
393     }
394     return fCachedOpsRenderPass.get();
395 }
396 
397 bool GrVkGpu::submitCommandBuffer(SyncQueue sync) {
398     TRACE_EVENT0("skia.gpu", TRACE_FUNC);
399     if (!this->currentCommandBuffer()) {
400         return false;
401     }
402     SkASSERT(!fCachedOpsRenderPass || !fCachedOpsRenderPass->isActive());
403 
404     if (!this->currentCommandBuffer()->hasWork() && kForce_SyncQueue != sync &&
405         !fSemaphoresToSignal.count() && !fSemaphoresToWaitOn.count()) {
406         // We may have added finished procs during the flush call. Since there is no actual work
407         // we are not submitting the command buffer and may never come back around to submit it.
408         // Thus we call all current finished procs manually, since the work has technically
409         // finished.
410         this->currentCommandBuffer()->callFinishedProcs();
411         SkASSERT(fDrawables.empty());
412         fResourceProvider.checkCommandBuffers();
413         return true;
414     }
415 
416     fMainCmdBuffer->end(this);
417     SkASSERT(fMainCmdPool);
418     fMainCmdPool->close();
419     bool didSubmit = fMainCmdBuffer->submitToQueue(this, fQueue, fSemaphoresToSignal,
420                                                    fSemaphoresToWaitOn);
421 
422     if (didSubmit && sync == kForce_SyncQueue) {
423         fMainCmdBuffer->forceSync(this);
424     }
425 
426     // We must delete any drawables that had to wait until submit to be destroyed.
427     fDrawables.reset();
428 
429     // If we didn't submit the command buffer then we did not wait on any semaphores. We will
430     // continue to hold onto these semaphores and wait on them during the next command buffer
431     // submission.
432     if (didSubmit) {
433         for (int i = 0; i < fSemaphoresToWaitOn.count(); ++i) {
434             fSemaphoresToWaitOn[i]->unref();
435         }
436         fSemaphoresToWaitOn.reset();
437     }
438 
439     // Even if we did not submit the command buffer, we drop all the signal semaphores since we will
440     // not try to recover the work that wasn't submitted and instead just drop it all. The client
441     // will be notified that the semaphores were not submitted so that they will not try to wait on
442     // them.
443     for (int i = 0; i < fSemaphoresToSignal.count(); ++i) {
444         fSemaphoresToSignal[i]->unref();
445     }
446     fSemaphoresToSignal.reset();
447 
448     // Release old command pool and create a new one
449     fMainCmdPool->unref();
450     fMainCmdPool = fResourceProvider.findOrCreateCommandPool();
451     if (fMainCmdPool) {
452         fMainCmdBuffer = fMainCmdPool->getPrimaryCommandBuffer();
453         SkASSERT(fMainCmdBuffer);
454         fMainCmdBuffer->begin(this);
455     } else {
456         fMainCmdBuffer = nullptr;
457     }
458     // We must wait to call checkCommandBuffers until after we get a new command buffer. The
459     // checkCommandBuffers may trigger a releaseProc which may cause us to insert a barrier for a
460     // released GrVkImage. That barrier needs to be put into a new command buffer and not the old
461     // one that was just submitted.
462     fResourceProvider.checkCommandBuffers();
463     return didSubmit;
464 }
465 
466 ///////////////////////////////////////////////////////////////////////////////
467 sk_sp<GrGpuBuffer> GrVkGpu::onCreateBuffer(size_t size, GrGpuBufferType type,
468                                            GrAccessPattern accessPattern, const void* data) {
469 #ifdef SK_DEBUG
470     switch (type) {
471         case GrGpuBufferType::kVertex:
472         case GrGpuBufferType::kIndex:
473         case GrGpuBufferType::kDrawIndirect:
474             SkASSERT(accessPattern == kDynamic_GrAccessPattern ||
475                      accessPattern == kStatic_GrAccessPattern);
476             break;
477         case GrGpuBufferType::kXferCpuToGpu:
478             SkASSERT(accessPattern == kDynamic_GrAccessPattern);
479             break;
480         case GrGpuBufferType::kXferGpuToCpu:
481             SkASSERT(accessPattern == kDynamic_GrAccessPattern ||
482                      accessPattern == kStream_GrAccessPattern);
483             break;
484         case GrGpuBufferType::kUniform:
485             SkASSERT(accessPattern == kDynamic_GrAccessPattern);
486             break;
487     }
488 #endif
489     sk_sp<GrGpuBuffer> buff = GrVkBuffer::Make(this, size, type, accessPattern);
490 
491     if (data && buff) {
492         buff->updateData(data, size);
493     }
494     return buff;
495 }
496 
497 bool GrVkGpu::onWritePixels(GrSurface* surface,
498                             SkIRect rect,
499                             GrColorType surfaceColorType,
500                             GrColorType srcColorType,
501                             const GrMipLevel texels[],
502                             int mipLevelCount,
503                             bool prepForTexSampling) {
504     GrVkTexture* texture = static_cast<GrVkTexture*>(surface->asTexture());
505     if (!texture) {
506         return false;
507     }
508     GrVkImage* texImage = texture->textureImage();
509 
510     // Make sure we have at least the base level
511     if (!mipLevelCount || !texels[0].fPixels) {
512         return false;
513     }
514 
515     SkASSERT(!GrVkFormatIsCompressed(texImage->imageFormat()));
516     bool success = false;
517     bool linearTiling = texImage->isLinearTiled();
518     if (linearTiling) {
519         if (mipLevelCount > 1) {
520             SkDebugf("Can't upload mipmap data to linear tiled texture");
521             return false;
522         }
523         if (VK_IMAGE_LAYOUT_PREINITIALIZED != texImage->currentLayout()) {
524             // Need to change the layout to general in order to perform a host write
525             texImage->setImageLayout(this,
526                                      VK_IMAGE_LAYOUT_GENERAL,
527                                      VK_ACCESS_HOST_WRITE_BIT,
528                                      VK_PIPELINE_STAGE_HOST_BIT,
529                                      false);
530             if (!this->submitCommandBuffer(kForce_SyncQueue)) {
531                 return false;
532             }
533         }
534         success = this->uploadTexDataLinear(texImage,
535                                             rect,
536                                             srcColorType,
537                                             texels[0].fPixels,
538                                             texels[0].fRowBytes);
539     } else {
540         SkASSERT(mipLevelCount <= (int)texImage->mipLevels());
541         success = this->uploadTexDataOptimal(texImage,
542                                              rect,
543                                              srcColorType,
544                                              texels,
545                                              mipLevelCount);
546         if (1 == mipLevelCount) {
547             texture->markMipmapsDirty();
548         }
549     }
550 
551     if (prepForTexSampling) {
552         texImage->setImageLayout(this,
553                                       VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
554                                       VK_ACCESS_SHADER_READ_BIT,
555                                       VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
556                                       false);
557     }
558 
559     return success;
560 }
561 
562 bool GrVkGpu::onTransferPixelsTo(GrTexture* texture,
563                                  SkIRect rect,
564                                  GrColorType surfaceColorType,
565                                  GrColorType bufferColorType,
566                                  sk_sp<GrGpuBuffer> transferBuffer,
567                                  size_t bufferOffset,
568                                  size_t rowBytes) {
569     if (!this->currentCommandBuffer()) {
570         return false;
571     }
572 
573     size_t bpp = GrColorTypeBytesPerPixel(bufferColorType);
574     if (GrBackendFormatBytesPerPixel(texture->backendFormat()) != bpp) {
575         return false;
576     }
577 
578     // Vulkan only supports offsets that are both 4-byte aligned and aligned to a pixel.
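    // e.g. for a 2-byte-per-pixel format the offset must be a multiple of 4, and for an
    // 8-byte-per-pixel format it must be a multiple of 8.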
579     if ((bufferOffset & 0x3) || (bufferOffset % bpp)) {
580         return false;
581     }
582     GrVkTexture* tex = static_cast<GrVkTexture*>(texture);
583     if (!tex) {
584         return false;
585     }
586     GrVkImage* vkImage = tex->textureImage();
587     VkFormat format = vkImage->imageFormat();
588 
589     // Can't transfer compressed data
590     SkASSERT(!GrVkFormatIsCompressed(format));
591 
592     if (!transferBuffer) {
593         return false;
594     }
595 
596     if (bufferColorType != this->vkCaps().transferColorType(format, surfaceColorType)) {
597         return false;
598     }
599     SkASSERT(GrVkFormatBytesPerBlock(format) == GrColorTypeBytesPerPixel(bufferColorType));
600 
601     SkASSERT(SkIRect::MakeSize(texture->dimensions()).contains(rect));
602 
603     // Set up copy region
604     VkBufferImageCopy region;
605     memset(&region, 0, sizeof(VkBufferImageCopy));
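    // Note: bufferRowLength and bufferImageHeight are measured in texels rather than bytes,
    // and a bufferImageHeight of 0 means rows are tightly packed to imageExtent.height.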
606     region.bufferOffset = bufferOffset;
607     region.bufferRowLength = (uint32_t)(rowBytes/bpp);
608     region.bufferImageHeight = 0;
609     region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
610     region.imageOffset = { rect.left(), rect.top(), 0 };
611     region.imageExtent = { (uint32_t)rect.width(), (uint32_t)rect.height(), 1 };
612 
613     // Change layout of our target so it can be copied to
614     vkImage->setImageLayout(this,
615                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
616                             VK_ACCESS_TRANSFER_WRITE_BIT,
617                             VK_PIPELINE_STAGE_TRANSFER_BIT,
618                             false);
619 
620     const GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(transferBuffer.get());
621 
622     // Copy the buffer to the image.
623     this->currentCommandBuffer()->copyBufferToImage(this,
624                                                     vkBuffer->vkBuffer(),
625                                                     vkImage,
626                                                     VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
627                                                     1,
628                                                     &region);
629     this->currentCommandBuffer()->addGrBuffer(std::move(transferBuffer));
630 
631     tex->markMipmapsDirty();
632     return true;
633 }
634 
635 bool GrVkGpu::onTransferPixelsFrom(GrSurface* surface,
636                                    SkIRect rect,
637                                    GrColorType surfaceColorType,
638                                    GrColorType bufferColorType,
639                                    sk_sp<GrGpuBuffer> transferBuffer,
640                                    size_t offset) {
641     if (!this->currentCommandBuffer()) {
642         return false;
643     }
644     SkASSERT(surface);
645     SkASSERT(transferBuffer);
646     if (fProtectedContext == GrProtected::kYes) {
647         return false;
648     }
649 
650     GrVkImage* srcImage;
651     if (GrVkRenderTarget* rt = static_cast<GrVkRenderTarget*>(surface->asRenderTarget())) {
652         // Reading from render targets that wrap a secondary command buffer is not allowed since
653         // it would require us to know the VkImage, which we don't have, as well as need us to
654         // stop and start the VkRenderPass which we don't have access to.
655         if (rt->wrapsSecondaryCommandBuffer()) {
656             return false;
657         }
658         if (!rt->nonMSAAAttachment()) {
659             return false;
660         }
661         srcImage = rt->nonMSAAAttachment();
662     } else {
663         SkASSERT(surface->asTexture());
664         srcImage = static_cast<GrVkTexture*>(surface->asTexture())->textureImage();
665     }
666 
667     VkFormat format = srcImage->imageFormat();
668     if (bufferColorType != this->vkCaps().transferColorType(format, surfaceColorType)) {
669         return false;
670     }
671     SkASSERT(GrVkFormatBytesPerBlock(format) == GrColorTypeBytesPerPixel(bufferColorType));
672 
673     // Set up copy region
674     VkBufferImageCopy region;
675     memset(&region, 0, sizeof(VkBufferImageCopy));
676     region.bufferOffset = offset;
677     region.bufferRowLength = rect.width();
678     region.bufferImageHeight = 0;
679     region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
680     region.imageOffset = {rect.left(), rect.top(), 0};
681     region.imageExtent = {(uint32_t)rect.width(), (uint32_t)rect.height(), 1};
682 
683     srcImage->setImageLayout(this,
684                              VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
685                              VK_ACCESS_TRANSFER_READ_BIT,
686                              VK_PIPELINE_STAGE_TRANSFER_BIT,
687                              false);
688 
689     this->currentCommandBuffer()->copyImageToBuffer(this, srcImage,
690                                                     VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
691                                                     transferBuffer, 1, &region);
692 
693     GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(transferBuffer.get());
694     // Make sure the copy to buffer has finished.
695     vkBuffer->addMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT,
696                                VK_ACCESS_HOST_READ_BIT,
697                                VK_PIPELINE_STAGE_TRANSFER_BIT,
698                                VK_PIPELINE_STAGE_HOST_BIT,
699                                false);
700     return true;
701 }
702 
703 void GrVkGpu::resolveImage(GrSurface* dst, GrVkRenderTarget* src, const SkIRect& srcRect,
704                            const SkIPoint& dstPoint) {
705     if (!this->currentCommandBuffer()) {
706         return;
707     }
708 
709     SkASSERT(dst);
710     SkASSERT(src && src->colorAttachment() && src->colorAttachment()->numSamples() > 1);
711 
712     VkImageResolve resolveInfo;
713     resolveInfo.srcSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
714     resolveInfo.srcOffset = {srcRect.fLeft, srcRect.fTop, 0};
715     resolveInfo.dstSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
716     resolveInfo.dstOffset = {dstPoint.fX, dstPoint.fY, 0};
717     resolveInfo.extent = {(uint32_t)srcRect.width(), (uint32_t)srcRect.height(), 1};
718 
719     GrVkImage* dstImage;
720     GrRenderTarget* dstRT = dst->asRenderTarget();
721     GrTexture* dstTex = dst->asTexture();
722     if (dstTex) {
723         dstImage = static_cast<GrVkTexture*>(dstTex)->textureImage();
724     } else {
725         SkASSERT(dst->asRenderTarget());
726         dstImage = static_cast<GrVkRenderTarget*>(dstRT)->nonMSAAAttachment();
727     }
728     SkASSERT(dstImage);
729 
730     dstImage->setImageLayout(this,
731                              VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
732                              VK_ACCESS_TRANSFER_WRITE_BIT,
733                              VK_PIPELINE_STAGE_TRANSFER_BIT,
734                              false);
735 
736     src->colorAttachment()->setImageLayout(this,
737                                            VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
738                                            VK_ACCESS_TRANSFER_READ_BIT,
739                                            VK_PIPELINE_STAGE_TRANSFER_BIT,
740                                            false);
741     this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(src->colorAttachment()));
742     this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(dst));
743     this->currentCommandBuffer()->resolveImage(this, *src->colorAttachment(), *dstImage, 1,
744                                                &resolveInfo);
745 }
746 
747 void GrVkGpu::onResolveRenderTarget(GrRenderTarget* target, const SkIRect& resolveRect) {
748     SkASSERT(target->numSamples() > 1);
749     GrVkRenderTarget* rt = static_cast<GrVkRenderTarget*>(target);
750     SkASSERT(rt->colorAttachmentView() && rt->resolveAttachmentView());
751 
752     if (this->vkCaps().renderTargetSupportsDiscardableMSAA(rt)) {
753         // We would have already resolved the RT during the render pass.
754         return;
755     }
756 
757     this->resolveImage(target, rt, resolveRect,
758                        SkIPoint::Make(resolveRect.x(), resolveRect.y()));
759 }
760 
761 bool GrVkGpu::uploadTexDataLinear(GrVkImage* texImage,
762                                   SkIRect rect,
763                                   GrColorType dataColorType,
764                                   const void* data,
765                                   size_t rowBytes) {
766     SkASSERT(data);
767     SkASSERT(texImage->isLinearTiled());
768 
769     SkASSERT(SkIRect::MakeSize(texImage->dimensions()).contains(rect));
770 
771     size_t bpp = GrColorTypeBytesPerPixel(dataColorType);
772     size_t trimRowBytes = rect.width() * bpp;
773 
774     SkASSERT(VK_IMAGE_LAYOUT_PREINITIALIZED == texImage->currentLayout() ||
775              VK_IMAGE_LAYOUT_GENERAL == texImage->currentLayout());
776     const VkImageSubresource subres = {
777         VK_IMAGE_ASPECT_COLOR_BIT,
778         0,  // mipLevel
779         0,  // arraySlice
780     };
781     VkSubresourceLayout layout;
782 
783     const GrVkInterface* interface = this->vkInterface();
784 
785     GR_VK_CALL(interface, GetImageSubresourceLayout(fDevice,
786                                                     texImage->image(),
787                                                     &subres,
788                                                     &layout));
789 
790     const GrVkAlloc& alloc = texImage->alloc();
791     if (VK_NULL_HANDLE == alloc.fMemory) {
792         return false;
793     }
794     VkDeviceSize offset = rect.top()*layout.rowPitch + rect.left()*bpp;
795     VkDeviceSize size = rect.height()*layout.rowPitch;
796     SkASSERT(size + offset <= alloc.fSize);
797     void* mapPtr = GrVkMemory::MapAlloc(this, alloc);
798     if (!mapPtr) {
799         return false;
800     }
801     mapPtr = reinterpret_cast<char*>(mapPtr) + offset;
802 
803     SkRectMemcpy(mapPtr,
804                  static_cast<size_t>(layout.rowPitch),
805                  data,
806                  rowBytes,
807                  trimRowBytes,
808                  rect.height());
809 
810     GrVkMemory::FlushMappedAlloc(this, alloc, offset, size);
811     GrVkMemory::UnmapAlloc(this, alloc);
812 
813     return true;
814 }
815 
816 // This fills in the 'regions' vector in preparation for copying a buffer to an image.
817 // 'individualMipOffsets' is filled in as a side-effect.
818 static size_t fill_in_compressed_regions(GrStagingBufferManager* stagingBufferManager,
819                                          SkTArray<VkBufferImageCopy>* regions,
820                                          SkTArray<size_t>* individualMipOffsets,
821                                          GrStagingBufferManager::Slice* slice,
822                                          SkImage::CompressionType compression,
823                                          VkFormat vkFormat,
824                                          SkISize dimensions,
825                                          GrMipmapped mipmapped) {
826     SkASSERT(compression != SkImage::CompressionType::kNone);
827     int numMipLevels = 1;
828     if (mipmapped == GrMipmapped::kYes) {
829         numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1;
830     }
831 
832     regions->reserve_back(numMipLevels);
833     individualMipOffsets->reserve_back(numMipLevels);
834 
835     size_t bytesPerBlock = GrVkFormatBytesPerBlock(vkFormat);
836 
837     size_t bufferSize = SkCompressedDataSize(compression,
838                                              dimensions,
839                                              individualMipOffsets,
840                                              mipmapped == GrMipmapped::kYes);
841     SkASSERT(individualMipOffsets->count() == numMipLevels);
842 
843     // Get a staging buffer slice to hold our mip data.
844     // Vulkan requires buffer offsets to be aligned to a multiple of both the texel size and 4.
845     size_t alignment = bytesPerBlock;
846     switch (alignment & 0b11) {
847         case 0:                     break;   // alignment is already a multiple of 4.
848         case 2:     alignment *= 2; break;   // alignment is a multiple of 2 but not 4.
849         default:    alignment *= 4; break;   // alignment is not a multiple of 2.
850     }
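    // 'alignment' is now the least common multiple of bytesPerBlock and 4, e.g. an 8-byte
    // ETC/BC1 block keeps 8 and a 16-byte ASTC block keeps 16.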
851     *slice = stagingBufferManager->allocateStagingBufferSlice(bufferSize, alignment);
852     if (!slice->fBuffer) {
853         return 0;
854     }
855 
856     for (int i = 0; i < numMipLevels; ++i) {
857         VkBufferImageCopy& region = regions->push_back();
858         memset(&region, 0, sizeof(VkBufferImageCopy));
859         region.bufferOffset = slice->fOffset + (*individualMipOffsets)[i];
860         SkISize revisedDimensions = GrCompressedDimensions(compression, dimensions);
861         region.bufferRowLength = revisedDimensions.width();
862         region.bufferImageHeight = revisedDimensions.height();
863         region.imageSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, SkToU32(i), 0, 1};
864         region.imageOffset = {0, 0, 0};
865         region.imageExtent = {SkToU32(dimensions.width()),
866                               SkToU32(dimensions.height()), 1};
867 
868         dimensions = {std::max(1, dimensions.width() /2),
869                       std::max(1, dimensions.height()/2)};
870     }
871 
872     return bufferSize;
873 }
874 
875 static size_t fill_in_compressed_regions(SkTArray<VkBufferImageCopy>* regions,
876                                          SkTArray<size_t>* individualMipOffsets,
877                                          SkImage::CompressionType compression,
878                                          SkISize dimensions,
879                                          GrMipmapped mipmapped) {
880     SkASSERT(regions);
881     SkASSERT(individualMipOffsets);
882     SkASSERT(compression != SkImage::CompressionType::kNone);
883 
884     int mipmapLevelCount = 1;
885     if (mipmapped == GrMipmapped::kYes) {
886         mipmapLevelCount = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1;
887     }
888     regions->reserve_back(mipmapLevelCount);
889     individualMipOffsets->reserve_back(mipmapLevelCount);
890 
891     size_t bufferSize = SkCompressedDataSize(compression,
892                                              dimensions,
893                                              individualMipOffsets,
894                                              mipmapped == GrMipmapped::kYes);
895     SkASSERT(individualMipOffsets->count() == mipmapLevelCount);
896 
897     for (int i = 0; i < mipmapLevelCount; ++i) {
898         VkBufferImageCopy &region = regions->push_back();
899         region.bufferOffset = (*individualMipOffsets)[i];
900         if (compression == SkImage::CompressionType::kASTC_RGBA8_4x4 ||
901             compression == SkImage::CompressionType::kASTC_RGBA8_6x6 ||
902             compression == SkImage::CompressionType::kASTC_RGBA8_8x8) {
903             region.bufferOffset += ASTC_HEADER_SIZE;
904         }
905         SkISize compressedDimensions = GrCompressedDimensions(compression, dimensions);
906         region.bufferRowLength = compressedDimensions.width();
907         region.bufferImageHeight = compressedDimensions.height();
908         region.imageSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, SkToU32(i), 0, 1};
909         region.imageOffset = {0, 0, 0};
910         region.imageExtent.width = SkToU32(dimensions.width());
911         region.imageExtent.height = SkToU32(dimensions.height());
912         region.imageExtent.depth = 1;
913 
914         dimensions = {std::max(1, dimensions.width() / 2),
915                       std::max(1, dimensions.height() / 2)};
916     }
917 
918     return bufferSize;
919 }
920 
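// Current steady_clock time in microseconds, truncated to int; used only to time operations
// for the SKIA_OHOS trace points below.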
921 static int get_current_time() {
922     return static_cast<int>(
923         std::chrono::duration_cast<std::chrono::microseconds>(
924         std::chrono::steady_clock::now().time_since_epoch()).count());
925 }
926 
927 bool GrVkGpu::uploadTexDataOptimal(GrVkImage* texImage,
928                                    SkIRect rect,
929                                    GrColorType dataColorType,
930                                    const GrMipLevel texels[],
931                                    int mipLevelCount) {
932     if (!this->currentCommandBuffer()) {
933         return false;
934     }
935 
936     SkASSERT(!texImage->isLinearTiled());
937     // The assumption is either that we have no mipmaps, or that our rect is the entire texture
938     SkASSERT(mipLevelCount == 1 || rect == SkIRect::MakeSize(texImage->dimensions()));
939 
940     // We assume that if the texture has mip levels, we either upload to all the levels or just the
941     // first.
942     SkASSERT(mipLevelCount == 1 || mipLevelCount == (int)texImage->mipLevels());
943 
944     SkASSERT(!rect.isEmpty());
945 
946     SkASSERT(this->vkCaps().surfaceSupportsWritePixels(texImage));
947 
948     SkASSERT(this->vkCaps().isVkFormatTexturable(texImage->imageFormat()));
949     size_t bpp = GrColorTypeBytesPerPixel(dataColorType);
950 
951     // 'texels' is const, but we may need to adjust each level's fPixels pointer (based on
952     // the upload rect) or its fRowBytes.
953     // Because of this we make a non-const shallow copy of texels.
954     SkAutoTArray<GrMipLevel> texelsShallowCopy(mipLevelCount);
955     std::copy_n(texels, mipLevelCount, texelsShallowCopy.get());
956 
957     SkTArray<size_t> individualMipOffsets;
958     size_t combinedBufferSize;
959     if (mipLevelCount > 1) {
960         combinedBufferSize = GrComputeTightCombinedBufferSize(bpp,
961                                                               rect.size(),
962                                                               &individualMipOffsets,
963                                                               mipLevelCount);
964     } else {
965         SkASSERT(texelsShallowCopy[0].fPixels && texelsShallowCopy[0].fRowBytes);
966         combinedBufferSize = rect.width()*rect.height()*bpp;
967         individualMipOffsets.push_back(0);
968     }
969     SkASSERT(combinedBufferSize);
970 
971     // Get a staging buffer slice to hold our mip data.
972     // Vulkan requires buffer offsets to be aligned to a multiple of both the texel size and 4.
973     size_t alignment = bpp;
974     switch (alignment & 0b11) {
975         case 0:                     break;   // alignment is already a multiple of 4.
976         case 2:     alignment *= 2; break;   // alignment is a multiple of 2 but not 4.
977         default:    alignment *= 4; break;   // alignment is not a multiple of 2.
978     }
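    // 'alignment' is now the least common multiple of bpp and 4 (e.g. 1 -> 4, 2 -> 4, 8 -> 8).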
979     GrStagingBufferManager::Slice slice =
980             fStagingBufferManager.allocateStagingBufferSlice(combinedBufferSize, alignment);
981     if (!slice.fBuffer) {
982         return false;
983     }
984 
985     int uploadLeft = rect.left();
986     int uploadTop = rect.top();
987 
988     char* buffer = (char*) slice.fOffsetMapPtr;
989     SkTArray<VkBufferImageCopy> regions(mipLevelCount);
990 
991     int currentWidth = rect.width();
992     int currentHeight = rect.height();
993 #ifdef SKIA_OHOS
994     bool isTagEnabled = IsTagEnabled(HITRACE_TAG_GRAPHIC_AGP);
995 #endif
996     for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
997         if (texelsShallowCopy[currentMipLevel].fPixels) {
998             const size_t trimRowBytes = currentWidth * bpp;
999             const size_t rowBytes = texelsShallowCopy[currentMipLevel].fRowBytes;
1000 
1001             // copy data into the buffer, skipping the trailing bytes
1002             char* dst = buffer + individualMipOffsets[currentMipLevel];
1003             const char* src = (const char*)texelsShallowCopy[currentMipLevel].fPixels;
1004 #ifdef SKIA_OHOS
1005             int memStartTimestamp = 0;
1006             int memEndTimestamp = 0;
1007             if (UNLIKELY(isTagEnabled)) {
1008                 memStartTimestamp = get_current_time();
1009             }
1010 #endif
1011             SkRectMemcpy(dst, trimRowBytes, src, rowBytes, trimRowBytes, currentHeight);
1012 #ifdef SKIA_OHOS
1013             if (UNLIKELY(isTagEnabled)) {
1014                 memEndTimestamp = get_current_time();
1015                 int duration = memEndTimestamp - memStartTimestamp;
1016                 if (duration > TRACE_LIMIT_TIME) {
1017                     HITRACE_OHOS_NAME_FMT_ALWAYS("uploadTexDataOptimal SkRectMemcpy: %zu Time: %d µs bpp = %zu "
1018                         "width: %d height: %d",
1019                         trimRowBytes * currentHeight, duration, bpp, currentWidth, currentHeight);
1020                 }
1021             }
1022 #endif
1023 
1024             VkBufferImageCopy& region = regions.push_back();
1025             memset(&region, 0, sizeof(VkBufferImageCopy));
1026             region.bufferOffset = slice.fOffset + individualMipOffsets[currentMipLevel];
1027             region.bufferRowLength = currentWidth;
1028             region.bufferImageHeight = currentHeight;
1029             region.imageSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, SkToU32(currentMipLevel), 0, 1};
1030             region.imageOffset = {uploadLeft, uploadTop, 0};
1031             region.imageExtent = {(uint32_t)currentWidth, (uint32_t)currentHeight, 1};
1032         }
1033 
1034         currentWidth  = std::max(1,  currentWidth/2);
1035         currentHeight = std::max(1, currentHeight/2);
1036     }
1037 
1038     // Change layout of our target so it can be copied to
1039     texImage->setImageLayout(this,
1040                              VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1041                              VK_ACCESS_TRANSFER_WRITE_BIT,
1042                              VK_PIPELINE_STAGE_TRANSFER_BIT,
1043                              false);
1044 
1045     // Copy the buffer to the image. This call takes the raw VkBuffer instead of a GrGpuBuffer
1046     // because we don't need the command buffer to ref the buffer here. This is because the
1047     // buffer is coming from the staging manager and the staging manager will make sure the
1048     // command buffer has a ref on the buffer. This avoids having to add and remove a ref for
1049     // every upload in the frame.
1050     GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(slice.fBuffer);
1051 #ifdef SKIA_OHOS
1052     int copyStartTimestamp = 0;
1053     int copyEndTimestamp = 0;
1054     if (UNLIKELY(isTagEnabled)) {
1055         copyStartTimestamp = get_current_time();
1056     }
1057 #endif
1058     this->currentCommandBuffer()->copyBufferToImage(this,
1059                                                     vkBuffer->vkBuffer(),
1060                                                     texImage,
1061                                                     VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1062                                                     regions.count(),
1063                                                     regions.begin());
1064 #ifdef SKIA_OHOS
1065     if (UNLIKELY(isTagEnabled)) {
1066         copyEndTimestamp = get_current_time();
1067         int duration = copyEndTimestamp - copyStartTimestamp;
1068         if (duration > TRACE_LIMIT_TIME) {
1069             HITRACE_OHOS_NAME_FMT_ALWAYS("uploadTexDataOptimal copyBufferToImage Time: %d µs width: %d height: %d",
1070                 duration, rect.width(), rect.height());
1071         }
1072     }
1073 #endif
1074     return true;
1075 }
1076 
1077 // It's probably possible to roll this into uploadTexDataOptimal,
1078 // but for now it's easier to maintain as a separate entity.
1079 bool GrVkGpu::uploadTexDataCompressed(GrVkImage* uploadTexture,
1080                                       SkImage::CompressionType compression, VkFormat vkFormat,
1081                                       SkISize dimensions, GrMipmapped mipMapped,
1082                                       const void* data, size_t dataSize) {
1083     if (!this->currentCommandBuffer()) {
1084         return false;
1085     }
1086     SkASSERT(data);
1087     SkASSERT(!uploadTexture->isLinearTiled());
1088     // For now the assumption is that our rect is the entire texture.
1089     // Compressed textures are read-only so this should be a reasonable assumption.
1090     SkASSERT(dimensions.fWidth == uploadTexture->width() &&
1091              dimensions.fHeight == uploadTexture->height());
1092 
1093     if (dimensions.fWidth == 0 || dimensions.fHeight  == 0) {
1094         return false;
1095     }
1096 
1097     SkASSERT(uploadTexture->imageFormat() == vkFormat);
1098     SkASSERT(this->vkCaps().isVkFormatTexturable(vkFormat));
1099 
1100 
1101     GrStagingBufferManager::Slice slice;
1102     SkTArray<VkBufferImageCopy> regions;
1103     SkTArray<size_t> individualMipOffsets;
1104     SkDEBUGCODE(size_t combinedBufferSize =) fill_in_compressed_regions(&fStagingBufferManager,
1105                                                                         &regions,
1106                                                                         &individualMipOffsets,
1107                                                                         &slice,
1108                                                                         compression,
1109                                                                         vkFormat,
1110                                                                         dimensions,
1111                                                                         mipMapped);
1112     if (!slice.fBuffer) {
1113         return false;
1114     }
1115     SkASSERT(dataSize == combinedBufferSize);
1116 
1117     {
1118         char* buffer = (char*)slice.fOffsetMapPtr;
1119         memcpy(buffer, data, dataSize);
1120     }
1121 
1122     // Change layout of our target so it can be copied to
1123     uploadTexture->setImageLayout(this,
1124                                   VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1125                                   VK_ACCESS_TRANSFER_WRITE_BIT,
1126                                   VK_PIPELINE_STAGE_TRANSFER_BIT,
1127                                   false);
1128 
1129     // Copy the buffer to the image. This call takes the raw VkBuffer instead of a GrGpuBuffer
1130     // because we don't need the command buffer to ref the buffer here. This is because the
1131     // buffer is coming from the staging manager and the staging manager will make sure the
1132     // command buffer has a ref on the buffer. This avoids having to add and remove a ref for
1133     // every upload in the frame.
1134     GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(slice.fBuffer);
1135     this->currentCommandBuffer()->copyBufferToImage(this,
1136                                                     vkBuffer->vkBuffer(),
1137                                                     uploadTexture,
1138                                                     VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1139                                                     regions.count(),
1140                                                     regions.begin());
1141 
1142     return true;
1143 }
1144 
1145 bool GrVkGpu::uploadTexDataCompressed(GrVkImage* uploadTexture,
1146                                       SkImage::CompressionType compression, VkFormat vkFormat,
1147                                       SkISize dimensions, GrMipmapped mipMapped,
1148                                       OH_NativeBuffer* nativeBuffer, size_t bufferSize) {
1149     if (!this->currentCommandBuffer()) {
1150         return false;
1151     }
1152     SkASSERT(uploadTexture);
1153     SkASSERT(nativeBuffer);
1154     SkASSERT(!uploadTexture->isLinearTiled());
1155 
1156     if (dimensions.width() == 0 || dimensions.height() == 0) {
1157         return false;
1158     }
1159     SkASSERT(dimensions.width() == uploadTexture->width() && dimensions.height() == uploadTexture->height());
1160 
1161     SkASSERT(uploadTexture->imageFormat() == vkFormat);
1162     SkASSERT(this->vkCaps().isVkFormatTexturable(vkFormat));
1163 
1164     SkTArray<VkBufferImageCopy> regions;
1165     SkTArray<size_t> individualMipOffsets;
1166     SkDEBUGCODE(size_t combinedBufferSize =) fill_in_compressed_regions(&regions, &individualMipOffsets,
1167                                                                         compression, dimensions, mipMapped);
1168     SkASSERT(bufferSize == combinedBufferSize);
1169 
1170     // Import external memory.
1171     sk_sp<GrVkBuffer> vkBuffer = GrVkBuffer::MakeFromOHNativeBuffer(this, nativeBuffer, bufferSize,
1172                                                                     GrGpuBufferType::kXferCpuToGpu,
1173                                                                     kDynamic_GrAccessPattern);
1174 
1175     if (vkBuffer == nullptr) {
1176         SkDebugf("Can't make VkBuffer from native buffer\n");
1177         return false;
1178     }
1179 
1180     // Change image layout so it can be copied to.
1181     uploadTexture->setImageLayout(this, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1182                                   VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, false);
1183 
1184     // Copy the buffer to the image.
1185     this->currentCommandBuffer()->copyBufferToImage(this, vkBuffer->vkBuffer(), uploadTexture,
1186                                                     VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1187                                                     regions.count(), regions.begin());
1188     this->takeOwnershipOfBuffer(std::move(vkBuffer));
1189 
1190     return true;
1191 }
1192 
1193 ////////////////////////////////////////////////////////////////////////////////
1194 // TODO: make this take a GrMipmapped
1195 sk_sp<GrTexture> GrVkGpu::onCreateTexture(SkISize dimensions,
1196                                           const GrBackendFormat& format,
1197                                           GrRenderable renderable,
1198                                           int renderTargetSampleCnt,
1199                                           SkBudgeted budgeted,
1200                                           GrProtected isProtected,
1201                                           int mipLevelCount,
1202                                           uint32_t levelClearMask) {
1203     VkFormat pixelFormat;
1204     SkAssertResult(format.asVkFormat(&pixelFormat));
1205     SkASSERT(!GrVkFormatIsCompressed(pixelFormat));
1206     SkASSERT(mipLevelCount > 0);
1207 
1208     HITRACE_OHOS_NAME_FMT_ALWAYS("onCreateTexture width = %d, height = %d",
1209         dimensions.width(), dimensions.height());
1210     GrMipmapStatus mipmapStatus =
1211             mipLevelCount > 1 ? GrMipmapStatus::kDirty : GrMipmapStatus::kNotAllocated;
1212 
1213     sk_sp<GrVkTexture> tex;
1214     if (renderable == GrRenderable::kYes) {
1215         tex = GrVkTextureRenderTarget::MakeNewTextureRenderTarget(
1216                 this, budgeted, dimensions, pixelFormat, mipLevelCount, renderTargetSampleCnt,
1217                 mipmapStatus, isProtected);
1218     } else {
1219         tex = GrVkTexture::MakeNewTexture(this, budgeted, dimensions, pixelFormat,
1220                                           mipLevelCount, isProtected, mipmapStatus);
1221     }
1222 
1223     if (!tex) {
1224         return nullptr;
1225     }
1226 
1227     if (levelClearMask) {
1228         if (!this->currentCommandBuffer()) {
1229             return nullptr;
1230         }
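        // Coalesce consecutive levels from the clear mask into as few VkImageSubresourceRanges
        // as possible so the whole clear can be issued with a single clearColorImage call.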
1231         SkSTArray<1, VkImageSubresourceRange> ranges;
1232         bool inRange = false;
1233         GrVkImage* texImage = tex->textureImage();
1234         for (uint32_t i = 0; i < texImage->mipLevels(); ++i) {
1235             if (levelClearMask & (1U << i)) {
1236                 if (inRange) {
1237                     ranges.back().levelCount++;
1238                 } else {
1239                     auto& range = ranges.push_back();
1240                     range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
1241                     range.baseArrayLayer = 0;
1242                     range.baseMipLevel = i;
1243                     range.layerCount = 1;
1244                     range.levelCount = 1;
1245                     inRange = true;
1246                 }
1247             } else if (inRange) {
1248                 inRange = false;
1249             }
1250         }
1251         SkASSERT(!ranges.empty());
1252         static constexpr VkClearColorValue kZeroClearColor = {};
1253         texImage->setImageLayout(this, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1254                             VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, false);
1255         this->currentCommandBuffer()->clearColorImage(this, texImage, &kZeroClearColor,
1256                                                       ranges.count(), ranges.begin());
1257     }
1258     return std::move(tex);
1259 }
1260 
1261 sk_sp<GrTexture> GrVkGpu::onCreateCompressedTexture(SkISize dimensions,
1262                                                     const GrBackendFormat& format,
1263                                                     SkBudgeted budgeted,
1264                                                     GrMipmapped mipMapped,
1265                                                     GrProtected isProtected,
1266                                                     const void* data, size_t dataSize) {
1267     VkFormat pixelFormat;
1268     SkAssertResult(format.asVkFormat(&pixelFormat));
1269     SkASSERT(GrVkFormatIsCompressed(pixelFormat));
1270 
1271     int numMipLevels = 1;
1272     if (mipMapped == GrMipmapped::kYes) {
1273         numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height())+1;
1274     }
1275 
1276     GrMipmapStatus mipmapStatus = (mipMapped == GrMipmapped::kYes) ? GrMipmapStatus::kValid
1277                                                                    : GrMipmapStatus::kNotAllocated;
1278 
1279     auto tex = GrVkTexture::MakeNewTexture(this, budgeted, dimensions, pixelFormat,
1280                                            numMipLevels, isProtected, mipmapStatus);
1281     if (!tex) {
1282         return nullptr;
1283     }
1284 
1285     SkImage::CompressionType compression = GrBackendFormatToCompressionType(format);
1286     if (!this->uploadTexDataCompressed(tex->textureImage(), compression, pixelFormat,
1287                                        dimensions, mipMapped, data, dataSize)) {
1288         return nullptr;
1289     }
1290 
1291     return std::move(tex);
1292 }
1293 
1294 sk_sp<GrTexture> GrVkGpu::onCreateCompressedTexture(SkISize dimensions,
1295                                                     const GrBackendFormat& format,
1296                                                     SkBudgeted budgeted,
1297                                                     GrMipmapped mipMapped,
1298                                                     GrProtected isProtected,
1299                                                     OH_NativeBuffer* nativeBuffer,
1300                                                     size_t bufferSize) {
1301     VkFormat pixelFormat;
1302     SkAssertResult(format.asVkFormat(&pixelFormat));
1303     SkASSERT(GrVkFormatIsCompressed(pixelFormat));
1304 
1305     int mipmapLevelCount = 1;
1306     GrMipmapStatus mipmapStatus = GrMipmapStatus::kNotAllocated;
1307     if (mipMapped == GrMipmapped::kYes) {
1308         mipmapLevelCount = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1;
1309         mipmapStatus = GrMipmapStatus::kValid;
1310     }
1311 
1312     sk_sp<GrVkTexture> texture = GrVkTexture::MakeNewTexture(this, budgeted, dimensions, pixelFormat,
1313                                                              mipmapLevelCount, isProtected, mipmapStatus);
1314     if (!texture) {
1315         return nullptr;
1316     }
1317 
1318     SkImage::CompressionType compression = GrBackendFormatToCompressionType(format);
1319     if (!this->uploadTexDataCompressed(texture->textureImage(), compression, pixelFormat,
1320                                        dimensions, mipMapped, nativeBuffer, bufferSize)) {
1321         return nullptr;
1322     }
1323 
1324     return std::move(texture);
1325 }
1326 
1327 ////////////////////////////////////////////////////////////////////////////////
1328 
1329 void GrVkGpu::copyBuffer(sk_sp<GrGpuBuffer> srcBuffer,
1330                          sk_sp<GrGpuBuffer> dstBuffer,
1331                          VkDeviceSize srcOffset,
1332                          VkDeviceSize dstOffset,
1333                          VkDeviceSize size) {
1334     if (!this->currentCommandBuffer()) {
1335         return;
1336     }
1337     VkBufferCopy copyRegion;
1338     copyRegion.srcOffset = srcOffset;
1339     copyRegion.dstOffset = dstOffset;
1340     copyRegion.size = size;
1341     this->currentCommandBuffer()->copyBuffer(this, std::move(srcBuffer), std::move(dstBuffer), 1,
1342                                              &copyRegion);
1343 }
1344 
1345 bool GrVkGpu::updateBuffer(sk_sp<GrVkBuffer> buffer, const void* src,
1346                            VkDeviceSize offset, VkDeviceSize size) {
1347     if (!this->currentCommandBuffer()) {
1348         return false;
1349     }
1350     // Update the buffer
1351     this->currentCommandBuffer()->updateBuffer(this, std::move(buffer), offset, size, src);
1352 
1353     return true;
1354 }
1355 
1356 ////////////////////////////////////////////////////////////////////////////////
1357 
1358 static bool check_image_info(const GrVkCaps& caps,
1359                              const GrVkImageInfo& info,
1360                              bool needsAllocation,
1361                              uint32_t graphicsQueueIndex) {
1362     if (VK_NULL_HANDLE == info.fImage) {
1363         return false;
1364     }
1365 
1366     if (VK_NULL_HANDLE == info.fAlloc.fMemory && needsAllocation) {
1367         return false;
1368     }
1369 
1370     if (info.fImageLayout == VK_IMAGE_LAYOUT_PRESENT_SRC_KHR && !caps.supportsSwapchain()) {
1371         return false;
1372     }
1373 
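    // If the image is tied to a specific queue family (not ignored, external, or foreign), we can
    // only accept it when it is exclusively owned by the graphics queue we are using.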
1374     if (info.fCurrentQueueFamily != VK_QUEUE_FAMILY_IGNORED &&
1375         info.fCurrentQueueFamily != VK_QUEUE_FAMILY_EXTERNAL &&
1376         info.fCurrentQueueFamily != VK_QUEUE_FAMILY_FOREIGN_EXT) {
1377         if (info.fSharingMode == VK_SHARING_MODE_EXCLUSIVE) {
1378             if (info.fCurrentQueueFamily != graphicsQueueIndex) {
1379                 return false;
1380             }
1381         } else {
1382             return false;
1383         }
1384     }
1385 
1386     if (info.fYcbcrConversionInfo.isValid()) {
1387         if (!caps.supportsYcbcrConversion()) {
1388             return false;
1389         }
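        // Images with an external format are exempt from the usage-flag requirement below.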
1390         if (info.fYcbcrConversionInfo.fExternalFormat != 0) {
1391             return true;
1392         }
1393     }
1394 
1395     // We currently require everything to be made with transfer bits set
1396     if (!SkToBool(info.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_SRC_BIT) ||
1397         !SkToBool(info.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_DST_BIT)) {
1398         return false;
1399     }
1400 
1401     return true;
1402 }
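// A minimal sketch of a GrVkImageInfo that would satisfy check_image_info for a borrowed,
// non-YCbCr image; "clientImage" is a placeholder for the client's live VkImage handle:
//     GrVkImageInfo info;
//     info.fImage              = clientImage;              // must not be VK_NULL_HANDLE
//     info.fImageLayout        = VK_IMAGE_LAYOUT_UNDEFINED;
//     info.fCurrentQueueFamily = VK_QUEUE_FAMILY_IGNORED;
//     info.fSharingMode        = VK_SHARING_MODE_EXCLUSIVE;
//     info.fImageUsageFlags    = VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
//                                VK_IMAGE_USAGE_TRANSFER_DST_BIT;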
1403 
1404 static bool check_tex_image_info(const GrVkCaps& caps, const GrVkImageInfo& info) {
1405     // We don't support directly importing multisampled textures for sampling from shaders.
1406     if (info.fSampleCount != 1) {
1407         return false;
1408     }
1409 
1410     if (info.fYcbcrConversionInfo.isValid() && info.fYcbcrConversionInfo.fExternalFormat != 0) {
1411         return true;
1412     }
1413     if (info.fImageTiling == VK_IMAGE_TILING_OPTIMAL) {
1414         if (!caps.isVkFormatTexturable(info.fFormat)) {
1415             return false;
1416         }
1417     } else if (info.fImageTiling == VK_IMAGE_TILING_LINEAR) {
1418         if (!caps.isVkFormatTexturableLinearly(info.fFormat)) {
1419             return false;
1420         }
1421     } else if (info.fImageTiling == VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT) {
1422         if (!caps.supportsDRMFormatModifiers()) {
1423             return false;
1424         }
1425         // To be technically correct we should query the vulkan support for VkFormat and
1426         // drmFormatModifier pairs to confirm the required feature support is there. However, we
1427         // currently don't have our caps and format tables set up to do this efficiently. So
1428         // instead we just rely on the client's passed-in VkImageUsageFlags and assume they were set
1429         // up using valid features (checked below). In practice this should all be safe because
1430         // currently we are setting all drm format modifier textures to have a
1431         // GrTextureType::kExternal so we just really need to be able to read these video VkImages in
1432         // a shader. The video decoder isn't going to give us VkImages that don't support being
1433         // sampled.
1434     } else {
1435         SkUNREACHABLE;
1436     }
1437 
1438     // We currently require all textures to be made with sample support
1439     if (!SkToBool(info.fImageUsageFlags & VK_IMAGE_USAGE_SAMPLED_BIT)) {
1440         return false;
1441     }
1442 
1443     return true;
1444 }
1445 
1446 static bool check_rt_image_info(const GrVkCaps& caps, const GrVkImageInfo& info, bool resolveOnly) {
1447     if (!caps.isFormatRenderable(info.fFormat, info.fSampleCount)) {
1448         return false;
1449     }
1450     if (!resolveOnly && !SkToBool(info.fImageUsageFlags & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT)) {
1451         return false;
1452     }
1453     return true;
1454 }
1455 
1456 sk_sp<GrTexture> GrVkGpu::onWrapBackendTexture(const GrBackendTexture& backendTex,
1457                                                GrWrapOwnership ownership,
1458                                                GrWrapCacheable cacheable,
1459                                                GrIOType ioType) {
1460     GrVkImageInfo imageInfo;
1461     if (!backendTex.getVkImageInfo(&imageInfo)) {
1462         return nullptr;
1463     }
1464 
1465     if (!check_image_info(this->vkCaps(), imageInfo, kAdopt_GrWrapOwnership == ownership,
1466                           this->queueIndex())) {
1467         return nullptr;
1468     }
1469 
1470     if (!check_tex_image_info(this->vkCaps(), imageInfo)) {
1471         return nullptr;
1472     }
1473 
1474     if (backendTex.isProtected() && (fProtectedContext == GrProtected::kNo)) {
1475         return nullptr;
1476     }
1477 
1478     sk_sp<GrBackendSurfaceMutableStateImpl> mutableState = backendTex.getMutableState();
1479     SkASSERT(mutableState);
1480     return GrVkTexture::MakeWrappedTexture(this, backendTex.dimensions(), ownership, cacheable,
1481                                            ioType, imageInfo, std::move(mutableState));
1482 }
1483 
1484 sk_sp<GrTexture> GrVkGpu::onWrapCompressedBackendTexture(const GrBackendTexture& beTex,
1485                                                          GrWrapOwnership ownership,
1486                                                          GrWrapCacheable cacheable) {
1487     return this->onWrapBackendTexture(beTex, ownership, cacheable, kRead_GrIOType);
1488 }
1489 
1490 sk_sp<GrTexture> GrVkGpu::onWrapRenderableBackendTexture(const GrBackendTexture& backendTex,
1491                                                          int sampleCnt,
1492                                                          GrWrapOwnership ownership,
1493                                                          GrWrapCacheable cacheable) {
1494     GrVkImageInfo imageInfo;
1495     if (!backendTex.getVkImageInfo(&imageInfo)) {
1496         return nullptr;
1497     }
1498 
1499     if (!check_image_info(this->vkCaps(), imageInfo, kAdopt_GrWrapOwnership == ownership,
1500                           this->queueIndex())) {
1501         return nullptr;
1502     }
1503 
1504     if (!check_tex_image_info(this->vkCaps(), imageInfo)) {
1505         return nullptr;
1506     }
1507     // If sampleCnt is > 1 we will create an intermediate MSAA VkImage and then resolve into
1508     // the wrapped VkImage.
1509     bool resolveOnly = sampleCnt > 1;
1510     if (!check_rt_image_info(this->vkCaps(), imageInfo, resolveOnly)) {
1511         return nullptr;
1512     }
1513 
1514     if (backendTex.isProtected() && (fProtectedContext == GrProtected::kNo)) {
1515         return nullptr;
1516     }
1517 
1518     sampleCnt = this->vkCaps().getRenderTargetSampleCount(sampleCnt, imageInfo.fFormat);
1519 
1520     sk_sp<GrBackendSurfaceMutableStateImpl> mutableState = backendTex.getMutableState();
1521     SkASSERT(mutableState);
1522 
1523     return GrVkTextureRenderTarget::MakeWrappedTextureRenderTarget(this, backendTex.dimensions(),
1524                                                                    sampleCnt, ownership, cacheable,
1525                                                                    imageInfo,
1526                                                                    std::move(mutableState));
1527 }
1528 
1529 sk_sp<GrRenderTarget> GrVkGpu::onWrapBackendRenderTarget(const GrBackendRenderTarget& backendRT) {
1530     GrVkImageInfo info;
1531     if (!backendRT.getVkImageInfo(&info)) {
1532         return nullptr;
1533     }
1534 
1535     if (!check_image_info(this->vkCaps(), info, false, this->queueIndex())) {
1536         return nullptr;
1537     }
1538 
1539     // We will always render directly to this VkImage.
1540     static bool kResolveOnly = false;
1541     if (!check_rt_image_info(this->vkCaps(), info, kResolveOnly)) {
1542         return nullptr;
1543     }
1544 
1545     if (backendRT.isProtected() && (fProtectedContext == GrProtected::kNo)) {
1546         return nullptr;
1547     }
1548 
1549     sk_sp<GrBackendSurfaceMutableStateImpl> mutableState = backendRT.getMutableState();
1550     SkASSERT(mutableState);
1551 
1552     sk_sp<GrVkRenderTarget> tgt = GrVkRenderTarget::MakeWrappedRenderTarget(
1553             this, backendRT.dimensions(), backendRT.sampleCnt(), info, std::move(mutableState));
1554 
1555     // We don't allow the client to supply a premade stencil buffer. We always create one if needed.
1556     SkASSERT(!backendRT.stencilBits());
1557     if (tgt) {
1558         SkASSERT(tgt->canAttemptStencilAttachment(tgt->numSamples() > 1));
1559     }
1560 
1561     return std::move(tgt);
1562 }
1563 
1564 sk_sp<GrRenderTarget> GrVkGpu::onWrapVulkanSecondaryCBAsRenderTarget(
1565         const SkImageInfo& imageInfo, const GrVkDrawableInfo& vkInfo) {
1566     int maxSize = this->caps()->maxTextureSize();
1567     if (imageInfo.width() > maxSize || imageInfo.height() > maxSize) {
1568         return nullptr;
1569     }
1570 
1571     GrBackendFormat backendFormat = GrBackendFormat::MakeVk(vkInfo.fFormat);
1572     if (!backendFormat.isValid()) {
1573         return nullptr;
1574     }
1575     int sampleCnt = this->vkCaps().getRenderTargetSampleCount(1, vkInfo.fFormat);
1576     if (!sampleCnt) {
1577         return nullptr;
1578     }
1579 
1580     return GrVkRenderTarget::MakeSecondaryCBRenderTarget(this, imageInfo.dimensions(), vkInfo);
1581 }
1582 
1583 bool GrVkGpu::loadMSAAFromResolve(GrVkCommandBuffer* commandBuffer,
1584                                   const GrVkRenderPass& renderPass,
1585                                   GrAttachment* dst,
1586                                   GrVkImage* src,
1587                                   const SkIRect& srcRect) {
1588     return fMSAALoadManager.loadMSAAFromResolve(this, commandBuffer, renderPass, dst, src, srcRect);
1589 }
1590 
1591 bool GrVkGpu::onRegenerateMipMapLevels(GrTexture* tex) {
1592     if (!this->currentCommandBuffer()) {
1593         return false;
1594     }
1595     auto* vkTex = static_cast<GrVkTexture*>(tex)->textureImage();
1596     // don't do anything for linearly tiled textures (can't have mipmaps)
1597     if (vkTex->isLinearTiled()) {
1598         SkDebugf("Trying to create mipmap for linear tiled texture\n");
1599         return false;
1600     }
1601     SkASSERT(tex->textureType() == GrTextureType::k2D);
1602 
1603     // determine if we can blit to and from this format
1604     const GrVkCaps& caps = this->vkCaps();
1605     if (!caps.formatCanBeDstofBlit(vkTex->imageFormat(), false) ||
1606         !caps.formatCanBeSrcofBlit(vkTex->imageFormat(), false) ||
1607         !caps.mipmapSupport()) {
1608         return false;
1609     }
1610 
1611     int width = tex->width();
1612     int height = tex->height();
1613     VkImageBlit blitRegion;
1614     memset(&blitRegion, 0, sizeof(VkImageBlit));
1615 
1616     // SkMipmap doesn't include the base level in the level count so we have to add 1
1617     uint32_t levelCount = SkMipmap::ComputeLevelCount(tex->width(), tex->height()) + 1;
1618     SkASSERT(levelCount == vkTex->mipLevels());
1619 
1620     // change layout of the layers so we can write to them.
1621     vkTex->setImageLayout(this, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_ACCESS_TRANSFER_WRITE_BIT,
1622                           VK_PIPELINE_STAGE_TRANSFER_BIT, false);
1623 
1624     // setup memory barrier
1625     SkASSERT(GrVkFormatIsSupported(vkTex->imageFormat()));
1626     VkImageMemoryBarrier imageMemoryBarrier = {
1627             VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,  // sType
1628             nullptr,                                 // pNext
1629             VK_ACCESS_TRANSFER_WRITE_BIT,            // srcAccessMask
1630             VK_ACCESS_TRANSFER_READ_BIT,             // dstAccessMask
1631             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,    // oldLayout
1632             VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,    // newLayout
1633             VK_QUEUE_FAMILY_IGNORED,                 // srcQueueFamilyIndex
1634             VK_QUEUE_FAMILY_IGNORED,                 // dstQueueFamilyIndex
1635             vkTex->image(),                          // image
1636             {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}  // subresourceRange
1637     };
1638 
1639     // Blit the miplevels
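    // Each iteration transitions level (mipLevel - 1) to TRANSFER_SRC with a barrier and then
    // blits it, downsampled by half with linear filtering, into level mipLevel, which is still in
    // TRANSFER_DST from the whole-image transition above.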
1640     uint32_t mipLevel = 1;
1641     while (mipLevel < levelCount) {
1642         int prevWidth = width;
1643         int prevHeight = height;
1644         width = std::max(1, width / 2);
1645         height = std::max(1, height / 2);
1646 
1647         imageMemoryBarrier.subresourceRange.baseMipLevel = mipLevel - 1;
1648         this->addImageMemoryBarrier(vkTex->resource(), VK_PIPELINE_STAGE_TRANSFER_BIT,
1649                                     VK_PIPELINE_STAGE_TRANSFER_BIT, false, &imageMemoryBarrier);
1650 
1651         blitRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, mipLevel - 1, 0, 1 };
1652         blitRegion.srcOffsets[0] = { 0, 0, 0 };
1653         blitRegion.srcOffsets[1] = { prevWidth, prevHeight, 1 };
1654         blitRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, mipLevel, 0, 1 };
1655         blitRegion.dstOffsets[0] = { 0, 0, 0 };
1656         blitRegion.dstOffsets[1] = { width, height, 1 };
1657         this->currentCommandBuffer()->blitImage(this,
1658                                                 vkTex->resource(),
1659                                                 vkTex->image(),
1660                                                 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
1661                                                 vkTex->resource(),
1662                                                 vkTex->image(),
1663                                                 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1664                                                 1,
1665                                                 &blitRegion,
1666                                                 VK_FILTER_LINEAR);
1667         ++mipLevel;
1668     }
1669     if (levelCount > 1) {
1670         // This barrier logically is not needed, but it changes the final level to the same layout
1671         // as all the others, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL. This makes tracking of the
1672         // layouts and future layout changes easier. The alternative here would be to track layout
1673         // and memory accesses per layer, which doesn't seem worth it.
1674         imageMemoryBarrier.subresourceRange.baseMipLevel = mipLevel - 1;
1675         this->addImageMemoryBarrier(vkTex->resource(), VK_PIPELINE_STAGE_TRANSFER_BIT,
1676                                     VK_PIPELINE_STAGE_TRANSFER_BIT, false, &imageMemoryBarrier);
1677         vkTex->updateImageLayout(VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
1678     }
1679     return true;
1680 }
1681 
1682 ////////////////////////////////////////////////////////////////////////////////
1683 
1684 sk_sp<GrAttachment> GrVkGpu::makeStencilAttachment(const GrBackendFormat& /*colorFormat*/,
1685                                                    SkISize dimensions, int numStencilSamples) {
1686     VkFormat sFmt = this->vkCaps().preferredStencilFormat();
1687 
1688     fStats.incStencilAttachmentCreates();
1689     return GrVkImage::MakeStencil(this, dimensions, numStencilSamples, sFmt);
1690 }
1691 
1692 sk_sp<GrAttachment> GrVkGpu::makeMSAAAttachment(SkISize dimensions,
1693                                                 const GrBackendFormat& format,
1694                                                 int numSamples,
1695                                                 GrProtected isProtected,
1696                                                 GrMemoryless memoryless) {
1697     VkFormat pixelFormat;
1698     SkAssertResult(format.asVkFormat(&pixelFormat));
1699     SkASSERT(!GrVkFormatIsCompressed(pixelFormat));
1700     SkASSERT(this->vkCaps().isFormatRenderable(pixelFormat, numSamples));
1701 
1702     fStats.incMSAAAttachmentCreates();
1703     return GrVkImage::MakeMSAA(this, dimensions, numSamples, pixelFormat, isProtected, memoryless);
1704 }
1705 
1706 ////////////////////////////////////////////////////////////////////////////////
1707 
1708 bool copy_src_data(char* mapPtr,
1709                    VkFormat vkFormat,
1710                    const SkTArray<size_t>& individualMipOffsets,
1711                    const GrPixmap srcData[],
1712                    int numMipLevels) {
1713     SkASSERT(srcData && numMipLevels);
1714     SkASSERT(!GrVkFormatIsCompressed(vkFormat));
1715     SkASSERT(individualMipOffsets.count() == numMipLevels);
1716     SkASSERT(mapPtr);
1717 
1718     size_t bytesPerPixel = GrVkFormatBytesPerBlock(vkFormat);
1719 
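    // Tightly pack each mip level at its precomputed offset, dropping any row padding the source
    // pixmaps may carry.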
1720     for (int level = 0; level < numMipLevels; ++level) {
1721         const size_t trimRB = srcData[level].info().width() * bytesPerPixel;
1722 
1723         SkRectMemcpy(mapPtr + individualMipOffsets[level], trimRB,
1724                      srcData[level].addr(), srcData[level].rowBytes(),
1725                      trimRB, srcData[level].height());
1726     }
1727     return true;
1728 }
1729 
1730 bool GrVkGpu::createVkImageForBackendSurface(VkFormat vkFormat,
1731                                              SkISize dimensions,
1732                                              int sampleCnt,
1733                                              GrTexturable texturable,
1734                                              GrRenderable renderable,
1735                                              GrMipmapped mipMapped,
1736                                              GrVkImageInfo* info,
1737                                              GrProtected isProtected) {
1738     SkASSERT(texturable == GrTexturable::kYes || renderable == GrRenderable::kYes);
1739 
1740     if (fProtectedContext != isProtected) {
1741         return false;
1742     }
1743 
1744     if (texturable == GrTexturable::kYes && !fVkCaps->isVkFormatTexturable(vkFormat)) {
1745         return false;
1746     }
1747 
1748     // MSAA images are only currently used by createTestingOnlyBackendRenderTarget.
1749     if (sampleCnt > 1 && (texturable == GrTexturable::kYes || renderable == GrRenderable::kNo)) {
1750         return false;
1751     }
1752 
1753     if (renderable == GrRenderable::kYes) {
1754         sampleCnt = fVkCaps->getRenderTargetSampleCount(sampleCnt, vkFormat);
1755         if (!sampleCnt) {
1756             return false;
1757         }
1758     }
1759 
1760 
1761     int numMipLevels = 1;
1762     if (mipMapped == GrMipmapped::kYes) {
1763         numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1;
1764     }
1765 
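    // Every backend image gets transfer src/dst usage so Skia can copy into and out of it; the
    // remaining usage bits depend on whether the image is texturable and/or renderable.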
1766     VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
1767                                    VK_IMAGE_USAGE_TRANSFER_DST_BIT;
1768     if (texturable == GrTexturable::kYes) {
1769         usageFlags |= VK_IMAGE_USAGE_SAMPLED_BIT;
1770     }
1771     if (renderable == GrRenderable::kYes) {
1772         usageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
1773         // We always make our render targets support being used as input attachments
1774         usageFlags |= VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
1775     }
1776 
1777     GrVkImage::ImageDesc imageDesc;
1778     imageDesc.fImageType = VK_IMAGE_TYPE_2D;
1779     imageDesc.fFormat = vkFormat;
1780     imageDesc.fWidth = dimensions.width();
1781     imageDesc.fHeight = dimensions.height();
1782     imageDesc.fLevels = numMipLevels;
1783     imageDesc.fSamples = sampleCnt;
1784     imageDesc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
1785     imageDesc.fUsageFlags = usageFlags;
1786     imageDesc.fMemProps = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
1787     imageDesc.fIsProtected = fProtectedContext;
1788 
1789     if (!GrVkImage::InitImageInfo(this, imageDesc, info)) {
1790         SkDebugf("Failed to init image info\n");
1791         return false;
1792     }
1793 
1794     return true;
1795 }
1796 
1797 bool GrVkGpu::onClearBackendTexture(const GrBackendTexture& backendTexture,
1798                                     sk_sp<GrRefCntedCallback> finishedCallback,
1799                                     std::array<float, 4> color) {
1800     GrVkImageInfo info;
1801     SkAssertResult(backendTexture.getVkImageInfo(&info));
1802 
1803     sk_sp<GrBackendSurfaceMutableStateImpl> mutableState = backendTexture.getMutableState();
1804     SkASSERT(mutableState);
1805     sk_sp<GrVkTexture> texture =
1806                 GrVkTexture::MakeWrappedTexture(this, backendTexture.dimensions(),
1807                                                 kBorrow_GrWrapOwnership, GrWrapCacheable::kNo,
1808                                                 kRW_GrIOType, info, std::move(mutableState));
1809     if (!texture) {
1810         return false;
1811     }
1812     GrVkImage* texImage = texture->textureImage();
1813 
1814     GrVkPrimaryCommandBuffer* cmdBuffer = this->currentCommandBuffer();
1815     if (!cmdBuffer) {
1816         return false;
1817     }
1818 
1819     texImage->setImageLayout(this,
1820                              VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1821                              VK_ACCESS_TRANSFER_WRITE_BIT,
1822                              VK_PIPELINE_STAGE_TRANSFER_BIT,
1823                              false);
1824 
1825     // CmdClearColorImage doesn't work for compressed formats
1826     SkASSERT(!GrVkFormatIsCompressed(info.fFormat));
1827 
1828     VkClearColorValue vkColor;
1829     // If we ever support SINT or UINT formats this needs to be updated to use the int32 and
1830     // uint32 union members in those cases.
1831     vkColor.float32[0] = color[0];
1832     vkColor.float32[1] = color[1];
1833     vkColor.float32[2] = color[2];
1834     vkColor.float32[3] = color[3];
1835     VkImageSubresourceRange range;
1836     range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
1837     range.baseArrayLayer = 0;
1838     range.baseMipLevel = 0;
1839     range.layerCount = 1;
1840     range.levelCount = info.fLevelCount;
1841     cmdBuffer->clearColorImage(this, texImage, &vkColor, 1, &range);
1842 
1843     // Change the image layout to shader read: if we use this texture as a borrowed texture
1844     // within Ganesh we require its layout to be VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL.
1845     texImage->setImageLayout(this, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
1846                                   VK_ACCESS_SHADER_READ_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
1847                                   false);
1848 
1849     if (finishedCallback) {
1850         this->addFinishedCallback(std::move(finishedCallback));
1851     }
1852     return true;
1853 }
1854 
1855 GrBackendTexture GrVkGpu::onCreateBackendTexture(SkISize dimensions,
1856                                                  const GrBackendFormat& format,
1857                                                  GrRenderable renderable,
1858                                                  GrMipmapped mipMapped,
1859                                                  GrProtected isProtected) {
1860     const GrVkCaps& caps = this->vkCaps();
1861 
1862     if (fProtectedContext != isProtected) {
1863         return {};
1864     }
1865 
1866     VkFormat vkFormat;
1867     if (!format.asVkFormat(&vkFormat)) {
1868         return {};
1869     }
1870 
1871     // TODO: move the texturability check up to GrGpu::createBackendTexture and just assert here
1872     if (!caps.isVkFormatTexturable(vkFormat)) {
1873         return {};
1874     }
1875 
1876     if (GrVkFormatNeedsYcbcrSampler(vkFormat)) {
1877         return {};
1878     }
1879 
1880     GrVkImageInfo info;
1881     if (!this->createVkImageForBackendSurface(vkFormat, dimensions, 1, GrTexturable::kYes,
1882                                               renderable, mipMapped, &info, isProtected)) {
1883         return {};
1884     }
1885 
1886     return GrBackendTexture(dimensions.width(), dimensions.height(), info);
1887 }
1888 
1889 GrBackendTexture GrVkGpu::onCreateCompressedBackendTexture(
1890         SkISize dimensions, const GrBackendFormat& format, GrMipmapped mipMapped,
1891         GrProtected isProtected) {
1892     return this->onCreateBackendTexture(dimensions, format, GrRenderable::kNo, mipMapped,
1893                                         isProtected);
1894 }
1895 
1896 bool GrVkGpu::onUpdateCompressedBackendTexture(const GrBackendTexture& backendTexture,
1897                                                sk_sp<GrRefCntedCallback> finishedCallback,
1898                                                const void* data,
1899                                                size_t size) {
1900     GrVkImageInfo info;
1901     SkAssertResult(backendTexture.getVkImageInfo(&info));
1902 
1903     sk_sp<GrBackendSurfaceMutableStateImpl> mutableState = backendTexture.getMutableState();
1904     SkASSERT(mutableState);
1905     sk_sp<GrVkTexture> texture = GrVkTexture::MakeWrappedTexture(this,
1906                                                                  backendTexture.dimensions(),
1907                                                                  kBorrow_GrWrapOwnership,
1908                                                                  GrWrapCacheable::kNo,
1909                                                                  kRW_GrIOType,
1910                                                                  info,
1911                                                                  std::move(mutableState));
1912     if (!texture) {
1913         return false;
1914     }
1915 
1916     GrVkPrimaryCommandBuffer* cmdBuffer = this->currentCommandBuffer();
1917     if (!cmdBuffer) {
1918         return false;
1919     }
1920     GrVkImage* image = texture->textureImage();
1921     image->setImageLayout(this,
1922                           VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1923                           VK_ACCESS_TRANSFER_WRITE_BIT,
1924                           VK_PIPELINE_STAGE_TRANSFER_BIT,
1925                           false);
1926 
1927     SkImage::CompressionType compression =
1928             GrBackendFormatToCompressionType(backendTexture.getBackendFormat());
1929 
1930     SkTArray<VkBufferImageCopy> regions;
1931     SkTArray<size_t> individualMipOffsets;
1932     GrStagingBufferManager::Slice slice;
1933 
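    // Compute per-level copy regions and reserve a staging slice large enough for the entire
    // compressed mip chain described by the backend texture.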
1934     fill_in_compressed_regions(&fStagingBufferManager,
1935                                &regions,
1936                                &individualMipOffsets,
1937                                &slice,
1938                                compression,
1939                                info.fFormat,
1940                                backendTexture.dimensions(),
1941                                backendTexture.fMipmapped);
1942 
1943     if (!slice.fBuffer) {
1944         return false;
1945     }
1946 
1947     memcpy(slice.fOffsetMapPtr, data, size);
1948 
1949     cmdBuffer->addGrSurface(texture);
1950     // Copy the buffer to the image. This call takes the raw VkBuffer instead of a GrGpuBuffer
1951     // because we don't need the command buffer to ref the buffer here. The reason is that
1952     // the buffer is coming from the staging manager and the staging manager will make sure the
1953     // command buffer has a ref on the buffer. This avoids having to add and remove a ref for
1954     // every upload in the frame.
1955     cmdBuffer->copyBufferToImage(this,
1956                                  static_cast<GrVkBuffer*>(slice.fBuffer)->vkBuffer(),
1957                                  image,
1958                                  image->currentLayout(),
1959                                  regions.count(),
1960                                  regions.begin());
1961 
1962     // Change the image layout to shader read: if we use this texture as a borrowed texture
1963     // within Ganesh we require its layout to be VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL.
1964     image->setImageLayout(this,
1965                           VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
1966                           VK_ACCESS_SHADER_READ_BIT,
1967                           VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
1968                           false);
1969 
1970     if (finishedCallback) {
1971         this->addFinishedCallback(std::move(finishedCallback));
1972     }
1973     return true;
1974 }
1975 
1976 void set_layout_and_queue_from_mutable_state(GrVkGpu* gpu, GrVkImage* image,
1977                                              const GrVkSharedImageInfo& newInfo) {
1978     // Even though internally we use these helpers for getting src access flags and stages, they
1979     // can also be used for general dst flags since we don't know exactly what the client
1980     // plans on using the image for.
1981     VkImageLayout newLayout = newInfo.getImageLayout();
1982     if (newLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
1983         newLayout = image->currentLayout();
1984     }
1985     VkPipelineStageFlags dstStage = GrVkImage::LayoutToPipelineSrcStageFlags(newLayout);
1986     VkAccessFlags dstAccess = GrVkImage::LayoutToSrcAccessMask(newLayout);
1987 
1988     uint32_t currentQueueFamilyIndex = image->currentQueueFamilyIndex();
1989     uint32_t newQueueFamilyIndex = newInfo.getQueueFamilyIndex();
1990     auto isSpecialQueue = [](uint32_t queueFamilyIndex) {
1991         return queueFamilyIndex == VK_QUEUE_FAMILY_EXTERNAL ||
1992                queueFamilyIndex == VK_QUEUE_FAMILY_FOREIGN_EXT;
1993     };
1994     if (isSpecialQueue(currentQueueFamilyIndex) && isSpecialQueue(newQueueFamilyIndex)) {
1995         // It is illegal to have both the new and old queue be special queue families (i.e. external
1996         // or foreign).
1997         return;
1998     }
1999 
2000     image->setImageLayoutAndQueueIndex(gpu, newLayout, dstAccess, dstStage, false,
2001                                        newQueueFamilyIndex);
2002 }
2003 
2004 bool GrVkGpu::setBackendSurfaceState(GrVkImageInfo info,
2005                                      sk_sp<GrBackendSurfaceMutableStateImpl> currentState,
2006                                      SkISize dimensions,
2007                                      const GrVkSharedImageInfo& newInfo,
2008                                      GrBackendSurfaceMutableState* previousState,
2009                                      sk_sp<GrRefCntedCallback> finishedCallback) {
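    // Wrap the client's image (borrowed, uncacheable) so the requested layout and queue-family
    // transition can be recorded on the current command buffer and reflected in the shared
    // mutable state.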
2010     sk_sp<GrVkImage> texture = GrVkImage::MakeWrapped(this,
2011                                                       dimensions,
2012                                                       info,
2013                                                       std::move(currentState),
2014                                                       GrVkImage::UsageFlags::kColorAttachment,
2015                                                       kBorrow_GrWrapOwnership,
2016                                                       GrWrapCacheable::kNo,
2017                                                       /*forSecondaryCB=*/false);
2018     SkASSERT(texture);
2019     if (!texture) {
2020         return false;
2021     }
2022     if (previousState) {
2023         previousState->setVulkanState(texture->currentLayout(),
2024                                       texture->currentQueueFamilyIndex());
2025     }
2026     set_layout_and_queue_from_mutable_state(this, texture.get(), newInfo);
2027     if (finishedCallback) {
2028         this->addFinishedCallback(std::move(finishedCallback));
2029     }
2030     return true;
2031 }
2032 
2033 bool GrVkGpu::setBackendTextureState(const GrBackendTexture& backendTexture,
2034                                      const GrBackendSurfaceMutableState& newState,
2035                                      GrBackendSurfaceMutableState* previousState,
2036                                      sk_sp<GrRefCntedCallback> finishedCallback) {
2037     GrVkImageInfo info;
2038     SkAssertResult(backendTexture.getVkImageInfo(&info));
2039     sk_sp<GrBackendSurfaceMutableStateImpl> currentState = backendTexture.getMutableState();
2040     SkASSERT(currentState);
2041     SkASSERT(newState.isValid() && newState.fBackend == GrBackend::kVulkan);
2042     return this->setBackendSurfaceState(info, std::move(currentState), backendTexture.dimensions(),
2043                                         newState.fVkState, previousState,
2044                                         std::move(finishedCallback));
2045 }
2046 
2047 bool GrVkGpu::setBackendRenderTargetState(const GrBackendRenderTarget& backendRenderTarget,
2048                                           const GrBackendSurfaceMutableState& newState,
2049                                           GrBackendSurfaceMutableState* previousState,
2050                                           sk_sp<GrRefCntedCallback> finishedCallback) {
2051     GrVkImageInfo info;
2052     SkAssertResult(backendRenderTarget.getVkImageInfo(&info));
2053     sk_sp<GrBackendSurfaceMutableStateImpl> currentState = backendRenderTarget.getMutableState();
2054     SkASSERT(currentState);
2055     SkASSERT(newState.fBackend == GrBackend::kVulkan);
2056     return this->setBackendSurfaceState(info, std::move(currentState),
2057                                         backendRenderTarget.dimensions(), newState.fVkState,
2058                                         previousState, std::move(finishedCallback));
2059 }
2060 
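// Issues a self-dependency barrier on the render target's color attachment so prior color
// attachment writes become visible either to non-coherent advanced-blend reads (kBlend) or to
// input-attachment reads in the fragment shader (kTexture).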
2061 void GrVkGpu::xferBarrier(GrRenderTarget* rt, GrXferBarrierType barrierType) {
2062     GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt);
2063     VkPipelineStageFlags dstStage;
2064     VkAccessFlags dstAccess;
2065     if (barrierType == kBlend_GrXferBarrierType) {
2066         dstStage = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
2067         dstAccess = VK_ACCESS_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT;
2068     } else {
2069         SkASSERT(barrierType == kTexture_GrXferBarrierType);
2070         dstStage = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
2071         dstAccess = VK_ACCESS_INPUT_ATTACHMENT_READ_BIT;
2072     }
2073     GrVkImage* image = vkRT->colorAttachment();
2074     VkImageMemoryBarrier barrier;
2075     barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
2076     barrier.pNext = nullptr;
2077     barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
2078     barrier.dstAccessMask = dstAccess;
2079     barrier.oldLayout = image->currentLayout();
2080     barrier.newLayout = barrier.oldLayout;
2081     barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
2082     barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
2083     barrier.image = image->image();
2084     barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, image->mipLevels(), 0, 1};
2085     this->addImageMemoryBarrier(image->resource(),
2086                                 VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
2087                                 dstStage, true, &barrier);
2088 }
2089 
2090 void GrVkGpu::deleteBackendTexture(const GrBackendTexture& tex) {
2091     SkASSERT(GrBackendApi::kVulkan == tex.fBackend);
2092 
2093     GrVkImageInfo info;
2094     if (tex.getVkImageInfo(&info)) {
2095         GrVkImage::DestroyImageInfo(this, const_cast<GrVkImageInfo*>(&info));
2096     }
2097 }
2098 
2099 bool GrVkGpu::compile(const GrProgramDesc& desc, const GrProgramInfo& programInfo) {
2100     GrVkRenderPass::AttachmentsDescriptor attachmentsDescriptor;
2101     GrVkRenderPass::AttachmentFlags attachmentFlags;
2102     GrVkRenderTarget::ReconstructAttachmentsDescriptor(this->vkCaps(), programInfo,
2103                                                        &attachmentsDescriptor, &attachmentFlags);
2104 
2105     GrVkRenderPass::SelfDependencyFlags selfDepFlags = GrVkRenderPass::SelfDependencyFlags::kNone;
2106     if (programInfo.renderPassBarriers() & GrXferBarrierFlags::kBlend) {
2107         selfDepFlags |= GrVkRenderPass::SelfDependencyFlags::kForNonCoherentAdvBlend;
2108     }
2109     if (programInfo.renderPassBarriers() & GrXferBarrierFlags::kTexture) {
2110         selfDepFlags |= GrVkRenderPass::SelfDependencyFlags::kForInputAttachment;
2111     }
2112 
2113     GrVkRenderPass::LoadFromResolve loadFromResolve = GrVkRenderPass::LoadFromResolve::kNo;
2114     if (this->vkCaps().programInfoWillUseDiscardableMSAA(programInfo) &&
2115         programInfo.colorLoadOp() == GrLoadOp::kLoad) {
2116         loadFromResolve = GrVkRenderPass::LoadFromResolve::kLoad;
2117     }
2118     sk_sp<const GrVkRenderPass> renderPass(this->resourceProvider().findCompatibleRenderPass(
2119             &attachmentsDescriptor, attachmentFlags, selfDepFlags, loadFromResolve));
2120     if (!renderPass) {
2121         return false;
2122     }
2123 
2124     GrThreadSafePipelineBuilder::Stats::ProgramCacheResult stat;
2125 
2126     auto pipelineState = this->resourceProvider().findOrCreateCompatiblePipelineState(
2127                                     desc,
2128                                     programInfo,
2129                                     renderPass->vkRenderPass(),
2130                                     &stat);
2131     if (!pipelineState) {
2132         return false;
2133     }
2134 
2135     return stat != GrThreadSafePipelineBuilder::Stats::ProgramCacheResult::kHit;
2136 }
2137 
2138 #if GR_TEST_UTILS
2139 bool GrVkGpu::isTestingOnlyBackendTexture(const GrBackendTexture& tex) const {
2140     SkASSERT(GrBackendApi::kVulkan == tex.fBackend);
2141 
2142     GrVkImageInfo backend;
2143     if (!tex.getVkImageInfo(&backend)) {
2144         return false;
2145     }
2146 
2147     if (backend.fImage && backend.fAlloc.fMemory) {
2148         VkMemoryRequirements req;
2149         memset(&req, 0, sizeof(req));
2150         GR_VK_CALL(this->vkInterface(), GetImageMemoryRequirements(fDevice,
2151                                                                    backend.fImage,
2152                                                                    &req));
2153         // TODO: find a better check
2154         // This will probably fail with a different driver
2155         return (req.size > 0) && (req.size <= 8192 * 8192);
2156     }
2157 
2158     return false;
2159 }
2160 
2161 GrBackendRenderTarget GrVkGpu::createTestingOnlyBackendRenderTarget(SkISize dimensions,
2162                                                                     GrColorType ct,
2163                                                                     int sampleCnt,
2164                                                                     GrProtected isProtected) {
2165     if (dimensions.width()  > this->caps()->maxRenderTargetSize() ||
2166         dimensions.height() > this->caps()->maxRenderTargetSize()) {
2167         return {};
2168     }
2169 
2170     VkFormat vkFormat = this->vkCaps().getFormatFromColorType(ct);
2171 
2172     GrVkImageInfo info;
2173     if (!this->createVkImageForBackendSurface(vkFormat, dimensions, sampleCnt, GrTexturable::kNo,
2174                                               GrRenderable::kYes, GrMipmapped::kNo, &info,
2175                                               isProtected)) {
2176         return {};
2177     }
2178     return GrBackendRenderTarget(dimensions.width(), dimensions.height(), 0, info);
2179 }
2180 
2181 void GrVkGpu::deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget& rt) {
2182     SkASSERT(GrBackendApi::kVulkan == rt.fBackend);
2183 
2184     GrVkImageInfo info;
2185     if (rt.getVkImageInfo(&info)) {
2186         // something in the command buffer may still be using this, so force submit
2187         SkAssertResult(this->submitCommandBuffer(kForce_SyncQueue));
2188         GrVkImage::DestroyImageInfo(this, const_cast<GrVkImageInfo*>(&info));
2189     }
2190 }
2191 #endif
2192 
2193 ////////////////////////////////////////////////////////////////////////////////
2194 
2195 void GrVkGpu::addBufferMemoryBarrier(const GrManagedResource* resource,
2196                                      VkPipelineStageFlags srcStageMask,
2197                                      VkPipelineStageFlags dstStageMask,
2198                                      bool byRegion,
2199                                      VkBufferMemoryBarrier* barrier) const {
2200     if (!this->currentCommandBuffer()) {
2201         return;
2202     }
2203     SkASSERT(resource);
2204     this->currentCommandBuffer()->pipelineBarrier(this,
2205                                                   resource,
2206                                                   srcStageMask,
2207                                                   dstStageMask,
2208                                                   byRegion,
2209                                                   GrVkCommandBuffer::kBufferMemory_BarrierType,
2210                                                   barrier);
2211 }
2212 void GrVkGpu::addBufferMemoryBarrier(VkPipelineStageFlags srcStageMask,
2213                                      VkPipelineStageFlags dstStageMask,
2214                                      bool byRegion,
2215                                      VkBufferMemoryBarrier* barrier) const {
2216     if (!this->currentCommandBuffer()) {
2217         return;
2218     }
2219     // We don't pass a resource to the command buffer here. The command buffer is only using it
2220     // to hold a ref, but every place where we add a buffer memory barrier we are doing some other
2221     // command with the buffer on the command buffer. Thus those other commands will already cause
2222     // the command buffer to be holding a ref to the buffer.
2223     this->currentCommandBuffer()->pipelineBarrier(this,
2224                                                   /*resource=*/nullptr,
2225                                                   srcStageMask,
2226                                                   dstStageMask,
2227                                                   byRegion,
2228                                                   GrVkCommandBuffer::kBufferMemory_BarrierType,
2229                                                   barrier);
2230 }
2231 
2232 void GrVkGpu::addImageMemoryBarrier(const GrManagedResource* resource,
2233                                     VkPipelineStageFlags srcStageMask,
2234                                     VkPipelineStageFlags dstStageMask,
2235                                     bool byRegion,
2236                                     VkImageMemoryBarrier* barrier) const {
2237     // If we are in the middle of destroying or abandoning the context we may hit a release proc
2238     // that triggers the destruction of a GrVkImage. This could cause us to try to transfer the
2239     // VkImage back to the original queue. In this state we don't submit any more work and we may
2240     // not have a current command buffer. Thus we won't do the queue transfer.
2241     if (!this->currentCommandBuffer()) {
2242         return;
2243     }
2244     SkASSERT(resource);
2245     this->currentCommandBuffer()->pipelineBarrier(this,
2246                                                   resource,
2247                                                   srcStageMask,
2248                                                   dstStageMask,
2249                                                   byRegion,
2250                                                   GrVkCommandBuffer::kImageMemory_BarrierType,
2251                                                   barrier);
2252 }
2253 
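// Transitions the backing VkImages of the given proxies either to the layout/queue described by
// 'newState' or, for kPresent access, to a present-ready layout before the surfaces are handed
// back to the client.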
2254 void GrVkGpu::prepareSurfacesForBackendAccessAndStateUpdates(
2255         SkSpan<GrSurfaceProxy*> proxies,
2256         SkSurface::BackendSurfaceAccess access,
2257         const GrBackendSurfaceMutableState* newState) {
2258     // Submit the current command buffer to the Queue. Whether we inserted semaphores or not does
2259     // not affect what we do here.
2260     if (!proxies.empty() && (access == SkSurface::BackendSurfaceAccess::kPresent || newState)) {
2261         // We currently don't support passing in new surface state for multiple proxies here. The
2262         // only time we have multiple proxies is if we are flushing a yuv SkImage which won't have
2263     // state updates anyway. Additionally, if we have a newState then we must not have any
2264         // BackendSurfaceAccess.
2265         SkASSERT(!newState || proxies.size() == 1);
2266         SkASSERT(!newState || access == SkSurface::BackendSurfaceAccess::kNoAccess);
2267         GrVkImage* image;
2268         for (GrSurfaceProxy* proxy : proxies) {
2269             SkASSERT(proxy->isInstantiated());
2270             if (GrTexture* tex = proxy->peekTexture()) {
2271                 image = static_cast<GrVkTexture*>(tex)->textureImage();
2272             } else {
2273                 GrRenderTarget* rt = proxy->peekRenderTarget();
2274                 SkASSERT(rt);
2275                 GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt);
2276                 image = vkRT->externalAttachment();
2277             }
2278             if (newState) {
2279                 const GrVkSharedImageInfo& newInfo = newState->fVkState;
2280                 set_layout_and_queue_from_mutable_state(this, image, newInfo);
2281             } else {
2282                 SkASSERT(access == SkSurface::BackendSurfaceAccess::kPresent);
2283                 image->prepareForPresent(this);
2284             }
2285         }
2286     }
2287 }
2288 
2289 void GrVkGpu::addFinishedProc(GrGpuFinishedProc finishedProc,
2290                               GrGpuFinishedContext finishedContext) {
2291     SkASSERT(finishedProc);
2292     this->addFinishedCallback(GrRefCntedCallback::Make(finishedProc, finishedContext));
2293 }
2294 
2295 void GrVkGpu::addFinishedCallback(sk_sp<GrRefCntedCallback> finishedCallback) {
2296     SkASSERT(finishedCallback);
2297     fResourceProvider.addFinishedProcToActiveCommandBuffers(std::move(finishedCallback));
2298 }
2299 
2300 void GrVkGpu::takeOwnershipOfBuffer(sk_sp<GrGpuBuffer> buffer) {
2301     this->currentCommandBuffer()->addGrBuffer(std::move(buffer));
2302 }
2303 
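// Submits the current command buffer; when 'syncCpu' is true the submit also waits on the CPU for
// the queued work to finish.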
2304 bool GrVkGpu::onSubmitToGpu(bool syncCpu) {
2305     if (syncCpu) {
2306         return this->submitCommandBuffer(kForce_SyncQueue);
2307     } else {
2308         return this->submitCommandBuffer(kSkip_SyncQueue);
2309     }
2310 }
2311 
2312 void GrVkGpu::finishOutstandingGpuWork() {
2313     VK_CALL(QueueWaitIdle(fQueue));
2314 
2315     if (this->vkCaps().mustSyncCommandBuffersWithQueue()) {
2316         fResourceProvider.forceSyncAllCommandBuffers();
2317     }
2318 }
2319 
2320 void GrVkGpu::onReportSubmitHistograms() {
2321 #if SK_HISTOGRAMS_ENABLED
2322     uint64_t allocatedMemory = fMemoryAllocator->totalAllocatedMemory();
2323     uint64_t usedMemory = fMemoryAllocator->totalUsedMemory();
2324     SkASSERT(usedMemory <= allocatedMemory);
2325     if (allocatedMemory > 0) {
2326         SK_HISTOGRAM_PERCENTAGE("VulkanMemoryAllocator.PercentUsed",
2327                                 (usedMemory * 100) / allocatedMemory);
2328     }
2329     // allocatedMemory is in bytes and needs to be reported in kilobytes. SK_HISTOGRAM_MEMORY_KB
2330     // supports samples up to around 500MB, which should cover the amounts of memory we allocate.
2331     SK_HISTOGRAM_MEMORY_KB("VulkanMemoryAllocator.AmountAllocated", allocatedMemory >> 10);
2332 #endif
2333 }
2334 
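// Copies a rectangle between two images of compatible formats using vkCmdCopyImage. Both images
// are transitioned to the appropriate transfer layouts first.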
2335 void GrVkGpu::copySurfaceAsCopyImage(GrSurface* dst,
2336                                      GrSurface* src,
2337                                      GrVkImage* dstImage,
2338                                      GrVkImage* srcImage,
2339                                      const SkIRect& srcRect,
2340                                      const SkIPoint& dstPoint) {
2341     if (!this->currentCommandBuffer()) {
2342         return;
2343     }
2344 
2345 #ifdef SK_DEBUG
2346     int dstSampleCnt = dstImage->numSamples();
2347     int srcSampleCnt = srcImage->numSamples();
2348     bool dstHasYcbcr = dstImage->ycbcrConversionInfo().isValid();
2349     bool srcHasYcbcr = srcImage->ycbcrConversionInfo().isValid();
2350     VkFormat dstFormat = dstImage->imageFormat();
2351     VkFormat srcFormat;
2352     SkAssertResult(src->backendFormat().asVkFormat(&srcFormat));
2353     SkASSERT(this->vkCaps().canCopyImage(dstFormat, dstSampleCnt, dstHasYcbcr,
2354                                          srcFormat, srcSampleCnt, srcHasYcbcr));
2355 #endif
2356     if (src->isProtected() && !dst->isProtected()) {
2357         SkDebugf("Can't copy from protected memory to non-protected");
2358         return;
2359     }
2360 
2361     // These flags control cache flushing/invalidation. For the dst image it doesn't matter if
2362     // the cache is flushed since it is only being written to.
2363     dstImage->setImageLayout(this,
2364                              VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2365                              VK_ACCESS_TRANSFER_WRITE_BIT,
2366                              VK_PIPELINE_STAGE_TRANSFER_BIT,
2367                              false);
2368 
2369     srcImage->setImageLayout(this,
2370                              VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
2371                              VK_ACCESS_TRANSFER_READ_BIT,
2372                              VK_PIPELINE_STAGE_TRANSFER_BIT,
2373                              false);
2374 
2375     VkImageCopy copyRegion;
2376     memset(&copyRegion, 0, sizeof(VkImageCopy));
2377     copyRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
2378     copyRegion.srcOffset = { srcRect.fLeft, srcRect.fTop, 0 };
2379     copyRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
2380     copyRegion.dstOffset = { dstPoint.fX, dstPoint.fY, 0 };
2381     copyRegion.extent = { (uint32_t)srcRect.width(), (uint32_t)srcRect.height(), 1 };
2382 
2383     this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(src));
2384     this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(dst));
2385     this->currentCommandBuffer()->copyImage(this,
2386                                             srcImage,
2387                                             VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
2388                                             dstImage,
2389                                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2390                                             1,
2391                                             &copyRegion);
2392 
2393     SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
2394                                         srcRect.width(), srcRect.height());
2395     // The rect is already in device space so we pass in kTopLeft so no flip is done.
2396     this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect);
2397 }
2398 
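// Copies a rectangle using vkCmdBlitImage. No scaling is performed: the src and dst regions have
// identical dimensions, so VK_FILTER_NEAREST is sufficient.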
2399 void GrVkGpu::copySurfaceAsBlit(GrSurface* dst,
2400                                 GrSurface* src,
2401                                 GrVkImage* dstImage,
2402                                 GrVkImage* srcImage,
2403                                 const SkIRect& srcRect,
2404                                 const SkIPoint& dstPoint) {
2405     if (!this->currentCommandBuffer()) {
2406         return;
2407     }
2408 
2409 #ifdef SK_DEBUG
2410     int dstSampleCnt = dstImage->numSamples();
2411     int srcSampleCnt = srcImage->numSamples();
2412     bool dstHasYcbcr = dstImage->ycbcrConversionInfo().isValid();
2413     bool srcHasYcbcr = srcImage->ycbcrConversionInfo().isValid();
2414     VkFormat dstFormat = dstImage->imageFormat();
2415     VkFormat srcFormat;
2416     SkAssertResult(src->backendFormat().asVkFormat(&srcFormat));
2417     SkASSERT(this->vkCaps().canCopyAsBlit(dstFormat,
2418                                           dstSampleCnt,
2419                                           dstImage->isLinearTiled(),
2420                                           dstHasYcbcr,
2421                                           srcFormat,
2422                                           srcSampleCnt,
2423                                           srcImage->isLinearTiled(),
2424                                           srcHasYcbcr));
2425 
2426 #endif
2427     if (src->isProtected() && !dst->isProtected()) {
2428         SkDebugf("Can't copy from protected memory to non-protected");
2429         return;
2430     }
2431 
2432     dstImage->setImageLayout(this,
2433                              VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2434                              VK_ACCESS_TRANSFER_WRITE_BIT,
2435                              VK_PIPELINE_STAGE_TRANSFER_BIT,
2436                              false);
2437 
2438     srcImage->setImageLayout(this,
2439                              VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
2440                              VK_ACCESS_TRANSFER_READ_BIT,
2441                              VK_PIPELINE_STAGE_TRANSFER_BIT,
2442                              false);
2443 
2444     // The dst rect has the same dimensions as the src rect, positioned at dstPoint.
2445     SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY, srcRect.width(),
2446                                         srcRect.height());
2447 
2448     VkImageBlit blitRegion;
2449     memset(&blitRegion, 0, sizeof(VkImageBlit));
2450     blitRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
2451     blitRegion.srcOffsets[0] = { srcRect.fLeft, srcRect.fTop, 0 };
2452     blitRegion.srcOffsets[1] = { srcRect.fRight, srcRect.fBottom, 1 };
2453     blitRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
2454     blitRegion.dstOffsets[0] = { dstRect.fLeft, dstRect.fTop, 0 };
2455     blitRegion.dstOffsets[1] = { dstRect.fRight, dstRect.fBottom, 1 };
2456 
2457     this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(src));
2458     this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(dst));
2459     this->currentCommandBuffer()->blitImage(this,
2460                                             *srcImage,
2461                                             *dstImage,
2462                                             1,
2463                                             &blitRegion,
2464                                             VK_FILTER_NEAREST); // We never scale so any filter works here
2465 
2466     // The rect is already in device space so we pass in kTopLeft so no flip is done.
2467     this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect);
2468 }
2469 
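// Copies by resolving the multisampled source render target into the destination image.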
2470 void GrVkGpu::copySurfaceAsResolve(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
2471                                    const SkIPoint& dstPoint) {
2472     if (src->isProtected() && !dst->isProtected()) {
2473         SkDebugf("Can't copy from protected memory to non-protected");
2474         return;
2475     }
2476     GrVkRenderTarget* srcRT = static_cast<GrVkRenderTarget*>(src->asRenderTarget());
2477     this->resolveImage(dst, srcRT, srcRect, dstPoint);
2478     SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
2479                                         srcRect.width(), srcRect.height());
2480     // The rect is already in device space so we pass in kTopLeft so no flip is done.
2481     this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect);
2482 }
2483 
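// Picks the cheapest supported copy path (resolve, then image copy, then blit) based on the
// formats, sample counts, tiling, and YCbCr state of the source and destination images.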
2484 bool GrVkGpu::onCopySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
2485                             const SkIPoint& dstPoint) {
2486 #ifdef SK_DEBUG
2487     if (GrVkRenderTarget* srcRT = static_cast<GrVkRenderTarget*>(src->asRenderTarget())) {
2488         SkASSERT(!srcRT->wrapsSecondaryCommandBuffer());
2489     }
2490     if (GrVkRenderTarget* dstRT = static_cast<GrVkRenderTarget*>(dst->asRenderTarget())) {
2491         SkASSERT(!dstRT->wrapsSecondaryCommandBuffer());
2492     }
2493 #endif
2494     if (src->isProtected() && !dst->isProtected()) {
2495         SkDebugf("Can't copy from protected memory to non-protected");
2496         return false;
2497     }
2498 
2499     GrVkImage* dstImage;
2500     GrVkImage* srcImage;
2501     GrRenderTarget* dstRT = dst->asRenderTarget();
2502     if (dstRT) {
2503         GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(dstRT);
2504         if (vkRT->wrapsSecondaryCommandBuffer()) {
2505             return false;
2506         }
2507         // This will technically return true for single sample rts that used DMSAA in which case we
2508         // don't have to pick the resolve attachment. But in that case the resolve and color
2509         // attachments will be the same anyway.
2510         if (this->vkCaps().renderTargetSupportsDiscardableMSAA(vkRT)) {
2511             dstImage = vkRT->resolveAttachment();
2512         } else {
2513             dstImage = vkRT->colorAttachment();
2514         }
2515     } else if (dst->asTexture()) {
2516         dstImage = static_cast<GrVkTexture*>(dst->asTexture())->textureImage();
2517     } else {
2518         // The surface is already a GrAttachment.
2519         dstImage = static_cast<GrVkImage*>(dst);
2520     }
2521     GrRenderTarget* srcRT = src->asRenderTarget();
2522     if (srcRT) {
2523         GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(srcRT);
2524         // This will technically return true for single sample rts that used DMSAA in which case we
2525         // don't have to pick the resolve attachment. But in that case the resolve and color
2526         // attachments will be the same anyway.
2527         if (this->vkCaps().renderTargetSupportsDiscardableMSAA(vkRT)) {
2528             srcImage = vkRT->resolveAttachment();
2529         } else {
2530             srcImage = vkRT->colorAttachment();
2531         }
2532     } else if (src->asTexture()) {
2533         SkASSERT(src->asTexture());
2534         srcImage = static_cast<GrVkTexture*>(src->asTexture())->textureImage();
2535     } else {
2536         // The surface is already a GrAttachment.
2537         srcImage = static_cast<GrVkImage*>(src);
2538     }
2539 
2540     VkFormat dstFormat = dstImage->imageFormat();
2541     VkFormat srcFormat = srcImage->imageFormat();
2542 
2543     int dstSampleCnt = dstImage->numSamples();
2544     int srcSampleCnt = srcImage->numSamples();
2545 
2546     bool dstHasYcbcr = dstImage->ycbcrConversionInfo().isValid();
2547     bool srcHasYcbcr = srcImage->ycbcrConversionInfo().isValid();
2548 
2549     if (this->vkCaps().canCopyAsResolve(dstFormat, dstSampleCnt, dstHasYcbcr,
2550                                         srcFormat, srcSampleCnt, srcHasYcbcr)) {
2551         this->copySurfaceAsResolve(dst, src, srcRect, dstPoint);
2552         return true;
2553     }
2554 
2555     if (this->vkCaps().canCopyImage(dstFormat, dstSampleCnt, dstHasYcbcr,
2556                                     srcFormat, srcSampleCnt, srcHasYcbcr)) {
2557         this->copySurfaceAsCopyImage(dst, src, dstImage, srcImage, srcRect, dstPoint);
2558         return true;
2559     }
2560 
2561     if (this->vkCaps().canCopyAsBlit(dstFormat,
2562                                      dstSampleCnt,
2563                                      dstImage->isLinearTiled(),
2564                                      dstHasYcbcr,
2565                                      srcFormat,
2566                                      srcSampleCnt,
2567                                      srcImage->isLinearTiled(),
2568                                      srcHasYcbcr)) {
2569         this->copySurfaceAsBlit(dst, src, dstImage, srcImage, srcRect, dstPoint);
2570         return true;
2571     }
2572 
2573     return false;
2574 }
2575 
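// Reads pixels back by copying the image into a host-visible transfer buffer, force-submitting the
// command buffer, and then copying the mapped buffer contents into 'buffer'.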
2576 bool GrVkGpu::onReadPixels(GrSurface* surface,
2577                            SkIRect rect,
2578                            GrColorType surfaceColorType,
2579                            GrColorType dstColorType,
2580                            void* buffer,
2581                            size_t rowBytes) {
2582     if (surface->isProtected()) {
2583         return false;
2584     }
2585 
2586     if (!this->currentCommandBuffer()) {
2587         return false;
2588     }
2589 
2590     GrVkImage* image = nullptr;
2591     GrVkRenderTarget* rt = static_cast<GrVkRenderTarget*>(surface->asRenderTarget());
2592     if (rt) {
2593         // Reading from render targets that wrap a secondary command buffer is not allowed since
2594         // it would require us to know the VkImage, which we don't have, and would require stopping
2595         // and restarting the VkRenderPass, which we don't have access to.
2596         if (rt->wrapsSecondaryCommandBuffer()) {
2597             return false;
2598         }
2599         image = rt->nonMSAAAttachment();
2600     } else {
2601         image = static_cast<GrVkTexture*>(surface->asTexture())->textureImage();
2602     }
2603 
2604     if (!image) {
2605         return false;
2606     }
2607 
2608     if (dstColorType == GrColorType::kUnknown ||
2609         dstColorType != this->vkCaps().transferColorType(image->imageFormat(), surfaceColorType)) {
2610         return false;
2611     }
2612 
2613     // Change the layout of our target so it can be used as a copy source.
2614     image->setImageLayout(this,
2615                           VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
2616                           VK_ACCESS_TRANSFER_READ_BIT,
2617                           VK_PIPELINE_STAGE_TRANSFER_BIT,
2618                           false);
2619 
2620     size_t bpp = GrColorTypeBytesPerPixel(dstColorType);
2621     if (GrVkFormatBytesPerBlock(image->imageFormat()) != bpp) {
2622         return false;
2623     }
2624     size_t tightRowBytes = bpp*rect.width();
2625 
2626     VkBufferImageCopy region;
2627     memset(&region, 0, sizeof(VkBufferImageCopy));
2628     VkOffset3D offset = { rect.left(), rect.top(), 0 };
2629     region.imageOffset = offset;
2630     region.imageExtent = { (uint32_t)rect.width(), (uint32_t)rect.height(), 1 };
2631 
2632     size_t transBufferRowBytes = bpp * region.imageExtent.width;
2633     size_t imageRows = region.imageExtent.height;
2634     GrResourceProvider* resourceProvider = this->getContext()->priv().resourceProvider();
2635     sk_sp<GrGpuBuffer> transferBuffer = resourceProvider->createBuffer(
2636             transBufferRowBytes * imageRows, GrGpuBufferType::kXferGpuToCpu,
2637             kDynamic_GrAccessPattern);
2638 
2639     if (!transferBuffer) {
2640         return false;
2641     }
2642 
2643     GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(transferBuffer.get());
2644 
2645     // Copy the image to a buffer so we can map it to cpu memory
2646     region.bufferOffset = 0;
2647     region.bufferRowLength = 0; // Forces RowLength to be width. We handle the rowBytes below.
2648     region.bufferImageHeight = 0; // Forces height to be tightly packed. Only useful for 3d images.
2649     region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
2650 
2651     this->currentCommandBuffer()->copyImageToBuffer(this,
2652                                                     image,
2653                                                     VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
2654                                                     transferBuffer,
2655                                                     1,
2656                                                     &region);
2657 
2658     // make sure the copy to buffer has finished
2659     vkBuffer->addMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT,
2660                                VK_ACCESS_HOST_READ_BIT,
2661                                VK_PIPELINE_STAGE_TRANSFER_BIT,
2662                                VK_PIPELINE_STAGE_HOST_BIT,
2663                                false);
2664 
2665     // We need to submit the current command buffer to the Queue and make sure it finishes before
2666     // we can copy the data out of the buffer.
2667     if (!this->submitCommandBuffer(kForce_SyncQueue)) {
2668         return false;
2669     }
2670     void* mappedMemory = transferBuffer->map();
2671 
2672     SkRectMemcpy(buffer, rowBytes, mappedMemory, transBufferRowBytes, tightRowBytes, rect.height());
2673 
2674     transferBuffer->unmap();
2675     return true;
2676 }
2677 
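// Begins the given render pass on the current command buffer, filling in the clear values for the
// color attachment and, when present, the stencil attachment.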
2678 bool GrVkGpu::beginRenderPass(const GrVkRenderPass* renderPass,
2679                               sk_sp<const GrVkFramebuffer> framebuffer,
2680                               const VkClearValue* colorClear,
2681                               const GrSurface* target,
2682                               const SkIRect& renderPassBounds,
2683                               bool forSecondaryCB) {
2684     if (!this->currentCommandBuffer()) {
2685         return false;
2686     }
2687     SkASSERT(!framebuffer->isExternal());
2688 
2689 #ifdef SK_DEBUG
2690     uint32_t index;
2691     bool result = renderPass->colorAttachmentIndex(&index);
2692     SkASSERT(result && 0 == index);
2693     result = renderPass->stencilAttachmentIndex(&index);
2694     if (result) {
2695         SkASSERT(1 == index);
2696     }
2697 #endif
2698     VkClearValue clears[3];
2699     int stencilIndex = renderPass->hasResolveAttachment() ? 2 : 1;
2700     clears[0].color = colorClear->color;
2701     clears[stencilIndex].depthStencil.depth = 0.0f;
2702     clears[stencilIndex].depthStencil.stencil = 0;
2703 
2704     return this->currentCommandBuffer()->beginRenderPass(
2705         this, renderPass, std::move(framebuffer), clears, target, renderPassBounds, forSecondaryCB);
2706 }
2707 
2708 void GrVkGpu::endRenderPass(GrRenderTarget* target, GrSurfaceOrigin origin,
2709                             const SkIRect& bounds) {
2710     // We had a command buffer when we started the render pass, so we should have one now as well.
2711     SkASSERT(this->currentCommandBuffer());
2712     this->currentCommandBuffer()->endRenderPass(this);
2713     this->didWriteToSurface(target, origin, &bounds);
2714 }
2715 
2716 #ifdef SKIA_DFX_FOR_RECORD_VKIMAGE
2717 void GrVkGpu::dumpDeviceFaultInfo(const std::string& errorCategory) {
2718     VkDeviceFaultCountsEXT fc{};
2719     fc.sType = VK_STRUCTURE_TYPE_DEVICE_FAULT_COUNTS_EXT;
2720     fc.pNext = nullptr;
2721     GR_VK_CALL(this->vkInterface(), GetDeviceFaultInfoEXT(fDevice, &fc, nullptr));
2722 
2723     const uint32_t vendorBinarySize = static_cast<uint32_t>(
2724         std::min<VkDeviceSize>(fc.vendorBinarySize, std::numeric_limits<uint32_t>::max()));
2725     std::vector<VkDeviceFaultAddressInfoEXT>    addressInfos    (fc.addressInfoCount);
2726     std::vector<VkDeviceFaultVendorInfoEXT>     vendorInfos     (fc.vendorInfoCount);
2727     std::vector<uint8_t>                        vendorBinaryData(vendorBinarySize);
2728     VkDeviceFaultInfoEXT fi{};
2729     VkDebugUtilsObjectNameInfoEXT nameInfo{};
2730     fi.sType                = VK_STRUCTURE_TYPE_DEVICE_FAULT_INFO_EXT;
2731     fi.pNext                = &nameInfo;
2732     fi.pAddressInfos        = addressInfos.data();
2733     fi.pVendorInfos         = vendorInfos.data();
2734     fi.pVendorBinaryData    = vendorBinaryData.data();
2735     GR_VK_CALL(this->vkInterface(), GetDeviceFaultInfoEXT(fDevice, &fc, &fi));
2736     if (!fi.pNext) {
2737         SK_LOGE("GrVkGpu::dumpDeviceFaultInfo %{public}s pNext is nullptr", errorCategory.c_str());
2738         return;
2739     }
2740     auto obj = static_cast<VkDebugUtilsObjectNameInfoEXT*>(fi.pNext);
2741     if (obj == nullptr) {
2742         SK_LOGE("GrVkGpu::dumpDeviceFaultInfo %{public}s obj is nullptr", errorCategory.c_str());
2743         return;
2744     }
2745     auto vkImage = obj->objectHandle;
2746     if (!vkImage) {
2747         SK_LOGE("GrVkGpu::dumpDeviceFaultInfo %{public}s vkimage is nullptr", errorCategory.c_str());
2748         return;
2749     }
2750     SK_LOGE("GrVkGpu::dumpDeviceFaultInfo %{public}s vkimage 0x%{public}llx", errorCategory.c_str(), vkImage);
2751 }
2752 
2753 void GrVkGpu::dumpVkImageDfx(const std::string& errorCategory) {
2754     if (!ParallelDebug::IsVkImageDfxEnabled()) {
2755         return;
2756     }
2757     dumpDeviceFaultInfo(errorCategory);
2758     auto context = getContext();
2759     if (context == nullptr) {
2760         SK_LOGE("GrVkGpu::dumpVkImageDfx %{public}s context nullptr", errorCategory.c_str());
2761         return;
2762     }
2763     std::stringstream dump;
2764     context->dumpAllResource(dump);
2765     std::string s;
2766     while (std::getline(dump, s, '\n')) {
2767         SK_LOGE("%{public}s", s.c_str());
2768     }
2769 }
2770 #endif
2771 
2772 void GrVkGpu::reportVulkanError(const std::string& errorCategory) {
2773     auto context = getContext();
2774     if (context == nullptr) {
2775         SK_LOGE("GrVkGpu::reportVulkanError %{public}s context nullptr", errorCategory.c_str());
2776         return;
2777     }
2778     SK_LOGE("GrVkGpu::reportVulkanError report %{public}s", errorCategory.c_str());
2779     context->processVulkanError();
2780 }
2781 
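// Maps a VkResult to a success/failure boolean, recording device loss and out-of-memory
// conditions as side effects so later work can be skipped or the context abandoned.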
2782 bool GrVkGpu::checkVkResult(VkResult result) {
2783     int32_t numResult = static_cast<int32_t>(result);
2784     switch (numResult) {
2785         case VK_SUCCESS:
2786             return true;
2787         case VK_ERROR_DEVICE_LOST:
2788 #ifdef SKIA_DFX_FOR_OHOS
2789             {
2790                 auto context = getContext();
2791                 if (context) {
2792                     auto cache = context->priv().getResourceCache();
2793                     if (cache) {
2794                         auto cacheInfo = cache->cacheInfo();
2795                         SK_LOGE("GrVkGpu::checkVkResult VK_ERROR_DEVICE_LOST, cacheInfo = %{public}s",
2796                             cacheInfo.c_str());
2797                     }
2798                 }
2799             }
2800 #endif
2801             fDeviceIsLost = true;
2802             reportVulkanError("VK_ERROR_DEVICE_LOST");
2803 #ifdef SKIA_DFX_FOR_RECORD_VKIMAGE
2804             dumpVkImageDfx("VK_ERROR_DEVICE_LOST");
2805 #endif
2806             return false;
2807         case VK_ERROR_OUT_OF_DEVICE_MEMORY:
2808         case VK_ERROR_OUT_OF_HOST_MEMORY:
2809             this->setOOMed();
2810             return false;
2811         case VK_HUAWEI_GPU_ERROR_RECOVER:
2812             reportVulkanError("VK_HUAWEI_GPU_ERROR_RECOVER");
2813 #ifdef SKIA_DFX_FOR_RECORD_VKIMAGE
2814             dumpVkImageDfx("VK_HUAWEI_GPU_ERROR_RECOVER");
2815 #endif
2816             return true;
2817         default:
2818             return false;
2819     }
2820 }
2821 
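// Queries the HUAWEI blur extension for the image dimensions it would produce for the given blur
// arguments; returns {0, 0} if the GetBlurImageSizeHUAWEI entry point is unavailable.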
2822 std::array<int, 2> GrVkGpu::GetHpsDimension(const SkBlurArg& blurArg) const
2823 {
2824     int width = 0;
2825     int height = 0;
2826     VkRect2D srcRegion;
2827     srcRegion.offset = {blurArg.srcRect.fLeft, blurArg.srcRect.fTop};
2828     srcRegion.extent = {
2829         static_cast<uint32_t>(std::clamp(
2830             blurArg.srcRect.width(), 0.0f, static_cast<float>(UINT32_MAX))),
2831         static_cast<uint32_t>(std::clamp(
2832             blurArg.srcRect.height(), 0.0f, static_cast<float>(UINT32_MAX)))};
2833 
2834     VkRect2D dstRegion;
2835     dstRegion.offset = { blurArg.dstRect.fLeft, blurArg.dstRect.fTop };
2836     dstRegion.extent = {
2837         static_cast<uint32_t>(std::clamp(
2838             blurArg.dstRect.width(), 0.0f, static_cast<float>(UINT32_MAX))),
2839         static_cast<uint32_t>(std::clamp(
2840             blurArg.dstRect.height(), 0.0f, static_cast<float>(UINT32_MAX)))};
2841 
2842     VkDrawBlurImageInfoHUAWEI drawBlurImageInfo {};
2843     drawBlurImageInfo.sType = VkStructureTypeHUAWEI::VK_STRUCTURE_TYPE_DRAW_BLUR_IMAGE_INFO_HUAWEI;
2844     drawBlurImageInfo.pNext = nullptr;
2845     drawBlurImageInfo.sigma = blurArg.sigma;
2846     drawBlurImageInfo.srcRegion = srcRegion;
2847     drawBlurImageInfo.dstRegion = dstRegion;
2848     drawBlurImageInfo.srcImageView = VK_NULL_HANDLE;
2849 
2850     VkRect2D hpsDimension {};
2851     auto grVkInterface = this->vkInterface();
2852     if (grVkInterface != nullptr && grVkInterface->fFunctions.fGetBlurImageSizeHUAWEI != nullptr) {
2853         VK_CALL(GetBlurImageSizeHUAWEI(this->device(), &drawBlurImageInfo, &hpsDimension));
2854         width = static_cast<int>(hpsDimension.extent.width);
2855         height = static_cast<int>(hpsDimension.extent.height);
2856     }
2857 
2858     std::array<int, 2> res = {width, height};
2859     return res;
2860 }
2861 
2862 void GrVkGpu::dumpVmaStats(SkString *out) {
2863     if (out == nullptr) {
2864         return;
2865     }
2866     out->appendf("dumpVmaCacheStats:\n");
2867     fMemoryAllocatorCacheImage->dumpVmaStats(out, "\n");
2868 }
2869 
2870 // OH ISSUE: async memory reclaimer
2871 void GrVkGpu::setGpuMemoryAsyncReclaimerSwitch(bool enabled, const std::function<void()>& setThreadPriority)
2872 {
2873     if (!fMemoryReclaimer) {
2874         fMemoryReclaimer = std::make_unique<GrVkMemoryReclaimer>(enabled, setThreadPriority);
2875     }
2876 }
2877 
2878 // OH ISSUE: async memory reclaimer
2879 void GrVkGpu::flushGpuMemoryInWaitQueue()
2880 {
2881     if (fMemoryReclaimer) {
2882         fMemoryReclaimer->flushGpuMemoryInWaitQueue();
2883     }
2884 }
2885 
2886 
2887 #ifdef SKIA_DFX_FOR_OHOS
2888 void GrVkGpu::addAllocImageBytes(size_t bytes)
2889 {
2890     auto cache = getContext()->priv().getResourceCache();
2891     if (!cache) {
2892         return;
2893     }
2894     cache->addAllocImageBytes(bytes);
2895 }
2896 
2897 void GrVkGpu::removeAllocImageBytes(size_t bytes)
2898 {
2899     auto cache = getContext()->priv().getResourceCache();
2900     if (!cache) {
2901         return;
2902     }
2903     cache->removeAllocImageBytes(bytes);
2904 }
2905 
2906 void GrVkGpu::addAllocBufferBytes(size_t bytes)
2907 {
2908     auto cache = getContext()->priv().getResourceCache();
2909     if (!cache) {
2910         return;
2911     }
2912     cache->addAllocBufferBytes(bytes);
2913 }
2914 
2915 void GrVkGpu::removeAllocBufferBytes(size_t bytes)
2916 {
2917     auto cache = getContext()->priv().getResourceCache();
2918     if (!cache) {
2919         return;
2920     }
2921     cache->removeAllocBufferBytes(bytes);
2922 }
2923 #endif
2924 
2925 void GrVkGpu::submitSecondaryCommandBuffer(std::unique_ptr<GrVkSecondaryCommandBuffer> buffer) {
2926     if (!this->currentCommandBuffer()) {
2927         return;
2928     }
2929     this->currentCommandBuffer()->executeCommands(this, std::move(buffer));
2930 }
2931 
2932 void GrVkGpu::submit(GrOpsRenderPass* renderPass) {
2933     SkASSERT(fCachedOpsRenderPass.get() == renderPass);
2934 
2935     fCachedOpsRenderPass->submit();
2936     fCachedOpsRenderPass->reset();
2937 }
2938 
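// Creates a VkFence and submits it to the queue with no command buffers so that it signals once
// previously submitted work on the queue completes.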
2939 GrFence SK_WARN_UNUSED_RESULT GrVkGpu::insertFence() {
2940     VkFenceCreateInfo createInfo;
2941     memset(&createInfo, 0, sizeof(VkFenceCreateInfo));
2942     createInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
2943     createInfo.pNext = nullptr;
2944     createInfo.flags = 0;
2945     VkFence fence = VK_NULL_HANDLE;
2946     VkResult result;
2947 
2948     VK_CALL_RET(result, CreateFence(this->device(), &createInfo, nullptr, &fence));
2949     if (result != VK_SUCCESS) {
2950         return 0;
2951     }
2952     VK_CALL_RET(result, QueueSubmit(this->queue(), 0, nullptr, fence));
2953     if (result != VK_SUCCESS) {
2954         VK_CALL(DestroyFence(this->device(), fence, nullptr));
2955         return 0;
2956     }
2957 
2958     static_assert(sizeof(GrFence) >= sizeof(VkFence));
2959     return (GrFence)fence;
2960 }
2961 
2962 bool GrVkGpu::waitFence(GrFence fence) {
2963     SkASSERT(VK_NULL_HANDLE != (VkFence)fence);
2964 
2965     VkResult result;
2966     VK_CALL_RET(result, WaitForFences(this->device(), 1, (VkFence*)&fence, VK_TRUE, 0));
2967     return (VK_SUCCESS == result);
2968 }
2969 
2970 void GrVkGpu::deleteFence(GrFence fence) const {
2971     VK_CALL(DestroyFence(this->device(), (VkFence)fence, nullptr));
2972 }
2973 
2974 std::unique_ptr<GrSemaphore> SK_WARN_UNUSED_RESULT GrVkGpu::makeSemaphore(bool isOwned) {
2975     return GrVkSemaphore::Make(this, isOwned);
2976 }
2977 
2978 std::unique_ptr<GrSemaphore> GrVkGpu::wrapBackendSemaphore(const GrBackendSemaphore& semaphore,
2979                                                            GrSemaphoreWrapType wrapType,
2980                                                            GrWrapOwnership ownership) {
2981     return GrVkSemaphore::MakeWrapped(this, semaphore.vkSemaphore(), wrapType, ownership);
2982 }
2983 
2984 void GrVkGpu::insertSemaphore(GrSemaphore* semaphore) {
2985     SkASSERT(semaphore);
2986 
2987     GrVkSemaphore* vkSem = static_cast<GrVkSemaphore*>(semaphore);
2988 
2989     GrVkSemaphore::Resource* resource = vkSem->getResource();
2990     if (resource->shouldSignal()) {
2991         resource->ref();
2992         fSemaphoresToSignal.push_back(resource);
2993     }
2994 }
2995 
2996 void GrVkGpu::waitSemaphore(GrSemaphore* semaphore) {
2997     SkASSERT(semaphore);
2998 
2999     GrVkSemaphore* vkSem = static_cast<GrVkSemaphore*>(semaphore);
3000 
3001     GrVkSemaphore::Resource* resource = vkSem->getResource();
3002     if (resource->shouldWait()) {
3003         resource->ref();
3004         fSemaphoresToWaitOn.push_back(resource);
3005     }
3006 }
3007 
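// Transitions the texture to a shader-readable layout and submits pending work so the texture can
// be used by another context; returns no semaphore because the layout change acts as the barrier.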
3008 std::unique_ptr<GrSemaphore> GrVkGpu::prepareTextureForCrossContextUsage(GrTexture* texture) {
3009     SkASSERT(texture);
3010     GrVkImage* vkTexture = static_cast<GrVkTexture*>(texture)->textureImage();
3011     vkTexture->setImageLayout(this,
3012                               VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
3013                               VK_ACCESS_SHADER_READ_BIT,
3014                               VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
3015                               false);
3016     // TODO: should we have a way to notify the caller that this has failed? Currently if the submit
3017     // fails (caused by DEVICE_LOST) this will just cause us to fail the next use of the gpu.
3018     // Eventually we will abandon the whole GPU if this fails.
3019     this->submitToGpu(false);
3020 
3021     // The image layout change serves as a barrier, so no semaphore is needed.
3022     // If we ever decide we need to return a semaphore here, we need to make sure GrVkSemaphore is
3023     // thread safe so that only the first thread that tries to use the semaphore actually submits
3024     // it. This additionally would also require thread safety in command buffer submissions to
3025     // queues in general.
3026     return nullptr;
3027 }
3028 
3029 void GrVkGpu::addDrawable(std::unique_ptr<SkDrawable::GpuDrawHandler> drawable) {
3030     fDrawables.emplace_back(std::move(drawable));
3031 }
3032 
3033 void GrVkGpu::storeVkPipelineCacheData() {
3034     if (this->getContext()->priv().getPersistentCache()) {
3035         this->resourceProvider().storePipelineCacheData();
3036     }
3037 }
3038