/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/vk/GrVkGpu.h"

#include "include/gpu/GrBackendSemaphore.h"
#include "include/gpu/GrBackendSurface.h"
#include "include/gpu/GrContextOptions.h"
#include "include/gpu/GrDirectContext.h"
#include "include/private/SkTo.h"
#include "src/core/SkCompressedDataUtils.h"
#include "src/core/SkConvertPixels.h"
#include "src/core/SkMipmap.h"
#include "src/gpu/GrBackendUtils.h"
#include "src/gpu/GrDataUtils.h"
#include "src/gpu/GrDirectContextPriv.h"
#include "src/gpu/GrGeometryProcessor.h"
#include "src/gpu/GrGpuResourceCacheAccess.h"
#include "src/gpu/GrNativeRect.h"
#include "src/gpu/GrPipeline.h"
#include "src/gpu/GrRenderTarget.h"
#include "src/gpu/GrSurfaceDrawContext.h"
#include "src/gpu/GrTexture.h"
#include "src/gpu/GrThreadSafePipelineBuilder.h"
#include "src/gpu/SkGpuDevice.h"
#include "src/gpu/SkGr.h"
#include "src/gpu/vk/GrVkAMDMemoryAllocator.h"
#include "src/gpu/vk/GrVkBuffer.h"
#include "src/gpu/vk/GrVkCommandBuffer.h"
#include "src/gpu/vk/GrVkCommandPool.h"
#include "src/gpu/vk/GrVkFramebuffer.h"
#include "src/gpu/vk/GrVkImage.h"
#include "src/gpu/vk/GrVkInterface.h"
#include "src/gpu/vk/GrVkMemory.h"
#include "src/gpu/vk/GrVkOpsRenderPass.h"
#include "src/gpu/vk/GrVkPipeline.h"
#include "src/gpu/vk/GrVkPipelineState.h"
#include "src/gpu/vk/GrVkRenderPass.h"
#include "src/gpu/vk/GrVkResourceProvider.h"
#include "src/gpu/vk/GrVkSemaphore.h"
#include "src/gpu/vk/GrVkTexture.h"
#include "src/gpu/vk/GrVkTextureRenderTarget.h"
#include "src/image/SkImage_Gpu.h"
#include "src/image/SkSurface_Gpu.h"

#include "include/gpu/vk/GrVkExtensions.h"
#include "include/gpu/vk/GrVkTypes.h"

#include <utility>

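// Note: all Vulkan calls made by GrVkGpu are dispatched through the GrVkInterface function
// table built from the client-supplied fGetProc, rather than through the Vulkan loader
// directly.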
#define VK_CALL(X) GR_VK_CALL(this->vkInterface(), X)
#define VK_CALL_RET(RET, X) GR_VK_CALL_RESULT(this, RET, X)

sk_sp<GrGpu> GrVkGpu::Make(const GrVkBackendContext& backendContext,
                           const GrContextOptions& options, GrDirectContext* direct) {
    if (backendContext.fInstance == VK_NULL_HANDLE ||
        backendContext.fPhysicalDevice == VK_NULL_HANDLE ||
        backendContext.fDevice == VK_NULL_HANDLE ||
        backendContext.fQueue == VK_NULL_HANDLE) {
        return nullptr;
    }
    if (!backendContext.fGetProc) {
        return nullptr;
    }

    PFN_vkEnumerateInstanceVersion localEnumerateInstanceVersion =
            reinterpret_cast<PFN_vkEnumerateInstanceVersion>(
                    backendContext.fGetProc("vkEnumerateInstanceVersion",
                                            VK_NULL_HANDLE, VK_NULL_HANDLE));
    uint32_t instanceVersion = 0;
    if (!localEnumerateInstanceVersion) {
        instanceVersion = VK_MAKE_VERSION(1, 0, 0);
    } else {
        VkResult err = localEnumerateInstanceVersion(&instanceVersion);
        if (err) {
            SkDebugf("Failed to enumerate instance version. Err: %d\n", err);
            return nullptr;
        }
    }

    PFN_vkGetPhysicalDeviceProperties localGetPhysicalDeviceProperties =
            reinterpret_cast<PFN_vkGetPhysicalDeviceProperties>(
                    backendContext.fGetProc("vkGetPhysicalDeviceProperties",
                                            backendContext.fInstance,
                                            VK_NULL_HANDLE));

    if (!localGetPhysicalDeviceProperties) {
        return nullptr;
    }
    VkPhysicalDeviceProperties physDeviceProperties;
    localGetPhysicalDeviceProperties(backendContext.fPhysicalDevice, &physDeviceProperties);
    uint32_t physDevVersion = physDeviceProperties.apiVersion;

    uint32_t apiVersion = backendContext.fMaxAPIVersion ? backendContext.fMaxAPIVersion
                                                        : instanceVersion;

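    // Clamp both versions to the client's stated maximum so we never use Vulkan entry points
    // newer than what the client supports.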
    instanceVersion = std::min(instanceVersion, apiVersion);
    physDevVersion = std::min(physDevVersion, apiVersion);

    sk_sp<const GrVkInterface> interface;

    if (backendContext.fVkExtensions) {
        interface.reset(new GrVkInterface(backendContext.fGetProc,
                                          backendContext.fInstance,
                                          backendContext.fDevice,
                                          instanceVersion,
                                          physDevVersion,
                                          backendContext.fVkExtensions));
        if (!interface->validate(instanceVersion, physDevVersion, backendContext.fVkExtensions)) {
            return nullptr;
        }
    } else {
        GrVkExtensions extensions;
        // The only extension flag that may affect the Vulkan backend is the swapchain
        // extension. We need to know if it is enabled so we know whether we can transition
        // to a present layout when flushing a surface.
        if (backendContext.fExtensions & kKHR_swapchain_GrVkExtensionFlag) {
            const char* swapChainExtName = VK_KHR_SWAPCHAIN_EXTENSION_NAME;
            extensions.init(backendContext.fGetProc, backendContext.fInstance,
                            backendContext.fPhysicalDevice, 0, nullptr, 1, &swapChainExtName);
        }
        interface.reset(new GrVkInterface(backendContext.fGetProc,
                                          backendContext.fInstance,
                                          backendContext.fDevice,
                                          instanceVersion,
                                          physDevVersion,
                                          &extensions));
        if (!interface->validate(instanceVersion, physDevVersion, &extensions)) {
            return nullptr;
        }
    }

    sk_sp<GrVkCaps> caps;
    if (backendContext.fDeviceFeatures2) {
        caps.reset(new GrVkCaps(options, interface.get(), backendContext.fPhysicalDevice,
                                *backendContext.fDeviceFeatures2, instanceVersion, physDevVersion,
                                *backendContext.fVkExtensions, backendContext.fProtectedContext));
    } else if (backendContext.fDeviceFeatures) {
        VkPhysicalDeviceFeatures2 features2;
        features2.pNext = nullptr;
        features2.features = *backendContext.fDeviceFeatures;
        caps.reset(new GrVkCaps(options, interface.get(), backendContext.fPhysicalDevice,
                                features2, instanceVersion, physDevVersion,
                                *backendContext.fVkExtensions, backendContext.fProtectedContext));
    } else {
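        // Fall back to the legacy GrVkFeatureFlag bits when the client supplied neither
        // fDeviceFeatures nor fDeviceFeatures2.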
        VkPhysicalDeviceFeatures2 features;
        memset(&features, 0, sizeof(VkPhysicalDeviceFeatures2));
        features.pNext = nullptr;
        if (backendContext.fFeatures & kGeometryShader_GrVkFeatureFlag) {
            features.features.geometryShader = true;
        }
        if (backendContext.fFeatures & kDualSrcBlend_GrVkFeatureFlag) {
            features.features.dualSrcBlend = true;
        }
        if (backendContext.fFeatures & kSampleRateShading_GrVkFeatureFlag) {
            features.features.sampleRateShading = true;
        }
        GrVkExtensions extensions;
        // The only extension flag that may affect the Vulkan backend is the swapchain
        // extension. We need to know if it is enabled so we know whether we can transition
        // to a present layout when flushing a surface.
        if (backendContext.fExtensions & kKHR_swapchain_GrVkExtensionFlag) {
            const char* swapChainExtName = VK_KHR_SWAPCHAIN_EXTENSION_NAME;
            extensions.init(backendContext.fGetProc, backendContext.fInstance,
                            backendContext.fPhysicalDevice, 0, nullptr, 1, &swapChainExtName);
        }
        caps.reset(new GrVkCaps(options, interface.get(), backendContext.fPhysicalDevice,
                                features, instanceVersion, physDevVersion, extensions,
                                backendContext.fProtectedContext));
    }

    if (!caps) {
        return nullptr;
    }

    sk_sp<GrVkMemoryAllocator> memoryAllocator = backendContext.fMemoryAllocator;
    if (!memoryAllocator) {
        // We were not given a memory allocator at creation
        memoryAllocator = GrVkAMDMemoryAllocator::Make(backendContext.fInstance,
                                                       backendContext.fPhysicalDevice,
                                                       backendContext.fDevice, physDevVersion,
                                                       backendContext.fVkExtensions, interface,
                                                       caps.get());
    }
    if (!memoryAllocator) {
        SkDEBUGFAIL("No supplied vulkan memory allocator and unable to create one internally.");
        return nullptr;
    }

    sk_sp<GrVkGpu> vkGpu(new GrVkGpu(direct, backendContext, std::move(caps), interface,
                                     instanceVersion, physDevVersion,
                                     std::move(memoryAllocator)));
    if (backendContext.fProtectedContext == GrProtected::kYes &&
        !vkGpu->vkCaps().supportsProtectedMemory()) {
        return nullptr;
    }
    return std::move(vkGpu);
}

////////////////////////////////////////////////////////////////////////////////

GrVkGpu::GrVkGpu(GrDirectContext* direct, const GrVkBackendContext& backendContext,
                 sk_sp<GrVkCaps> caps, sk_sp<const GrVkInterface> interface,
                 uint32_t instanceVersion, uint32_t physicalDeviceVersion,
                 sk_sp<GrVkMemoryAllocator> memoryAllocator)
        : INHERITED(direct)
        , fInterface(std::move(interface))
        , fMemoryAllocator(std::move(memoryAllocator))
        , fVkCaps(std::move(caps))
        , fPhysicalDevice(backendContext.fPhysicalDevice)
        , fDevice(backendContext.fDevice)
        , fQueue(backendContext.fQueue)
        , fQueueIndex(backendContext.fGraphicsQueueIndex)
        , fResourceProvider(this)
        , fStagingBufferManager(this)
        , fDisconnected(false)
        , fProtectedContext(backendContext.fProtectedContext) {
    SkASSERT(!backendContext.fOwnsInstanceAndDevice);
    SkASSERT(fMemoryAllocator);

    this->initCapsAndCompiler(fVkCaps);

    VK_CALL(GetPhysicalDeviceProperties(backendContext.fPhysicalDevice, &fPhysDevProps));
    VK_CALL(GetPhysicalDeviceMemoryProperties(backendContext.fPhysicalDevice, &fPhysDevMemProps));

    fResourceProvider.init();

    fMainCmdPool = fResourceProvider.findOrCreateCommandPool();
    if (fMainCmdPool) {
        fMainCmdBuffer = fMainCmdPool->getPrimaryCommandBuffer();
        SkASSERT(this->currentCommandBuffer());
        this->currentCommandBuffer()->begin(this);
    }
}

void GrVkGpu::destroyResources() {
    if (fMainCmdPool) {
        fMainCmdPool->getPrimaryCommandBuffer()->end(this, /*abandoningBuffer=*/true);
        fMainCmdPool->close();
    }

    // wait for all commands to finish
    this->finishOutstandingGpuWork();

    if (fMainCmdPool) {
        fMainCmdPool->unref();
        fMainCmdPool = nullptr;
    }

    for (int i = 0; i < fSemaphoresToWaitOn.count(); ++i) {
        fSemaphoresToWaitOn[i]->unref();
    }
    fSemaphoresToWaitOn.reset();

    for (int i = 0; i < fSemaphoresToSignal.count(); ++i) {
        fSemaphoresToSignal[i]->unref();
    }
    fSemaphoresToSignal.reset();

    fStagingBufferManager.reset();

    fMSAALoadManager.destroyResources(this);

    // must call this just before we destroy the command pool and VkDevice
    fResourceProvider.destroyResources();
}

GrVkGpu::~GrVkGpu() {
    if (!fDisconnected) {
        this->destroyResources();
    }
    // We don't delete the memory allocator until the very end of the GrVkGpu lifetime so that
    // clients can continue to delete backend textures even after a context has been abandoned.
    fMemoryAllocator.reset();
}


void GrVkGpu::disconnect(DisconnectType type) {
    INHERITED::disconnect(type);
    if (!fDisconnected) {
        this->destroyResources();

        fSemaphoresToWaitOn.reset();
        fSemaphoresToSignal.reset();
        fMainCmdBuffer = nullptr;
        fDisconnected = true;
    }
}

GrThreadSafePipelineBuilder* GrVkGpu::pipelineBuilder() {
    return fResourceProvider.pipelineStateCache();
}

sk_sp<GrThreadSafePipelineBuilder> GrVkGpu::refPipelineBuilder() {
    return fResourceProvider.refPipelineStateCache();
}

///////////////////////////////////////////////////////////////////////////////

GrOpsRenderPass* GrVkGpu::onGetOpsRenderPass(
        GrRenderTarget* rt,
        bool useMSAASurface,
        GrAttachment* stencil,
        GrSurfaceOrigin origin,
        const SkIRect& bounds,
        const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
        const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo,
        const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
        GrXferBarrierFlags renderPassXferBarriers) {
    if (!fCachedOpsRenderPass) {
        fCachedOpsRenderPass = std::make_unique<GrVkOpsRenderPass>(this);
    }

    // For the given render target and requested render pass features we need to find a compatible
    // framebuffer to use for the render pass. Technically it is the underlying VkRenderPass that
    // is compatible, but that is part of the framebuffer that we get here.
    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt);

    SkASSERT(!useMSAASurface ||
             (rt->numSamples() > 1 ||
              (this->vkCaps().preferDiscardableMSAAAttachment() && vkRT->resolveAttachment() &&
               vkRT->resolveAttachment()->supportsInputAttachmentUsage())));

    // Convert the GrXferBarrierFlags into render pass self-dependency flags
    GrVkRenderPass::SelfDependencyFlags selfDepFlags = GrVkRenderPass::SelfDependencyFlags::kNone;
    if (renderPassXferBarriers & GrXferBarrierFlags::kBlend) {
        selfDepFlags |= GrVkRenderPass::SelfDependencyFlags::kForNonCoherentAdvBlend;
    }
    if (renderPassXferBarriers & GrXferBarrierFlags::kTexture) {
        selfDepFlags |= GrVkRenderPass::SelfDependencyFlags::kForInputAttachment;
    }

    // Figure out if we need a resolve attachment for this render pass. A resolve attachment is
    // needed if we are using msaa to draw with a discardable msaa attachment. If we are in this
    // case we also need to update the color load/store ops since we don't want to ever load or
    // store the msaa color attachment, but may need to for the resolve attachment.
    GrOpsRenderPass::LoadAndStoreInfo localColorInfo = colorInfo;
    bool withResolve = false;
    GrVkRenderPass::LoadFromResolve loadFromResolve = GrVkRenderPass::LoadFromResolve::kNo;
    GrOpsRenderPass::LoadAndStoreInfo resolveInfo{GrLoadOp::kLoad, GrStoreOp::kStore, {}};
    if (useMSAASurface && this->vkCaps().preferDiscardableMSAAAttachment() &&
        vkRT->resolveAttachment() && vkRT->resolveAttachment()->supportsInputAttachmentUsage()) {
        withResolve = true;
        localColorInfo.fStoreOp = GrStoreOp::kDiscard;
        if (colorInfo.fLoadOp == GrLoadOp::kLoad) {
            loadFromResolve = GrVkRenderPass::LoadFromResolve::kLoad;
            localColorInfo.fLoadOp = GrLoadOp::kDiscard;
        } else {
            resolveInfo.fLoadOp = GrLoadOp::kDiscard;
        }
    }

    // Get the framebuffer to use for the render pass
    sk_sp<GrVkFramebuffer> framebuffer;
    if (vkRT->wrapsSecondaryCommandBuffer()) {
        framebuffer = vkRT->externalFramebuffer();
    } else {
        auto fb = vkRT->getFramebuffer(withResolve, SkToBool(stencil), selfDepFlags,
                                       loadFromResolve);
        framebuffer = sk_ref_sp(fb);
    }
    if (!framebuffer) {
        return nullptr;
    }

    if (!fCachedOpsRenderPass->set(rt, std::move(framebuffer), origin, bounds, localColorInfo,
                                   stencilInfo, resolveInfo, selfDepFlags, loadFromResolve,
                                   sampledProxies)) {
        return nullptr;
    }
    return fCachedOpsRenderPass.get();
}

bool GrVkGpu::submitCommandBuffer(SyncQueue sync) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    if (!this->currentCommandBuffer()) {
        return false;
    }
    SkASSERT(!fCachedOpsRenderPass || !fCachedOpsRenderPass->isActive());

    if (!this->currentCommandBuffer()->hasWork() && kForce_SyncQueue != sync &&
        !fSemaphoresToSignal.count() && !fSemaphoresToWaitOn.count()) {
        // We may have added finished procs during the flush call. Since there is no actual work
        // we are not submitting the command buffer and may never come back around to submit it.
        // Thus we call all current finished procs manually, since the work has technically
        // finished.
        this->currentCommandBuffer()->callFinishedProcs();
        SkASSERT(fDrawables.empty());
        fResourceProvider.checkCommandBuffers();
        return true;
    }

    fMainCmdBuffer->end(this);
    SkASSERT(fMainCmdPool);
    fMainCmdPool->close();
    bool didSubmit = fMainCmdBuffer->submitToQueue(this, fQueue, fSemaphoresToSignal,
                                                   fSemaphoresToWaitOn);

    if (didSubmit && sync == kForce_SyncQueue) {
        fMainCmdBuffer->forceSync(this);
    }

    // We must delete any drawables that had to wait until submit to destroy.
    fDrawables.reset();

    // If we didn't submit the command buffer then we did not wait on any semaphores. We will
    // continue to hold onto these semaphores and wait on them during the next command buffer
    // submission.
    if (didSubmit) {
        for (int i = 0; i < fSemaphoresToWaitOn.count(); ++i) {
            fSemaphoresToWaitOn[i]->unref();
        }
        fSemaphoresToWaitOn.reset();
    }

    // Even if we did not submit the command buffer, we drop all the signal semaphores since we
    // will not try to recover the work that wasn't submitted and instead just drop it all. The
    // client will be notified that the semaphores were not submitted so that they will not try
    // to wait on them.
    for (int i = 0; i < fSemaphoresToSignal.count(); ++i) {
        fSemaphoresToSignal[i]->unref();
    }
    fSemaphoresToSignal.reset();

    // Release old command pool and create a new one
    fMainCmdPool->unref();
    fMainCmdPool = fResourceProvider.findOrCreateCommandPool();
    if (fMainCmdPool) {
        fMainCmdBuffer = fMainCmdPool->getPrimaryCommandBuffer();
        SkASSERT(fMainCmdBuffer);
        fMainCmdBuffer->begin(this);
    } else {
        fMainCmdBuffer = nullptr;
    }
    // We must wait to call checkCommandBuffers until after we get a new command buffer. The
    // checkCommandBuffers may trigger a releaseProc which may cause us to insert a barrier for a
    // released GrVkImage. That barrier needs to be put into a new command buffer and not the old
    // one that was just submitted.
    fResourceProvider.checkCommandBuffers();
    return didSubmit;
}

///////////////////////////////////////////////////////////////////////////////
sk_sp<GrGpuBuffer> GrVkGpu::onCreateBuffer(size_t size, GrGpuBufferType type,
                                           GrAccessPattern accessPattern, const void* data) {
#ifdef SK_DEBUG
    switch (type) {
        case GrGpuBufferType::kVertex:
        case GrGpuBufferType::kIndex:
        case GrGpuBufferType::kDrawIndirect:
            SkASSERT(accessPattern == kDynamic_GrAccessPattern ||
                     accessPattern == kStatic_GrAccessPattern);
            break;
        case GrGpuBufferType::kXferCpuToGpu:
            SkASSERT(accessPattern == kDynamic_GrAccessPattern);
            break;
        case GrGpuBufferType::kXferGpuToCpu:
            SkASSERT(accessPattern == kDynamic_GrAccessPattern ||
                     accessPattern == kStream_GrAccessPattern);
            break;
        case GrGpuBufferType::kUniform:
            SkASSERT(accessPattern == kDynamic_GrAccessPattern);
            break;
    }
#endif
    sk_sp<GrGpuBuffer> buff = GrVkBuffer::Make(this, size, type, accessPattern);

    if (data && buff) {
        buff->updateData(data, size);
    }
    return buff;
}

bool GrVkGpu::onWritePixels(GrSurface* surface, int left, int top, int width, int height,
                            GrColorType surfaceColorType, GrColorType srcColorType,
                            const GrMipLevel texels[], int mipLevelCount,
                            bool prepForTexSampling) {
    GrVkTexture* texture = static_cast<GrVkTexture*>(surface->asTexture());
    if (!texture) {
        return false;
    }
    GrVkAttachment* texAttachment = texture->textureAttachment();

    // Make sure we have at least the base level
    if (!mipLevelCount || !texels[0].fPixels) {
        return false;
    }

    SkASSERT(!GrVkFormatIsCompressed(texAttachment->imageFormat()));
    bool success = false;
    bool linearTiling = texAttachment->isLinearTiled();
    if (linearTiling) {
        if (mipLevelCount > 1) {
            SkDebugf("Can't upload mipmap data to linear tiled texture");
            return false;
        }
        if (VK_IMAGE_LAYOUT_PREINITIALIZED != texAttachment->currentLayout()) {
            // Need to change the layout to general in order to perform a host write
            texAttachment->setImageLayout(this,
                                          VK_IMAGE_LAYOUT_GENERAL,
                                          VK_ACCESS_HOST_WRITE_BIT,
                                          VK_PIPELINE_STAGE_HOST_BIT,
                                          false);
            if (!this->submitCommandBuffer(kForce_SyncQueue)) {
                return false;
            }
        }
        success = this->uploadTexDataLinear(texAttachment, left, top, width, height, srcColorType,
                                            texels[0].fPixels, texels[0].fRowBytes);
    } else {
        SkASSERT(mipLevelCount <= (int)texAttachment->mipLevels());
        success = this->uploadTexDataOptimal(texAttachment, left, top, width, height, srcColorType,
                                             texels, mipLevelCount);
        if (1 == mipLevelCount) {
            texture->markMipmapsDirty();
        }
    }

    if (prepForTexSampling) {
        texAttachment->setImageLayout(this,
                                      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                                      VK_ACCESS_SHADER_READ_BIT,
                                      VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                                      false);
    }

    return success;
}

bool GrVkGpu::onTransferPixelsTo(GrTexture* texture, int left, int top, int width, int height,
                                 GrColorType surfaceColorType, GrColorType bufferColorType,
                                 sk_sp<GrGpuBuffer> transferBuffer, size_t bufferOffset,
                                 size_t rowBytes) {
    if (!this->currentCommandBuffer()) {
        return false;
    }

    size_t bpp = GrColorTypeBytesPerPixel(bufferColorType);
    if (GrBackendFormatBytesPerPixel(texture->backendFormat()) != bpp) {
        return false;
    }

    // Vulkan only supports offsets that are both 4-byte aligned and aligned to a pixel.
    if ((bufferOffset & 0x3) || (bufferOffset % bpp)) {
        return false;
    }
    GrVkTexture* tex = static_cast<GrVkTexture*>(texture);
    if (!tex) {
        return false;
    }
    GrVkAttachment* vkTex = tex->textureAttachment();
    VkFormat format = vkTex->imageFormat();

    // Can't transfer compressed data
    SkASSERT(!GrVkFormatIsCompressed(format));

    if (!transferBuffer) {
        return false;
    }

    if (bufferColorType != this->vkCaps().transferColorType(format, surfaceColorType)) {
        return false;
    }
    SkASSERT(GrVkFormatBytesPerBlock(format) == GrColorTypeBytesPerPixel(bufferColorType));

    SkDEBUGCODE(
        SkIRect subRect = SkIRect::MakeXYWH(left, top, width, height);
        SkIRect bounds = SkIRect::MakeWH(texture->width(), texture->height());
        SkASSERT(bounds.contains(subRect));
    )

    // Set up copy region
    VkBufferImageCopy region;
    memset(&region, 0, sizeof(VkBufferImageCopy));
    region.bufferOffset = bufferOffset;
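    // Note that Vulkan interprets bufferRowLength in texels, not bytes.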
    region.bufferRowLength = (uint32_t)(rowBytes/bpp);
    region.bufferImageHeight = 0;
    region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    region.imageOffset = { left, top, 0 };
    region.imageExtent = { (uint32_t)width, (uint32_t)height, 1 };

    // Change layout of our target so it can be copied to
    vkTex->setImageLayout(this,
                          VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                          VK_ACCESS_TRANSFER_WRITE_BIT,
                          VK_PIPELINE_STAGE_TRANSFER_BIT,
                          false);

    const GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(transferBuffer.get());

    // Copy the buffer to the image.
    this->currentCommandBuffer()->copyBufferToImage(this,
                                                    vkBuffer->vkBuffer(),
                                                    vkTex,
                                                    VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                                    1,
                                                    &region);
    this->currentCommandBuffer()->addGrBuffer(std::move(transferBuffer));

    tex->markMipmapsDirty();
    return true;
}

bool GrVkGpu::onTransferPixelsFrom(GrSurface* surface, int left, int top, int width, int height,
                                   GrColorType surfaceColorType, GrColorType bufferColorType,
                                   sk_sp<GrGpuBuffer> transferBuffer, size_t offset) {
    if (!this->currentCommandBuffer()) {
        return false;
    }
    SkASSERT(surface);
    SkASSERT(transferBuffer);
    if (fProtectedContext == GrProtected::kYes) {
        return false;
    }

    GrVkImage* srcImage;
    if (GrVkRenderTarget* rt = static_cast<GrVkRenderTarget*>(surface->asRenderTarget())) {
        // Reading from render targets that wrap a secondary command buffer is not allowed since
        // it would require us to know the VkImage, which we don't have, as well as need us to
        // stop and start the VkRenderPass which we don't have access to.
        if (rt->wrapsSecondaryCommandBuffer()) {
            return false;
        }
        if (!rt->nonMSAAAttachment()) {
            return false;
        }
        srcImage = rt->nonMSAAAttachment();
    } else {
        SkASSERT(surface->asTexture());
        srcImage = static_cast<GrVkTexture*>(surface->asTexture())->textureAttachment();
    }

    VkFormat format = srcImage->imageFormat();
    if (bufferColorType != this->vkCaps().transferColorType(format, surfaceColorType)) {
        return false;
    }
    SkASSERT(GrVkFormatBytesPerBlock(format) == GrColorTypeBytesPerPixel(bufferColorType));

    // Set up copy region
    VkBufferImageCopy region;
    memset(&region, 0, sizeof(VkBufferImageCopy));
    region.bufferOffset = offset;
    region.bufferRowLength = width;
    region.bufferImageHeight = 0;
    region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    region.imageOffset = { left, top, 0 };
    region.imageExtent = { (uint32_t)width, (uint32_t)height, 1 };

    srcImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                             VK_ACCESS_TRANSFER_READ_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT,
                             false);

    this->currentCommandBuffer()->copyImageToBuffer(this, srcImage,
                                                    VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                                    transferBuffer, 1, &region);

    GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(transferBuffer.get());
    // Make sure the copy to buffer has finished.
    vkBuffer->addMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT,
                               VK_ACCESS_HOST_READ_BIT,
                               VK_PIPELINE_STAGE_TRANSFER_BIT,
                               VK_PIPELINE_STAGE_HOST_BIT,
                               false);
    return true;
}

void GrVkGpu::resolveImage(GrSurface* dst, GrVkRenderTarget* src, const SkIRect& srcRect,
                           const SkIPoint& dstPoint) {
    if (!this->currentCommandBuffer()) {
        return;
    }

    SkASSERT(dst);
    SkASSERT(src && src->colorAttachment() && src->colorAttachment()->numSamples() > 1);

    VkImageResolve resolveInfo;
    resolveInfo.srcSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
    resolveInfo.srcOffset = {srcRect.fLeft, srcRect.fTop, 0};
    resolveInfo.dstSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
    resolveInfo.dstOffset = {dstPoint.fX, dstPoint.fY, 0};
    resolveInfo.extent = {(uint32_t)srcRect.width(), (uint32_t)srcRect.height(), 1};

    GrVkImage* dstImage;
    GrRenderTarget* dstRT = dst->asRenderTarget();
    GrTexture* dstTex = dst->asTexture();
    if (dstTex) {
        dstImage = static_cast<GrVkTexture*>(dstTex)->textureAttachment();
    } else {
        SkASSERT(dst->asRenderTarget());
        dstImage = static_cast<GrVkRenderTarget*>(dstRT)->nonMSAAAttachment();
    }
    SkASSERT(dstImage);

    dstImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                             VK_ACCESS_TRANSFER_WRITE_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT,
                             false);

    src->colorAttachment()->setImageLayout(this,
                                           VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                           VK_ACCESS_TRANSFER_READ_BIT,
                                           VK_PIPELINE_STAGE_TRANSFER_BIT,
                                           false);
    this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(src->colorAttachment()));
    this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(dst));
    this->currentCommandBuffer()->resolveImage(this, *src->colorAttachment(), *dstImage, 1,
                                               &resolveInfo);
}

void GrVkGpu::onResolveRenderTarget(GrRenderTarget* target, const SkIRect& resolveRect) {
    SkASSERT(target->numSamples() > 1);
    GrVkRenderTarget* rt = static_cast<GrVkRenderTarget*>(target);
    SkASSERT(rt->colorAttachmentView() && rt->resolveAttachmentView());

    if (this->vkCaps().preferDiscardableMSAAAttachment() && rt->resolveAttachment() &&
        rt->resolveAttachment()->supportsInputAttachmentUsage()) {
        // We would have already resolved the RT during the render pass.
        return;
    }

    this->resolveImage(target, rt, resolveRect,
                       SkIPoint::Make(resolveRect.x(), resolveRect.y()));
}

bool GrVkGpu::uploadTexDataLinear(GrVkAttachment* texAttachment, int left, int top, int width,
                                  int height, GrColorType dataColorType, const void* data,
                                  size_t rowBytes) {
    SkASSERT(data);
    SkASSERT(texAttachment->isLinearTiled());

    SkDEBUGCODE(
        SkIRect subRect = SkIRect::MakeXYWH(left, top, width, height);
        SkIRect bounds = SkIRect::MakeWH(texAttachment->width(), texAttachment->height());
        SkASSERT(bounds.contains(subRect));
    )
    size_t bpp = GrColorTypeBytesPerPixel(dataColorType);
    size_t trimRowBytes = width * bpp;

    SkASSERT(VK_IMAGE_LAYOUT_PREINITIALIZED == texAttachment->currentLayout() ||
             VK_IMAGE_LAYOUT_GENERAL == texAttachment->currentLayout());
    const VkImageSubresource subres = {
        VK_IMAGE_ASPECT_COLOR_BIT,
        0,  // mipLevel
        0,  // arraySlice
    };
    VkSubresourceLayout layout;

    const GrVkInterface* interface = this->vkInterface();

    GR_VK_CALL(interface, GetImageSubresourceLayout(fDevice,
                                                    texAttachment->image(),
                                                    &subres,
                                                    &layout));

    const GrVkAlloc& alloc = texAttachment->alloc();
    if (VK_NULL_HANDLE == alloc.fMemory) {
        return false;
    }
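    // Byte offset of the first pixel of the update rect within the linearly tiled image.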
    VkDeviceSize offset = top * layout.rowPitch + left * bpp;
    VkDeviceSize size = height*layout.rowPitch;
    SkASSERT(size + offset <= alloc.fSize);
    void* mapPtr = GrVkMemory::MapAlloc(this, alloc);
    if (!mapPtr) {
        return false;
    }
    mapPtr = reinterpret_cast<char*>(mapPtr) + offset;

    SkRectMemcpy(mapPtr, static_cast<size_t>(layout.rowPitch), data, rowBytes, trimRowBytes,
                 height);

    GrVkMemory::FlushMappedAlloc(this, alloc, offset, size);
    GrVkMemory::UnmapAlloc(this, alloc);

    return true;
}

// This fills in the 'regions' vector in preparation for copying a buffer to an image.
// 'individualMipOffsets' is filled in as a side-effect.
static size_t fill_in_compressed_regions(GrStagingBufferManager* stagingBufferManager,
                                         SkTArray<VkBufferImageCopy>* regions,
                                         SkTArray<size_t>* individualMipOffsets,
                                         GrStagingBufferManager::Slice* slice,
                                         SkImage::CompressionType compression,
                                         VkFormat vkFormat,
                                         SkISize dimensions,
                                         GrMipmapped mipmapped) {
    SkASSERT(compression != SkImage::CompressionType::kNone);
    int numMipLevels = 1;
    if (mipmapped == GrMipmapped::kYes) {
        numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1;
    }

    regions->reserve_back(numMipLevels);
    individualMipOffsets->reserve_back(numMipLevels);

    size_t bytesPerBlock = GrVkFormatBytesPerBlock(vkFormat);

    size_t bufferSize = SkCompressedDataSize(compression,
                                             dimensions,
                                             individualMipOffsets,
                                             mipmapped == GrMipmapped::kYes);
    SkASSERT(individualMipOffsets->count() == numMipLevels);

    // Get a staging buffer slice to hold our mip data.
    // Vulkan requires offsets in the buffer to be aligned to a multiple of the texel size and 4.
    size_t alignment = bytesPerBlock;
    switch (alignment & 0b11) {
        case 0:                     break;   // alignment is already a multiple of 4.
        case 2:     alignment *= 2; break;   // alignment is a multiple of 2 but not 4.
        default:    alignment *= 4; break;   // alignment is not a multiple of 2.
    }
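    // The result is always a common multiple of the block size and 4: e.g. a block size of 8
    // stays 8, a block size of 2 becomes 4, and an odd block size of 3 becomes 12.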
    *slice = stagingBufferManager->allocateStagingBufferSlice(bufferSize, alignment);
    if (!slice->fBuffer) {
        return 0;
    }

    for (int i = 0; i < numMipLevels; ++i) {
        VkBufferImageCopy& region = regions->push_back();
        memset(&region, 0, sizeof(VkBufferImageCopy));
        region.bufferOffset = slice->fOffset + (*individualMipOffsets)[i];
        SkISize revisedDimensions = GrCompressedDimensions(compression, dimensions);
        region.bufferRowLength = revisedDimensions.width();
        region.bufferImageHeight = revisedDimensions.height();
        region.imageSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, SkToU32(i), 0, 1};
        region.imageOffset = {0, 0, 0};
        region.imageExtent = {SkToU32(dimensions.width()),
                              SkToU32(dimensions.height()), 1};

        dimensions = {std::max(1, dimensions.width() /2),
                      std::max(1, dimensions.height()/2)};
    }

    return bufferSize;
}

bool GrVkGpu::uploadTexDataOptimal(GrVkAttachment* texAttachment, int left, int top, int width,
                                   int height, GrColorType dataColorType, const GrMipLevel texels[],
                                   int mipLevelCount) {
    if (!this->currentCommandBuffer()) {
        return false;
    }

    SkASSERT(!texAttachment->isLinearTiled());
    // The assumption is either that we have no mipmaps, or that our rect is the entire texture
    SkASSERT(1 == mipLevelCount ||
             (0 == left && 0 == top && width == texAttachment->width() &&
              height == texAttachment->height()));

    // We assume that if the texture has mip levels, we either upload to all the levels or just the
    // first.
    SkASSERT(1 == mipLevelCount || mipLevelCount == (int)texAttachment->mipLevels());

    if (width == 0 || height == 0) {
        return false;
    }

    SkASSERT(this->vkCaps().surfaceSupportsWritePixels(texAttachment));

    SkASSERT(this->vkCaps().isVkFormatTexturable(texAttachment->imageFormat()));
    size_t bpp = GrColorTypeBytesPerPixel(dataColorType);

    // texels is const.
    // But we may need to adjust the fPixels ptr based on the copyRect, or fRowBytes.
    // Because of this we need to make a non-const shallow copy of texels.
    SkAutoTArray<GrMipLevel> texelsShallowCopy(mipLevelCount);
    std::copy_n(texels, mipLevelCount, texelsShallowCopy.get());

    SkTArray<size_t> individualMipOffsets;
    size_t combinedBufferSize;
    if (mipLevelCount > 1) {
        combinedBufferSize = GrComputeTightCombinedBufferSize(bpp,
                                                              {width, height},
                                                              &individualMipOffsets,
                                                              mipLevelCount);
    } else {
        SkASSERT(texelsShallowCopy[0].fPixels && texelsShallowCopy[0].fRowBytes);
        combinedBufferSize = width*height*bpp;
        individualMipOffsets.push_back(0);
    }
    SkASSERT(combinedBufferSize);

    // Get a staging buffer slice to hold our mip data.
    // Vulkan requires offsets in the buffer to be aligned to a multiple of the texel size and 4.
    size_t alignment = bpp;
    switch (alignment & 0b11) {
        case 0:                     break;   // alignment is already a multiple of 4.
        case 2:     alignment *= 2; break;   // alignment is a multiple of 2 but not 4.
        default:    alignment *= 4; break;   // alignment is not a multiple of 2.
    }
    GrStagingBufferManager::Slice slice =
            fStagingBufferManager.allocateStagingBufferSlice(combinedBufferSize, alignment);
    if (!slice.fBuffer) {
        return false;
    }

    int uploadLeft = left;
    int uploadTop = top;

    char* buffer = (char*) slice.fOffsetMapPtr;
    SkTArray<VkBufferImageCopy> regions(mipLevelCount);

    int currentWidth = width;
    int currentHeight = height;
    int layerHeight = texAttachment->height();
    for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
        if (texelsShallowCopy[currentMipLevel].fPixels) {
            SkASSERT(1 == mipLevelCount || currentHeight == layerHeight);
            const size_t trimRowBytes = currentWidth * bpp;
            const size_t rowBytes = texelsShallowCopy[currentMipLevel].fRowBytes;

            // copy data into the buffer, skipping the trailing bytes
            char* dst = buffer + individualMipOffsets[currentMipLevel];
            const char* src = (const char*)texelsShallowCopy[currentMipLevel].fPixels;
            SkRectMemcpy(dst, trimRowBytes, src, rowBytes, trimRowBytes, currentHeight);

            VkBufferImageCopy& region = regions.push_back();
            memset(&region, 0, sizeof(VkBufferImageCopy));
            region.bufferOffset = slice.fOffset + individualMipOffsets[currentMipLevel];
            region.bufferRowLength = currentWidth;
            region.bufferImageHeight = currentHeight;
            region.imageSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, SkToU32(currentMipLevel), 0, 1};
            region.imageOffset = {uploadLeft, uploadTop, 0};
            region.imageExtent = {(uint32_t)currentWidth, (uint32_t)currentHeight, 1};
        }

        currentWidth  = std::max(1,  currentWidth/2);
        currentHeight = std::max(1, currentHeight/2);

        layerHeight = currentHeight;
    }

    // Change layout of our target so it can be copied to
    texAttachment->setImageLayout(this,
                                  VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                  VK_ACCESS_TRANSFER_WRITE_BIT,
                                  VK_PIPELINE_STAGE_TRANSFER_BIT,
                                  false);

    // Copy the buffer to the image. This call takes the raw VkBuffer instead of a GrGpuBuffer
    // because we don't need the command buffer to ref the buffer here. The reason is that the
    // buffer is coming from the staging manager, and the staging manager will make sure the
    // command buffer has a ref on the buffer. This avoids having to add and remove a ref for
    // every upload in the frame.
    GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(slice.fBuffer);
    this->currentCommandBuffer()->copyBufferToImage(this,
                                                    vkBuffer->vkBuffer(),
                                                    texAttachment,
                                                    VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                                    regions.count(),
                                                    regions.begin());
    return true;
}

// It's probably possible to roll this into uploadTexDataOptimal,
// but for now it's easier to maintain as a separate entity.
bool GrVkGpu::uploadTexDataCompressed(GrVkAttachment* uploadTexture,
                                      SkImage::CompressionType compression, VkFormat vkFormat,
                                      SkISize dimensions, GrMipmapped mipMapped,
                                      const void* data, size_t dataSize) {
    if (!this->currentCommandBuffer()) {
        return false;
    }
    SkASSERT(data);
    SkASSERT(!uploadTexture->isLinearTiled());
    // For now the assumption is that our rect is the entire texture.
    // Compressed textures are read-only so this should be a reasonable assumption.
    SkASSERT(dimensions.fWidth == uploadTexture->width() &&
             dimensions.fHeight == uploadTexture->height());

    if (dimensions.fWidth == 0 || dimensions.fHeight == 0) {
        return false;
    }

    SkASSERT(uploadTexture->imageFormat() == vkFormat);
    SkASSERT(this->vkCaps().isVkFormatTexturable(vkFormat));

    GrStagingBufferManager::Slice slice;
    SkTArray<VkBufferImageCopy> regions;
    SkTArray<size_t> individualMipOffsets;
    SkDEBUGCODE(size_t combinedBufferSize =) fill_in_compressed_regions(&fStagingBufferManager,
                                                                        &regions,
                                                                        &individualMipOffsets,
                                                                        &slice,
                                                                        compression,
                                                                        vkFormat,
                                                                        dimensions,
                                                                        mipMapped);
    if (!slice.fBuffer) {
        return false;
    }
    SkASSERT(dataSize == combinedBufferSize);

    {
        char* buffer = (char*)slice.fOffsetMapPtr;
        memcpy(buffer, data, dataSize);
    }

    // Change layout of our target so it can be copied to
    uploadTexture->setImageLayout(this,
                                  VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                  VK_ACCESS_TRANSFER_WRITE_BIT,
                                  VK_PIPELINE_STAGE_TRANSFER_BIT,
                                  false);

    // Copy the buffer to the image. This call takes the raw VkBuffer instead of a GrGpuBuffer
    // because we don't need the command buffer to ref the buffer here. The reason is that the
    // buffer is coming from the staging manager, and the staging manager will make sure the
    // command buffer has a ref on the buffer. This avoids having to add and remove a ref for
    // every upload in the frame.
    GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(slice.fBuffer);
    this->currentCommandBuffer()->copyBufferToImage(this,
                                                    vkBuffer->vkBuffer(),
                                                    uploadTexture,
                                                    VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                                    regions.count(),
                                                    regions.begin());

    return true;
}

////////////////////////////////////////////////////////////////////////////////
// TODO: make this take a GrMipmapped
sk_sp<GrTexture> GrVkGpu::onCreateTexture(SkISize dimensions,
                                          const GrBackendFormat& format,
                                          GrRenderable renderable,
                                          int renderTargetSampleCnt,
                                          SkBudgeted budgeted,
                                          GrProtected isProtected,
                                          int mipLevelCount,
                                          uint32_t levelClearMask) {
    VkFormat pixelFormat;
    SkAssertResult(format.asVkFormat(&pixelFormat));
    SkASSERT(!GrVkFormatIsCompressed(pixelFormat));
    SkASSERT(mipLevelCount > 0);

    GrMipmapStatus mipmapStatus =
            mipLevelCount > 1 ? GrMipmapStatus::kDirty : GrMipmapStatus::kNotAllocated;

    sk_sp<GrVkTexture> tex;
    if (renderable == GrRenderable::kYes) {
        tex = GrVkTextureRenderTarget::MakeNewTextureRenderTarget(
                this, budgeted, dimensions, pixelFormat, mipLevelCount, renderTargetSampleCnt,
                mipmapStatus, isProtected);
    } else {
        tex = GrVkTexture::MakeNewTexture(this, budgeted, dimensions, pixelFormat,
                                          mipLevelCount, isProtected, mipmapStatus);
    }

    if (!tex) {
        return nullptr;
    }

    if (levelClearMask) {
        if (!this->currentCommandBuffer()) {
            return nullptr;
        }
        SkSTArray<1, VkImageSubresourceRange> ranges;
        bool inRange = false;
        GrVkImage* texImage = tex->textureAttachment();
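        // Coalesce runs of consecutive levels in the clear mask into single
        // VkImageSubresourceRange entries.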
1068         for (uint32_t i = 0; i < texImage->mipLevels(); ++i) {
1069             if (levelClearMask & (1U << i)) {
1070                 if (inRange) {
1071                     ranges.back().levelCount++;
1072                 } else {
1073                     auto& range = ranges.push_back();
1074                     range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
1075                     range.baseArrayLayer = 0;
1076                     range.baseMipLevel = i;
1077                     range.layerCount = 1;
1078                     range.levelCount = 1;
1079                     inRange = true;
1080                 }
1081             } else if (inRange) {
1082                 inRange = false;
1083             }
1084         }
1085         SkASSERT(!ranges.empty());
1086         static constexpr VkClearColorValue kZeroClearColor = {};
1087         texImage->setImageLayout(this, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1088                             VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, false);
1089         this->currentCommandBuffer()->clearColorImage(this, texImage, &kZeroClearColor,
1090                                                       ranges.count(), ranges.begin());
1091     }
1092     return std::move(tex);
1093 }
1094 
onCreateCompressedTexture(SkISize dimensions,const GrBackendFormat & format,SkBudgeted budgeted,GrMipmapped mipMapped,GrProtected isProtected,const void * data,size_t dataSize)1095 sk_sp<GrTexture> GrVkGpu::onCreateCompressedTexture(SkISize dimensions,
1096                                                     const GrBackendFormat& format,
1097                                                     SkBudgeted budgeted,
1098                                                     GrMipmapped mipMapped,
1099                                                     GrProtected isProtected,
1100                                                     const void* data, size_t dataSize) {
1101     VkFormat pixelFormat;
1102     SkAssertResult(format.asVkFormat(&pixelFormat));
1103     SkASSERT(GrVkFormatIsCompressed(pixelFormat));
1104 
1105     int numMipLevels = 1;
1106     if (mipMapped == GrMipmapped::kYes) {
1107         numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height())+1;
1108     }
1109 
1110     GrMipmapStatus mipmapStatus = (mipMapped == GrMipmapped::kYes) ? GrMipmapStatus::kValid
1111                                                                    : GrMipmapStatus::kNotAllocated;
1112 
1113     auto tex = GrVkTexture::MakeNewTexture(this, budgeted, dimensions, pixelFormat,
1114                                            numMipLevels, isProtected, mipmapStatus);
1115     if (!tex) {
1116         return nullptr;
1117     }
1118 
1119     SkImage::CompressionType compression = GrBackendFormatToCompressionType(format);
1120     if (!this->uploadTexDataCompressed(tex->textureAttachment(), compression, pixelFormat,
1121                                        dimensions, mipMapped, data, dataSize)) {
1122         return nullptr;
1123     }
1124 
1125     return std::move(tex);
1126 }
1127 
1128 ////////////////////////////////////////////////////////////////////////////////
1129 
copyBuffer(sk_sp<GrGpuBuffer> srcBuffer,sk_sp<GrGpuBuffer> dstBuffer,VkDeviceSize srcOffset,VkDeviceSize dstOffset,VkDeviceSize size)1130 void GrVkGpu::copyBuffer(sk_sp<GrGpuBuffer> srcBuffer,
1131                          sk_sp<GrGpuBuffer> dstBuffer,
1132                          VkDeviceSize srcOffset,
1133                          VkDeviceSize dstOffset,
1134                          VkDeviceSize size) {
1135     if (!this->currentCommandBuffer()) {
1136         return;
1137     }
1138     VkBufferCopy copyRegion;
1139     copyRegion.srcOffset = srcOffset;
1140     copyRegion.dstOffset = dstOffset;
1141     copyRegion.size = size;
1142     this->currentCommandBuffer()->copyBuffer(this, std::move(srcBuffer), std::move(dstBuffer), 1,
1143                                              &copyRegion);
1144 }
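
// copyBuffer records a single-region vkCmdCopyBuffer on the current command buffer; the sk_sp
// parameters let the command buffer keep both buffers alive until the work has finished. A
// minimal usage sketch (the buffers here are hypothetical, not part of this file):
//
//   gpu->copyBuffer(stagingBuffer, deviceLocalBuffer,
//                   /*srcOffset=*/0, /*dstOffset=*/0, stagingBuffer->size());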
1145 
1146 bool GrVkGpu::updateBuffer(sk_sp<GrVkBuffer> buffer, const void* src,
1147                            VkDeviceSize offset, VkDeviceSize size) {
1148     if (!this->currentCommandBuffer()) {
1149         return false;
1150     }
1151     // Update the buffer
1152     this->currentCommandBuffer()->updateBuffer(this, std::move(buffer), offset, size, src);
1153 
1154     return true;
1155 }
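
// updateBuffer records a vkCmdUpdateBuffer, which Vulkan limits to small inline uploads: the
// offset and size must be multiples of 4 and the size at most 65536 bytes. Larger or unaligned
// uploads should be staged and transferred with copyBuffer above instead.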
1156 
1157 ////////////////////////////////////////////////////////////////////////////////
1158 
1159 static bool check_image_info(const GrVkCaps& caps,
1160                              const GrVkImageInfo& info,
1161                              bool needsAllocation,
1162                              uint32_t graphicsQueueIndex) {
1163     if (VK_NULL_HANDLE == info.fImage) {
1164         return false;
1165     }
1166 
1167     if (VK_NULL_HANDLE == info.fAlloc.fMemory && needsAllocation) {
1168         return false;
1169     }
1170 
1171     if (info.fImageLayout == VK_IMAGE_LAYOUT_PRESENT_SRC_KHR && !caps.supportsSwapchain()) {
1172         return false;
1173     }
1174 
1175     if (info.fCurrentQueueFamily != VK_QUEUE_FAMILY_IGNORED &&
1176         info.fCurrentQueueFamily != VK_QUEUE_FAMILY_EXTERNAL &&
1177         info.fCurrentQueueFamily != VK_QUEUE_FAMILY_FOREIGN_EXT) {
1178         if (info.fSharingMode == VK_SHARING_MODE_EXCLUSIVE) {
1179             if (info.fCurrentQueueFamily != graphicsQueueIndex) {
1180                 return false;
1181             }
1182         } else {
1183             return false;
1184         }
1185     }
1186 
1187     if (info.fYcbcrConversionInfo.isValid()) {
1188         if (!caps.supportsYcbcrConversion()) {
1189             return false;
1190         }
1191         if (info.fYcbcrConversionInfo.fExternalFormat != 0) {
1192             return true;
1193         }
1194     }
1195 
1196     // We currently require everything to be made with transfer bits set
1197     if (!SkToBool(info.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_SRC_BIT) ||
1198         !SkToBool(info.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_DST_BIT)) {
1199         return false;
1200     }
1201 
1202     return true;
1203 }
1204 
1205 static bool check_tex_image_info(const GrVkCaps& caps, const GrVkImageInfo& info) {
1206     // We don't support directly importing multisampled textures for sampling from shaders.
1207     if (info.fSampleCount != 1) {
1208         return false;
1209     }
1210 
1211     if (info.fYcbcrConversionInfo.isValid() && info.fYcbcrConversionInfo.fExternalFormat != 0) {
1212         return true;
1213     }
1214     if (info.fImageTiling == VK_IMAGE_TILING_OPTIMAL) {
1215         if (!caps.isVkFormatTexturable(info.fFormat)) {
1216             return false;
1217         }
1218     } else {
1219         SkASSERT(info.fImageTiling == VK_IMAGE_TILING_LINEAR);
1220         if (!caps.isVkFormatTexturableLinearly(info.fFormat)) {
1221             return false;
1222         }
1223     }
1224 
1225     // We currently require all textures to be made with sample support
1226     if (!SkToBool(info.fImageUsageFlags & VK_IMAGE_USAGE_SAMPLED_BIT)) {
1227         return false;
1228     }
1229 
1230     return true;
1231 }
1232 
1233 static bool check_rt_image_info(const GrVkCaps& caps, const GrVkImageInfo& info, bool resolveOnly) {
1234     if (!caps.isFormatRenderable(info.fFormat, info.fSampleCount)) {
1235         return false;
1236     }
1237     if (!resolveOnly && !SkToBool(info.fImageUsageFlags & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT)) {
1238         return false;
1239     }
1240     return true;
1241 }
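
// Taken together, these three checks require wrapped images to carry both transfer usage bits,
// plus SAMPLED for textures and COLOR_ATTACHMENT for directly rendered targets. A sketch of a
// GrVkImageInfo that would pass all of them (the VkImage and exact values are hypothetical):
//
//   GrVkImageInfo info;
//   info.fImage              = myVkImage;  // externally created VkImage
//   info.fImageLayout        = VK_IMAGE_LAYOUT_UNDEFINED;
//   info.fImageTiling        = VK_IMAGE_TILING_OPTIMAL;
//   info.fFormat             = VK_FORMAT_R8G8B8A8_UNORM;
//   info.fImageUsageFlags    = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT |
//                              VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
//   info.fSampleCount        = 1;
//   info.fLevelCount         = 1;
//   info.fCurrentQueueFamily = VK_QUEUE_FAMILY_IGNORED;
//   info.fSharingMode        = VK_SHARING_MODE_EXCLUSIVE;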
1242 
1243 sk_sp<GrTexture> GrVkGpu::onWrapBackendTexture(const GrBackendTexture& backendTex,
1244                                                GrWrapOwnership ownership,
1245                                                GrWrapCacheable cacheable,
1246                                                GrIOType ioType) {
1247     GrVkImageInfo imageInfo;
1248     if (!backendTex.getVkImageInfo(&imageInfo)) {
1249         return nullptr;
1250     }
1251 
1252     if (!check_image_info(this->vkCaps(), imageInfo, kAdopt_GrWrapOwnership == ownership,
1253                           this->queueIndex())) {
1254         return nullptr;
1255     }
1256 
1257     if (!check_tex_image_info(this->vkCaps(), imageInfo)) {
1258         return nullptr;
1259     }
1260 
1261     if (backendTex.isProtected() && (fProtectedContext == GrProtected::kNo)) {
1262         return nullptr;
1263     }
1264 
1265     sk_sp<GrBackendSurfaceMutableStateImpl> mutableState = backendTex.getMutableState();
1266     SkASSERT(mutableState);
1267     return GrVkTexture::MakeWrappedTexture(this, backendTex.dimensions(), ownership, cacheable,
1268                                            ioType, imageInfo, std::move(mutableState));
1269 }
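
// A hedged sketch of how a client typically reaches this path; the texture and context names
// are illustrative only:
//
//   GrBackendTexture beTex(width, height, vkImageInfo);
//   sk_sp<SkImage> image = SkImage::MakeFromTexture(directContext, beTex,
//                                                   kTopLeft_GrSurfaceOrigin,
//                                                   kRGBA_8888_SkColorType, kPremul_SkAlphaType,
//                                                   /*colorSpace=*/nullptr);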
1270 
1271 sk_sp<GrTexture> GrVkGpu::onWrapCompressedBackendTexture(const GrBackendTexture& beTex,
1272                                                          GrWrapOwnership ownership,
1273                                                          GrWrapCacheable cacheable) {
1274     return this->onWrapBackendTexture(beTex, ownership, cacheable, kRead_GrIOType);
1275 }
1276 
1277 sk_sp<GrTexture> GrVkGpu::onWrapRenderableBackendTexture(const GrBackendTexture& backendTex,
1278                                                          int sampleCnt,
1279                                                          GrWrapOwnership ownership,
1280                                                          GrWrapCacheable cacheable) {
1281     GrVkImageInfo imageInfo;
1282     if (!backendTex.getVkImageInfo(&imageInfo)) {
1283         return nullptr;
1284     }
1285 
1286     if (!check_image_info(this->vkCaps(), imageInfo, kAdopt_GrWrapOwnership == ownership,
1287                           this->queueIndex())) {
1288         return nullptr;
1289     }
1290 
1291     if (!check_tex_image_info(this->vkCaps(), imageInfo)) {
1292         return nullptr;
1293     }
1294     // If sampleCnt is > 1 we will create an intermediate MSAA VkImage and then resolve into
1295     // the wrapped VkImage.
1296     bool resolveOnly = sampleCnt > 1;
1297     if (!check_rt_image_info(this->vkCaps(), imageInfo, resolveOnly)) {
1298         return nullptr;
1299     }
1300 
1301     if (backendTex.isProtected() && (fProtectedContext == GrProtected::kNo)) {
1302         return nullptr;
1303     }
1304 
1305     sampleCnt = this->vkCaps().getRenderTargetSampleCount(sampleCnt, imageInfo.fFormat);
1306 
1307     sk_sp<GrBackendSurfaceMutableStateImpl> mutableState = backendTex.getMutableState();
1308     SkASSERT(mutableState);
1309 
1310     return GrVkTextureRenderTarget::MakeWrappedTextureRenderTarget(this, backendTex.dimensions(),
1311                                                                    sampleCnt, ownership, cacheable,
1312                                                                    imageInfo,
1313                                                                    std::move(mutableState));
1314 }
1315 
1316 sk_sp<GrRenderTarget> GrVkGpu::onWrapBackendRenderTarget(const GrBackendRenderTarget& backendRT) {
1317     GrVkImageInfo info;
1318     if (!backendRT.getVkImageInfo(&info)) {
1319         return nullptr;
1320     }
1321 
1322     if (!check_image_info(this->vkCaps(), info, false, this->queueIndex())) {
1323         return nullptr;
1324     }
1325 
1326     // We will always render directly to this VkImage.
1327     static constexpr bool kResolveOnly = false;
1328     if (!check_rt_image_info(this->vkCaps(), info, kResolveOnly)) {
1329         return nullptr;
1330     }
1331 
1332     if (backendRT.isProtected() && (fProtectedContext == GrProtected::kNo)) {
1333         return nullptr;
1334     }
1335 
1336     sk_sp<GrBackendSurfaceMutableStateImpl> mutableState = backendRT.getMutableState();
1337     SkASSERT(mutableState);
1338 
1339     sk_sp<GrVkRenderTarget> tgt = GrVkRenderTarget::MakeWrappedRenderTarget(
1340             this, backendRT.dimensions(), backendRT.sampleCnt(), info, std::move(mutableState));
1341 
1342     // We don't allow the client to supply a premade stencil buffer. We always create one if needed.
1343     SkASSERT(!backendRT.stencilBits());
1344     if (tgt) {
1345         SkASSERT(tgt->canAttemptStencilAttachment(tgt->numSamples() > 1));
1346     }
1347 
1348     return std::move(tgt);
1349 }
1350 
1351 sk_sp<GrRenderTarget> GrVkGpu::onWrapVulkanSecondaryCBAsRenderTarget(
1352         const SkImageInfo& imageInfo, const GrVkDrawableInfo& vkInfo) {
1353     int maxSize = this->caps()->maxTextureSize();
1354     if (imageInfo.width() > maxSize || imageInfo.height() > maxSize) {
1355         return nullptr;
1356     }
1357 
1358     GrBackendFormat backendFormat = GrBackendFormat::MakeVk(vkInfo.fFormat);
1359     if (!backendFormat.isValid()) {
1360         return nullptr;
1361     }
1362     int sampleCnt = this->vkCaps().getRenderTargetSampleCount(1, vkInfo.fFormat);
1363     if (!sampleCnt) {
1364         return nullptr;
1365     }
1366 
1367     return GrVkRenderTarget::MakeSecondaryCBRenderTarget(this, imageInfo.dimensions(), vkInfo);
1368 }
1369 
1370 bool GrVkGpu::loadMSAAFromResolve(GrVkCommandBuffer* commandBuffer,
1371                                   const GrVkRenderPass& renderPass,
1372                                   GrAttachment* dst,
1373                                   GrVkAttachment* src,
1374                                   const SkIRect& srcRect) {
1375     return fMSAALoadManager.loadMSAAFromResolve(this, commandBuffer, renderPass, dst, src, srcRect);
1376 }
1377 
1378 bool GrVkGpu::onRegenerateMipMapLevels(GrTexture* tex) {
1379     if (!this->currentCommandBuffer()) {
1380         return false;
1381     }
1382     auto* vkTex = static_cast<GrVkTexture*>(tex)->textureAttachment();
1383     // don't do anything for linearly tiled textures (can't have mipmaps)
1384     if (vkTex->isLinearTiled()) {
1385         SkDebugf("Trying to create mipmap for linear tiled texture");
1386         return false;
1387     }
1388     SkASSERT(tex->textureType() == GrTextureType::k2D);
1389 
1390     // determine if we can blit to and from this format
1391     const GrVkCaps& caps = this->vkCaps();
1392     if (!caps.formatCanBeDstofBlit(vkTex->imageFormat(), false) ||
1393         !caps.formatCanBeSrcofBlit(vkTex->imageFormat(), false) ||
1394         !caps.mipmapSupport()) {
1395         return false;
1396     }
1397 
1398     int width = tex->width();
1399     int height = tex->height();
1400     VkImageBlit blitRegion;
1401     memset(&blitRegion, 0, sizeof(VkImageBlit));
1402 
1403     // SkMipmap doesn't include the base level in the level count so we have to add 1
1404     uint32_t levelCount = SkMipmap::ComputeLevelCount(tex->width(), tex->height()) + 1;
1405     SkASSERT(levelCount == vkTex->mipLevels());
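    // For example, a 100x60 base level gives ComputeLevelCount(100, 60) == 6, so 7 levels in
    // total: 100x60, 50x30, 25x15, 12x7, 6x3, 3x1, and 1x1.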
1406 
1407     // change layout of the layers so we can write to them.
1408     vkTex->setImageLayout(this, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_ACCESS_TRANSFER_WRITE_BIT,
1409                           VK_PIPELINE_STAGE_TRANSFER_BIT, false);
1410 
1411     // setup memory barrier
1412     SkASSERT(GrVkFormatIsSupported(vkTex->imageFormat()));
1413     VkImageMemoryBarrier imageMemoryBarrier = {
1414             VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,  // sType
1415             nullptr,                                 // pNext
1416             VK_ACCESS_TRANSFER_WRITE_BIT,            // srcAccessMask
1417             VK_ACCESS_TRANSFER_READ_BIT,             // dstAccessMask
1418             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,    // oldLayout
1419             VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,    // newLayout
1420             VK_QUEUE_FAMILY_IGNORED,                 // srcQueueFamilyIndex
1421             VK_QUEUE_FAMILY_IGNORED,                 // dstQueueFamilyIndex
1422             vkTex->image(),                          // image
1423             {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}  // subresourceRange
1424     };
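
    // This single barrier is reused for every level: each loop iteration below bumps
    // baseMipLevel so that level (mipLevel - 1) is transitioned from TRANSFER_DST to
    // TRANSFER_SRC before it is blitted down into level mipLevel.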
1425 
1426     // Blit the miplevels
1427     uint32_t mipLevel = 1;
1428     while (mipLevel < levelCount) {
1429         int prevWidth = width;
1430         int prevHeight = height;
1431         width = std::max(1, width / 2);
1432         height = std::max(1, height / 2);
1433 
1434         imageMemoryBarrier.subresourceRange.baseMipLevel = mipLevel - 1;
1435         this->addImageMemoryBarrier(vkTex->resource(), VK_PIPELINE_STAGE_TRANSFER_BIT,
1436                                     VK_PIPELINE_STAGE_TRANSFER_BIT, false, &imageMemoryBarrier);
1437 
1438         blitRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, mipLevel - 1, 0, 1 };
1439         blitRegion.srcOffsets[0] = { 0, 0, 0 };
1440         blitRegion.srcOffsets[1] = { prevWidth, prevHeight, 1 };
1441         blitRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, mipLevel, 0, 1 };
1442         blitRegion.dstOffsets[0] = { 0, 0, 0 };
1443         blitRegion.dstOffsets[1] = { width, height, 1 };
1444         this->currentCommandBuffer()->blitImage(this,
1445                                                 vkTex->resource(),
1446                                                 vkTex->image(),
1447                                                 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
1448                                                 vkTex->resource(),
1449                                                 vkTex->image(),
1450                                                 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1451                                                 1,
1452                                                 &blitRegion,
1453                                                 VK_FILTER_LINEAR);
1454         ++mipLevel;
1455     }
1456     if (levelCount > 1) {
1457         // This barrier logically is not needed, but it changes the final level to the same layout
1458         // as all the others, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL. This makes tracking of the
1459         // layouts and future layout changes easier. The alternative here would be to track layout
1460         // and memory accesses per layer which doesn't seem worth it.
1461         imageMemoryBarrier.subresourceRange.baseMipLevel = mipLevel - 1;
1462         this->addImageMemoryBarrier(vkTex->resource(), VK_PIPELINE_STAGE_TRANSFER_BIT,
1463                                     VK_PIPELINE_STAGE_TRANSFER_BIT, false, &imageMemoryBarrier);
1464         vkTex->updateImageLayout(VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
1465     }
1466     return true;
1467 }
1468 
1469 ////////////////////////////////////////////////////////////////////////////////
1470 
1471 sk_sp<GrAttachment> GrVkGpu::makeStencilAttachment(const GrBackendFormat& /*colorFormat*/,
1472                                                    SkISize dimensions, int numStencilSamples) {
1473     VkFormat sFmt = this->vkCaps().preferredStencilFormat();
1474 
1475     fStats.incStencilAttachmentCreates();
1476     return GrVkAttachment::MakeStencil(this, dimensions, numStencilSamples, sFmt);
1477 }
1478 
1479 sk_sp<GrAttachment> GrVkGpu::makeMSAAAttachment(SkISize dimensions,
1480                                                 const GrBackendFormat& format,
1481                                                 int numSamples,
1482                                                 GrProtected isProtected) {
1483     VkFormat pixelFormat;
1484     SkAssertResult(format.asVkFormat(&pixelFormat));
1485     SkASSERT(!GrVkFormatIsCompressed(pixelFormat));
1486     SkASSERT(this->vkCaps().isFormatRenderable(pixelFormat, numSamples));
1487 
1488     fStats.incMSAAAttachmentCreates();
1489     return GrVkAttachment::MakeMSAA(this, dimensions, numSamples, pixelFormat, isProtected);
1490 }
1491 
1492 ////////////////////////////////////////////////////////////////////////////////
1493 
1494 bool copy_src_data(char* mapPtr,
1495                    VkFormat vkFormat,
1496                    const SkTArray<size_t>& individualMipOffsets,
1497                    const GrPixmap srcData[],
1498                    int numMipLevels) {
1499     SkASSERT(srcData && numMipLevels);
1500     SkASSERT(!GrVkFormatIsCompressed(vkFormat));
1501     SkASSERT(individualMipOffsets.count() == numMipLevels);
1502     SkASSERT(mapPtr);
1503 
1504     size_t bytesPerPixel = GrVkFormatBytesPerBlock(vkFormat);
1505 
1506     for (int level = 0; level < numMipLevels; ++level) {
1507         const size_t trimRB = srcData[level].info().width() * bytesPerPixel;
1508 
1509         SkRectMemcpy(mapPtr + individualMipOffsets[level], trimRB,
1510                      srcData[level].addr(), srcData[level].rowBytes(),
1511                      trimRB, srcData[level].height());
1512     }
1513     return true;
1514 }
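
// copy_src_data tightly packs every level: trimRB is the packed row size, so e.g. a 100px-wide
// level in a 4-byte-per-block format is written with a 400-byte row pitch even if the source
// pixmap's rowBytes() includes padding.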
1515 
1516 bool GrVkGpu::createVkImageForBackendSurface(VkFormat vkFormat,
1517                                              SkISize dimensions,
1518                                              int sampleCnt,
1519                                              GrTexturable texturable,
1520                                              GrRenderable renderable,
1521                                              GrMipmapped mipMapped,
1522                                              GrVkImageInfo* info,
1523                                              GrProtected isProtected) {
1524     SkASSERT(texturable == GrTexturable::kYes || renderable == GrRenderable::kYes);
1525 
1526     if (fProtectedContext != isProtected) {
1527         return false;
1528     }
1529 
1530     if (texturable == GrTexturable::kYes && !fVkCaps->isVkFormatTexturable(vkFormat)) {
1531         return false;
1532     }
1533 
1534     // MSAA images are currently only used by createTestingOnlyBackendRenderTarget.
1535     if (sampleCnt > 1 && (texturable == GrTexturable::kYes || renderable == GrRenderable::kNo)) {
1536         return false;
1537     }
1538 
1539     if (renderable == GrRenderable::kYes) {
1540         sampleCnt = fVkCaps->getRenderTargetSampleCount(sampleCnt, vkFormat);
1541         if (!sampleCnt) {
1542             return false;
1543         }
1544     }
1545 
1546 
1547     int numMipLevels = 1;
1548     if (mipMapped == GrMipmapped::kYes) {
1549         numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1;
1550     }
1551 
1552     VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
1553                                    VK_IMAGE_USAGE_TRANSFER_DST_BIT;
1554     if (texturable == GrTexturable::kYes) {
1555         usageFlags |= VK_IMAGE_USAGE_SAMPLED_BIT;
1556     }
1557     if (renderable == GrRenderable::kYes) {
1558         usageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
1559         // We always make our render targets support being used as input attachments
1560         usageFlags |= VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
1561     }
1562 
1563     GrVkImage::ImageDesc imageDesc;
1564     imageDesc.fImageType = VK_IMAGE_TYPE_2D;
1565     imageDesc.fFormat = vkFormat;
1566     imageDesc.fWidth = dimensions.width();
1567     imageDesc.fHeight = dimensions.height();
1568     imageDesc.fLevels = numMipLevels;
1569     imageDesc.fSamples = sampleCnt;
1570     imageDesc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
1571     imageDesc.fUsageFlags = usageFlags;
1572     imageDesc.fMemProps = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
1573     imageDesc.fIsProtected = fProtectedContext;
1574 
1575     if (!GrVkImage::InitImageInfo(this, imageDesc, info)) {
1576         SkDebugf("Failed to init image info\n");
1577         return false;
1578     }
1579 
1580     return true;
1581 }
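
// This helper backs onCreateBackendTexture and, in test builds, createTestingOnlyBackendRenderTarget
// below. The GrVkImageInfo it fills out owns a freshly created VkImage and its memory, which must
// eventually be released through GrVkImage::DestroyImageInfo (see deleteBackendTexture).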
1582 
1583 bool GrVkGpu::onClearBackendTexture(const GrBackendTexture& backendTexture,
1584                                     sk_sp<GrRefCntedCallback> finishedCallback,
1585                                     std::array<float, 4> color) {
1586     GrVkImageInfo info;
1587     SkAssertResult(backendTexture.getVkImageInfo(&info));
1588 
1589     sk_sp<GrBackendSurfaceMutableStateImpl> mutableState = backendTexture.getMutableState();
1590     SkASSERT(mutableState);
1591     sk_sp<GrVkTexture> texture =
1592                 GrVkTexture::MakeWrappedTexture(this, backendTexture.dimensions(),
1593                                                 kBorrow_GrWrapOwnership, GrWrapCacheable::kNo,
1594                                                 kRW_GrIOType, info, std::move(mutableState));
1595     if (!texture) {
1596         return false;
1597     }
1598     GrVkAttachment* texAttachment = texture->textureAttachment();
1599 
1600     GrVkPrimaryCommandBuffer* cmdBuffer = this->currentCommandBuffer();
1601     if (!cmdBuffer) {
1602         return false;
1603     }
1604 
1605     texAttachment->setImageLayout(this, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1606                                   VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
1607                                   false);
1608 
1609     // CmdClearColorImage doesn't work for compressed formats
1610     SkASSERT(!GrVkFormatIsCompressed(info.fFormat));
1611 
1612     VkClearColorValue vkColor;
1613     // If we ever support SINT or UINT formats this needs to be updated to use the int32 and
1614     // uint32 union members in those cases.
1615     vkColor.float32[0] = color[0];
1616     vkColor.float32[1] = color[1];
1617     vkColor.float32[2] = color[2];
1618     vkColor.float32[3] = color[3];
1619     VkImageSubresourceRange range;
1620     range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
1621     range.baseArrayLayer = 0;
1622     range.baseMipLevel = 0;
1623     range.layerCount = 1;
1624     range.levelCount = info.fLevelCount;
1625     cmdBuffer->clearColorImage(this, texAttachment, &vkColor, 1, &range);
1626 
1627     // Change the image layout to shader read: if we use this texture as a borrowed
1628     // texture within Ganesh, we require its layout to be SHADER_READ_ONLY_OPTIMAL.
1629     texAttachment->setImageLayout(this, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
1630                                   VK_ACCESS_SHADER_READ_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
1631                                   false);
1632 
1633     if (finishedCallback) {
1634         this->addFinishedCallback(std::move(finishedCallback));
1635     }
1636     return true;
1637 }
1638 
1639 GrBackendTexture GrVkGpu::onCreateBackendTexture(SkISize dimensions,
1640                                                  const GrBackendFormat& format,
1641                                                  GrRenderable renderable,
1642                                                  GrMipmapped mipMapped,
1643                                                  GrProtected isProtected) {
1644     const GrVkCaps& caps = this->vkCaps();
1645 
1646     if (fProtectedContext != isProtected) {
1647         return {};
1648     }
1649 
1650     VkFormat vkFormat;
1651     if (!format.asVkFormat(&vkFormat)) {
1652         return {};
1653     }
1654 
1655     // TODO: move the texturability check up to GrGpu::createBackendTexture and just assert here
1656     if (!caps.isVkFormatTexturable(vkFormat)) {
1657         return {};
1658     }
1659 
1660     if (GrVkFormatNeedsYcbcrSampler(vkFormat)) {
1661         return {};
1662     }
1663 
1664     GrVkImageInfo info;
1665     if (!this->createVkImageForBackendSurface(vkFormat, dimensions, 1, GrTexturable::kYes,
1666                                               renderable, mipMapped, &info, isProtected)) {
1667         return {};
1668     }
1669 
1670     return GrBackendTexture(dimensions.width(), dimensions.height(), info);
1671 }
1672 
1673 GrBackendTexture GrVkGpu::onCreateCompressedBackendTexture(
1674         SkISize dimensions, const GrBackendFormat& format, GrMipmapped mipMapped,
1675         GrProtected isProtected) {
1676     return this->onCreateBackendTexture(dimensions, format, GrRenderable::kNo, mipMapped,
1677                                         isProtected);
1678 }
1679 
1680 bool GrVkGpu::onUpdateCompressedBackendTexture(const GrBackendTexture& backendTexture,
1681                                                sk_sp<GrRefCntedCallback> finishedCallback,
1682                                                const void* data,
1683                                                size_t size) {
1684     GrVkImageInfo info;
1685     SkAssertResult(backendTexture.getVkImageInfo(&info));
1686 
1687     sk_sp<GrBackendSurfaceMutableStateImpl> mutableState = backendTexture.getMutableState();
1688     SkASSERT(mutableState);
1689     sk_sp<GrVkTexture> texture = GrVkTexture::MakeWrappedTexture(this,
1690                                                                  backendTexture.dimensions(),
1691                                                                  kBorrow_GrWrapOwnership,
1692                                                                  GrWrapCacheable::kNo,
1693                                                                  kRW_GrIOType,
1694                                                                  info,
1695                                                                  std::move(mutableState));
1696     if (!texture) {
1697         return false;
1698     }
1699 
1700     GrVkPrimaryCommandBuffer* cmdBuffer = this->currentCommandBuffer();
1701     if (!cmdBuffer) {
1702         return false;
1703     }
1704     GrVkAttachment* attachment = texture->textureAttachment();
1705     attachment->setImageLayout(this,
1706                                VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1707                                VK_ACCESS_TRANSFER_WRITE_BIT,
1708                                VK_PIPELINE_STAGE_TRANSFER_BIT,
1709                                false);
1710 
1711     SkImage::CompressionType compression =
1712             GrBackendFormatToCompressionType(backendTexture.getBackendFormat());
1713 
1714     SkTArray<VkBufferImageCopy> regions;
1715     SkTArray<size_t> individualMipOffsets;
1716     GrStagingBufferManager::Slice slice;
1717 
1718     fill_in_compressed_regions(&fStagingBufferManager,
1719                                &regions,
1720                                &individualMipOffsets,
1721                                &slice,
1722                                compression,
1723                                info.fFormat,
1724                                backendTexture.dimensions(),
1725                                backendTexture.fMipmapped);
1726 
1727     if (!slice.fBuffer) {
1728         return false;
1729     }
1730 
1731     memcpy(slice.fOffsetMapPtr, data, size);
1732 
1733     cmdBuffer->addGrSurface(texture);
1734     // Copy the buffer to the image. This call takes the raw VkBuffer instead of a GrGpuBuffer
1735     // because we don't need the command buffer to ref the buffer here. The reason is that
1736     // the buffer is coming from the staging manager and the staging manager will make sure the
1737     // command buffer has a ref on the buffer. This avoids having to add and remove a ref for
1738     // every upload in the frame.
1739     cmdBuffer->copyBufferToImage(this,
1740                                  static_cast<GrVkBuffer*>(slice.fBuffer)->vkBuffer(),
1741                                  attachment,
1742                                  attachment->currentLayout(),
1743                                  regions.count(),
1744                                  regions.begin());
1745 
1746     // Change the image layout to shader read: if we use this texture as a borrowed
1747     // texture within Ganesh, we require its layout to be SHADER_READ_ONLY_OPTIMAL.
1748     attachment->setImageLayout(this,
1749                                VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
1750                                VK_ACCESS_SHADER_READ_BIT,
1751                                VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
1752                                false);
1753 
1754     if (finishedCallback) {
1755         this->addFinishedCallback(std::move(finishedCallback));
1756     }
1757     return true;
1758 }
1759 
1760 void set_layout_and_queue_from_mutable_state(GrVkGpu* gpu, GrVkImage* image,
1761                                              const GrVkSharedImageInfo& newInfo) {
1762     // Even though we normally use these helpers to get src access flags and stages, they
1763     // also work for general dst flags since we don't know exactly what the client
1764     // plans on using the image for.
1765     VkImageLayout newLayout = newInfo.getImageLayout();
1766     if (newLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
1767         newLayout = image->currentLayout();
1768     }
1769     VkPipelineStageFlags dstStage = GrVkImage::LayoutToPipelineSrcStageFlags(newLayout);
1770     VkAccessFlags dstAccess = GrVkImage::LayoutToSrcAccessMask(newLayout);
1771 
1772     uint32_t currentQueueFamilyIndex = image->currentQueueFamilyIndex();
1773     uint32_t newQueueFamilyIndex = newInfo.getQueueFamilyIndex();
1774     auto isSpecialQueue = [](uint32_t queueFamilyIndex) {
1775         return queueFamilyIndex == VK_QUEUE_FAMILY_EXTERNAL ||
1776                queueFamilyIndex == VK_QUEUE_FAMILY_FOREIGN_EXT;
1777     };
1778     if (isSpecialQueue(currentQueueFamilyIndex) && isSpecialQueue(newQueueFamilyIndex)) {
1779         // It is illegal to have both the new and old queue be special queue families (i.e. external
1780         // or foreign).
1781         return;
1782     }
1783 
1784     image->setImageLayoutAndQueueIndex(gpu, newLayout, dstAccess, dstStage, false,
1785                                        newQueueFamilyIndex);
1786 }
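
// A hedged example of what a client might request here: to hand an image off to an external
// consumer it could supply a GrVkSharedImageInfo with layout VK_IMAGE_LAYOUT_GENERAL and queue
// family VK_QUEUE_FAMILY_EXTERNAL, while passing VK_IMAGE_LAYOUT_UNDEFINED keeps the image's
// current layout, as handled above.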
1787 
1788 bool GrVkGpu::setBackendSurfaceState(GrVkImageInfo info,
1789                                      sk_sp<GrBackendSurfaceMutableStateImpl> currentState,
1790                                      SkISize dimensions,
1791                                      const GrVkSharedImageInfo& newInfo,
1792                                      GrBackendSurfaceMutableState* previousState,
1793                                      sk_sp<GrRefCntedCallback> finishedCallback) {
1794     sk_sp<GrVkAttachment> texture = GrVkAttachment::MakeWrapped(
1795             this, dimensions, info, std::move(currentState),
1796            GrVkAttachment::UsageFlags::kColorAttachment, kBorrow_GrWrapOwnership,
1797            GrWrapCacheable::kNo, /*forSecondaryCB=*/false);
1798     SkASSERT(texture);
1799     if (!texture) {
1800         return false;
1801     }
1802     if (previousState) {
1803         previousState->setVulkanState(texture->currentLayout(),
1804                                       texture->currentQueueFamilyIndex());
1805     }
1806     set_layout_and_queue_from_mutable_state(this, texture.get(), newInfo);
1807     if (finishedCallback) {
1808         this->addFinishedCallback(std::move(finishedCallback));
1809     }
1810     return true;
1811 }
1812 
1813 bool GrVkGpu::setBackendTextureState(const GrBackendTexture& backendTexture,
1814                                      const GrBackendSurfaceMutableState& newState,
1815                                      GrBackendSurfaceMutableState* previousState,
1816                                      sk_sp<GrRefCntedCallback> finishedCallback) {
1817     GrVkImageInfo info;
1818     SkAssertResult(backendTexture.getVkImageInfo(&info));
1819     sk_sp<GrBackendSurfaceMutableStateImpl> currentState = backendTexture.getMutableState();
1820     SkASSERT(currentState);
1821     SkASSERT(newState.isValid() && newState.fBackend == GrBackend::kVulkan);
1822     return this->setBackendSurfaceState(info, std::move(currentState), backendTexture.dimensions(),
1823                                         newState.fVkState, previousState,
1824                                         std::move(finishedCallback));
1825 }
1826 
1827 bool GrVkGpu::setBackendRenderTargetState(const GrBackendRenderTarget& backendRenderTarget,
1828                                           const GrBackendSurfaceMutableState& newState,
1829                                           GrBackendSurfaceMutableState* previousState,
1830                                           sk_sp<GrRefCntedCallback> finishedCallback) {
1831     GrVkImageInfo info;
1832     SkAssertResult(backendRenderTarget.getVkImageInfo(&info));
1833     sk_sp<GrBackendSurfaceMutableStateImpl> currentState = backendRenderTarget.getMutableState();
1834     SkASSERT(currentState);
1835     SkASSERT(newState.fBackend == GrBackend::kVulkan);
1836     return this->setBackendSurfaceState(info, std::move(currentState),
1837                                         backendRenderTarget.dimensions(), newState.fVkState,
1838                                         previousState, std::move(finishedCallback));
1839 }
1840 
1841 void GrVkGpu::xferBarrier(GrRenderTarget* rt, GrXferBarrierType barrierType) {
1842     GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt);
1843     VkPipelineStageFlags dstStage;
1844     VkAccessFlags dstAccess;
1845     if (barrierType == kBlend_GrXferBarrierType) {
1846         dstStage = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
1847         dstAccess = VK_ACCESS_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT;
1848     } else {
1849         SkASSERT(barrierType == kTexture_GrXferBarrierType);
1850         dstStage = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
1851         dstAccess = VK_ACCESS_INPUT_ATTACHMENT_READ_BIT;
1852     }
1853     GrVkAttachment* colorAttachment = vkRT->colorAttachment();
1854     VkImageMemoryBarrier barrier;
1855     barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
1856     barrier.pNext = nullptr;
1857     barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
1858     barrier.dstAccessMask = dstAccess;
1859     barrier.oldLayout = colorAttachment->currentLayout();
1860     barrier.newLayout = barrier.oldLayout;
1861     barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
1862     barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
1863     barrier.image = colorAttachment->image();
1864     barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, colorAttachment->mipLevels(), 0, 1};
1865     this->addImageMemoryBarrier(colorAttachment->resource(),
1866                                 VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
1867                                 dstStage, true, &barrier);
1868 }
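
// Both flavors here are render-pass self-dependency barriers: the noncoherent-read access pairs
// with advanced blend modes from VK_EXT_blend_operation_advanced, while the input-attachment
// access covers reading the color attachment back as an input attachment for texture barriers.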
1869 
1870 void GrVkGpu::deleteBackendTexture(const GrBackendTexture& tex) {
1871     SkASSERT(GrBackendApi::kVulkan == tex.fBackend);
1872 
1873     GrVkImageInfo info;
1874     if (tex.getVkImageInfo(&info)) {
1875         GrVkImage::DestroyImageInfo(this, const_cast<GrVkImageInfo*>(&info));
1876     }
1877 }
1878 
1879 bool GrVkGpu::compile(const GrProgramDesc& desc, const GrProgramInfo& programInfo) {
1880     GrVkRenderPass::AttachmentsDescriptor attachmentsDescriptor;
1881     GrVkRenderPass::AttachmentFlags attachmentFlags;
1882     GrVkRenderTarget::ReconstructAttachmentsDescriptor(this->vkCaps(), programInfo,
1883                                                        &attachmentsDescriptor, &attachmentFlags);
1884 
1885     GrVkRenderPass::SelfDependencyFlags selfDepFlags = GrVkRenderPass::SelfDependencyFlags::kNone;
1886     if (programInfo.renderPassBarriers() & GrXferBarrierFlags::kBlend) {
1887         selfDepFlags |= GrVkRenderPass::SelfDependencyFlags::kForNonCoherentAdvBlend;
1888     }
1889     if (programInfo.renderPassBarriers() & GrXferBarrierFlags::kTexture) {
1890         selfDepFlags |= GrVkRenderPass::SelfDependencyFlags::kForInputAttachment;
1891     }
1892 
1893     GrVkRenderPass::LoadFromResolve loadFromResolve = GrVkRenderPass::LoadFromResolve::kNo;
1894     if (programInfo.targetSupportsVkResolveLoad() && programInfo.colorLoadOp() == GrLoadOp::kLoad &&
1895         this->vkCaps().preferDiscardableMSAAAttachment()) {
1896         loadFromResolve = GrVkRenderPass::LoadFromResolve::kLoad;
1897     }
1898     sk_sp<const GrVkRenderPass> renderPass(this->resourceProvider().findCompatibleRenderPass(
1899             &attachmentsDescriptor, attachmentFlags, selfDepFlags, loadFromResolve));
1900     if (!renderPass) {
1901         return false;
1902     }
1903 
1904     GrThreadSafePipelineBuilder::Stats::ProgramCacheResult stat;
1905 
1906     auto pipelineState = this->resourceProvider().findOrCreateCompatiblePipelineState(
1907                                     desc,
1908                                     programInfo,
1909                                     renderPass->vkRenderPass(),
1910                                     &stat);
1911     if (!pipelineState) {
1912         return false;
1913     }
1914 
1915     return stat != GrThreadSafePipelineBuilder::Stats::ProgramCacheResult::kHit;
1916 }
1917 
1918 #if GR_TEST_UTILS
1919 bool GrVkGpu::isTestingOnlyBackendTexture(const GrBackendTexture& tex) const {
1920     SkASSERT(GrBackendApi::kVulkan == tex.fBackend);
1921 
1922     GrVkImageInfo backend;
1923     if (!tex.getVkImageInfo(&backend)) {
1924         return false;
1925     }
1926 
1927     if (backend.fImage && backend.fAlloc.fMemory) {
1928         VkMemoryRequirements req;
1929         memset(&req, 0, sizeof(req));
1930         GR_VK_CALL(this->vkInterface(), GetImageMemoryRequirements(fDevice,
1931                                                                    backend.fImage,
1932                                                                    &req));
1933         // TODO: find a better check
1934         // This will probably fail with a different driver
1935         return (req.size > 0) && (req.size <= 8192 * 8192);
1936     }
1937 
1938     return false;
1939 }
1940 
1941 GrBackendRenderTarget GrVkGpu::createTestingOnlyBackendRenderTarget(SkISize dimensions,
1942                                                                     GrColorType ct,
1943                                                                     int sampleCnt,
1944                                                                     GrProtected isProtected) {
1945     if (dimensions.width()  > this->caps()->maxRenderTargetSize() ||
1946         dimensions.height() > this->caps()->maxRenderTargetSize()) {
1947         return {};
1948     }
1949 
1950     VkFormat vkFormat = this->vkCaps().getFormatFromColorType(ct);
1951 
1952     GrVkImageInfo info;
1953     if (!this->createVkImageForBackendSurface(vkFormat, dimensions, sampleCnt, GrTexturable::kNo,
1954                                               GrRenderable::kYes, GrMipmapped::kNo, &info,
1955                                               isProtected)) {
1956         return {};
1957     }
1958     return GrBackendRenderTarget(dimensions.width(), dimensions.height(), 0, info);
1959 }
1960 
1961 void GrVkGpu::deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget& rt) {
1962     SkASSERT(GrBackendApi::kVulkan == rt.fBackend);
1963 
1964     GrVkImageInfo info;
1965     if (rt.getVkImageInfo(&info)) {
1966         // something in the command buffer may still be using this, so force submit
1967         SkAssertResult(this->submitCommandBuffer(kForce_SyncQueue));
1968         GrVkImage::DestroyImageInfo(this, const_cast<GrVkImageInfo*>(&info));
1969     }
1970 }
1971 #endif
1972 
1973 ////////////////////////////////////////////////////////////////////////////////
1974 
1975 void GrVkGpu::addBufferMemoryBarrier(const GrManagedResource* resource,
1976                                      VkPipelineStageFlags srcStageMask,
1977                                      VkPipelineStageFlags dstStageMask,
1978                                      bool byRegion,
1979                                      VkBufferMemoryBarrier* barrier) const {
1980     if (!this->currentCommandBuffer()) {
1981         return;
1982     }
1983     SkASSERT(resource);
1984     this->currentCommandBuffer()->pipelineBarrier(this,
1985                                                   resource,
1986                                                   srcStageMask,
1987                                                   dstStageMask,
1988                                                   byRegion,
1989                                                   GrVkCommandBuffer::kBufferMemory_BarrierType,
1990                                                   barrier);
1991 }
1992 void GrVkGpu::addBufferMemoryBarrier(VkPipelineStageFlags srcStageMask,
1993                                      VkPipelineStageFlags dstStageMask,
1994                                      bool byRegion,
1995                                      VkBufferMemoryBarrier* barrier) const {
1996     if (!this->currentCommandBuffer()) {
1997         return;
1998     }
1999     // We don't pass a resource to the command buffer here. The command buffer would only use
2000     // it to hold a ref, and every place where we add a buffer memory barrier we are also
2001     // issuing some other command with the buffer on the command buffer. Those other commands
2002     // already cause the command buffer to hold a ref to the buffer.
2003     this->currentCommandBuffer()->pipelineBarrier(this,
2004                                                   /*resource=*/nullptr,
2005                                                   srcStageMask,
2006                                                   dstStageMask,
2007                                                   byRegion,
2008                                                   GrVkCommandBuffer::kBufferMemory_BarrierType,
2009                                                   barrier);
2010 }
2011 
2012 void GrVkGpu::addImageMemoryBarrier(const GrManagedResource* resource,
2013                                     VkPipelineStageFlags srcStageMask,
2014                                     VkPipelineStageFlags dstStageMask,
2015                                     bool byRegion,
2016                                     VkImageMemoryBarrier* barrier) const {
2017     // If we are in the middle of destroying or abandoning the context we may hit a release proc
2018     // that triggers the destruction of a GrVkImage. This could cause us to try and transfer the
2019     // VkImage back to the original queue. In this state we don't submit anymore work and we may not
2020     // have a current command buffer. Thus we won't do the queue transfer.
2021     if (!this->currentCommandBuffer()) {
2022         return;
2023     }
2024     SkASSERT(resource);
2025     this->currentCommandBuffer()->pipelineBarrier(this,
2026                                                   resource,
2027                                                   srcStageMask,
2028                                                   dstStageMask,
2029                                                   byRegion,
2030                                                   GrVkCommandBuffer::kImageMemory_BarrierType,
2031                                                   barrier);
2032 }
2033 
2034 void GrVkGpu::prepareSurfacesForBackendAccessAndStateUpdates(
2035         SkSpan<GrSurfaceProxy*> proxies,
2036         SkSurface::BackendSurfaceAccess access,
2037         const GrBackendSurfaceMutableState* newState) {
2038     // Submit the current command buffer to the Queue. Whether we inserted semaphores or not does
2039     // not affect what we do here.
2040     if (!proxies.empty() && (access == SkSurface::BackendSurfaceAccess::kPresent || newState)) {
2041         // We currently don't support passing in new surface state for multiple proxies here. The
2042         // only time we have multiple proxies is if we are flushing a yuv SkImage which won't have
2043         // state updates anyway. Additionally, if we have a newState then we must not have any
2044         // BackendSurfaceAccess.
2045         SkASSERT(!newState || proxies.size() == 1);
2046         SkASSERT(!newState || access == SkSurface::BackendSurfaceAccess::kNoAccess);
2047         GrVkImage* image;
2048         for (GrSurfaceProxy* proxy : proxies) {
2049             SkASSERT(proxy->isInstantiated());
2050             if (GrTexture* tex = proxy->peekTexture()) {
2051                 image = static_cast<GrVkTexture*>(tex)->textureAttachment();
2052             } else {
2053                 GrRenderTarget* rt = proxy->peekRenderTarget();
2054                 SkASSERT(rt);
2055                 GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt);
2056                 image = vkRT->externalAttachment();
2057             }
2058             if (newState) {
2059                 const GrVkSharedImageInfo& newInfo = newState->fVkState;
2060                 set_layout_and_queue_from_mutable_state(this, image, newInfo);
2061             } else {
2062                 SkASSERT(access == SkSurface::BackendSurfaceAccess::kPresent);
2063                 image->prepareForPresent(this);
2064             }
2065         }
2066     }
2067 }
2068 
2069 void GrVkGpu::addFinishedProc(GrGpuFinishedProc finishedProc,
2070                               GrGpuFinishedContext finishedContext) {
2071     SkASSERT(finishedProc);
2072     this->addFinishedCallback(GrRefCntedCallback::Make(finishedProc, finishedContext));
2073 }
2074 
2075 void GrVkGpu::addFinishedCallback(sk_sp<GrRefCntedCallback> finishedCallback) {
2076     SkASSERT(finishedCallback);
2077     fResourceProvider.addFinishedProcToActiveCommandBuffers(std::move(finishedCallback));
2078 }
2079 
2080 void GrVkGpu::takeOwnershipOfBuffer(sk_sp<GrGpuBuffer> buffer) {
2081     this->currentCommandBuffer()->addGrBuffer(std::move(buffer));
2082 }
2083 
2084 bool GrVkGpu::onSubmitToGpu(bool syncCpu) {
2085     if (syncCpu) {
2086         return this->submitCommandBuffer(kForce_SyncQueue);
2087     } else {
2088         return this->submitCommandBuffer(kSkip_SyncQueue);
2089     }
2090 }
2091 
2092 void GrVkGpu::finishOutstandingGpuWork() {
2093     VK_CALL(QueueWaitIdle(fQueue));
2094 
2095     if (this->vkCaps().mustSyncCommandBuffersWithQueue()) {
2096         fResourceProvider.forceSyncAllCommandBuffers();
2097     }
2098 }
2099 
2100 void GrVkGpu::onReportSubmitHistograms() {
2101 #if SK_HISTOGRAMS_ENABLED
2102     uint64_t allocatedMemory = fMemoryAllocator->totalAllocatedMemory();
2103     uint64_t usedMemory = fMemoryAllocator->totalUsedMemory();
2104     SkASSERT(usedMemory <= allocatedMemory);
2105     if (allocatedMemory > 0) {
2106         SK_HISTOGRAM_PERCENTAGE("VulkanMemoryAllocator.PercentUsed",
2107                                 (usedMemory * 100) / allocatedMemory);
2108     }
2109     // allocatedMemory is in bytes and needs to be reported in kilobytes. SK_HISTOGRAM_MEMORY_KB
2110     // supports samples up to around 500MB, which should cover the amounts of memory we allocate.
2111     SK_HISTOGRAM_MEMORY_KB("VulkanMemoryAllocator.AmountAllocated", allocatedMemory >> 10);
2112 #endif
2113 }
2114 
2115 void GrVkGpu::copySurfaceAsCopyImage(GrSurface* dst, GrSurface* src, GrVkImage* dstImage,
2116                                      GrVkImage* srcImage, const SkIRect& srcRect,
2117                                      const SkIPoint& dstPoint) {
2118     if (!this->currentCommandBuffer()) {
2119         return;
2120     }
2121 
2122 #ifdef SK_DEBUG
2123     int dstSampleCnt = dstImage->vkImageInfo().fSampleCount;
2124     int srcSampleCnt = srcImage->vkImageInfo().fSampleCount;
2125     bool dstHasYcbcr = dstImage->ycbcrConversionInfo().isValid();
2126     bool srcHasYcbcr = srcImage->ycbcrConversionInfo().isValid();
2127     VkFormat dstFormat = dstImage->imageFormat();
2128     VkFormat srcFormat;
2129     SkAssertResult(src->backendFormat().asVkFormat(&srcFormat));
2130     SkASSERT(this->vkCaps().canCopyImage(dstFormat, dstSampleCnt, dstHasYcbcr,
2131                                          srcFormat, srcSampleCnt, srcHasYcbcr));
2132 #endif
2133     if (src->isProtected() && !dst->isProtected()) {
2134         SkDebugf("Can't copy from protected memory to non-protected");
2135         return;
2136     }
2137 
2138     // These flags are for flushing/invalidating caches; for the dst image it doesn't matter
2139     // if the cache is flushed since it is only being written to.
2140     dstImage->setImageLayout(this,
2141                              VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2142                              VK_ACCESS_TRANSFER_WRITE_BIT,
2143                              VK_PIPELINE_STAGE_TRANSFER_BIT,
2144                              false);
2145 
2146     srcImage->setImageLayout(this,
2147                              VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
2148                              VK_ACCESS_TRANSFER_READ_BIT,
2149                              VK_PIPELINE_STAGE_TRANSFER_BIT,
2150                              false);
2151 
2152     VkImageCopy copyRegion;
2153     memset(&copyRegion, 0, sizeof(VkImageCopy));
2154     copyRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
2155     copyRegion.srcOffset = { srcRect.fLeft, srcRect.fTop, 0 };
2156     copyRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
2157     copyRegion.dstOffset = { dstPoint.fX, dstPoint.fY, 0 };
2158     copyRegion.extent = { (uint32_t)srcRect.width(), (uint32_t)srcRect.height(), 1 };
2159 
2160     this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(src));
2161     this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(dst));
2162     this->currentCommandBuffer()->copyImage(this,
2163                                             srcImage,
2164                                             VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
2165                                             dstImage,
2166                                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2167                                             1,
2168                                             &copyRegion);
2169 
2170     SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
2171                                         srcRect.width(), srcRect.height());
2172     // The rect is already in device space so we pass in kTopLeft so no flip is done.
2173     this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect);
2174 }
2175 
2176 void GrVkGpu::copySurfaceAsBlit(GrSurface* dst, GrSurface* src, GrVkImage* dstImage,
2177                                 GrVkImage* srcImage, const SkIRect& srcRect,
2178                                 const SkIPoint& dstPoint) {
2179     if (!this->currentCommandBuffer()) {
2180         return;
2181     }
2182 
2183 #ifdef SK_DEBUG
2184     int dstSampleCnt = dstImage->vkImageInfo().fSampleCount;
2185     int srcSampleCnt = srcImage->vkImageInfo().fSampleCount;
2186     bool dstHasYcbcr = dstImage->ycbcrConversionInfo().isValid();
2187     bool srcHasYcbcr = srcImage->ycbcrConversionInfo().isValid();
2188     VkFormat dstFormat = dstImage->imageFormat();
2189     VkFormat srcFormat;
2190     SkAssertResult(src->backendFormat().asVkFormat(&srcFormat));
2191     SkASSERT(this->vkCaps().canCopyAsBlit(dstFormat, dstSampleCnt, dstImage->isLinearTiled(),
2192                                           dstHasYcbcr, srcFormat, srcSampleCnt,
2193                                           srcImage->isLinearTiled(), srcHasYcbcr));
2194 
2195 #endif
2196     if (src->isProtected() && !dst->isProtected()) {
2197         SkDebugf("Can't copy from protected memory to non-protected");
2198         return;
2199     }
2200 
    dstImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                             VK_ACCESS_TRANSFER_WRITE_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT,
                             false);

    srcImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                             VK_ACCESS_TRANSFER_READ_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT,
                             false);

    // Build the dst rect from the dst point and the src dimensions; both rects are in device
    // space and have the same size, so nothing is flipped or scaled.
    SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY, srcRect.width(),
                                        srcRect.height());

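    // Note that VkImageBlit takes two corner offsets per image rather than an offset plus
    // extent. Since srcRect and dstRect have identical dimensions, this blit never scales,
    // which is why the filter passed to blitImage below does not matter.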
    VkImageBlit blitRegion;
    memset(&blitRegion, 0, sizeof(VkImageBlit));
    blitRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    blitRegion.srcOffsets[0] = { srcRect.fLeft, srcRect.fTop, 0 };
    blitRegion.srcOffsets[1] = { srcRect.fRight, srcRect.fBottom, 1 };
    blitRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    blitRegion.dstOffsets[0] = { dstRect.fLeft, dstRect.fTop, 0 };
    blitRegion.dstOffsets[1] = { dstRect.fRight, dstRect.fBottom, 1 };

    this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(src));
    this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(dst));
    this->currentCommandBuffer()->blitImage(this,
                                            *srcImage,
                                            *dstImage,
                                            1,
                                            &blitRegion,
                                            VK_FILTER_NEAREST); // We never scale, so any filter works.

    // The rect is already in device space, so we pass kTopLeft and no flip is done.
    this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect);
}

void GrVkGpu::copySurfaceAsResolve(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
                                   const SkIPoint& dstPoint) {
    if (src->isProtected() && !dst->isProtected()) {
        SkDebugf("Can't copy from protected memory to non-protected\n");
        return;
    }
    GrVkRenderTarget* srcRT = static_cast<GrVkRenderTarget*>(src->asRenderTarget());
    this->resolveImage(dst, srcRT, srcRect, dstPoint);
    SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
                                        srcRect.width(), srcRect.height());
    // The rect is already in device space, so we pass kTopLeft and no flip is done.
    this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect);
}

bool GrVkGpu::onCopySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
                            const SkIPoint& dstPoint) {
#ifdef SK_DEBUG
    if (GrVkRenderTarget* srcRT = static_cast<GrVkRenderTarget*>(src->asRenderTarget())) {
        SkASSERT(!srcRT->wrapsSecondaryCommandBuffer());
    }
    if (GrVkRenderTarget* dstRT = static_cast<GrVkRenderTarget*>(dst->asRenderTarget())) {
        SkASSERT(!dstRT->wrapsSecondaryCommandBuffer());
    }
#endif
    if (src->isProtected() && !dst->isProtected()) {
        SkDebugf("Can't copy from protected memory to non-protected\n");
        return false;
    }

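    // With discardable MSAA attachments the resolve attachment, not the transient multisampled
    // color attachment, holds the surface's contents, so we prefer to copy to/from it whenever
    // it also supports input attachment usage.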
    bool useDiscardableMSAA = this->vkCaps().preferDiscardableMSAAAttachment();

    GrVkImage* dstImage;
    GrVkImage* srcImage;
    GrRenderTarget* dstRT = dst->asRenderTarget();
    if (dstRT) {
        GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(dstRT);
        if (vkRT->wrapsSecondaryCommandBuffer()) {
            return false;
        }
        if (useDiscardableMSAA && vkRT->resolveAttachment() &&
            vkRT->resolveAttachment()->supportsInputAttachmentUsage()) {
            dstImage = vkRT->resolveAttachment();
        } else {
            dstImage = vkRT->colorAttachment();
        }
    } else if (dst->asTexture()) {
        dstImage = static_cast<GrVkTexture*>(dst->asTexture())->textureAttachment();
    } else {
        // The surface is already a GrAttachment.
        dstImage = static_cast<GrVkAttachment*>(dst);
    }
    GrRenderTarget* srcRT = src->asRenderTarget();
    if (srcRT) {
        GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(srcRT);
        if (useDiscardableMSAA && vkRT->resolveAttachment() &&
            vkRT->resolveAttachment()->supportsInputAttachmentUsage()) {
            srcImage = vkRT->resolveAttachment();
        } else {
            srcImage = vkRT->colorAttachment();
        }
    } else if (src->asTexture()) {
        srcImage = static_cast<GrVkTexture*>(src->asTexture())->textureAttachment();
    } else {
        // The surface is already a GrAttachment.
        srcImage = static_cast<GrVkAttachment*>(src);
    }

    VkFormat dstFormat = dstImage->imageFormat();
    VkFormat srcFormat = srcImage->imageFormat();

    int dstSampleCnt = dstImage->vkImageInfo().fSampleCount;
    int srcSampleCnt = srcImage->vkImageInfo().fSampleCount;

    bool dstHasYcbcr = dstImage->ycbcrConversionInfo().isValid();
    bool srcHasYcbcr = srcImage->ycbcrConversionInfo().isValid();

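    // Try the three transfer mechanisms in order: a resolve (handles multisampled sources), a
    // direct vkCmdCopyImage, and finally vkCmdBlitImage, which tolerates more format and tiling
    // mismatches than a plain copy.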
    if (this->vkCaps().canCopyAsResolve(dstFormat, dstSampleCnt, dstHasYcbcr,
                                        srcFormat, srcSampleCnt, srcHasYcbcr)) {
        this->copySurfaceAsResolve(dst, src, srcRect, dstPoint);
        return true;
    }

    if (this->vkCaps().canCopyImage(dstFormat, dstSampleCnt, dstHasYcbcr,
                                    srcFormat, srcSampleCnt, srcHasYcbcr)) {
        this->copySurfaceAsCopyImage(dst, src, dstImage, srcImage, srcRect, dstPoint);
        return true;
    }

    if (this->vkCaps().canCopyAsBlit(dstFormat, dstSampleCnt, dstImage->isLinearTiled(),
                                     dstHasYcbcr, srcFormat, srcSampleCnt,
                                     srcImage->isLinearTiled(), srcHasYcbcr)) {
        this->copySurfaceAsBlit(dst, src, dstImage, srcImage, srcRect, dstPoint);
        return true;
    }

    return false;
}

bool GrVkGpu::onReadPixels(GrSurface* surface, int left, int top, int width, int height,
                           GrColorType surfaceColorType, GrColorType dstColorType, void* buffer,
                           size_t rowBytes) {
    if (surface->isProtected()) {
        return false;
    }

    if (!this->currentCommandBuffer()) {
        return false;
    }

    GrVkImage* image = nullptr;
    GrVkRenderTarget* rt = static_cast<GrVkRenderTarget*>(surface->asRenderTarget());
    if (rt) {
        // Reading from render targets that wrap a secondary command buffer is not allowed since
        // it would require us to know the VkImage, which we don't have, as well as need us to
        // stop and start the VkRenderPass which we don't have access to.
        if (rt->wrapsSecondaryCommandBuffer()) {
            return false;
        }
        image = rt->nonMSAAAttachment();
    } else {
        image = static_cast<GrVkTexture*>(surface->asTexture())->textureAttachment();
    }

    if (!image) {
        return false;
    }

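    // This read-back path performs no pixel conversion, so the requested color type must match
    // the transfer color type the caps report for this image format.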
    if (dstColorType == GrColorType::kUnknown ||
        dstColorType != this->vkCaps().transferColorType(image->imageFormat(), surfaceColorType)) {
        return false;
    }

    // Change the layout of our target so it can be used as a copy source.
    image->setImageLayout(this,
                          VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                          VK_ACCESS_TRANSFER_READ_BIT,
                          VK_PIPELINE_STAGE_TRANSFER_BIT,
                          false);

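    // vkCmdCopyImageToBuffer does no format conversion either, so bail unless the CPU color
    // type and the image format agree on bytes per pixel.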
    size_t bpp = GrColorTypeBytesPerPixel(dstColorType);
    if (GrVkFormatBytesPerBlock(image->imageFormat()) != bpp) {
        return false;
    }
    size_t tightRowBytes = bpp * width;

    VkBufferImageCopy region;
    memset(&region, 0, sizeof(VkBufferImageCopy));
    VkOffset3D offset = { left, top, 0 };
    region.imageOffset = offset;
    region.imageExtent = { (uint32_t)width, (uint32_t)height, 1 };

    size_t transBufferRowBytes = bpp * region.imageExtent.width;
    size_t imageRows = region.imageExtent.height;
    GrResourceProvider* resourceProvider = this->getContext()->priv().resourceProvider();
    sk_sp<GrGpuBuffer> transferBuffer = resourceProvider->createBuffer(
            transBufferRowBytes * imageRows, GrGpuBufferType::kXferGpuToCpu,
            kDynamic_GrAccessPattern);

    if (!transferBuffer) {
        return false;
    }

    GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(transferBuffer.get());

    // Copy the image to a buffer so we can map it to CPU memory.
    region.bufferOffset = 0;
    region.bufferRowLength = 0; // Forces RowLength to be width. We handle the rowBytes below.
    region.bufferImageHeight = 0; // Forces height to be tightly packed. Only useful for 3D images.
    region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };

    this->currentCommandBuffer()->copyImageToBuffer(this,
                                                    image,
                                                    VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                                    transferBuffer,
                                                    1,
                                                    &region);

    // Make sure the copy to the buffer has finished before the host reads it.
    vkBuffer->addMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT,
                               VK_ACCESS_HOST_READ_BIT,
                               VK_PIPELINE_STAGE_TRANSFER_BIT,
                               VK_PIPELINE_STAGE_HOST_BIT,
                               false);

    // We need to submit the current command buffer to the queue and make sure it finishes before
    // we can copy the data out of the buffer.
    if (!this->submitCommandBuffer(kForce_SyncQueue)) {
        return false;
    }
    void* mappedMemory = transferBuffer->map();

    SkRectMemcpy(buffer, rowBytes, mappedMemory, transBufferRowBytes, tightRowBytes, height);

    transferBuffer->unmap();
    return true;
}

bool GrVkGpu::beginRenderPass(const GrVkRenderPass* renderPass,
                              sk_sp<const GrVkFramebuffer> framebuffer,
                              const VkClearValue* colorClear,
                              const GrSurface* target,
                              const SkIRect& renderPassBounds,
                              bool forSecondaryCB) {
    if (!this->currentCommandBuffer()) {
        return false;
    }
    SkASSERT(!framebuffer->isExternal());

#ifdef SK_DEBUG
    uint32_t index;
    bool result = renderPass->colorAttachmentIndex(&index);
    SkASSERT(result && 0 == index);
    result = renderPass->stencilAttachmentIndex(&index);
    if (result) {
        SkASSERT(1 == index);
    }
#endif
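    // Clear values are indexed by attachment: color is attachment 0, and when a resolve
    // attachment is present it occupies slot 1, pushing stencil to slot 2. The resolve slot's
    // clear value is left uninitialized because the resolve attachment is not cleared at the
    // start of the pass.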
    VkClearValue clears[3];
    int stencilIndex = renderPass->hasResolveAttachment() ? 2 : 1;
    clears[0].color = colorClear->color;
    clears[stencilIndex].depthStencil.depth = 0.0f;
    clears[stencilIndex].depthStencil.stencil = 0;

    return this->currentCommandBuffer()->beginRenderPass(
        this, renderPass, std::move(framebuffer), clears, target, renderPassBounds, forSecondaryCB);
}

void GrVkGpu::endRenderPass(GrRenderTarget* target, GrSurfaceOrigin origin,
                            const SkIRect& bounds) {
    // We had a command buffer when we started the render pass, so we should have one now as well.
    SkASSERT(this->currentCommandBuffer());
    this->currentCommandBuffer()->endRenderPass(this);
    this->didWriteToSurface(target, origin, &bounds);
}

bool GrVkGpu::checkVkResult(VkResult result) {
    switch (result) {
        case VK_SUCCESS:
            return true;
        case VK_ERROR_DEVICE_LOST:
            fDeviceIsLost = true;
            return false;
        case VK_ERROR_OUT_OF_DEVICE_MEMORY:
        case VK_ERROR_OUT_OF_HOST_MEMORY:
            this->setOOMed();
            return false;
        default:
            return false;
    }
}

void GrVkGpu::submitSecondaryCommandBuffer(std::unique_ptr<GrVkSecondaryCommandBuffer> buffer) {
    if (!this->currentCommandBuffer()) {
        return;
    }
    this->currentCommandBuffer()->executeCommands(this, std::move(buffer));
}

void GrVkGpu::submit(GrOpsRenderPass* renderPass) {
    SkASSERT(fCachedOpsRenderPass.get() == renderPass);

    fCachedOpsRenderPass->submit();
    fCachedOpsRenderPass->reset();
}

GrFence SK_WARN_UNUSED_RESULT GrVkGpu::insertFence() {
    VkFenceCreateInfo createInfo;
    memset(&createInfo, 0, sizeof(VkFenceCreateInfo));
    createInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
    createInfo.pNext = nullptr;
    createInfo.flags = 0;
    VkFence fence = VK_NULL_HANDLE;
    VkResult result;

    VK_CALL_RET(result, CreateFence(this->device(), &createInfo, nullptr, &fence));
    if (result != VK_SUCCESS) {
        return 0;
    }
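    // Submitting zero batches with a fence is the standard Vulkan way to get a fence that
    // signals once all work previously submitted to this queue has finished executing.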
    VK_CALL_RET(result, QueueSubmit(this->queue(), 0, nullptr, fence));
    if (result != VK_SUCCESS) {
        VK_CALL(DestroyFence(this->device(), fence, nullptr));
        return 0;
    }

    static_assert(sizeof(GrFence) >= sizeof(VkFence));
    return (GrFence)fence;
}

bool GrVkGpu::waitFence(GrFence fence) {
    SkASSERT(VK_NULL_HANDLE != (VkFence)fence);

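    // The zero timeout makes vkWaitForFences a non-blocking poll: it returns VK_SUCCESS if the
    // fence has already signaled and VK_TIMEOUT otherwise, which we report as false.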
    VkResult result;
    VK_CALL_RET(result, WaitForFences(this->device(), 1, (VkFence*)&fence, VK_TRUE, 0));
    return (VK_SUCCESS == result);
}

void GrVkGpu::deleteFence(GrFence fence) const {
    VK_CALL(DestroyFence(this->device(), (VkFence)fence, nullptr));
}

std::unique_ptr<GrSemaphore> SK_WARN_UNUSED_RESULT GrVkGpu::makeSemaphore(bool isOwned) {
    return GrVkSemaphore::Make(this, isOwned);
}

std::unique_ptr<GrSemaphore> GrVkGpu::wrapBackendSemaphore(
        const GrBackendSemaphore& semaphore,
        GrResourceProvider::SemaphoreWrapType wrapType,
        GrWrapOwnership ownership) {
    return GrVkSemaphore::MakeWrapped(this, semaphore.vkSemaphore(), wrapType, ownership);
}

void GrVkGpu::insertSemaphore(GrSemaphore* semaphore) {
    SkASSERT(semaphore);

    GrVkSemaphore* vkSem = static_cast<GrVkSemaphore*>(semaphore);

    GrVkSemaphore::Resource* resource = vkSem->getResource();
    if (resource->shouldSignal()) {
        resource->ref();
        fSemaphoresToSignal.push_back(resource);
    }
}

void GrVkGpu::waitSemaphore(GrSemaphore* semaphore) {
    SkASSERT(semaphore);

    GrVkSemaphore* vkSem = static_cast<GrVkSemaphore*>(semaphore);

    GrVkSemaphore::Resource* resource = vkSem->getResource();
    if (resource->shouldWait()) {
        resource->ref();
        fSemaphoresToWaitOn.push_back(resource);
    }
}

std::unique_ptr<GrSemaphore> GrVkGpu::prepareTextureForCrossContextUsage(GrTexture* texture) {
    SkASSERT(texture);
    GrVkAttachment* vkTexture = static_cast<GrVkTexture*>(texture)->textureAttachment();
    vkTexture->setImageLayout(this,
                              VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                              VK_ACCESS_SHADER_READ_BIT,
                              VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                              false);
    // TODO: should we have a way to notify the caller that this has failed? Currently if the
    // submit fails (e.g. due to DEVICE_LOST) this will just cause us to fail the next use of the
    // GPU. Eventually we will abandon the whole GPU if this fails.
    this->submitToGpu(false);

    // The image layout change serves as a barrier, so no semaphore is needed.
    // If we ever decide we need to return a semaphore here, we need to make sure GrVkSemaphore is
    // thread safe so that only the first thread that tries to use the semaphore actually submits
    // it. This would additionally require thread safety in command buffer submissions to queues
    // in general.
    return nullptr;
}

void GrVkGpu::addDrawable(std::unique_ptr<SkDrawable::GpuDrawHandler> drawable) {
    fDrawables.emplace_back(std::move(drawable));
}

void GrVkGpu::storeVkPipelineCacheData() {
    if (this->getContext()->priv().getPersistentCache()) {
        this->resourceProvider().storePipelineCacheData();
    }
}