/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/vk/GrVkGpu.h"

#include "include/gpu/GrBackendSemaphore.h"
#include "include/gpu/GrBackendSurface.h"
#include "include/gpu/GrContextOptions.h"
#include "include/gpu/GrDirectContext.h"
#include "include/private/SkTo.h"
#include "src/core/SkCompressedDataUtils.h"
#include "src/core/SkConvertPixels.h"
#include "src/core/SkMipmap.h"
#include "src/core/SkTraceEvent.h"
#include "src/gpu/GrBackendUtils.h"
#include "src/gpu/GrDataUtils.h"
#include "src/gpu/GrDirectContextPriv.h"
#include "src/gpu/GrGeometryProcessor.h"
#include "src/gpu/GrGpuResourceCacheAccess.h"
#include "src/gpu/GrNativeRect.h"
#include "src/gpu/GrPipeline.h"
#include "src/gpu/GrRenderTarget.h"
#include "src/gpu/GrResourceProvider.h"
#include "src/gpu/GrTexture.h"
#include "src/gpu/GrThreadSafePipelineBuilder.h"
#include "src/gpu/SkGr.h"
#include "src/gpu/vk/GrVkAMDMemoryAllocator.h"
#include "src/gpu/vk/GrVkBuffer.h"
#include "src/gpu/vk/GrVkCommandBuffer.h"
#include "src/gpu/vk/GrVkCommandPool.h"
#include "src/gpu/vk/GrVkFramebuffer.h"
#include "src/gpu/vk/GrVkImage.h"
#include "src/gpu/vk/GrVkInterface.h"
#include "src/gpu/vk/GrVkMemory.h"
#include "src/gpu/vk/GrVkOpsRenderPass.h"
#include "src/gpu/vk/GrVkPipeline.h"
#include "src/gpu/vk/GrVkPipelineState.h"
#include "src/gpu/vk/GrVkRenderPass.h"
#include "src/gpu/vk/GrVkResourceProvider.h"
#include "src/gpu/vk/GrVkSemaphore.h"
#include "src/gpu/vk/GrVkTexture.h"
#include "src/gpu/vk/GrVkTextureRenderTarget.h"
#include "src/image/SkImage_Gpu.h"
#include "src/image/SkSurface_Gpu.h"

#include "include/gpu/vk/GrVkExtensions.h"
#include "include/gpu/vk/GrVkTypes.h"

#include <utility>

#ifdef SKIA_OHOS_FOR_OHOS_TRACE
#include "hitrace_meter.h"
#endif

#define VK_CALL(X) GR_VK_CALL(this->vkInterface(), X)
#define VK_CALL_RET(RET, X) GR_VK_CALL_RESULT(this, RET, X)
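// For example, VK_CALL(GetPhysicalDeviceProperties(dev, &props)) expands to
// GR_VK_CALL(this->vkInterface(), GetPhysicalDeviceProperties(dev, &props)),
// dispatching through this gpu's GrVkInterface function table.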

sk_sp<GrGpu> GrVkGpu::Make(const GrVkBackendContext& backendContext,
                           const GrContextOptions& options, GrDirectContext* direct) {
    if (backendContext.fInstance == VK_NULL_HANDLE ||
        backendContext.fPhysicalDevice == VK_NULL_HANDLE ||
        backendContext.fDevice == VK_NULL_HANDLE ||
        backendContext.fQueue == VK_NULL_HANDLE) {
        return nullptr;
    }
    if (!backendContext.fGetProc) {
        return nullptr;
    }

    PFN_vkEnumerateInstanceVersion localEnumerateInstanceVersion =
            reinterpret_cast<PFN_vkEnumerateInstanceVersion>(
                    backendContext.fGetProc("vkEnumerateInstanceVersion",
                                            VK_NULL_HANDLE, VK_NULL_HANDLE));
    uint32_t instanceVersion = 0;
    if (!localEnumerateInstanceVersion) {
        instanceVersion = VK_MAKE_VERSION(1, 0, 0);
    } else {
        VkResult err = localEnumerateInstanceVersion(&instanceVersion);
        if (err) {
            SkDebugf("Failed to enumerate instance version. Err: %d\n", err);
            return nullptr;
        }
    }

    PFN_vkGetPhysicalDeviceProperties localGetPhysicalDeviceProperties =
            reinterpret_cast<PFN_vkGetPhysicalDeviceProperties>(
                    backendContext.fGetProc("vkGetPhysicalDeviceProperties",
                                            backendContext.fInstance,
                                            VK_NULL_HANDLE));

    if (!localGetPhysicalDeviceProperties) {
        return nullptr;
    }
    VkPhysicalDeviceProperties physDeviceProperties;
    localGetPhysicalDeviceProperties(backendContext.fPhysicalDevice, &physDeviceProperties);
    uint32_t physDevVersion = physDeviceProperties.apiVersion;

    uint32_t apiVersion = backendContext.fMaxAPIVersion ? backendContext.fMaxAPIVersion
                                                        : instanceVersion;

    instanceVersion = std::min(instanceVersion, apiVersion);
    physDevVersion = std::min(physDevVersion, apiVersion);
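    // For example, with instanceVersion 1.1, physDevVersion 1.2, and fMaxAPIVersion set to 1.1,
    // both versions clamp to 1.1, so no 1.2-only entry points are assumed below.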

    sk_sp<const GrVkInterface> interface;

    if (backendContext.fVkExtensions) {
        interface.reset(new GrVkInterface(backendContext.fGetProc,
                                          backendContext.fInstance,
                                          backendContext.fDevice,
                                          instanceVersion,
                                          physDevVersion,
                                          backendContext.fVkExtensions));
        if (!interface->validate(instanceVersion, physDevVersion, backendContext.fVkExtensions)) {
            return nullptr;
        }
    } else {
        GrVkExtensions extensions;
        // The only extension flag that may affect the Vulkan backend is the swapchain extension.
        // We need to know if this is enabled to know if we can transition to a present layout
        // when flushing a surface.
        if (backendContext.fExtensions & kKHR_swapchain_GrVkExtensionFlag) {
            const char* swapChainExtName = VK_KHR_SWAPCHAIN_EXTENSION_NAME;
            extensions.init(backendContext.fGetProc, backendContext.fInstance,
                            backendContext.fPhysicalDevice, 0, nullptr, 1, &swapChainExtName);
        }
        interface.reset(new GrVkInterface(backendContext.fGetProc,
                                          backendContext.fInstance,
                                          backendContext.fDevice,
                                          instanceVersion,
                                          physDevVersion,
                                          &extensions));
        if (!interface->validate(instanceVersion, physDevVersion, &extensions)) {
            return nullptr;
        }
    }

    sk_sp<GrVkCaps> caps;
    if (backendContext.fDeviceFeatures2) {
        caps.reset(new GrVkCaps(options, interface.get(), backendContext.fPhysicalDevice,
                                *backendContext.fDeviceFeatures2, instanceVersion, physDevVersion,
                                *backendContext.fVkExtensions, backendContext.fProtectedContext));
    } else if (backendContext.fDeviceFeatures) {
        VkPhysicalDeviceFeatures2 features2;
        features2.pNext = nullptr;
        features2.features = *backendContext.fDeviceFeatures;
        caps.reset(new GrVkCaps(options, interface.get(), backendContext.fPhysicalDevice,
                                features2, instanceVersion, physDevVersion,
                                *backendContext.fVkExtensions, backendContext.fProtectedContext));
    } else {
        VkPhysicalDeviceFeatures2 features;
        memset(&features, 0, sizeof(VkPhysicalDeviceFeatures2));
        features.pNext = nullptr;
        if (backendContext.fFeatures & kGeometryShader_GrVkFeatureFlag) {
            features.features.geometryShader = true;
        }
        if (backendContext.fFeatures & kDualSrcBlend_GrVkFeatureFlag) {
            features.features.dualSrcBlend = true;
        }
        if (backendContext.fFeatures & kSampleRateShading_GrVkFeatureFlag) {
            features.features.sampleRateShading = true;
        }
        GrVkExtensions extensions;
        // The only extension flag that may affect the Vulkan backend is the swapchain extension.
        // We need to know if this is enabled to know if we can transition to a present layout
        // when flushing a surface.
        if (backendContext.fExtensions & kKHR_swapchain_GrVkExtensionFlag) {
            const char* swapChainExtName = VK_KHR_SWAPCHAIN_EXTENSION_NAME;
            extensions.init(backendContext.fGetProc, backendContext.fInstance,
                            backendContext.fPhysicalDevice, 0, nullptr, 1, &swapChainExtName);
        }
        caps.reset(new GrVkCaps(options, interface.get(), backendContext.fPhysicalDevice,
                                features, instanceVersion, physDevVersion, extensions,
                                backendContext.fProtectedContext));
    }

    if (!caps) {
        return nullptr;
    }

    sk_sp<GrVkMemoryAllocator> memoryAllocator = backendContext.fMemoryAllocator;
    if (!memoryAllocator) {
        // We were not given a memory allocator at creation
        memoryAllocator = GrVkAMDMemoryAllocator::Make(backendContext.fInstance,
                                                       backendContext.fPhysicalDevice,
                                                       backendContext.fDevice, physDevVersion,
                                                       backendContext.fVkExtensions, interface,
                                                       caps.get());
    }
    if (!memoryAllocator) {
        SkDEBUGFAIL("No supplied vulkan memory allocator and unable to create one internally.");
        return nullptr;
    }

    sk_sp<GrVkGpu> vkGpu(new GrVkGpu(direct, backendContext, std::move(caps), interface,
                                     instanceVersion, physDevVersion,
                                     std::move(memoryAllocator)));
    if (backendContext.fProtectedContext == GrProtected::kYes &&
        !vkGpu->vkCaps().supportsProtectedMemory()) {
        return nullptr;
    }
    return std::move(vkGpu);
}
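// A minimal sketch of how a client typically reaches GrVkGpu::Make. Clients do not call Make()
// directly; they fill out a GrVkBackendContext and hand it to GrDirectContext, which forwards
// here. The handle names below (instance, device, etc.) are placeholders for Vulkan objects
// the client created itself:
//
//     GrVkBackendContext backendContext;
//     backendContext.fInstance           = instance;
//     backendContext.fPhysicalDevice     = physicalDevice;
//     backendContext.fDevice             = device;
//     backendContext.fQueue              = graphicsQueue;
//     backendContext.fGraphicsQueueIndex = graphicsQueueIndex;
//     backendContext.fGetProc            = getProc;  // resolves Vulkan entry points
//     sk_sp<GrDirectContext> ctx = GrDirectContext::MakeVulkan(backendContext);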

////////////////////////////////////////////////////////////////////////////////

GrVkGpu::GrVkGpu(GrDirectContext* direct, const GrVkBackendContext& backendContext,
                 sk_sp<GrVkCaps> caps, sk_sp<const GrVkInterface> interface,
                 uint32_t instanceVersion, uint32_t physicalDeviceVersion,
                 sk_sp<GrVkMemoryAllocator> memoryAllocator)
        : INHERITED(direct)
        , fInterface(std::move(interface))
        , fMemoryAllocator(std::move(memoryAllocator))
        , fVkCaps(std::move(caps))
        , fPhysicalDevice(backendContext.fPhysicalDevice)
        , fDevice(backendContext.fDevice)
        , fQueue(backendContext.fQueue)
        , fQueueIndex(backendContext.fGraphicsQueueIndex)
        , fResourceProvider(this)
        , fStagingBufferManager(this)
        , fDisconnected(false)
        , fProtectedContext(backendContext.fProtectedContext) {
    SkASSERT(!backendContext.fOwnsInstanceAndDevice);
    SkASSERT(fMemoryAllocator);

    this->initCapsAndCompiler(fVkCaps);

    VK_CALL(GetPhysicalDeviceProperties(backendContext.fPhysicalDevice, &fPhysDevProps));
    VK_CALL(GetPhysicalDeviceMemoryProperties(backendContext.fPhysicalDevice, &fPhysDevMemProps));

    fResourceProvider.init();

    fMainCmdPool = fResourceProvider.findOrCreateCommandPool();
    if (fMainCmdPool) {
        fMainCmdBuffer = fMainCmdPool->getPrimaryCommandBuffer();
        SkASSERT(this->currentCommandBuffer());
        this->currentCommandBuffer()->begin(this);
    }
}

void GrVkGpu::destroyResources() {
    if (fMainCmdPool) {
        fMainCmdPool->getPrimaryCommandBuffer()->end(this, /*abandoningBuffer=*/true);
        fMainCmdPool->close();
    }

    // wait for all commands to finish
    this->finishOutstandingGpuWork();

    if (fMainCmdPool) {
        fMainCmdPool->unref();
        fMainCmdPool = nullptr;
    }

    for (int i = 0; i < fSemaphoresToWaitOn.count(); ++i) {
        fSemaphoresToWaitOn[i]->unref();
    }
    fSemaphoresToWaitOn.reset();

    for (int i = 0; i < fSemaphoresToSignal.count(); ++i) {
        fSemaphoresToSignal[i]->unref();
    }
    fSemaphoresToSignal.reset();

    fStagingBufferManager.reset();

    fMSAALoadManager.destroyResources(this);

    // must call this just before we destroy the command pool and VkDevice
    fResourceProvider.destroyResources();
}

GrVkGpu::~GrVkGpu() {
    if (!fDisconnected) {
        this->destroyResources();
    }
    // We don't delete the memory allocator until the very end of the GrVkGpu lifetime so that
    // clients can continue to delete backend textures even after a context has been abandoned.
    fMemoryAllocator.reset();
}

void GrVkGpu::disconnect(DisconnectType type) {
    INHERITED::disconnect(type);
    if (!fDisconnected) {
        this->destroyResources();

        fSemaphoresToWaitOn.reset();
        fSemaphoresToSignal.reset();
        fMainCmdBuffer = nullptr;
        fDisconnected = true;
    }
}

GrThreadSafePipelineBuilder* GrVkGpu::pipelineBuilder() {
    return fResourceProvider.pipelineStateCache();
}

sk_sp<GrThreadSafePipelineBuilder> GrVkGpu::refPipelineBuilder() {
    return fResourceProvider.refPipelineStateCache();
}

///////////////////////////////////////////////////////////////////////////////

GrOpsRenderPass* GrVkGpu::onGetOpsRenderPass(
        GrRenderTarget* rt,
        bool useMSAASurface,
        GrAttachment* stencil,
        GrSurfaceOrigin origin,
        const SkIRect& bounds,
        const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
        const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo,
        const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
        GrXferBarrierFlags renderPassXferBarriers) {
    if (!fCachedOpsRenderPass) {
        fCachedOpsRenderPass = std::make_unique<GrVkOpsRenderPass>(this);
    }

    // For the given render target and requested render pass features we need to find a compatible
    // framebuffer to use for the render pass. Technically it is the underlying VkRenderPass that
    // is compatible, but that is part of the framebuffer that we get here.
    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt);

    SkASSERT(!useMSAASurface ||
             rt->numSamples() > 1 ||
             (this->vkCaps().supportsDiscardableMSAAForDMSAA() &&
              vkRT->resolveAttachment() &&
              vkRT->resolveAttachment()->supportsInputAttachmentUsage()));

    // Convert the GrXferBarrierFlags into render pass self dependency flags
    GrVkRenderPass::SelfDependencyFlags selfDepFlags = GrVkRenderPass::SelfDependencyFlags::kNone;
    if (renderPassXferBarriers & GrXferBarrierFlags::kBlend) {
        selfDepFlags |= GrVkRenderPass::SelfDependencyFlags::kForNonCoherentAdvBlend;
    }
    if (renderPassXferBarriers & GrXferBarrierFlags::kTexture) {
        selfDepFlags |= GrVkRenderPass::SelfDependencyFlags::kForInputAttachment;
    }

    // Figure out if we need a resolve attachment for this render pass. A resolve attachment is
    // needed if we are using msaa to draw with a discardable msaa attachment. If we are in this
    // case we also need to update the color load/store ops since we don't want to ever load or
    // store the msaa color attachment, but may need to for the resolve attachment.
    GrOpsRenderPass::LoadAndStoreInfo localColorInfo = colorInfo;
    bool withResolve = false;
    GrVkRenderPass::LoadFromResolve loadFromResolve = GrVkRenderPass::LoadFromResolve::kNo;
    GrOpsRenderPass::LoadAndStoreInfo resolveInfo{GrLoadOp::kLoad, GrStoreOp::kStore, {}};
    if (useMSAASurface && this->vkCaps().renderTargetSupportsDiscardableMSAA(vkRT)) {
        withResolve = true;
        localColorInfo.fStoreOp = GrStoreOp::kDiscard;
        if (colorInfo.fLoadOp == GrLoadOp::kLoad) {
            loadFromResolve = GrVkRenderPass::LoadFromResolve::kLoad;
            localColorInfo.fLoadOp = GrLoadOp::kDiscard;
        } else {
            resolveInfo.fLoadOp = GrLoadOp::kDiscard;
        }
    }
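    // For example, a requested kLoad color op becomes a load through the resolve attachment
    // (loadFromResolve = kLoad) with the MSAA attachment's own ops set to discard; any other
    // requested load op leaves the resolve attachment discarded on load and stored on exit.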

    // Get the framebuffer to use for the render pass
    sk_sp<GrVkFramebuffer> framebuffer;
    if (vkRT->wrapsSecondaryCommandBuffer()) {
        framebuffer = vkRT->externalFramebuffer();
    } else {
        auto fb = vkRT->getFramebuffer(withResolve, SkToBool(stencil), selfDepFlags,
                                       loadFromResolve);
        framebuffer = sk_ref_sp(fb);
    }
    if (!framebuffer) {
        return nullptr;
    }

    if (!fCachedOpsRenderPass->set(rt, std::move(framebuffer), origin, bounds, localColorInfo,
                                   stencilInfo, resolveInfo, selfDepFlags, loadFromResolve,
                                   sampledProxies)) {
        return nullptr;
    }
    return fCachedOpsRenderPass.get();
}

bool GrVkGpu::submitCommandBuffer(SyncQueue sync) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    if (!this->currentCommandBuffer()) {
        return false;
    }
    SkASSERT(!fCachedOpsRenderPass || !fCachedOpsRenderPass->isActive());

    if (!this->currentCommandBuffer()->hasWork() && kForce_SyncQueue != sync &&
        !fSemaphoresToSignal.count() && !fSemaphoresToWaitOn.count()) {
        // We may have added finished procs during the flush call. Since there is no actual work
        // we are not submitting the command buffer and may never come back around to submit it.
        // Thus we call all current finished procs manually, since the work has technically
        // finished.
        this->currentCommandBuffer()->callFinishedProcs();
        SkASSERT(fDrawables.empty());
        fResourceProvider.checkCommandBuffers();
        return true;
    }

    fMainCmdBuffer->end(this);
    SkASSERT(fMainCmdPool);
    fMainCmdPool->close();
    bool didSubmit = fMainCmdBuffer->submitToQueue(this, fQueue, fSemaphoresToSignal,
                                                   fSemaphoresToWaitOn);

    if (didSubmit && sync == kForce_SyncQueue) {
        fMainCmdBuffer->forceSync(this);
    }

    // We must delete any drawables that had to wait until submit to destroy.
    fDrawables.reset();

    // If we didn't submit the command buffer then we did not wait on any semaphores. We will
    // continue to hold onto these semaphores and wait on them during the next command buffer
    // submission.
    if (didSubmit) {
        for (int i = 0; i < fSemaphoresToWaitOn.count(); ++i) {
            fSemaphoresToWaitOn[i]->unref();
        }
        fSemaphoresToWaitOn.reset();
    }

    // Even if we did not submit the command buffer, we drop all the signal semaphores since we
    // will not try to recover the work that wasn't submitted and instead just drop it all. The
    // client will be notified that the semaphores were not submitted so that they will not try
    // to wait on them.
    for (int i = 0; i < fSemaphoresToSignal.count(); ++i) {
        fSemaphoresToSignal[i]->unref();
    }
    fSemaphoresToSignal.reset();

    // Release old command pool and create a new one
    fMainCmdPool->unref();
    fMainCmdPool = fResourceProvider.findOrCreateCommandPool();
    if (fMainCmdPool) {
        fMainCmdBuffer = fMainCmdPool->getPrimaryCommandBuffer();
        SkASSERT(fMainCmdBuffer);
        fMainCmdBuffer->begin(this);
    } else {
        fMainCmdBuffer = nullptr;
    }
    // We must wait to call checkCommandBuffers until after we get a new command buffer. The
    // checkCommandBuffers may trigger a releaseProc which may cause us to insert a barrier for a
    // released GrVkImage. That barrier needs to be put into a new command buffer and not the old
    // one that was just submitted.
    fResourceProvider.checkCommandBuffers();
    return didSubmit;
}

///////////////////////////////////////////////////////////////////////////////
sk_sp<GrGpuBuffer> GrVkGpu::onCreateBuffer(size_t size, GrGpuBufferType type,
                                           GrAccessPattern accessPattern, const void* data) {
#ifdef SK_DEBUG
    switch (type) {
        case GrGpuBufferType::kVertex:
        case GrGpuBufferType::kIndex:
        case GrGpuBufferType::kDrawIndirect:
            SkASSERT(accessPattern == kDynamic_GrAccessPattern ||
                     accessPattern == kStatic_GrAccessPattern);
            break;
        case GrGpuBufferType::kXferCpuToGpu:
            SkASSERT(accessPattern == kDynamic_GrAccessPattern);
            break;
        case GrGpuBufferType::kXferGpuToCpu:
            SkASSERT(accessPattern == kDynamic_GrAccessPattern ||
                     accessPattern == kStream_GrAccessPattern);
            break;
        case GrGpuBufferType::kUniform:
            SkASSERT(accessPattern == kDynamic_GrAccessPattern);
            break;
    }
#endif
    sk_sp<GrGpuBuffer> buff = GrVkBuffer::Make(this, size, type, accessPattern);

    if (data && buff) {
        buff->updateData(data, size);
    }
    return buff;
}

bool GrVkGpu::onWritePixels(GrSurface* surface,
                            SkIRect rect,
                            GrColorType surfaceColorType,
                            GrColorType srcColorType,
                            const GrMipLevel texels[],
                            int mipLevelCount,
                            bool prepForTexSampling) {
    GrVkTexture* texture = static_cast<GrVkTexture*>(surface->asTexture());
    if (!texture) {
        return false;
    }
    GrVkImage* texImage = texture->textureImage();

    // Make sure we have at least the base level
    if (!mipLevelCount || !texels[0].fPixels) {
        return false;
    }

    SkASSERT(!GrVkFormatIsCompressed(texImage->imageFormat()));
    bool success = false;
    bool linearTiling = texImage->isLinearTiled();
    if (linearTiling) {
        if (mipLevelCount > 1) {
            SkDebugf("Can't upload mipmap data to linear tiled texture\n");
            return false;
        }
        if (VK_IMAGE_LAYOUT_PREINITIALIZED != texImage->currentLayout()) {
            // Need to change the layout to general in order to perform a host write
            texImage->setImageLayout(this,
                                     VK_IMAGE_LAYOUT_GENERAL,
                                     VK_ACCESS_HOST_WRITE_BIT,
                                     VK_PIPELINE_STAGE_HOST_BIT,
                                     false);
            if (!this->submitCommandBuffer(kForce_SyncQueue)) {
                return false;
            }
        }
        success = this->uploadTexDataLinear(texImage,
                                            rect,
                                            srcColorType,
                                            texels[0].fPixels,
                                            texels[0].fRowBytes);
    } else {
        SkASSERT(mipLevelCount <= (int)texImage->mipLevels());
        success = this->uploadTexDataOptimal(texImage,
                                             rect,
                                             srcColorType,
                                             texels,
                                             mipLevelCount);
        if (1 == mipLevelCount) {
            texture->markMipmapsDirty();
        }
    }

    if (prepForTexSampling) {
        texImage->setImageLayout(this,
                                 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                                 VK_ACCESS_SHADER_READ_BIT,
                                 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                                 false);
    }

    return success;
}

bool GrVkGpu::onTransferPixelsTo(GrTexture* texture,
                                 SkIRect rect,
                                 GrColorType surfaceColorType,
                                 GrColorType bufferColorType,
                                 sk_sp<GrGpuBuffer> transferBuffer,
                                 size_t bufferOffset,
                                 size_t rowBytes) {
    if (!this->currentCommandBuffer()) {
        return false;
    }

    size_t bpp = GrColorTypeBytesPerPixel(bufferColorType);
    if (GrBackendFormatBytesPerPixel(texture->backendFormat()) != bpp) {
        return false;
    }

    // Vulkan only supports offsets that are both 4-byte aligned and aligned to a pixel.
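    // For example, with bpp == 2 a bufferOffset of 6 is rejected: it is pixel-aligned but not
    // 4-byte aligned.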
    if ((bufferOffset & 0x3) || (bufferOffset % bpp)) {
        return false;
    }
    GrVkTexture* tex = static_cast<GrVkTexture*>(texture);
    if (!tex) {
        return false;
    }
    GrVkImage* vkImage = tex->textureImage();
    VkFormat format = vkImage->imageFormat();

    // Can't transfer compressed data
    SkASSERT(!GrVkFormatIsCompressed(format));

    if (!transferBuffer) {
        return false;
    }

    if (bufferColorType != this->vkCaps().transferColorType(format, surfaceColorType)) {
        return false;
    }
    SkASSERT(GrVkFormatBytesPerBlock(format) == GrColorTypeBytesPerPixel(bufferColorType));

    SkASSERT(SkIRect::MakeSize(texture->dimensions()).contains(rect));

    // Set up copy region
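    // Note: per the Vulkan spec, bufferRowLength and bufferImageHeight are measured in texels,
    // not bytes, and a value of 0 means the data is tightly packed to imageExtent.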
    VkBufferImageCopy region;
    memset(&region, 0, sizeof(VkBufferImageCopy));
    region.bufferOffset = bufferOffset;
    region.bufferRowLength = (uint32_t)(rowBytes/bpp);
    region.bufferImageHeight = 0;
    region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    region.imageOffset = { rect.left(), rect.top(), 0 };
    region.imageExtent = { (uint32_t)rect.width(), (uint32_t)rect.height(), 1 };

    // Change layout of our target so it can be copied to
    vkImage->setImageLayout(this,
                            VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                            VK_ACCESS_TRANSFER_WRITE_BIT,
                            VK_PIPELINE_STAGE_TRANSFER_BIT,
                            false);

    const GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(transferBuffer.get());

    // Copy the buffer to the image.
    this->currentCommandBuffer()->copyBufferToImage(this,
                                                    vkBuffer->vkBuffer(),
                                                    vkImage,
                                                    VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                                    1,
                                                    &region);
    this->currentCommandBuffer()->addGrBuffer(std::move(transferBuffer));

    tex->markMipmapsDirty();
    return true;
}

bool GrVkGpu::onTransferPixelsFrom(GrSurface* surface,
                                   SkIRect rect,
                                   GrColorType surfaceColorType,
                                   GrColorType bufferColorType,
                                   sk_sp<GrGpuBuffer> transferBuffer,
                                   size_t offset) {
    if (!this->currentCommandBuffer()) {
        return false;
    }
    SkASSERT(surface);
    SkASSERT(transferBuffer);
    if (fProtectedContext == GrProtected::kYes) {
        return false;
    }

    GrVkImage* srcImage;
    if (GrVkRenderTarget* rt = static_cast<GrVkRenderTarget*>(surface->asRenderTarget())) {
        // Reading from render targets that wrap a secondary command buffer is not allowed since
        // it would require us to know the VkImage, which we don't have, as well as need us to
        // stop and start the VkRenderPass which we don't have access to.
        if (rt->wrapsSecondaryCommandBuffer()) {
            return false;
        }
        if (!rt->nonMSAAAttachment()) {
            return false;
        }
        srcImage = rt->nonMSAAAttachment();
    } else {
        SkASSERT(surface->asTexture());
        srcImage = static_cast<GrVkTexture*>(surface->asTexture())->textureImage();
    }

    VkFormat format = srcImage->imageFormat();
    if (bufferColorType != this->vkCaps().transferColorType(format, surfaceColorType)) {
        return false;
    }
    SkASSERT(GrVkFormatBytesPerBlock(format) == GrColorTypeBytesPerPixel(bufferColorType));

    // Set up copy region
    VkBufferImageCopy region;
    memset(&region, 0, sizeof(VkBufferImageCopy));
    region.bufferOffset = offset;
    region.bufferRowLength = rect.width();
    region.bufferImageHeight = 0;
    region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    region.imageOffset = {rect.left(), rect.top(), 0};
    region.imageExtent = {(uint32_t)rect.width(), (uint32_t)rect.height(), 1};

    srcImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                             VK_ACCESS_TRANSFER_READ_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT,
                             false);

    this->currentCommandBuffer()->copyImageToBuffer(this, srcImage,
                                                    VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                                    transferBuffer, 1, &region);

    GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(transferBuffer.get());
    // Make sure the copy to buffer has finished.
    vkBuffer->addMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT,
                               VK_ACCESS_HOST_READ_BIT,
                               VK_PIPELINE_STAGE_TRANSFER_BIT,
                               VK_PIPELINE_STAGE_HOST_BIT,
                               false);
    return true;
}

void GrVkGpu::resolveImage(GrSurface* dst, GrVkRenderTarget* src, const SkIRect& srcRect,
                           const SkIPoint& dstPoint) {
    if (!this->currentCommandBuffer()) {
        return;
    }

    SkASSERT(dst);
    SkASSERT(src && src->colorAttachment() && src->colorAttachment()->numSamples() > 1);

    VkImageResolve resolveInfo;
    resolveInfo.srcSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
    resolveInfo.srcOffset = {srcRect.fLeft, srcRect.fTop, 0};
    resolveInfo.dstSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
    resolveInfo.dstOffset = {dstPoint.fX, dstPoint.fY, 0};
    resolveInfo.extent = {(uint32_t)srcRect.width(), (uint32_t)srcRect.height(), 1};

    GrVkImage* dstImage;
    GrRenderTarget* dstRT = dst->asRenderTarget();
    GrTexture* dstTex = dst->asTexture();
    if (dstTex) {
        dstImage = static_cast<GrVkTexture*>(dstTex)->textureImage();
    } else {
        SkASSERT(dst->asRenderTarget());
        dstImage = static_cast<GrVkRenderTarget*>(dstRT)->nonMSAAAttachment();
    }
    SkASSERT(dstImage);

    dstImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                             VK_ACCESS_TRANSFER_WRITE_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT,
                             false);

    src->colorAttachment()->setImageLayout(this,
                                           VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                           VK_ACCESS_TRANSFER_READ_BIT,
                                           VK_PIPELINE_STAGE_TRANSFER_BIT,
                                           false);
    this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(src->colorAttachment()));
    this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(dst));
    this->currentCommandBuffer()->resolveImage(this, *src->colorAttachment(), *dstImage, 1,
                                               &resolveInfo);
}

void GrVkGpu::onResolveRenderTarget(GrRenderTarget* target, const SkIRect& resolveRect) {
    SkASSERT(target->numSamples() > 1);
    GrVkRenderTarget* rt = static_cast<GrVkRenderTarget*>(target);
    SkASSERT(rt->colorAttachmentView() && rt->resolveAttachmentView());

    if (this->vkCaps().renderTargetSupportsDiscardableMSAA(rt)) {
        // We would have resolved the RT during the render pass.
        return;
    }

    this->resolveImage(target, rt, resolveRect,
                       SkIPoint::Make(resolveRect.x(), resolveRect.y()));
}

bool GrVkGpu::uploadTexDataLinear(GrVkImage* texImage,
                                  SkIRect rect,
                                  GrColorType dataColorType,
                                  const void* data,
                                  size_t rowBytes) {
    SkASSERT(data);
    SkASSERT(texImage->isLinearTiled());

    SkASSERT(SkIRect::MakeSize(texImage->dimensions()).contains(rect));

    size_t bpp = GrColorTypeBytesPerPixel(dataColorType);
    size_t trimRowBytes = rect.width() * bpp;

    SkASSERT(VK_IMAGE_LAYOUT_PREINITIALIZED == texImage->currentLayout() ||
             VK_IMAGE_LAYOUT_GENERAL == texImage->currentLayout());
    const VkImageSubresource subres = {
        VK_IMAGE_ASPECT_COLOR_BIT,
        0,  // mipLevel
        0,  // arraySlice
    };
    VkSubresourceLayout layout;

    const GrVkInterface* interface = this->vkInterface();

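    // Ask the driver where mip 0 actually lives: layout.rowPitch may be larger than
    // trimRowBytes, so the mapped writes below must step by the driver's pitch.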
    GR_VK_CALL(interface, GetImageSubresourceLayout(fDevice,
                                                    texImage->image(),
                                                    &subres,
                                                    &layout));

    const GrVkAlloc& alloc = texImage->alloc();
    if (VK_NULL_HANDLE == alloc.fMemory) {
        return false;
    }
    VkDeviceSize offset = rect.top()*layout.rowPitch + rect.left()*bpp;
    VkDeviceSize size = rect.height()*layout.rowPitch;
    SkASSERT(size + offset <= alloc.fSize);
    void* mapPtr = GrVkMemory::MapAlloc(this, alloc);
    if (!mapPtr) {
        return false;
    }
    mapPtr = reinterpret_cast<char*>(mapPtr) + offset;

    SkRectMemcpy(mapPtr,
                 static_cast<size_t>(layout.rowPitch),
                 data,
                 rowBytes,
                 trimRowBytes,
                 rect.height());

    GrVkMemory::FlushMappedAlloc(this, alloc, offset, size);
    GrVkMemory::UnmapAlloc(this, alloc);

    return true;
}

// This fills in the 'regions' vector in preparation for copying a buffer to an image.
// 'individualMipOffsets' is filled in as a side-effect.
static size_t fill_in_compressed_regions(GrStagingBufferManager* stagingBufferManager,
                                         SkTArray<VkBufferImageCopy>* regions,
                                         SkTArray<size_t>* individualMipOffsets,
                                         GrStagingBufferManager::Slice* slice,
                                         SkImage::CompressionType compression,
                                         VkFormat vkFormat,
                                         SkISize dimensions,
                                         GrMipmapped mipmapped) {
    SkASSERT(compression != SkImage::CompressionType::kNone);
    int numMipLevels = 1;
    if (mipmapped == GrMipmapped::kYes) {
        numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1;
    }

    regions->reserve_back(numMipLevels);
    individualMipOffsets->reserve_back(numMipLevels);

    size_t bytesPerBlock = GrVkFormatBytesPerBlock(vkFormat);

    size_t bufferSize = SkCompressedDataSize(compression,
                                             dimensions,
                                             individualMipOffsets,
                                             mipmapped == GrMipmapped::kYes);
    SkASSERT(individualMipOffsets->count() == numMipLevels);

    // Get a staging buffer slice to hold our mip data.
    // Vulkan requires offsets in the buffer to be aligned to a multiple of both the texel size
    // and 4.
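    // For example (illustrative block sizes): bytesPerBlock == 8 stays 8, bytesPerBlock == 2
    // becomes 4, and bytesPerBlock == 3 becomes 12; in each case the result is the smallest
    // alignment that is a multiple of both the block size and 4.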
    size_t alignment = bytesPerBlock;
    switch (alignment & 0b11) {
        case 0:                     break;   // alignment is already a multiple of 4.
        case 2:     alignment *= 2; break;   // alignment is a multiple of 2 but not 4.
        default:    alignment *= 4; break;   // alignment is not a multiple of 2.
    }
    *slice = stagingBufferManager->allocateStagingBufferSlice(bufferSize, alignment);
    if (!slice->fBuffer) {
        return 0;
    }

    for (int i = 0; i < numMipLevels; ++i) {
        VkBufferImageCopy& region = regions->push_back();
        memset(&region, 0, sizeof(VkBufferImageCopy));
        region.bufferOffset = slice->fOffset + (*individualMipOffsets)[i];
        SkISize revisedDimensions = GrCompressedDimensions(compression, dimensions);
        region.bufferRowLength = revisedDimensions.width();
        region.bufferImageHeight = revisedDimensions.height();
        region.imageSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, SkToU32(i), 0, 1};
        region.imageOffset = {0, 0, 0};
        region.imageExtent = {SkToU32(dimensions.width()),
                              SkToU32(dimensions.height()), 1};

        dimensions = {std::max(1, dimensions.width() /2),
                      std::max(1, dimensions.height()/2)};
    }

    return bufferSize;
}

bool GrVkGpu::uploadTexDataOptimal(GrVkImage* texImage,
                                   SkIRect rect,
                                   GrColorType dataColorType,
                                   const GrMipLevel texels[],
                                   int mipLevelCount) {
    if (!this->currentCommandBuffer()) {
        return false;
    }

    SkASSERT(!texImage->isLinearTiled());
    // The assumption is either that we have no mipmaps, or that our rect is the entire texture
    SkASSERT(mipLevelCount == 1 || rect == SkIRect::MakeSize(texImage->dimensions()));

    // We assume that if the texture has mip levels, we either upload to all the levels or just the
    // first.
    SkASSERT(mipLevelCount == 1 || mipLevelCount == (int)texImage->mipLevels());

    SkASSERT(!rect.isEmpty());

    SkASSERT(this->vkCaps().surfaceSupportsWritePixels(texImage));

    SkASSERT(this->vkCaps().isVkFormatTexturable(texImage->imageFormat()));
    size_t bpp = GrColorTypeBytesPerPixel(dataColorType);

    // texels is const.
    // But we may need to adjust the fPixels ptr based on the copyRect, or fRowBytes.
    // Because of this we need to make a non-const shallow copy of texels.
    SkAutoTArray<GrMipLevel> texelsShallowCopy(mipLevelCount);
    std::copy_n(texels, mipLevelCount, texelsShallowCopy.get());

    SkTArray<size_t> individualMipOffsets;
    size_t combinedBufferSize;
    if (mipLevelCount > 1) {
        combinedBufferSize = GrComputeTightCombinedBufferSize(bpp,
                                                              rect.size(),
                                                              &individualMipOffsets,
                                                              mipLevelCount);
    } else {
        SkASSERT(texelsShallowCopy[0].fPixels && texelsShallowCopy[0].fRowBytes);
        combinedBufferSize = rect.width()*rect.height()*bpp;
        individualMipOffsets.push_back(0);
    }
    SkASSERT(combinedBufferSize);

    // Get a staging buffer slice to hold our mip data.
    // Vulkan requires offsets in the buffer to be aligned to a multiple of both the texel size
    // and 4 (see the rounding example in fill_in_compressed_regions above).
    size_t alignment = bpp;
    switch (alignment & 0b11) {
        case 0:                     break;   // alignment is already a multiple of 4.
        case 2:     alignment *= 2; break;   // alignment is a multiple of 2 but not 4.
        default:    alignment *= 4; break;   // alignment is not a multiple of 2.
    }
    GrStagingBufferManager::Slice slice =
            fStagingBufferManager.allocateStagingBufferSlice(combinedBufferSize, alignment);
    if (!slice.fBuffer) {
        return false;
    }

    int uploadLeft = rect.left();
    int uploadTop = rect.top();

    char* buffer = (char*) slice.fOffsetMapPtr;
    SkTArray<VkBufferImageCopy> regions(mipLevelCount);

    int currentWidth = rect.width();
    int currentHeight = rect.height();
    for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
        if (texelsShallowCopy[currentMipLevel].fPixels) {
            const size_t trimRowBytes = currentWidth * bpp;
            const size_t rowBytes = texelsShallowCopy[currentMipLevel].fRowBytes;

            // copy data into the buffer, skipping the trailing bytes
            char* dst = buffer + individualMipOffsets[currentMipLevel];
            const char* src = (const char*)texelsShallowCopy[currentMipLevel].fPixels;
            SkRectMemcpy(dst, trimRowBytes, src, rowBytes, trimRowBytes, currentHeight);

            VkBufferImageCopy& region = regions.push_back();
            memset(&region, 0, sizeof(VkBufferImageCopy));
            region.bufferOffset = slice.fOffset + individualMipOffsets[currentMipLevel];
            region.bufferRowLength = currentWidth;
            region.bufferImageHeight = currentHeight;
            region.imageSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, SkToU32(currentMipLevel), 0, 1};
            region.imageOffset = {uploadLeft, uploadTop, 0};
            region.imageExtent = {(uint32_t)currentWidth, (uint32_t)currentHeight, 1};
        }

        currentWidth  = std::max(1,  currentWidth/2);
        currentHeight = std::max(1, currentHeight/2);
    }

    // Change layout of our target so it can be copied to
    texImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                             VK_ACCESS_TRANSFER_WRITE_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT,
                             false);

    // Copy the buffer to the image. This call takes the raw VkBuffer instead of a GrGpuBuffer
    // because we don't need the command buffer to ref the buffer here: the buffer is coming from
    // the staging manager, and the staging manager will make sure the command buffer has a ref
    // on it. This avoids having to add and remove a ref for every upload in the frame.
    GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(slice.fBuffer);
    this->currentCommandBuffer()->copyBufferToImage(this,
                                                    vkBuffer->vkBuffer(),
                                                    texImage,
                                                    VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                                    regions.count(),
                                                    regions.begin());
    return true;
}

// It's probably possible to roll this into uploadTexDataOptimal,
// but for now it's easier to maintain as a separate entity.
bool GrVkGpu::uploadTexDataCompressed(GrVkImage* uploadTexture,
                                      SkImage::CompressionType compression, VkFormat vkFormat,
                                      SkISize dimensions, GrMipmapped mipMapped,
                                      const void* data, size_t dataSize) {
    if (!this->currentCommandBuffer()) {
        return false;
    }
    SkASSERT(data);
    SkASSERT(!uploadTexture->isLinearTiled());
    // For now the assumption is that our rect is the entire texture.
    // Compressed textures are read-only so this should be a reasonable assumption.
    SkASSERT(dimensions.fWidth == uploadTexture->width() &&
             dimensions.fHeight == uploadTexture->height());

    if (dimensions.fWidth == 0 || dimensions.fHeight == 0) {
        return false;
    }

    SkASSERT(uploadTexture->imageFormat() == vkFormat);
    SkASSERT(this->vkCaps().isVkFormatTexturable(vkFormat));

    GrStagingBufferManager::Slice slice;
    SkTArray<VkBufferImageCopy> regions;
    SkTArray<size_t> individualMipOffsets;
    SkDEBUGCODE(size_t combinedBufferSize =) fill_in_compressed_regions(&fStagingBufferManager,
                                                                        &regions,
                                                                        &individualMipOffsets,
                                                                        &slice,
                                                                        compression,
                                                                        vkFormat,
                                                                        dimensions,
                                                                        mipMapped);
    if (!slice.fBuffer) {
        return false;
    }
    SkASSERT(dataSize == combinedBufferSize);

    {
        char* buffer = (char*)slice.fOffsetMapPtr;
        memcpy(buffer, data, dataSize);
    }

    // Change layout of our target so it can be copied to
    uploadTexture->setImageLayout(this,
                                  VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                  VK_ACCESS_TRANSFER_WRITE_BIT,
                                  VK_PIPELINE_STAGE_TRANSFER_BIT,
                                  false);

    // Copy the buffer to the image. This call takes the raw VkBuffer instead of a GrGpuBuffer
    // because we don't need the command buffer to ref the buffer here: the buffer is coming from
    // the staging manager, and the staging manager will make sure the command buffer has a ref
    // on it. This avoids having to add and remove a ref for every upload in the frame.
    GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(slice.fBuffer);
    this->currentCommandBuffer()->copyBufferToImage(this,
                                                    vkBuffer->vkBuffer(),
                                                    uploadTexture,
                                                    VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                                    regions.count(),
                                                    regions.begin());

    return true;
}

////////////////////////////////////////////////////////////////////////////////
// TODO: make this take a GrMipmapped
sk_sp<GrTexture> GrVkGpu::onCreateTexture(SkISize dimensions,
                                          const GrBackendFormat& format,
                                          GrRenderable renderable,
                                          int renderTargetSampleCnt,
                                          SkBudgeted budgeted,
                                          GrProtected isProtected,
                                          int mipLevelCount,
                                          uint32_t levelClearMask) {
    VkFormat pixelFormat;
    SkAssertResult(format.asVkFormat(&pixelFormat));
    SkASSERT(!GrVkFormatIsCompressed(pixelFormat));
    SkASSERT(mipLevelCount > 0);

#ifdef SKIA_OHOS_FOR_OHOS_TRACE
    HITRACE_METER_FMT(HITRACE_TAG_GRAPHIC_AGP, "GrVkGpu::onCreateTexture width = %d, height = %d",
        dimensions.width(), dimensions.height());
#endif
    GrMipmapStatus mipmapStatus =
            mipLevelCount > 1 ? GrMipmapStatus::kDirty : GrMipmapStatus::kNotAllocated;

    sk_sp<GrVkTexture> tex;
    if (renderable == GrRenderable::kYes) {
        tex = GrVkTextureRenderTarget::MakeNewTextureRenderTarget(
                this, budgeted, dimensions, pixelFormat, mipLevelCount, renderTargetSampleCnt,
                mipmapStatus, isProtected);
    } else {
        tex = GrVkTexture::MakeNewTexture(this, budgeted, dimensions, pixelFormat,
                                          mipLevelCount, isProtected, mipmapStatus);
    }

    if (!tex) {
        return nullptr;
    }

    if (levelClearMask) {
        if (!this->currentCommandBuffer()) {
            return nullptr;
        }
        SkSTArray<1, VkImageSubresourceRange> ranges;
        bool inRange = false;
        GrVkImage* texImage = tex->textureImage();
        for (uint32_t i = 0; i < texImage->mipLevels(); ++i) {
            if (levelClearMask & (1U << i)) {
                if (inRange) {
                    ranges.back().levelCount++;
                } else {
                    auto& range = ranges.push_back();
                    range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
                    range.baseArrayLayer = 0;
                    range.baseMipLevel = i;
                    range.layerCount = 1;
                    range.levelCount = 1;
                    inRange = true;
                }
            } else if (inRange) {
                inRange = false;
            }
        }
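        // For example, levelClearMask 0b0111 produces a single range {baseMipLevel = 0,
        // levelCount = 3}, while 0b0101 produces two ranges: {0, 1} and {2, 1}.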
        SkASSERT(!ranges.empty());
        static constexpr VkClearColorValue kZeroClearColor = {};
        texImage->setImageLayout(this, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                 VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
                                 false);
        this->currentCommandBuffer()->clearColorImage(this, texImage, &kZeroClearColor,
                                                      ranges.count(), ranges.begin());
    }
    return std::move(tex);
}

sk_sp<GrTexture> GrVkGpu::onCreateCompressedTexture(SkISize dimensions,
                                                    const GrBackendFormat& format,
                                                    SkBudgeted budgeted,
                                                    GrMipmapped mipMapped,
                                                    GrProtected isProtected,
                                                    const void* data, size_t dataSize) {
    VkFormat pixelFormat;
    SkAssertResult(format.asVkFormat(&pixelFormat));
    SkASSERT(GrVkFormatIsCompressed(pixelFormat));

    int numMipLevels = 1;
    if (mipMapped == GrMipmapped::kYes) {
        numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1;
    }

    GrMipmapStatus mipmapStatus = (mipMapped == GrMipmapped::kYes) ? GrMipmapStatus::kValid
                                                                   : GrMipmapStatus::kNotAllocated;

    auto tex = GrVkTexture::MakeNewTexture(this, budgeted, dimensions, pixelFormat,
                                           numMipLevels, isProtected, mipmapStatus);
    if (!tex) {
        return nullptr;
    }

    SkImage::CompressionType compression = GrBackendFormatToCompressionType(format);
    if (!this->uploadTexDataCompressed(tex->textureImage(), compression, pixelFormat,
                                       dimensions, mipMapped, data, dataSize)) {
        return nullptr;
    }

    return std::move(tex);
}

////////////////////////////////////////////////////////////////////////////////

void GrVkGpu::copyBuffer(sk_sp<GrGpuBuffer> srcBuffer,
                         sk_sp<GrGpuBuffer> dstBuffer,
                         VkDeviceSize srcOffset,
                         VkDeviceSize dstOffset,
                         VkDeviceSize size) {
    if (!this->currentCommandBuffer()) {
        return;
    }
    VkBufferCopy copyRegion;
    copyRegion.srcOffset = srcOffset;
    copyRegion.dstOffset = dstOffset;
    copyRegion.size = size;
    this->currentCommandBuffer()->copyBuffer(this, std::move(srcBuffer), std::move(dstBuffer), 1,
                                             &copyRegion);
}

bool GrVkGpu::updateBuffer(sk_sp<GrVkBuffer> buffer, const void* src,
                           VkDeviceSize offset, VkDeviceSize size) {
    if (!this->currentCommandBuffer()) {
        return false;
    }
    // Record the update inline in the command buffer. Note that vkCmdUpdateBuffer is only meant
    // for small updates: the spec caps it at 65536 bytes and requires 4-byte aligned offset/size.
    this->currentCommandBuffer()->updateBuffer(this, std::move(buffer), offset, size, src);

    return true;
}

////////////////////////////////////////////////////////////////////////////////

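// Validates that a client-supplied GrVkImageInfo describes an image Ganesh can safely wrap: the
// image (and, when we are adopting it, its memory) must exist, its layout/queue-family state must
// be representable, and its usage flags must include the transfer bits Ganesh relies on.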
static bool check_image_info(const GrVkCaps& caps,
                             const GrVkImageInfo& info,
                             bool needsAllocation,
                             uint32_t graphicsQueueIndex) {
    if (VK_NULL_HANDLE == info.fImage) {
        return false;
    }

    if (VK_NULL_HANDLE == info.fAlloc.fMemory && needsAllocation) {
        return false;
    }

    if (info.fImageLayout == VK_IMAGE_LAYOUT_PRESENT_SRC_KHR && !caps.supportsSwapchain()) {
        return false;
    }

    if (info.fCurrentQueueFamily != VK_QUEUE_FAMILY_IGNORED &&
        info.fCurrentQueueFamily != VK_QUEUE_FAMILY_EXTERNAL &&
        info.fCurrentQueueFamily != VK_QUEUE_FAMILY_FOREIGN_EXT) {
        if (info.fSharingMode == VK_SHARING_MODE_EXCLUSIVE) {
            if (info.fCurrentQueueFamily != graphicsQueueIndex) {
                return false;
            }
        } else {
            return false;
        }
    }

    if (info.fYcbcrConversionInfo.isValid()) {
        if (!caps.supportsYcbcrConversion()) {
            return false;
        }
        if (info.fYcbcrConversionInfo.fExternalFormat != 0) {
            return true;
        }
    }

    // We currently require everything to be made with transfer bits set
    if (!SkToBool(info.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_SRC_BIT) ||
        !SkToBool(info.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_DST_BIT)) {
        return false;
    }

    return true;
}

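// Validates texture-specific requirements for a wrapped image: it must be single-sampled,
// texturable under its tiling mode, and created with VK_IMAGE_USAGE_SAMPLED_BIT.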
static bool check_tex_image_info(const GrVkCaps& caps, const GrVkImageInfo& info) {
    // We don't support directly importing multisampled textures for sampling from shaders.
    if (info.fSampleCount != 1) {
        return false;
    }

    if (info.fYcbcrConversionInfo.isValid() && info.fYcbcrConversionInfo.fExternalFormat != 0) {
        return true;
    }
    if (info.fImageTiling == VK_IMAGE_TILING_OPTIMAL) {
        if (!caps.isVkFormatTexturable(info.fFormat)) {
            return false;
        }
    } else if (info.fImageTiling == VK_IMAGE_TILING_LINEAR) {
        if (!caps.isVkFormatTexturableLinearly(info.fFormat)) {
            return false;
        }
    } else if (info.fImageTiling == VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT) {
        if (!caps.supportsDRMFormatModifiers()) {
            return false;
        }
        // To be technically correct we should query the Vulkan support for each VkFormat and
        // drmFormatModifier pair to confirm the required feature support is there. However, we
        // currently don't have our caps and format tables set up to do this efficiently. So
        // instead we just rely on the client's passed-in VkImageUsageFlags and assume they were
        // set up using valid features (checked below). In practice this should all be safe
        // because currently we are setting all drm format modifier textures to have a
        // GrTextureType::kExternal, so we just really need to be able to read these video
        // VkImages in a shader. The video decoder isn't going to give us VkImages that don't
        // support being sampled.
    } else {
        SkUNREACHABLE;
    }

    // We currently require all textures to be made with sample support
    if (!SkToBool(info.fImageUsageFlags & VK_IMAGE_USAGE_SAMPLED_BIT)) {
        return false;
    }

    return true;
}

static bool check_rt_image_info(const GrVkCaps& caps, const GrVkImageInfo& info, bool resolveOnly) {
    if (!caps.isFormatRenderable(info.fFormat, info.fSampleCount)) {
        return false;
    }
    if (!resolveOnly && !SkToBool(info.fImageUsageFlags & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT)) {
        return false;
    }
    return true;
}

sk_sp<GrTexture> GrVkGpu::onWrapBackendTexture(const GrBackendTexture& backendTex,
                                               GrWrapOwnership ownership,
                                               GrWrapCacheable cacheable,
                                               GrIOType ioType) {
    GrVkImageInfo imageInfo;
    if (!backendTex.getVkImageInfo(&imageInfo)) {
        return nullptr;
    }

    if (!check_image_info(this->vkCaps(), imageInfo, kAdopt_GrWrapOwnership == ownership,
                          this->queueIndex())) {
        return nullptr;
    }

    if (!check_tex_image_info(this->vkCaps(), imageInfo)) {
        return nullptr;
    }

    if (backendTex.isProtected() && (fProtectedContext == GrProtected::kNo)) {
        return nullptr;
    }

    sk_sp<GrBackendSurfaceMutableStateImpl> mutableState = backendTex.getMutableState();
    SkASSERT(mutableState);
    return GrVkTexture::MakeWrappedTexture(this, backendTex.dimensions(), ownership, cacheable,
                                           ioType, imageInfo, std::move(mutableState));
}

sk_sp<GrTexture> GrVkGpu::onWrapCompressedBackendTexture(const GrBackendTexture& beTex,
                                                         GrWrapOwnership ownership,
                                                         GrWrapCacheable cacheable) {
    return this->onWrapBackendTexture(beTex, ownership, cacheable, kRead_GrIOType);
}

sk_sp<GrTexture> GrVkGpu::onWrapRenderableBackendTexture(const GrBackendTexture& backendTex,
                                                         int sampleCnt,
                                                         GrWrapOwnership ownership,
                                                         GrWrapCacheable cacheable) {
    GrVkImageInfo imageInfo;
    if (!backendTex.getVkImageInfo(&imageInfo)) {
        return nullptr;
    }

    if (!check_image_info(this->vkCaps(), imageInfo, kAdopt_GrWrapOwnership == ownership,
                          this->queueIndex())) {
        return nullptr;
    }

    if (!check_tex_image_info(this->vkCaps(), imageInfo)) {
        return nullptr;
    }
    // If sampleCnt is > 1 we will create an intermediate MSAA VkImage and then resolve into
    // the wrapped VkImage.
    bool resolveOnly = sampleCnt > 1;
    if (!check_rt_image_info(this->vkCaps(), imageInfo, resolveOnly)) {
        return nullptr;
    }

    if (backendTex.isProtected() && (fProtectedContext == GrProtected::kNo)) {
        return nullptr;
    }

    sampleCnt = this->vkCaps().getRenderTargetSampleCount(sampleCnt, imageInfo.fFormat);

    sk_sp<GrBackendSurfaceMutableStateImpl> mutableState = backendTex.getMutableState();
    SkASSERT(mutableState);

    return GrVkTextureRenderTarget::MakeWrappedTextureRenderTarget(this, backendTex.dimensions(),
                                                                   sampleCnt, ownership, cacheable,
                                                                   imageInfo,
                                                                   std::move(mutableState));
}

sk_sp<GrRenderTarget> GrVkGpu::onWrapBackendRenderTarget(const GrBackendRenderTarget& backendRT) {
    GrVkImageInfo info;
    if (!backendRT.getVkImageInfo(&info)) {
        return nullptr;
    }

    if (!check_image_info(this->vkCaps(), info, false, this->queueIndex())) {
        return nullptr;
    }

    // We will always render directly to this VkImage.
    constexpr bool kResolveOnly = false;
    if (!check_rt_image_info(this->vkCaps(), info, kResolveOnly)) {
        return nullptr;
    }

    if (backendRT.isProtected() && (fProtectedContext == GrProtected::kNo)) {
        return nullptr;
    }

    sk_sp<GrBackendSurfaceMutableStateImpl> mutableState = backendRT.getMutableState();
    SkASSERT(mutableState);

    sk_sp<GrVkRenderTarget> tgt = GrVkRenderTarget::MakeWrappedRenderTarget(
            this, backendRT.dimensions(), backendRT.sampleCnt(), info, std::move(mutableState));

    // We don't allow the client to supply a premade stencil buffer. We always create one if needed.
    SkASSERT(!backendRT.stencilBits());
    if (tgt) {
        SkASSERT(tgt->canAttemptStencilAttachment(tgt->numSamples() > 1));
    }

    return std::move(tgt);
}

sk_sp<GrRenderTarget> GrVkGpu::onWrapVulkanSecondaryCBAsRenderTarget(
        const SkImageInfo& imageInfo, const GrVkDrawableInfo& vkInfo) {
    int maxSize = this->caps()->maxTextureSize();
    if (imageInfo.width() > maxSize || imageInfo.height() > maxSize) {
        return nullptr;
    }

    GrBackendFormat backendFormat = GrBackendFormat::MakeVk(vkInfo.fFormat);
    if (!backendFormat.isValid()) {
        return nullptr;
    }
    int sampleCnt = this->vkCaps().getRenderTargetSampleCount(1, vkInfo.fFormat);
    if (!sampleCnt) {
        return nullptr;
    }

    return GrVkRenderTarget::MakeSecondaryCBRenderTarget(this, imageInfo.dimensions(), vkInfo);
}

bool GrVkGpu::loadMSAAFromResolve(GrVkCommandBuffer* commandBuffer,
                                  const GrVkRenderPass& renderPass,
                                  GrAttachment* dst,
                                  GrVkImage* src,
                                  const SkIRect& srcRect) {
    return fMSAALoadManager.loadMSAAFromResolve(this, commandBuffer, renderPass, dst, src, srcRect);
}

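// Regenerates all mip levels from level 0 by repeatedly blitting each level into the next one
// down. This requires blit support for the format; linearly tiled textures are rejected since
// they can't have mipmaps.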
bool GrVkGpu::onRegenerateMipMapLevels(GrTexture* tex) {
    if (!this->currentCommandBuffer()) {
        return false;
    }
    auto* vkTex = static_cast<GrVkTexture*>(tex)->textureImage();
    // don't do anything for linearly tiled textures (can't have mipmaps)
    if (vkTex->isLinearTiled()) {
        SkDebugf("Trying to create mipmap for linear tiled texture\n");
        return false;
    }
    SkASSERT(tex->textureType() == GrTextureType::k2D);

    // determine if we can blit to and from this format
    const GrVkCaps& caps = this->vkCaps();
    if (!caps.formatCanBeDstofBlit(vkTex->imageFormat(), false) ||
        !caps.formatCanBeSrcofBlit(vkTex->imageFormat(), false) ||
        !caps.mipmapSupport()) {
        return false;
    }

    int width = tex->width();
    int height = tex->height();
    VkImageBlit blitRegion;
    memset(&blitRegion, 0, sizeof(VkImageBlit));

    // SkMipmap doesn't include the base level in the level count so we have to add 1
    uint32_t levelCount = SkMipmap::ComputeLevelCount(tex->width(), tex->height()) + 1;
    SkASSERT(levelCount == vkTex->mipLevels());

    // change layout of the layers so we can write to them.
    vkTex->setImageLayout(this, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_ACCESS_TRANSFER_WRITE_BIT,
                          VK_PIPELINE_STAGE_TRANSFER_BIT, false);

    // setup memory barrier
    SkASSERT(GrVkFormatIsSupported(vkTex->imageFormat()));
    VkImageMemoryBarrier imageMemoryBarrier = {
            VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,  // sType
            nullptr,                                 // pNext
            VK_ACCESS_TRANSFER_WRITE_BIT,            // srcAccessMask
            VK_ACCESS_TRANSFER_READ_BIT,             // dstAccessMask
            VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,    // oldLayout
            VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,    // newLayout
            VK_QUEUE_FAMILY_IGNORED,                 // srcQueueFamilyIndex
            VK_QUEUE_FAMILY_IGNORED,                 // dstQueueFamilyIndex
            vkTex->image(),                          // image
            {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}  // subresourceRange
    };

    // Blit the miplevels
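    // Each iteration transitions level (mipLevel - 1) to TRANSFER_SRC via the barrier below, then
    // blits it into level mipLevel at half size; dimensions halve per level and clamp at 1.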
    uint32_t mipLevel = 1;
    while (mipLevel < levelCount) {
        int prevWidth = width;
        int prevHeight = height;
        width = std::max(1, width / 2);
        height = std::max(1, height / 2);

        imageMemoryBarrier.subresourceRange.baseMipLevel = mipLevel - 1;
        this->addImageMemoryBarrier(vkTex->resource(), VK_PIPELINE_STAGE_TRANSFER_BIT,
                                    VK_PIPELINE_STAGE_TRANSFER_BIT, false, &imageMemoryBarrier);

        blitRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, mipLevel - 1, 0, 1 };
        blitRegion.srcOffsets[0] = { 0, 0, 0 };
        blitRegion.srcOffsets[1] = { prevWidth, prevHeight, 1 };
        blitRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, mipLevel, 0, 1 };
        blitRegion.dstOffsets[0] = { 0, 0, 0 };
        blitRegion.dstOffsets[1] = { width, height, 1 };
        this->currentCommandBuffer()->blitImage(this,
                                                vkTex->resource(),
                                                vkTex->image(),
                                                VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                                vkTex->resource(),
                                                vkTex->image(),
                                                VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                                1,
                                                &blitRegion,
                                                VK_FILTER_LINEAR);
        ++mipLevel;
    }
    if (levelCount > 1) {
        // This barrier logically is not needed, but it changes the final level to the same layout
        // as all the others, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL. This makes tracking of the
        // layouts and future layout changes easier. The alternative here would be to track layout
        // and memory accesses per layer, which doesn't seem worth it.
        imageMemoryBarrier.subresourceRange.baseMipLevel = mipLevel - 1;
        this->addImageMemoryBarrier(vkTex->resource(), VK_PIPELINE_STAGE_TRANSFER_BIT,
                                    VK_PIPELINE_STAGE_TRANSFER_BIT, false, &imageMemoryBarrier);
        vkTex->updateImageLayout(VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
    }
    return true;
}

////////////////////////////////////////////////////////////////////////////////

sk_sp<GrAttachment> GrVkGpu::makeStencilAttachment(const GrBackendFormat& /*colorFormat*/,
                                                   SkISize dimensions, int numStencilSamples) {
    VkFormat sFmt = this->vkCaps().preferredStencilFormat();

    fStats.incStencilAttachmentCreates();
    return GrVkImage::MakeStencil(this, dimensions, numStencilSamples, sFmt);
}

sk_sp<GrAttachment> GrVkGpu::makeMSAAAttachment(SkISize dimensions,
                                                const GrBackendFormat& format,
                                                int numSamples,
                                                GrProtected isProtected,
                                                GrMemoryless memoryless) {
    VkFormat pixelFormat;
    SkAssertResult(format.asVkFormat(&pixelFormat));
    SkASSERT(!GrVkFormatIsCompressed(pixelFormat));
    SkASSERT(this->vkCaps().isFormatRenderable(pixelFormat, numSamples));

    fStats.incMSAAAttachmentCreates();
    return GrVkImage::MakeMSAA(this, dimensions, numSamples, pixelFormat, isProtected, memoryless);
}

////////////////////////////////////////////////////////////////////////////////

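// Copies each mip level's pixels into mapped staging memory at its precomputed offset,
// tightening each row to the minimal pitch as it goes.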
bool copy_src_data(char* mapPtr,
                   VkFormat vkFormat,
                   const SkTArray<size_t>& individualMipOffsets,
                   const GrPixmap srcData[],
                   int numMipLevels) {
    SkASSERT(srcData && numMipLevels);
    SkASSERT(!GrVkFormatIsCompressed(vkFormat));
    SkASSERT(individualMipOffsets.count() == numMipLevels);
    SkASSERT(mapPtr);

    size_t bytesPerPixel = GrVkFormatBytesPerBlock(vkFormat);

    for (int level = 0; level < numMipLevels; ++level) {
        const size_t trimRB = srcData[level].info().width() * bytesPerPixel;

        SkRectMemcpy(mapPtr + individualMipOffsets[level], trimRB,
                     srcData[level].addr(), srcData[level].rowBytes(),
                     trimRB, srcData[level].height());
    }
    return true;
}

bool GrVkGpu::createVkImageForBackendSurface(VkFormat vkFormat,
                                             SkISize dimensions,
                                             int sampleCnt,
                                             GrTexturable texturable,
                                             GrRenderable renderable,
                                             GrMipmapped mipMapped,
                                             GrVkImageInfo* info,
                                             GrProtected isProtected) {
    SkASSERT(texturable == GrTexturable::kYes || renderable == GrRenderable::kYes);

    if (fProtectedContext != isProtected) {
        return false;
    }

    if (texturable == GrTexturable::kYes && !fVkCaps->isVkFormatTexturable(vkFormat)) {
        return false;
    }

    // MSAA images are only currently used by createTestingOnlyBackendRenderTarget.
    if (sampleCnt > 1 && (texturable == GrTexturable::kYes || renderable == GrRenderable::kNo)) {
        return false;
    }

    if (renderable == GrRenderable::kYes) {
        sampleCnt = fVkCaps->getRenderTargetSampleCount(sampleCnt, vkFormat);
        if (!sampleCnt) {
            return false;
        }
    }

    int numMipLevels = 1;
    if (mipMapped == GrMipmapped::kYes) {
        numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1;
    }

    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
                                   VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    if (texturable == GrTexturable::kYes) {
        usageFlags |= VK_IMAGE_USAGE_SAMPLED_BIT;
    }
    if (renderable == GrRenderable::kYes) {
        usageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
        // We always make our render targets support being used as input attachments
        usageFlags |= VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
    }

    GrVkImage::ImageDesc imageDesc;
    imageDesc.fImageType = VK_IMAGE_TYPE_2D;
    imageDesc.fFormat = vkFormat;
    imageDesc.fWidth = dimensions.width();
    imageDesc.fHeight = dimensions.height();
    imageDesc.fLevels = numMipLevels;
    imageDesc.fSamples = sampleCnt;
    imageDesc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
    imageDesc.fUsageFlags = usageFlags;
    imageDesc.fMemProps = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    imageDesc.fIsProtected = fProtectedContext;

    if (!GrVkImage::InitImageInfo(this, imageDesc, info)) {
        SkDebugf("Failed to init image info\n");
        return false;
    }

    return true;
}

bool GrVkGpu::onClearBackendTexture(const GrBackendTexture& backendTexture,
                                    sk_sp<GrRefCntedCallback> finishedCallback,
                                    std::array<float, 4> color) {
    GrVkImageInfo info;
    SkAssertResult(backendTexture.getVkImageInfo(&info));

    sk_sp<GrBackendSurfaceMutableStateImpl> mutableState = backendTexture.getMutableState();
    SkASSERT(mutableState);
    sk_sp<GrVkTexture> texture =
                GrVkTexture::MakeWrappedTexture(this, backendTexture.dimensions(),
                                                kBorrow_GrWrapOwnership, GrWrapCacheable::kNo,
                                                kRW_GrIOType, info, std::move(mutableState));
    if (!texture) {
        return false;
    }
    GrVkImage* texImage = texture->textureImage();

    GrVkPrimaryCommandBuffer* cmdBuffer = this->currentCommandBuffer();
    if (!cmdBuffer) {
        return false;
    }

    texImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                             VK_ACCESS_TRANSFER_WRITE_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT,
                             false);

    // CmdClearColorImage doesn't work for compressed formats
    SkASSERT(!GrVkFormatIsCompressed(info.fFormat));

    VkClearColorValue vkColor;
    // If we ever support SINT or UINT formats this needs to be updated to use the int32 and
    // uint32 union members in those cases.
    vkColor.float32[0] = color[0];
    vkColor.float32[1] = color[1];
    vkColor.float32[2] = color[2];
    vkColor.float32[3] = color[3];
    VkImageSubresourceRange range;
    range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    range.baseArrayLayer = 0;
    range.baseMipLevel = 0;
    range.layerCount = 1;
    range.levelCount = info.fLevelCount;
    cmdBuffer->clearColorImage(this, texImage, &vkColor, 1, &range);

    // Change image layout to shader read since if we use this texture as a borrowed
    // texture within Ganesh we require that its layout be set to that
    texImage->setImageLayout(this, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                                  VK_ACCESS_SHADER_READ_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                                  false);

    if (finishedCallback) {
        this->addFinishedCallback(std::move(finishedCallback));
    }
    return true;
}

GrBackendTexture GrVkGpu::onCreateBackendTexture(SkISize dimensions,
                                                 const GrBackendFormat& format,
                                                 GrRenderable renderable,
                                                 GrMipmapped mipMapped,
                                                 GrProtected isProtected) {
    const GrVkCaps& caps = this->vkCaps();

    if (fProtectedContext != isProtected) {
        return {};
    }

    VkFormat vkFormat;
    if (!format.asVkFormat(&vkFormat)) {
        return {};
    }

    // TODO: move the texturability check up to GrGpu::createBackendTexture and just assert here
    if (!caps.isVkFormatTexturable(vkFormat)) {
        return {};
    }

    if (GrVkFormatNeedsYcbcrSampler(vkFormat)) {
        return {};
    }

    GrVkImageInfo info;
    if (!this->createVkImageForBackendSurface(vkFormat, dimensions, 1, GrTexturable::kYes,
                                              renderable, mipMapped, &info, isProtected)) {
        return {};
    }

    return GrBackendTexture(dimensions.width(), dimensions.height(), info);
}

GrBackendTexture GrVkGpu::onCreateCompressedBackendTexture(
        SkISize dimensions, const GrBackendFormat& format, GrMipmapped mipMapped,
        GrProtected isProtected) {
    return this->onCreateBackendTexture(dimensions, format, GrRenderable::kNo, mipMapped,
                                        isProtected);
}

bool GrVkGpu::onUpdateCompressedBackendTexture(const GrBackendTexture& backendTexture,
                                               sk_sp<GrRefCntedCallback> finishedCallback,
                                               const void* data,
                                               size_t size) {
    GrVkImageInfo info;
    SkAssertResult(backendTexture.getVkImageInfo(&info));

    sk_sp<GrBackendSurfaceMutableStateImpl> mutableState = backendTexture.getMutableState();
    SkASSERT(mutableState);
    sk_sp<GrVkTexture> texture = GrVkTexture::MakeWrappedTexture(this,
                                                                 backendTexture.dimensions(),
                                                                 kBorrow_GrWrapOwnership,
                                                                 GrWrapCacheable::kNo,
                                                                 kRW_GrIOType,
                                                                 info,
                                                                 std::move(mutableState));
    if (!texture) {
        return false;
    }

    GrVkPrimaryCommandBuffer* cmdBuffer = this->currentCommandBuffer();
    if (!cmdBuffer) {
        return false;
    }
    GrVkImage* image = texture->textureImage();
    image->setImageLayout(this,
                          VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                          VK_ACCESS_TRANSFER_WRITE_BIT,
                          VK_PIPELINE_STAGE_TRANSFER_BIT,
                          false);

    SkImage::CompressionType compression =
            GrBackendFormatToCompressionType(backendTexture.getBackendFormat());

    SkTArray<VkBufferImageCopy> regions;
    SkTArray<size_t> individualMipOffsets;
    GrStagingBufferManager::Slice slice;

    fill_in_compressed_regions(&fStagingBufferManager,
                               &regions,
                               &individualMipOffsets,
                               &slice,
                               compression,
                               info.fFormat,
                               backendTexture.dimensions(),
                               backendTexture.fMipmapped);

    if (!slice.fBuffer) {
        return false;
    }

    memcpy(slice.fOffsetMapPtr, data, size);

    cmdBuffer->addGrSurface(texture);
    // Copy the buffer to the image. This call takes the raw VkBuffer instead of a GrGpuBuffer
    // because we don't need the command buffer to ref the buffer here. This is because the
    // buffer is coming from the staging manager, which will make sure the command buffer holds
    // a ref on the buffer. This avoids having to add and remove a ref for every upload in the
    // frame.
    cmdBuffer->copyBufferToImage(this,
                                 static_cast<GrVkBuffer*>(slice.fBuffer)->vkBuffer(),
                                 image,
                                 image->currentLayout(),
                                 regions.count(),
                                 regions.begin());

    // Change image layout to shader read since if we use this texture as a borrowed
    // texture within Ganesh we require that its layout be set to that
    image->setImageLayout(this,
                          VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                          VK_ACCESS_SHADER_READ_BIT,
                          VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                          false);

    if (finishedCallback) {
        this->addFinishedCallback(std::move(finishedCallback));
    }
    return true;
}

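// Applies the layout/queue-family transition described by a GrVkSharedImageInfo to an image,
// preserving the image's current layout when the new state requests VK_IMAGE_LAYOUT_UNDEFINED.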
void set_layout_and_queue_from_mutable_state(GrVkGpu* gpu, GrVkImage* image,
                                             const GrVkSharedImageInfo& newInfo) {
    // Even though we use these helpers internally to get src access flags and stages, they
    // can also be used for general dst flags since we don't know exactly what the client
    // plans on using the image for.
    VkImageLayout newLayout = newInfo.getImageLayout();
    if (newLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
        newLayout = image->currentLayout();
    }
    VkPipelineStageFlags dstStage = GrVkImage::LayoutToPipelineSrcStageFlags(newLayout);
    VkAccessFlags dstAccess = GrVkImage::LayoutToSrcAccessMask(newLayout);

    uint32_t currentQueueFamilyIndex = image->currentQueueFamilyIndex();
    uint32_t newQueueFamilyIndex = newInfo.getQueueFamilyIndex();
    auto isSpecialQueue = [](uint32_t queueFamilyIndex) {
        return queueFamilyIndex == VK_QUEUE_FAMILY_EXTERNAL ||
               queueFamilyIndex == VK_QUEUE_FAMILY_FOREIGN_EXT;
    };
    if (isSpecialQueue(currentQueueFamilyIndex) && isSpecialQueue(newQueueFamilyIndex)) {
        // It is illegal to have both the new and old queue be special queue families (i.e. external
        // or foreign).
        return;
    }

    image->setImageLayoutAndQueueIndex(gpu, newLayout, dstAccess, dstStage, false,
                                       newQueueFamilyIndex);
}

bool GrVkGpu::setBackendSurfaceState(GrVkImageInfo info,
                                     sk_sp<GrBackendSurfaceMutableStateImpl> currentState,
                                     SkISize dimensions,
                                     const GrVkSharedImageInfo& newInfo,
                                     GrBackendSurfaceMutableState* previousState,
                                     sk_sp<GrRefCntedCallback> finishedCallback) {
    sk_sp<GrVkImage> texture = GrVkImage::MakeWrapped(this,
                                                      dimensions,
                                                      info,
                                                      std::move(currentState),
                                                      GrVkImage::UsageFlags::kColorAttachment,
                                                      kBorrow_GrWrapOwnership,
                                                      GrWrapCacheable::kNo,
                                                      /*forSecondaryCB=*/false);
    SkASSERT(texture);
    if (!texture) {
        return false;
    }
    if (previousState) {
        previousState->setVulkanState(texture->currentLayout(),
                                      texture->currentQueueFamilyIndex());
    }
    set_layout_and_queue_from_mutable_state(this, texture.get(), newInfo);
    if (finishedCallback) {
        this->addFinishedCallback(std::move(finishedCallback));
    }
    return true;
}

bool GrVkGpu::setBackendTextureState(const GrBackendTexture& backendTexture,
                                     const GrBackendSurfaceMutableState& newState,
                                     GrBackendSurfaceMutableState* previousState,
                                     sk_sp<GrRefCntedCallback> finishedCallback) {
    GrVkImageInfo info;
    SkAssertResult(backendTexture.getVkImageInfo(&info));
    sk_sp<GrBackendSurfaceMutableStateImpl> currentState = backendTexture.getMutableState();
    SkASSERT(currentState);
    SkASSERT(newState.isValid() && newState.fBackend == GrBackend::kVulkan);
    return this->setBackendSurfaceState(info, std::move(currentState), backendTexture.dimensions(),
                                        newState.fVkState, previousState,
                                        std::move(finishedCallback));
}

bool GrVkGpu::setBackendRenderTargetState(const GrBackendRenderTarget& backendRenderTarget,
                                          const GrBackendSurfaceMutableState& newState,
                                          GrBackendSurfaceMutableState* previousState,
                                          sk_sp<GrRefCntedCallback> finishedCallback) {
    GrVkImageInfo info;
    SkAssertResult(backendRenderTarget.getVkImageInfo(&info));
    sk_sp<GrBackendSurfaceMutableStateImpl> currentState = backendRenderTarget.getMutableState();
    SkASSERT(currentState);
    SkASSERT(newState.fBackend == GrBackend::kVulkan);
    return this->setBackendSurfaceState(info, std::move(currentState),
                                        backendRenderTarget.dimensions(), newState.fVkState,
                                        previousState, std::move(finishedCallback));
}

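// Issues a self-dependency barrier on the render target's color attachment: for advanced
// (non-coherent) blends the prior fragment output must become visible to blend reads, and for
// texture barriers it must become visible to input-attachment reads in the fragment shader.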
void GrVkGpu::xferBarrier(GrRenderTarget* rt, GrXferBarrierType barrierType) {
    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt);
    VkPipelineStageFlags dstStage;
    VkAccessFlags dstAccess;
    if (barrierType == kBlend_GrXferBarrierType) {
        dstStage = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
        dstAccess = VK_ACCESS_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT;
    } else {
        SkASSERT(barrierType == kTexture_GrXferBarrierType);
        dstStage = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
        dstAccess = VK_ACCESS_INPUT_ATTACHMENT_READ_BIT;
    }
    GrVkImage* image = vkRT->colorAttachment();
    VkImageMemoryBarrier barrier;
    barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
    barrier.pNext = nullptr;
    barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
    barrier.dstAccessMask = dstAccess;
    barrier.oldLayout = image->currentLayout();
    barrier.newLayout = barrier.oldLayout;
    barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.image = image->image();
    barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, image->mipLevels(), 0, 1};
    this->addImageMemoryBarrier(image->resource(),
                                VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                                dstStage, true, &barrier);
}

void GrVkGpu::deleteBackendTexture(const GrBackendTexture& tex) {
    SkASSERT(GrBackendApi::kVulkan == tex.fBackend);

    GrVkImageInfo info;
    if (tex.getVkImageInfo(&info)) {
        GrVkImage::DestroyImageInfo(this, const_cast<GrVkImageInfo*>(&info));
    }
}

bool GrVkGpu::compile(const GrProgramDesc& desc, const GrProgramInfo& programInfo) {
    GrVkRenderPass::AttachmentsDescriptor attachmentsDescriptor;
    GrVkRenderPass::AttachmentFlags attachmentFlags;
    GrVkRenderTarget::ReconstructAttachmentsDescriptor(this->vkCaps(), programInfo,
                                                       &attachmentsDescriptor, &attachmentFlags);

    GrVkRenderPass::SelfDependencyFlags selfDepFlags = GrVkRenderPass::SelfDependencyFlags::kNone;
    if (programInfo.renderPassBarriers() & GrXferBarrierFlags::kBlend) {
        selfDepFlags |= GrVkRenderPass::SelfDependencyFlags::kForNonCoherentAdvBlend;
    }
    if (programInfo.renderPassBarriers() & GrXferBarrierFlags::kTexture) {
        selfDepFlags |= GrVkRenderPass::SelfDependencyFlags::kForInputAttachment;
    }

    GrVkRenderPass::LoadFromResolve loadFromResolve = GrVkRenderPass::LoadFromResolve::kNo;
    if (this->vkCaps().programInfoWillUseDiscardableMSAA(programInfo) &&
        programInfo.colorLoadOp() == GrLoadOp::kLoad) {
        loadFromResolve = GrVkRenderPass::LoadFromResolve::kLoad;
    }
    sk_sp<const GrVkRenderPass> renderPass(this->resourceProvider().findCompatibleRenderPass(
            &attachmentsDescriptor, attachmentFlags, selfDepFlags, loadFromResolve));
    if (!renderPass) {
        return false;
    }

    GrThreadSafePipelineBuilder::Stats::ProgramCacheResult stat;

    auto pipelineState = this->resourceProvider().findOrCreateCompatiblePipelineState(
                                    desc,
                                    programInfo,
                                    renderPass->vkRenderPass(),
                                    &stat);
    if (!pipelineState) {
        return false;
    }

    return stat != GrThreadSafePipelineBuilder::Stats::ProgramCacheResult::kHit;
}

#if GR_TEST_UTILS
bool GrVkGpu::isTestingOnlyBackendTexture(const GrBackendTexture& tex) const {
    SkASSERT(GrBackendApi::kVulkan == tex.fBackend);

    GrVkImageInfo backend;
    if (!tex.getVkImageInfo(&backend)) {
        return false;
    }

    if (backend.fImage && backend.fAlloc.fMemory) {
        VkMemoryRequirements req;
        memset(&req, 0, sizeof(req));
        GR_VK_CALL(this->vkInterface(), GetImageMemoryRequirements(fDevice,
                                                                   backend.fImage,
                                                                   &req));
        // TODO: find a better check
        // This will probably fail with a different driver
        return (req.size > 0) && (req.size <= 8192 * 8192);
    }

    return false;
}

GrBackendRenderTarget GrVkGpu::createTestingOnlyBackendRenderTarget(SkISize dimensions,
                                                                    GrColorType ct,
                                                                    int sampleCnt,
                                                                    GrProtected isProtected) {
    if (dimensions.width()  > this->caps()->maxRenderTargetSize() ||
        dimensions.height() > this->caps()->maxRenderTargetSize()) {
        return {};
    }

    VkFormat vkFormat = this->vkCaps().getFormatFromColorType(ct);

    GrVkImageInfo info;
    if (!this->createVkImageForBackendSurface(vkFormat, dimensions, sampleCnt, GrTexturable::kNo,
                                              GrRenderable::kYes, GrMipmapped::kNo, &info,
                                              isProtected)) {
        return {};
    }
    return GrBackendRenderTarget(dimensions.width(), dimensions.height(), 0, info);
}

void GrVkGpu::deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget& rt) {
    SkASSERT(GrBackendApi::kVulkan == rt.fBackend);

    GrVkImageInfo info;
    if (rt.getVkImageInfo(&info)) {
        // something in the command buffer may still be using this, so force submit
        SkAssertResult(this->submitCommandBuffer(kForce_SyncQueue));
        GrVkImage::DestroyImageInfo(this, const_cast<GrVkImageInfo*>(&info));
    }
}
#endif

////////////////////////////////////////////////////////////////////////////////

void GrVkGpu::addBufferMemoryBarrier(const GrManagedResource* resource,
                                     VkPipelineStageFlags srcStageMask,
                                     VkPipelineStageFlags dstStageMask,
                                     bool byRegion,
                                     VkBufferMemoryBarrier* barrier) const {
    if (!this->currentCommandBuffer()) {
        return;
    }
    SkASSERT(resource);
    this->currentCommandBuffer()->pipelineBarrier(this,
                                                  resource,
                                                  srcStageMask,
                                                  dstStageMask,
                                                  byRegion,
                                                  GrVkCommandBuffer::kBufferMemory_BarrierType,
                                                  barrier);
}

void GrVkGpu::addBufferMemoryBarrier(VkPipelineStageFlags srcStageMask,
                                     VkPipelineStageFlags dstStageMask,
                                     bool byRegion,
                                     VkBufferMemoryBarrier* barrier) const {
    if (!this->currentCommandBuffer()) {
        return;
    }
    // We don't pass in a resource here to the command buffer. The command buffer is only using it
    // to hold a ref, and every place where we add a buffer memory barrier we are doing some other
    // command with the buffer on the command buffer. Thus those other commands will already cause
    // the command buffer to hold a ref to the buffer.
    this->currentCommandBuffer()->pipelineBarrier(this,
                                                  /*resource=*/nullptr,
                                                  srcStageMask,
                                                  dstStageMask,
                                                  byRegion,
                                                  GrVkCommandBuffer::kBufferMemory_BarrierType,
                                                  barrier);
}

void GrVkGpu::addImageMemoryBarrier(const GrManagedResource* resource,
                                    VkPipelineStageFlags srcStageMask,
                                    VkPipelineStageFlags dstStageMask,
                                    bool byRegion,
                                    VkImageMemoryBarrier* barrier) const {
    // If we are in the middle of destroying or abandoning the context we may hit a release proc
    // that triggers the destruction of a GrVkImage. This could cause us to try and transfer the
    // VkImage back to the original queue. In this state we don't submit any more work and we may
    // not have a current command buffer. Thus we won't do the queue transfer.
    if (!this->currentCommandBuffer()) {
        return;
    }
    SkASSERT(resource);
    this->currentCommandBuffer()->pipelineBarrier(this,
                                                  resource,
                                                  srcStageMask,
                                                  dstStageMask,
                                                  byRegion,
                                                  GrVkCommandBuffer::kImageMemory_BarrierType,
                                                  barrier);
}

void GrVkGpu::prepareSurfacesForBackendAccessAndStateUpdates(
        SkSpan<GrSurfaceProxy*> proxies,
        SkSurface::BackendSurfaceAccess access,
        const GrBackendSurfaceMutableState* newState) {
    // Submit the current command buffer to the Queue. Whether we inserted semaphores or not does
    // not affect what we do here.
    if (!proxies.empty() && (access == SkSurface::BackendSurfaceAccess::kPresent || newState)) {
        // We currently don't support passing in new surface state for multiple proxies here. The
        // only time we have multiple proxies is if we are flushing a yuv SkImage, which won't have
        // state updates anyway. Additionally, if we have a newState then we must not have any
        // BackendSurfaceAccess.
        SkASSERT(!newState || proxies.size() == 1);
        SkASSERT(!newState || access == SkSurface::BackendSurfaceAccess::kNoAccess);
        GrVkImage* image;
        for (GrSurfaceProxy* proxy : proxies) {
            SkASSERT(proxy->isInstantiated());
            if (GrTexture* tex = proxy->peekTexture()) {
                image = static_cast<GrVkTexture*>(tex)->textureImage();
            } else {
                GrRenderTarget* rt = proxy->peekRenderTarget();
                SkASSERT(rt);
                GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt);
                image = vkRT->externalAttachment();
            }
            if (newState) {
                const GrVkSharedImageInfo& newInfo = newState->fVkState;
                set_layout_and_queue_from_mutable_state(this, image, newInfo);
            } else {
                SkASSERT(access == SkSurface::BackendSurfaceAccess::kPresent);
                image->prepareForPresent(this);
            }
        }
    }
}

void GrVkGpu::addFinishedProc(GrGpuFinishedProc finishedProc,
                              GrGpuFinishedContext finishedContext) {
    SkASSERT(finishedProc);
    this->addFinishedCallback(GrRefCntedCallback::Make(finishedProc, finishedContext));
}

void GrVkGpu::addFinishedCallback(sk_sp<GrRefCntedCallback> finishedCallback) {
    SkASSERT(finishedCallback);
    fResourceProvider.addFinishedProcToActiveCommandBuffers(std::move(finishedCallback));
}

void GrVkGpu::takeOwnershipOfBuffer(sk_sp<GrGpuBuffer> buffer) {
    this->currentCommandBuffer()->addGrBuffer(std::move(buffer));
}

bool GrVkGpu::onSubmitToGpu(bool syncCpu) {
    if (syncCpu) {
        return this->submitCommandBuffer(kForce_SyncQueue);
    } else {
        return this->submitCommandBuffer(kSkip_SyncQueue);
    }
}

void GrVkGpu::finishOutstandingGpuWork() {
    VK_CALL(QueueWaitIdle(fQueue));

    if (this->vkCaps().mustSyncCommandBuffersWithQueue()) {
        fResourceProvider.forceSyncAllCommandBuffers();
    }
}

void GrVkGpu::onReportSubmitHistograms() {
#if SK_HISTOGRAMS_ENABLED
    uint64_t allocatedMemory = fMemoryAllocator->totalAllocatedMemory();
    uint64_t usedMemory = fMemoryAllocator->totalUsedMemory();
    SkASSERT(usedMemory <= allocatedMemory);
    if (allocatedMemory > 0) {
        SK_HISTOGRAM_PERCENTAGE("VulkanMemoryAllocator.PercentUsed",
                                (usedMemory * 100) / allocatedMemory);
    }
    // allocatedMemory is in bytes and needs to be reported in kilobytes. SK_HISTOGRAM_MEMORY_KB
    // supports samples up to around 500MB, which should cover the amounts of memory we allocate.
    SK_HISTOGRAM_MEMORY_KB("VulkanMemoryAllocator.AmountAllocated", allocatedMemory >> 10);
#endif
}

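// Copies a rectangle between two images with vkCmdCopyImage. The debug-only canCopyImage() check
// below enforces the compatibility rules (matching sample counts, copy-compatible formats, etc.);
// both images are transitioned to the transfer layouts the copy command requires.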
void GrVkGpu::copySurfaceAsCopyImage(GrSurface* dst,
                                     GrSurface* src,
                                     GrVkImage* dstImage,
                                     GrVkImage* srcImage,
                                     const SkIRect& srcRect,
                                     const SkIPoint& dstPoint) {
    if (!this->currentCommandBuffer()) {
        return;
    }

#ifdef SK_DEBUG
    int dstSampleCnt = dstImage->numSamples();
    int srcSampleCnt = srcImage->numSamples();
    bool dstHasYcbcr = dstImage->ycbcrConversionInfo().isValid();
    bool srcHasYcbcr = srcImage->ycbcrConversionInfo().isValid();
    VkFormat dstFormat = dstImage->imageFormat();
    VkFormat srcFormat;
    SkAssertResult(src->backendFormat().asVkFormat(&srcFormat));
    SkASSERT(this->vkCaps().canCopyImage(dstFormat, dstSampleCnt, dstHasYcbcr,
                                         srcFormat, srcSampleCnt, srcHasYcbcr));
#endif
    if (src->isProtected() && !dst->isProtected()) {
        SkDebugf("Can't copy from protected memory to non-protected\n");
        return;
    }

    // These flags are for flushing/invalidating caches and for the dst image it doesn't matter if
    // the cache is flushed since it is only being written to.
    dstImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                             VK_ACCESS_TRANSFER_WRITE_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT,
                             false);

    srcImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                             VK_ACCESS_TRANSFER_READ_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT,
                             false);

    VkImageCopy copyRegion;
    memset(&copyRegion, 0, sizeof(VkImageCopy));
    copyRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    copyRegion.srcOffset = { srcRect.fLeft, srcRect.fTop, 0 };
    copyRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    copyRegion.dstOffset = { dstPoint.fX, dstPoint.fY, 0 };
    copyRegion.extent = { (uint32_t)srcRect.width(), (uint32_t)srcRect.height(), 1 };

    this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(src));
    this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(dst));
    this->currentCommandBuffer()->copyImage(this,
                                            srcImage,
                                            VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                            dstImage,
                                            VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                            1,
                                            &copyRegion);

    SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
                                        srcRect.width(), srcRect.height());
    // The rect is already in device space so we pass in kTopLeft so no flip is done.
    this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect);
}

void GrVkGpu::copySurfaceAsBlit(GrSurface* dst,
                                GrSurface* src,
                                GrVkImage* dstImage,
                                GrVkImage* srcImage,
                                const SkIRect& srcRect,
                                const SkIPoint& dstPoint) {
    if (!this->currentCommandBuffer()) {
        return;
    }

#ifdef SK_DEBUG
    int dstSampleCnt = dstImage->numSamples();
    int srcSampleCnt = srcImage->numSamples();
    bool dstHasYcbcr = dstImage->ycbcrConversionInfo().isValid();
    bool srcHasYcbcr = srcImage->ycbcrConversionInfo().isValid();
    VkFormat dstFormat = dstImage->imageFormat();
    VkFormat srcFormat;
    SkAssertResult(src->backendFormat().asVkFormat(&srcFormat));
    SkASSERT(this->vkCaps().canCopyAsBlit(dstFormat,
                                          dstSampleCnt,
                                          dstImage->isLinearTiled(),
                                          dstHasYcbcr,
                                          srcFormat,
                                          srcSampleCnt,
                                          srcImage->isLinearTiled(),
                                          srcHasYcbcr));
#endif
    if (src->isProtected() && !dst->isProtected()) {
        SkDebugf("Can't copy from protected memory to non-protected\n");
        return;
    }

    dstImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                             VK_ACCESS_TRANSFER_WRITE_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT,
                             false);

    srcImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                             VK_ACCESS_TRANSFER_READ_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT,
                             false);

    // The dst rect has the same dimensions as the src rect, positioned at dstPoint; both are
    // already in device space, so no flipping is needed.
    SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY, srcRect.width(),
                                        srcRect.height());

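    // Unlike VkImageCopy, VkImageBlit takes two corner offsets per image rather than an offset
    // plus extent, which is how blits express scaling; here the regions match, so none occurs.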
    VkImageBlit blitRegion;
    memset(&blitRegion, 0, sizeof(VkImageBlit));
    blitRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    blitRegion.srcOffsets[0] = { srcRect.fLeft, srcRect.fTop, 0 };
    blitRegion.srcOffsets[1] = { srcRect.fRight, srcRect.fBottom, 1 };
    blitRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    blitRegion.dstOffsets[0] = { dstRect.fLeft, dstRect.fTop, 0 };
    blitRegion.dstOffsets[1] = { dstRect.fRight, dstRect.fBottom, 1 };

    this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(src));
    this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(dst));
    this->currentCommandBuffer()->blitImage(this,
                                            *srcImage,
                                            *dstImage,
                                            1,
                                            &blitRegion,
                                            VK_FILTER_NEAREST); // We never scale, so any filter works.

    // The rect is already in device space, so we pass kTopLeft and no flip is done.
    this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect);
}

void GrVkGpu::copySurfaceAsResolve(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
                                   const SkIPoint& dstPoint) {
    if (src->isProtected() && !dst->isProtected()) {
        SkDebugf("Can't copy from protected memory to non-protected\n");
        return;
    }
    GrVkRenderTarget* srcRT = static_cast<GrVkRenderTarget*>(src->asRenderTarget());
    this->resolveImage(dst, srcRT, srcRect, dstPoint);
    SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
                                        srcRect.width(), srcRect.height());
    // The rect is already in device space, so we pass kTopLeft and no flip is done.
    this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect);
}

bool GrVkGpu::onCopySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
                            const SkIPoint& dstPoint) {
#ifdef SK_DEBUG
    if (GrVkRenderTarget* srcRT = static_cast<GrVkRenderTarget*>(src->asRenderTarget())) {
        SkASSERT(!srcRT->wrapsSecondaryCommandBuffer());
    }
    if (GrVkRenderTarget* dstRT = static_cast<GrVkRenderTarget*>(dst->asRenderTarget())) {
        SkASSERT(!dstRT->wrapsSecondaryCommandBuffer());
    }
#endif
    if (src->isProtected() && !dst->isProtected()) {
        SkDebugf("Can't copy from protected memory to non-protected\n");
        return false;
    }

    GrVkImage* dstImage;
    GrVkImage* srcImage;
    GrRenderTarget* dstRT = dst->asRenderTarget();
    if (dstRT) {
        GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(dstRT);
        if (vkRT->wrapsSecondaryCommandBuffer()) {
            return false;
        }
        // This will technically return true for single sample rts that used DMSAA, in which case
        // we don't have to pick the resolve attachment. But in that case the resolve and color
        // attachments will be the same anyway.
        if (this->vkCaps().renderTargetSupportsDiscardableMSAA(vkRT)) {
            dstImage = vkRT->resolveAttachment();
        } else {
            dstImage = vkRT->colorAttachment();
        }
    } else if (dst->asTexture()) {
        dstImage = static_cast<GrVkTexture*>(dst->asTexture())->textureImage();
    } else {
        // The surface is already a GrAttachment, which is itself a GrVkImage.
        dstImage = static_cast<GrVkImage*>(dst);
    }
    GrRenderTarget* srcRT = src->asRenderTarget();
    if (srcRT) {
        GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(srcRT);
        // This will technically return true for single sample rts that used DMSAA, in which case
        // we don't have to pick the resolve attachment. But in that case the resolve and color
        // attachments will be the same anyway.
        if (this->vkCaps().renderTargetSupportsDiscardableMSAA(vkRT)) {
            srcImage = vkRT->resolveAttachment();
        } else {
            srcImage = vkRT->colorAttachment();
        }
    } else if (src->asTexture()) {
        srcImage = static_cast<GrVkTexture*>(src->asTexture())->textureImage();
    } else {
        // The surface is already a GrAttachment, which is itself a GrVkImage.
        srcImage = static_cast<GrVkImage*>(src);
    }

    VkFormat dstFormat = dstImage->imageFormat();
    VkFormat srcFormat = srcImage->imageFormat();

    int dstSampleCnt = dstImage->numSamples();
    int srcSampleCnt = srcImage->numSamples();

    bool dstHasYcbcr = dstImage->ycbcrConversionInfo().isValid();
    bool srcHasYcbcr = srcImage->ycbcrConversionInfo().isValid();

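    // Try the cheapest path the caps allow first: an MSAA resolve, then a straight
    // vkCmdCopyImage, and finally vkCmdBlitImage as the most permissive fallback.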
    if (this->vkCaps().canCopyAsResolve(dstFormat, dstSampleCnt, dstHasYcbcr,
                                        srcFormat, srcSampleCnt, srcHasYcbcr)) {
        this->copySurfaceAsResolve(dst, src, srcRect, dstPoint);
        return true;
    }

    if (this->vkCaps().canCopyImage(dstFormat, dstSampleCnt, dstHasYcbcr,
                                    srcFormat, srcSampleCnt, srcHasYcbcr)) {
        this->copySurfaceAsCopyImage(dst, src, dstImage, srcImage, srcRect, dstPoint);
        return true;
    }

    if (this->vkCaps().canCopyAsBlit(dstFormat,
                                     dstSampleCnt,
                                     dstImage->isLinearTiled(),
                                     dstHasYcbcr,
                                     srcFormat,
                                     srcSampleCnt,
                                     srcImage->isLinearTiled(),
                                     srcHasYcbcr)) {
        this->copySurfaceAsBlit(dst, src, dstImage, srcImage, srcRect, dstPoint);
        return true;
    }

    return false;
}

bool GrVkGpu::onReadPixels(GrSurface* surface,
                           SkIRect rect,
                           GrColorType surfaceColorType,
                           GrColorType dstColorType,
                           void* buffer,
                           size_t rowBytes) {
    if (surface->isProtected()) {
        return false;
    }

    if (!this->currentCommandBuffer()) {
        return false;
    }

    GrVkImage* image = nullptr;
    GrVkRenderTarget* rt = static_cast<GrVkRenderTarget*>(surface->asRenderTarget());
    if (rt) {
        // Reading from render targets that wrap a secondary command buffer is not allowed since
        // it would require us to know the VkImage, which we don't have, as well as need us to
        // stop and start the VkRenderPass which we don't have access to.
        if (rt->wrapsSecondaryCommandBuffer()) {
            return false;
        }
        image = rt->nonMSAAAttachment();
    } else {
        image = static_cast<GrVkTexture*>(surface->asTexture())->textureImage();
    }

    if (!image) {
        return false;
    }

    if (dstColorType == GrColorType::kUnknown ||
        dstColorType != this->vkCaps().transferColorType(image->imageFormat(), surfaceColorType)) {
        return false;
    }

    // Change the layout of our target so it can be used as the source of a transfer copy.
    image->setImageLayout(this,
                          VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                          VK_ACCESS_TRANSFER_READ_BIT,
                          VK_PIPELINE_STAGE_TRANSFER_BIT,
                          false);

    size_t bpp = GrColorTypeBytesPerPixel(dstColorType);
    if (GrVkFormatBytesPerBlock(image->imageFormat()) != bpp) {
        return false;
    }
    size_t tightRowBytes = bpp * rect.width();

    VkBufferImageCopy region;
    memset(&region, 0, sizeof(VkBufferImageCopy));
    VkOffset3D offset = { rect.left(), rect.top(), 0 };
    region.imageOffset = offset;
    region.imageExtent = { (uint32_t)rect.width(), (uint32_t)rect.height(), 1 };

    size_t transBufferRowBytes = bpp * region.imageExtent.width;
    size_t imageRows = region.imageExtent.height;
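    // Stage the readback through a GPU-to-CPU transfer buffer sized for a tightly packed copy
    // of the requested rect.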
    GrResourceProvider* resourceProvider = this->getContext()->priv().resourceProvider();
    sk_sp<GrGpuBuffer> transferBuffer = resourceProvider->createBuffer(
            transBufferRowBytes * imageRows, GrGpuBufferType::kXferGpuToCpu,
            kDynamic_GrAccessPattern);

    if (!transferBuffer) {
        return false;
    }

    GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(transferBuffer.get());

    // Copy the image to a buffer so we can map it to cpu memory
    region.bufferOffset = 0;
    region.bufferRowLength = 0; // Forces RowLength to be width. We handle the rowBytes below.
    region.bufferImageHeight = 0; // Forces height to be tightly packed. Only useful for 3d images.
    region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };

    this->currentCommandBuffer()->copyImageToBuffer(this,
                                                    image,
                                                    VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                                    transferBuffer,
                                                    1,
                                                    &region);

    // Make sure the copy to the buffer has finished before the host reads it.
    vkBuffer->addMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT,
                               VK_ACCESS_HOST_READ_BIT,
                               VK_PIPELINE_STAGE_TRANSFER_BIT,
                               VK_PIPELINE_STAGE_HOST_BIT,
                               false);

    // We need to submit the current command buffer to the queue and make sure it finishes before
    // we can copy the data out of the buffer.
    if (!this->submitCommandBuffer(kForce_SyncQueue)) {
        return false;
    }
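    // The buffer rows are tightly packed; SkRectMemcpy re-strides them into the caller's
    // rowBytes as it copies.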
    void* mappedMemory = transferBuffer->map();

    SkRectMemcpy(buffer, rowBytes, mappedMemory, transBufferRowBytes, tightRowBytes, rect.height());

    transferBuffer->unmap();
    return true;
}

bool GrVkGpu::beginRenderPass(const GrVkRenderPass* renderPass,
                              sk_sp<const GrVkFramebuffer> framebuffer,
                              const VkClearValue* colorClear,
                              const GrSurface* target,
                              const SkIRect& renderPassBounds,
                              bool forSecondaryCB) {
    if (!this->currentCommandBuffer()) {
        return false;
    }
    SkASSERT(!framebuffer->isExternal());

#ifdef SK_DEBUG
    uint32_t index;
    bool result = renderPass->colorAttachmentIndex(&index);
    SkASSERT(result && 0 == index);
    result = renderPass->stencilAttachmentIndex(&index);
    if (result) {
        SkASSERT(1 == index);
    }
#endif
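    // The stencil clear slot shifts to index 2 when the render pass also carries a resolve
    // attachment; the color clear always lives at index 0.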
    VkClearValue clears[3];
    int stencilIndex = renderPass->hasResolveAttachment() ? 2 : 1;
    clears[0].color = colorClear->color;
    clears[stencilIndex].depthStencil.depth = 0.0f;
    clears[stencilIndex].depthStencil.stencil = 0;

    return this->currentCommandBuffer()->beginRenderPass(
        this, renderPass, std::move(framebuffer), clears, target, renderPassBounds, forSecondaryCB);
}

void GrVkGpu::endRenderPass(GrRenderTarget* target, GrSurfaceOrigin origin,
                            const SkIRect& bounds) {
    // We had a command buffer when we started the render pass; we should have one now as well.
    SkASSERT(this->currentCommandBuffer());
    this->currentCommandBuffer()->endRenderPass(this);
    this->didWriteToSurface(target, origin, &bounds);
}

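// Folds fatal VkResults into GPU-wide state: device loss and OOM are latched so later work can
// fail fast, while any other error simply fails the current call.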
bool GrVkGpu::checkVkResult(VkResult result) {
    switch (result) {
        case VK_SUCCESS:
            return true;
        case VK_ERROR_DEVICE_LOST:
            fDeviceIsLost = true;
            return false;
        case VK_ERROR_OUT_OF_DEVICE_MEMORY:
        case VK_ERROR_OUT_OF_HOST_MEMORY:
            this->setOOMed();
            return false;
        default:
            return false;
    }
}

void GrVkGpu::submitSecondaryCommandBuffer(std::unique_ptr<GrVkSecondaryCommandBuffer> buffer) {
    if (!this->currentCommandBuffer()) {
        return;
    }
    this->currentCommandBuffer()->executeCommands(this, std::move(buffer));
}

void GrVkGpu::submit(GrOpsRenderPass* renderPass) {
    SkASSERT(fCachedOpsRenderPass.get() == renderPass);

    fCachedOpsRenderPass->submit();
    fCachedOpsRenderPass->reset();
}

GrFence SK_WARN_UNUSED_RESULT GrVkGpu::insertFence() {
    VkFenceCreateInfo createInfo;
    memset(&createInfo, 0, sizeof(VkFenceCreateInfo));
    createInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
    createInfo.pNext = nullptr;
    createInfo.flags = 0;
    VkFence fence = VK_NULL_HANDLE;
    VkResult result;

    VK_CALL_RET(result, CreateFence(this->device(), &createInfo, nullptr, &fence));
    if (result != VK_SUCCESS) {
        return 0;
    }
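    // Submitting zero batches with just a fence makes the fence signal once all work previously
    // submitted to the queue has completed.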
    VK_CALL_RET(result, QueueSubmit(this->queue(), 0, nullptr, fence));
    if (result != VK_SUCCESS) {
        VK_CALL(DestroyFence(this->device(), fence, nullptr));
        return 0;
    }

    static_assert(sizeof(GrFence) >= sizeof(VkFence));
    return (GrFence)fence;
}

bool GrVkGpu::waitFence(GrFence fence) {
    SkASSERT(VK_NULL_HANDLE != (VkFence)fence);

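    // A timeout of 0 makes this a non-blocking poll: it returns true only if the fence has
    // already signaled.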
    VkResult result;
    VK_CALL_RET(result, WaitForFences(this->device(), 1, (VkFence*)&fence, VK_TRUE, 0));
    return (VK_SUCCESS == result);
}

void GrVkGpu::deleteFence(GrFence fence) const {
    VK_CALL(DestroyFence(this->device(), (VkFence)fence, nullptr));
}

std::unique_ptr<GrSemaphore> SK_WARN_UNUSED_RESULT GrVkGpu::makeSemaphore(bool isOwned) {
    return GrVkSemaphore::Make(this, isOwned);
}

std::unique_ptr<GrSemaphore> GrVkGpu::wrapBackendSemaphore(const GrBackendSemaphore& semaphore,
                                                           GrSemaphoreWrapType wrapType,
                                                           GrWrapOwnership ownership) {
    return GrVkSemaphore::MakeWrapped(this, semaphore.vkSemaphore(), wrapType, ownership);
}

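// Semaphores are not signaled or waited on immediately; they accumulate in fSemaphoresToSignal
// and fSemaphoresToWaitOn and are attached to the next queue submission.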
void GrVkGpu::insertSemaphore(GrSemaphore* semaphore) {
    SkASSERT(semaphore);

    GrVkSemaphore* vkSem = static_cast<GrVkSemaphore*>(semaphore);

    GrVkSemaphore::Resource* resource = vkSem->getResource();
    if (resource->shouldSignal()) {
        resource->ref();
        fSemaphoresToSignal.push_back(resource);
    }
}

void GrVkGpu::waitSemaphore(GrSemaphore* semaphore) {
    SkASSERT(semaphore);

    GrVkSemaphore* vkSem = static_cast<GrVkSemaphore*>(semaphore);

    GrVkSemaphore::Resource* resource = vkSem->getResource();
    if (resource->shouldWait()) {
        resource->ref();
        fSemaphoresToWaitOn.push_back(resource);
    }
}

std::unique_ptr<GrSemaphore> GrVkGpu::prepareTextureForCrossContextUsage(GrTexture* texture) {
    SkASSERT(texture);
    GrVkImage* vkTexture = static_cast<GrVkTexture*>(texture)->textureImage();
    vkTexture->setImageLayout(this,
                              VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                              VK_ACCESS_SHADER_READ_BIT,
                              VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                              false);
    // TODO: should we have a way to notify the caller that this has failed? Currently if the
    // submit fails (caused by DEVICE_LOST) this will just cause us to fail the next use of the
    // gpu. Eventually we will abandon the whole GPU if this fails.
    this->submitToGpu(false);

    // The image layout change serves as a barrier, so no semaphore is needed.
    // If we ever decide we need to return a semaphore here, we need to make sure GrVkSemaphore is
    // thread safe so that only the first thread that tries to use the semaphore actually submits
    // it. This additionally would also require thread safety in command buffer submissions to
    // queues in general.
    return nullptr;
}

void GrVkGpu::addDrawable(std::unique_ptr<SkDrawable::GpuDrawHandler> drawable) {
    fDrawables.emplace_back(std::move(drawable));
}

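// Pushes the contents of the VkPipelineCache into the client's persistent cache (when one is
// set) so later contexts can prewarm pipeline compilation.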
void GrVkGpu::storeVkPipelineCacheData() {
    if (this->getContext()->priv().getPersistentCache()) {
        this->resourceProvider().storePipelineCacheData();
    }
}