1 /*
2 * Copyright 2015 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "src/gpu/ganesh/vk/GrVkGpu.h"
9
10 #include "include/core/SkTextureCompressionType.h"
11 #include "include/gpu/GrBackendSemaphore.h"
12 #include "include/gpu/GrBackendSurface.h"
13 #include "include/gpu/GrContextOptions.h"
14 #include "include/gpu/GrDirectContext.h"
15 #include "include/gpu/ganesh/vk/GrVkBackendSemaphore.h"
16 #include "include/gpu/ganesh/vk/GrVkBackendSurface.h"
17 #include "include/gpu/vk/GrVkTypes.h"
18 #include "include/gpu/vk/VulkanBackendContext.h"
19 #include "include/gpu/vk/VulkanExtensions.h"
20 #include "include/private/base/SkTo.h"
21 #include "src/base/SkRectMemcpy.h"
22 #include "src/core/SkCompressedDataUtils.h"
23 #include "src/core/SkMipmap.h"
24 #include "src/core/SkTraceEvent.h"
25 #include "src/gpu/DataUtils.h"
26 #include "src/gpu/ganesh/GrBackendUtils.h"
27 #include "src/gpu/ganesh/GrDataUtils.h"
28 #include "src/gpu/ganesh/GrDirectContextPriv.h"
29 #include "src/gpu/ganesh/GrGeometryProcessor.h"
30 #include "src/gpu/ganesh/GrGpuResourceCacheAccess.h"
31 #include "src/gpu/ganesh/GrNativeRect.h"
32 #include "src/gpu/ganesh/GrPipeline.h"
33 #include "src/gpu/ganesh/GrPixmap.h"
34 #include "src/gpu/ganesh/GrRenderTarget.h"
35 #include "src/gpu/ganesh/GrResourceProvider.h"
36 #include "src/gpu/ganesh/GrTexture.h"
37 #include "src/gpu/ganesh/GrThreadSafePipelineBuilder.h"
38 #include "src/gpu/ganesh/SkGr.h"
39 #include "src/gpu/ganesh/image/SkImage_Ganesh.h"
40 #include "src/gpu/ganesh/surface/SkSurface_Ganesh.h"
41 #include "src/gpu/ganesh/vk/GrVkBuffer.h"
42 #include "src/gpu/ganesh/vk/GrVkCommandBuffer.h"
43 #include "src/gpu/ganesh/vk/GrVkCommandPool.h"
44 #include "src/gpu/ganesh/vk/GrVkFramebuffer.h"
45 #include "src/gpu/ganesh/vk/GrVkImage.h"
46 #include "src/gpu/ganesh/vk/GrVkOpsRenderPass.h"
47 #include "src/gpu/ganesh/vk/GrVkPipeline.h"
48 #include "src/gpu/ganesh/vk/GrVkPipelineState.h"
49 #include "src/gpu/ganesh/vk/GrVkRenderPass.h"
50 #include "src/gpu/ganesh/vk/GrVkResourceProvider.h"
51 #include "src/gpu/ganesh/vk/GrVkSemaphore.h"
52 #include "src/gpu/ganesh/vk/GrVkTexture.h"
53 #include "src/gpu/ganesh/vk/GrVkTextureRenderTarget.h"
54 #include "src/gpu/vk/VulkanInterface.h"
55 #include "src/gpu/vk/VulkanMemory.h"
56 #include "src/gpu/vk/VulkanUtilsPriv.h"
57
58 #include "include/gpu/vk/VulkanTypes.h"
59 #include "include/private/gpu/vk/SkiaVulkan.h"
60
61 #if defined(SK_USE_VMA)
62 #include "src/gpu/vk/VulkanAMDMemoryAllocator.h"
63 #endif
64
65 using namespace skia_private;
66
67 #define VK_CALL(X) GR_VK_CALL(this->vkInterface(), X)
68 #define VK_CALL_RET(RET, X) GR_VK_CALL_RESULT(this, RET, X)
69
70 std::unique_ptr<GrGpu> GrVkGpu::Make(const skgpu::VulkanBackendContext& backendContext,
71 const GrContextOptions& options,
72 GrDirectContext* direct) {
73 if (backendContext.fInstance == VK_NULL_HANDLE ||
74 backendContext.fPhysicalDevice == VK_NULL_HANDLE ||
75 backendContext.fDevice == VK_NULL_HANDLE ||
76 backendContext.fQueue == VK_NULL_HANDLE) {
77 return nullptr;
78 }
79 if (!backendContext.fGetProc) {
80 return nullptr;
81 }
82
83 PFN_vkEnumerateInstanceVersion localEnumerateInstanceVersion =
84 reinterpret_cast<PFN_vkEnumerateInstanceVersion>(
85 backendContext.fGetProc("vkEnumerateInstanceVersion",
86 VK_NULL_HANDLE, VK_NULL_HANDLE));
87 uint32_t instanceVersion = 0;
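// vkEnumerateInstanceVersion was added in Vulkan 1.1; if the loader doesn't expose it, assume a 1.0 instance.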
88 if (!localEnumerateInstanceVersion) {
89 instanceVersion = VK_MAKE_VERSION(1, 0, 0);
90 } else {
91 VkResult err = localEnumerateInstanceVersion(&instanceVersion);
92 if (err) {
93 SkDebugf("Failed to enumerate instance version. Err: %d\n", err);
94 return nullptr;
95 }
96 }
97
98 PFN_vkGetPhysicalDeviceProperties localGetPhysicalDeviceProperties =
99 reinterpret_cast<PFN_vkGetPhysicalDeviceProperties>(
100 backendContext.fGetProc("vkGetPhysicalDeviceProperties",
101 backendContext.fInstance,
102 VK_NULL_HANDLE));
103
104 if (!localGetPhysicalDeviceProperties) {
105 return nullptr;
106 }
107 VkPhysicalDeviceProperties physDeviceProperties;
108 localGetPhysicalDeviceProperties(backendContext.fPhysicalDevice, &physDeviceProperties);
109 uint32_t physDevVersion = physDeviceProperties.apiVersion;
110
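// Limit both the instance and physical-device versions to the client's fMaxAPIVersion (when provided); otherwise cap them at the instance version.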
111 uint32_t apiVersion = backendContext.fMaxAPIVersion ? backendContext.fMaxAPIVersion
112 : instanceVersion;
113
114 instanceVersion = std::min(instanceVersion, apiVersion);
115 physDevVersion = std::min(physDevVersion, apiVersion);
116
117 skgpu::VulkanExtensions noExtensions;
118 const skgpu::VulkanExtensions* extensions =
119 backendContext.fVkExtensions ? backendContext.fVkExtensions : &noExtensions;
120
121 auto interface = sk_make_sp<skgpu::VulkanInterface>(backendContext.fGetProc,
122 backendContext.fInstance,
123 backendContext.fDevice,
124 instanceVersion,
125 physDevVersion,
126 extensions);
127 SkASSERT(interface);
128 if (!interface->validate(instanceVersion, physDevVersion, extensions)) {
129 return nullptr;
130 }
131
132 sk_sp<GrVkCaps> caps;
133 if (backendContext.fDeviceFeatures2) {
134 caps.reset(new GrVkCaps(options,
135 interface.get(),
136 backendContext.fPhysicalDevice,
137 *backendContext.fDeviceFeatures2,
138 instanceVersion,
139 physDevVersion,
140 *extensions,
141 backendContext.fProtectedContext));
142 } else if (backendContext.fDeviceFeatures) {
143 VkPhysicalDeviceFeatures2 features2;
144 features2.pNext = nullptr;
145 features2.features = *backendContext.fDeviceFeatures;
146 caps.reset(new GrVkCaps(options,
147 interface.get(),
148 backendContext.fPhysicalDevice,
149 features2,
150 instanceVersion,
151 physDevVersion,
152 *extensions,
153 backendContext.fProtectedContext));
154 } else {
155 VkPhysicalDeviceFeatures2 features;
156 memset(&features, 0, sizeof(VkPhysicalDeviceFeatures2));
157
158 caps.reset(new GrVkCaps(options,
159 interface.get(),
160 backendContext.fPhysicalDevice,
161 features,
162 instanceVersion,
163 physDevVersion,
164 *extensions,
165 backendContext.fProtectedContext));
166 }
167
168 if (!caps) {
169 return nullptr;
170 }
171
172 sk_sp<skgpu::VulkanMemoryAllocator> memoryAllocator = backendContext.fMemoryAllocator;
173 #if defined(SK_USE_VMA)
174 if (!memoryAllocator) {
175 // We were not given a memory allocator at creation
176 memoryAllocator = skgpu::VulkanAMDMemoryAllocator::Make(backendContext.fInstance,
177 backendContext.fPhysicalDevice,
178 backendContext.fDevice,
179 physDevVersion,
180 extensions,
181 interface.get(),
182 skgpu::ThreadSafe::kNo);
183 }
184 #endif
185 if (!memoryAllocator) {
186 SkDEBUGFAIL("No supplied vulkan memory allocator and unable to create one internally.");
187 return nullptr;
188 }
189
190 std::unique_ptr<GrVkGpu> vkGpu(new GrVkGpu(direct,
191 backendContext,
192 std::move(caps),
193 interface,
194 instanceVersion,
195 physDevVersion,
196 std::move(memoryAllocator)));
197 if (backendContext.fProtectedContext == GrProtected::kYes &&
198 !vkGpu->vkCaps().supportsProtectedContent()) {
199 return nullptr;
200 }
201 return vkGpu;
202 }
203
204 ////////////////////////////////////////////////////////////////////////////////
205
206 GrVkGpu::GrVkGpu(GrDirectContext* direct,
207 const skgpu::VulkanBackendContext& backendContext,
208 sk_sp<GrVkCaps> caps,
209 sk_sp<const skgpu::VulkanInterface> interface,
210 uint32_t instanceVersion,
211 uint32_t physicalDeviceVersion,
212 sk_sp<skgpu::VulkanMemoryAllocator> memoryAllocator)
213 : INHERITED(direct)
214 , fInterface(std::move(interface))
215 , fMemoryAllocator(std::move(memoryAllocator))
216 , fVkCaps(std::move(caps))
217 , fPhysicalDevice(backendContext.fPhysicalDevice)
218 , fDevice(backendContext.fDevice)
219 , fQueue(backendContext.fQueue)
220 , fQueueIndex(backendContext.fGraphicsQueueIndex)
221 , fResourceProvider(this)
222 , fStagingBufferManager(this)
223 , fDisconnected(false)
224 , fProtectedContext(backendContext.fProtectedContext)
225 , fDeviceLostContext(backendContext.fDeviceLostContext)
226 , fDeviceLostProc(backendContext.fDeviceLostProc) {
227 SkASSERT(fMemoryAllocator);
228
229 this->initCaps(fVkCaps);
230
231 VK_CALL(GetPhysicalDeviceProperties(backendContext.fPhysicalDevice, &fPhysDevProps));
232 VK_CALL(GetPhysicalDeviceMemoryProperties(backendContext.fPhysicalDevice, &fPhysDevMemProps));
233
234 fResourceProvider.init();
235
236 fMainCmdPool = fResourceProvider.findOrCreateCommandPool();
237 if (fMainCmdPool) {
238 fMainCmdBuffer = fMainCmdPool->getPrimaryCommandBuffer();
239 SkASSERT(this->currentCommandBuffer());
240 this->currentCommandBuffer()->begin(this);
241 }
242 }
243
244 void GrVkGpu::destroyResources() {
245 if (fMainCmdPool) {
246 fMainCmdPool->getPrimaryCommandBuffer()->end(this, /*abandoningBuffer=*/true);
247 fMainCmdPool->close();
248 }
249
250 // wait for all commands to finish
251 this->finishOutstandingGpuWork();
252
253 if (fMainCmdPool) {
254 fMainCmdPool->unref();
255 fMainCmdPool = nullptr;
256 }
257
258 for (int i = 0; i < fSemaphoresToWaitOn.size(); ++i) {
259 fSemaphoresToWaitOn[i]->unref();
260 }
261 fSemaphoresToWaitOn.clear();
262
263 for (int i = 0; i < fSemaphoresToSignal.size(); ++i) {
264 fSemaphoresToSignal[i]->unref();
265 }
266 fSemaphoresToSignal.clear();
267
268 fStagingBufferManager.reset();
269
270 fMSAALoadManager.destroyResources(this);
271
272 // must call this just before we destroy the command pool and VkDevice
273 fResourceProvider.destroyResources();
274 }
275
276 GrVkGpu::~GrVkGpu() {
277 if (!fDisconnected) {
278 this->destroyResources();
279 }
280 // We don't delete the memory allocator until the very end of the GrVkGpu lifetime so that
281 // clients can continue to delete backend textures even after a context has been abandoned.
282 fMemoryAllocator.reset();
283 }
284
285
286 void GrVkGpu::disconnect(DisconnectType type) {
287 INHERITED::disconnect(type);
288 if (!fDisconnected) {
289 this->destroyResources();
290
291 fSemaphoresToWaitOn.clear();
292 fSemaphoresToSignal.clear();
293 fMainCmdBuffer = nullptr;
294 fDisconnected = true;
295 }
296 }
297
298 GrThreadSafePipelineBuilder* GrVkGpu::pipelineBuilder() {
299 return fResourceProvider.pipelineStateCache();
300 }
301
302 sk_sp<GrThreadSafePipelineBuilder> GrVkGpu::refPipelineBuilder() {
303 return fResourceProvider.refPipelineStateCache();
304 }
305
306 ///////////////////////////////////////////////////////////////////////////////
307
308 GrOpsRenderPass* GrVkGpu::onGetOpsRenderPass(
309 GrRenderTarget* rt,
310 bool useMSAASurface,
311 GrAttachment* stencil,
312 GrSurfaceOrigin origin,
313 const SkIRect& bounds,
314 const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
315 const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo,
316 const TArray<GrSurfaceProxy*, true>& sampledProxies,
317 GrXferBarrierFlags renderPassXferBarriers) {
318 if (!fCachedOpsRenderPass) {
319 fCachedOpsRenderPass = std::make_unique<GrVkOpsRenderPass>(this);
320 }
321
322 // For the given render target and requested render pass features we need to find a compatible
323 // framebuffer to use for the render pass. Technically it is the underlying VkRenderPass that
324 // is compatible, but that is part of the framebuffer that we get here.
325 GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt);
326
327 SkASSERT(!useMSAASurface ||
328 rt->numSamples() > 1 ||
329 (this->vkCaps().supportsDiscardableMSAAForDMSAA() &&
330 vkRT->resolveAttachment() &&
331 vkRT->resolveAttachment()->supportsInputAttachmentUsage()));
332
333 // Convert the GrXferBarrierFlags into render pass self dependency flags
334 GrVkRenderPass::SelfDependencyFlags selfDepFlags = GrVkRenderPass::SelfDependencyFlags::kNone;
335 if (renderPassXferBarriers & GrXferBarrierFlags::kBlend) {
336 selfDepFlags |= GrVkRenderPass::SelfDependencyFlags::kForNonCoherentAdvBlend;
337 }
338 if (renderPassXferBarriers & GrXferBarrierFlags::kTexture) {
339 selfDepFlags |= GrVkRenderPass::SelfDependencyFlags::kForInputAttachment;
340 }
341
342 // Figure out if we need a resolve attachment for this render pass. A resolve attachment is
343 // needed if we are using msaa to draw with a discardable msaa attachment. If we are in this
344 // case we also need to update the color load/store ops since we don't want to ever load or
345 // store the msaa color attachment, but may need to for the resolve attachment.
346 GrOpsRenderPass::LoadAndStoreInfo localColorInfo = colorInfo;
347 bool withResolve = false;
348 GrVkRenderPass::LoadFromResolve loadFromResolve = GrVkRenderPass::LoadFromResolve::kNo;
349 GrOpsRenderPass::LoadAndStoreInfo resolveInfo{GrLoadOp::kLoad, GrStoreOp::kStore, {}};
350 if (useMSAASurface && this->vkCaps().renderTargetSupportsDiscardableMSAA(vkRT)) {
351 withResolve = true;
352 localColorInfo.fStoreOp = GrStoreOp::kDiscard;
353 if (colorInfo.fLoadOp == GrLoadOp::kLoad) {
354 loadFromResolve = GrVkRenderPass::LoadFromResolve::kLoad;
355 localColorInfo.fLoadOp = GrLoadOp::kDiscard;
356 } else {
357 resolveInfo.fLoadOp = GrLoadOp::kDiscard;
358 }
359 }
360
361 // Get the framebuffer to use for the render pass
362 sk_sp<GrVkFramebuffer> framebuffer;
363 if (vkRT->wrapsSecondaryCommandBuffer()) {
364 framebuffer = vkRT->externalFramebuffer();
365 } else {
366 auto fb = vkRT->getFramebuffer(withResolve, SkToBool(stencil), selfDepFlags,
367 loadFromResolve);
368 framebuffer = sk_ref_sp(fb);
369 }
370 if (!framebuffer) {
371 return nullptr;
372 }
373
374 if (!fCachedOpsRenderPass->set(rt, std::move(framebuffer), origin, bounds, localColorInfo,
375 stencilInfo, resolveInfo, selfDepFlags, loadFromResolve,
376 sampledProxies)) {
377 return nullptr;
378 }
379 return fCachedOpsRenderPass.get();
380 }
381
382 bool GrVkGpu::submitCommandBuffer(SyncQueue sync) {
383 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
384 if (!this->currentCommandBuffer()) {
385 return false;
386 }
387 SkASSERT(!fCachedOpsRenderPass || !fCachedOpsRenderPass->isActive());
388
389 if (!this->currentCommandBuffer()->hasWork() && kForce_SyncQueue != sync &&
390 fSemaphoresToSignal.empty() && fSemaphoresToWaitOn.empty()) {
391 // We may have added finished procs during the flush call. Since there is no actual work
392 // we are not submitting the command buffer and may never come back around to submit it.
393 // Thus we call all current finished procs manually, since the work has technically
394 // finished.
395 this->currentCommandBuffer()->callFinishedProcs();
396 SkASSERT(fDrawables.empty());
397 fResourceProvider.checkCommandBuffers();
398 return true;
399 }
400
401 fMainCmdBuffer->end(this);
402 SkASSERT(fMainCmdPool);
403 fMainCmdPool->close();
404 bool didSubmit = fMainCmdBuffer->submitToQueue(this, fQueue, fSemaphoresToSignal,
405 fSemaphoresToWaitOn);
406
407 if (didSubmit && sync == kForce_SyncQueue) {
408 fMainCmdBuffer->forceSync(this);
409 }
410
411 // We must delete any drawables that had to wait until submit to be destroyed.
412 fDrawables.clear();
413
414 // If we didn't submit the command buffer then we did not wait on any semaphores. We will
415 // continue to hold onto these semaphores and wait on them during the next command buffer
416 // submission.
417 if (didSubmit) {
418 for (int i = 0; i < fSemaphoresToWaitOn.size(); ++i) {
419 fSemaphoresToWaitOn[i]->unref();
420 }
421 fSemaphoresToWaitOn.clear();
422 }
423
424 // Even if we did not submit the command buffer, we drop all the signal semaphores since we will
425 // not try to recover the work that wasn't submitted and instead just drop it all. The client
426 // will be notified that the semaphores were not submitted so that they will not try to wait on
427 // them.
428 for (int i = 0; i < fSemaphoresToSignal.size(); ++i) {
429 fSemaphoresToSignal[i]->unref();
430 }
431 fSemaphoresToSignal.clear();
432
433 // Release old command pool and create a new one
434 fMainCmdPool->unref();
435 fMainCmdPool = fResourceProvider.findOrCreateCommandPool();
436 if (fMainCmdPool) {
437 fMainCmdBuffer = fMainCmdPool->getPrimaryCommandBuffer();
438 SkASSERT(fMainCmdBuffer);
439 fMainCmdBuffer->begin(this);
440 } else {
441 fMainCmdBuffer = nullptr;
442 }
443 // We must wait to call checkCommandBuffers until after we get a new command buffer. The
444 // checkCommandBuffers may trigger a releaseProc which may cause us to insert a barrier for a
445 // released GrVkImage. That barrier needs to be put into a new command buffer and not the old
446 // one that was just submitted.
447 fResourceProvider.checkCommandBuffers();
448 return didSubmit;
449 }
450
451 ///////////////////////////////////////////////////////////////////////////////
452 sk_sp<GrGpuBuffer> GrVkGpu::onCreateBuffer(size_t size,
453 GrGpuBufferType type,
454 GrAccessPattern accessPattern) {
455 #ifdef SK_DEBUG
456 switch (type) {
457 case GrGpuBufferType::kVertex:
458 case GrGpuBufferType::kIndex:
459 case GrGpuBufferType::kDrawIndirect:
460 SkASSERT(accessPattern == kDynamic_GrAccessPattern ||
461 accessPattern == kStatic_GrAccessPattern);
462 break;
463 case GrGpuBufferType::kXferCpuToGpu:
464 SkASSERT(accessPattern == kDynamic_GrAccessPattern);
465 break;
466 case GrGpuBufferType::kXferGpuToCpu:
467 SkASSERT(accessPattern == kDynamic_GrAccessPattern ||
468 accessPattern == kStream_GrAccessPattern);
469 break;
470 case GrGpuBufferType::kUniform:
471 SkASSERT(accessPattern == kDynamic_GrAccessPattern);
472 break;
473 }
474 #endif
475 return GrVkBuffer::Make(this, size, type, accessPattern);
476 }
477
478 bool GrVkGpu::onWritePixels(GrSurface* surface,
479 SkIRect rect,
480 GrColorType surfaceColorType,
481 GrColorType srcColorType,
482 const GrMipLevel texels[],
483 int mipLevelCount,
484 bool prepForTexSampling) {
485 GrVkTexture* texture = static_cast<GrVkTexture*>(surface->asTexture());
486 if (!texture) {
487 return false;
488 }
489 GrVkImage* texImage = texture->textureImage();
490
491 // Make sure we have at least the base level
492 if (!mipLevelCount || !texels[0].fPixels) {
493 return false;
494 }
495
496 SkASSERT(!skgpu::VkFormatIsCompressed(texImage->imageFormat()));
497 bool success = false;
498 bool linearTiling = texImage->isLinearTiled();
499 if (linearTiling) {
500 if (mipLevelCount > 1) {
501 SkDebugf("Can't upload mipmap data to linear tiled texture");
502 return false;
503 }
504 if (VK_IMAGE_LAYOUT_PREINITIALIZED != texImage->currentLayout()) {
505 // Need to change the layout to general in order to perform a host write
506 texImage->setImageLayout(this,
507 VK_IMAGE_LAYOUT_GENERAL,
508 VK_ACCESS_HOST_WRITE_BIT,
509 VK_PIPELINE_STAGE_HOST_BIT,
510 false);
511 if (!this->submitCommandBuffer(kForce_SyncQueue)) {
512 return false;
513 }
514 }
515 success = this->uploadTexDataLinear(texImage,
516 rect,
517 srcColorType,
518 texels[0].fPixels,
519 texels[0].fRowBytes);
520 } else {
521 SkASSERT(mipLevelCount <= (int)texImage->mipLevels());
522 success = this->uploadTexDataOptimal(texImage,
523 rect,
524 srcColorType,
525 texels,
526 mipLevelCount);
527 if (1 == mipLevelCount) {
528 texture->markMipmapsDirty();
529 }
530 }
531
532 if (prepForTexSampling) {
533 texImage->setImageLayout(this,
534 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
535 VK_ACCESS_SHADER_READ_BIT,
536 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
537 false);
538 }
539
540 return success;
541 }
542
543 // When we update vertex/index buffers via transfers we assume that they may have been used
544 // previously in draws and will be used again in draws afterwards. So we put a barrier before and
545 // after. If we had a mechanism for gathering the buffers that will be used in a GrVkOpsRenderPass
546 // *before* we begin a subpass we could do this lazily and non-redundantly by tracking the "last
547 // usage" on the GrVkBuffer. Then Pass 1 draw, xfer, xfer, xfer, Pass 2 draw would insert just two
548 // barriers: one before the first xfer and one before Pass 2. Currently, we'd use six barriers.
549 // Pass false as "after" before the transfer and true after the transfer.
550 static void add_transfer_dst_buffer_mem_barrier(GrVkGpu* gpu,
551 GrVkBuffer* dst,
552 size_t offset,
553 size_t size,
554 bool after) {
555 if (dst->intendedType() != GrGpuBufferType::kIndex &&
556 dst->intendedType() != GrGpuBufferType::kVertex) {
557 return;
558 }
559
560 VkAccessFlags srcAccessMask = dst->intendedType() == GrGpuBufferType::kIndex
561 ? VK_ACCESS_INDEX_READ_BIT
562 : VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
563 VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
564
565 VkPipelineStageFlagBits srcPipelineStageFlags = VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
566 VkPipelineStageFlagBits dstPipelineStageFlags = VK_PIPELINE_STAGE_TRANSFER_BIT;
567
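// For the barrier emitted after the transfer, reverse the roles: the transfer write must become visible to later index/vertex reads.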
568 if (after) {
569 using std::swap;
570 swap(srcAccessMask, dstAccessMask );
571 swap(srcPipelineStageFlags, dstPipelineStageFlags);
572 }
573
574 VkBufferMemoryBarrier bufferMemoryBarrier = {
575 VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // sType
576 nullptr, // pNext
577 srcAccessMask, // srcAccessMask
578 dstAccessMask, // dstAccessMask
579 VK_QUEUE_FAMILY_IGNORED, // srcQueueFamilyIndex
580 VK_QUEUE_FAMILY_IGNORED, // dstQueueFamilyIndex
581 dst->vkBuffer(), // buffer
582 offset, // offset
583 size, // size
584 };
585
586 gpu->addBufferMemoryBarrier(srcPipelineStageFlags,
587 dstPipelineStageFlags,
588 /*byRegion=*/false,
589 &bufferMemoryBarrier);
590 }
591
592 bool GrVkGpu::onTransferFromBufferToBuffer(sk_sp<GrGpuBuffer> src,
593 size_t srcOffset,
594 sk_sp<GrGpuBuffer> dst,
595 size_t dstOffset,
596 size_t size) {
597 if (!this->currentCommandBuffer()) {
598 return false;
599 }
600
601 VkBufferCopy copyRegion;
602 copyRegion.srcOffset = srcOffset;
603 copyRegion.dstOffset = dstOffset;
604 copyRegion.size = size;
605
606 add_transfer_dst_buffer_mem_barrier(this,
607 static_cast<GrVkBuffer*>(dst.get()),
608 dstOffset,
609 size,
610 /*after=*/false);
611 this->currentCommandBuffer()->copyBuffer(this, std::move(src), dst, 1, &copyRegion);
612 add_transfer_dst_buffer_mem_barrier(this,
613 static_cast<GrVkBuffer*>(dst.get()),
614 dstOffset,
615 size,
616 /*after=*/true);
617
618 return true;
619 }
620
621 bool GrVkGpu::onTransferPixelsTo(GrTexture* texture,
622 SkIRect rect,
623 GrColorType surfaceColorType,
624 GrColorType bufferColorType,
625 sk_sp<GrGpuBuffer> transferBuffer,
626 size_t bufferOffset,
627 size_t rowBytes) {
628 if (!this->currentCommandBuffer()) {
629 return false;
630 }
631
632 size_t bpp = GrColorTypeBytesPerPixel(bufferColorType);
633 if (GrBackendFormatBytesPerPixel(texture->backendFormat()) != bpp) {
634 return false;
635 }
636
637 // Vulkan only supports offsets that are both 4-byte aligned and aligned to a pixel.
638 if ((bufferOffset & 0x3) || (bufferOffset % bpp)) {
639 return false;
640 }
641 GrVkTexture* tex = static_cast<GrVkTexture*>(texture);
642 if (!tex) {
643 return false;
644 }
645 GrVkImage* vkImage = tex->textureImage();
646 VkFormat format = vkImage->imageFormat();
647
648 // Can't transfer compressed data
649 SkASSERT(!skgpu::VkFormatIsCompressed(format));
650
651 if (!transferBuffer) {
652 return false;
653 }
654
655 if (bufferColorType != this->vkCaps().transferColorType(format, surfaceColorType)) {
656 return false;
657 }
658 SkASSERT(skgpu::VkFormatBytesPerBlock(format) == GrColorTypeBytesPerPixel(bufferColorType));
659
660 SkASSERT(SkIRect::MakeSize(texture->dimensions()).contains(rect));
661
662 // Set up copy region
663 VkBufferImageCopy region;
664 memset(&region, 0, sizeof(VkBufferImageCopy));
665 region.bufferOffset = bufferOffset;
666 region.bufferRowLength = (uint32_t)(rowBytes/bpp);
667 region.bufferImageHeight = 0;
668 region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
669 region.imageOffset = { rect.left(), rect.top(), 0 };
670 region.imageExtent = { (uint32_t)rect.width(), (uint32_t)rect.height(), 1 };
671
672 // Change layout of our target so it can be copied to
673 vkImage->setImageLayout(this,
674 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
675 VK_ACCESS_TRANSFER_WRITE_BIT,
676 VK_PIPELINE_STAGE_TRANSFER_BIT,
677 false);
678
679 const GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(transferBuffer.get());
680
681 // Copy the buffer to the image.
682 this->currentCommandBuffer()->copyBufferToImage(this,
683 vkBuffer->vkBuffer(),
684 vkImage,
685 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
686 1,
687 &region);
688 this->currentCommandBuffer()->addGrBuffer(std::move(transferBuffer));
689
690 tex->markMipmapsDirty();
691 return true;
692 }
693
694 bool GrVkGpu::onTransferPixelsFrom(GrSurface* surface,
695 SkIRect rect,
696 GrColorType surfaceColorType,
697 GrColorType bufferColorType,
698 sk_sp<GrGpuBuffer> transferBuffer,
699 size_t offset) {
700 if (!this->currentCommandBuffer()) {
701 return false;
702 }
703 SkASSERT(surface);
704 SkASSERT(transferBuffer);
705 if (fProtectedContext == GrProtected::kYes) {
706 return false;
707 }
708
709 GrVkImage* srcImage;
710 if (GrVkRenderTarget* rt = static_cast<GrVkRenderTarget*>(surface->asRenderTarget())) {
711 // Reading from render targets that wrap a secondary command buffer is not allowed since
712 // it would require us to know the VkImage, which we don't have, as well as need us to
713 // stop and start the VkRenderPass which we don't have access to.
714 if (rt->wrapsSecondaryCommandBuffer()) {
715 return false;
716 }
717 if (!rt->nonMSAAAttachment()) {
718 return false;
719 }
720 srcImage = rt->nonMSAAAttachment();
721 } else {
722 SkASSERT(surface->asTexture());
723 srcImage = static_cast<GrVkTexture*>(surface->asTexture())->textureImage();
724 }
725
726 VkFormat format = srcImage->imageFormat();
727 if (bufferColorType != this->vkCaps().transferColorType(format, surfaceColorType)) {
728 return false;
729 }
730 SkASSERT(skgpu::VkFormatBytesPerBlock(format) == GrColorTypeBytesPerPixel(bufferColorType));
731
732 // Set up copy region
733 VkBufferImageCopy region;
734 memset(&region, 0, sizeof(VkBufferImageCopy));
735 region.bufferOffset = offset;
736 region.bufferRowLength = rect.width();
737 region.bufferImageHeight = 0;
738 region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
739 region.imageOffset = {rect.left(), rect.top(), 0};
740 region.imageExtent = {(uint32_t)rect.width(), (uint32_t)rect.height(), 1};
741
742 srcImage->setImageLayout(this,
743 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
744 VK_ACCESS_TRANSFER_READ_BIT,
745 VK_PIPELINE_STAGE_TRANSFER_BIT,
746 false);
747
748 this->currentCommandBuffer()->copyImageToBuffer(this, srcImage,
749 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
750 transferBuffer, 1, &region);
751
752 GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(transferBuffer.get());
753 // Make sure the copy to buffer has finished.
754 vkBuffer->addMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT,
755 VK_ACCESS_HOST_READ_BIT,
756 VK_PIPELINE_STAGE_TRANSFER_BIT,
757 VK_PIPELINE_STAGE_HOST_BIT,
758 false);
759 return true;
760 }
761
762 void GrVkGpu::resolveImage(GrSurface* dst, GrVkRenderTarget* src, const SkIRect& srcRect,
763 const SkIPoint& dstPoint) {
764 if (!this->currentCommandBuffer()) {
765 return;
766 }
767
768 SkASSERT(dst);
769 SkASSERT(src && src->colorAttachment() && src->colorAttachment()->numSamples() > 1);
770
771 VkImageResolve resolveInfo;
772 resolveInfo.srcSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
773 resolveInfo.srcOffset = {srcRect.fLeft, srcRect.fTop, 0};
774 resolveInfo.dstSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
775 resolveInfo.dstOffset = {dstPoint.fX, dstPoint.fY, 0};
776 resolveInfo.extent = {(uint32_t)srcRect.width(), (uint32_t)srcRect.height(), 1};
777
778 GrVkImage* dstImage;
779 GrRenderTarget* dstRT = dst->asRenderTarget();
780 GrTexture* dstTex = dst->asTexture();
781 if (dstTex) {
782 dstImage = static_cast<GrVkTexture*>(dstTex)->textureImage();
783 } else {
784 SkASSERT(dst->asRenderTarget());
785 dstImage = static_cast<GrVkRenderTarget*>(dstRT)->nonMSAAAttachment();
786 }
787 SkASSERT(dstImage);
788
789 dstImage->setImageLayout(this,
790 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
791 VK_ACCESS_TRANSFER_WRITE_BIT,
792 VK_PIPELINE_STAGE_TRANSFER_BIT,
793 false);
794
795 src->colorAttachment()->setImageLayout(this,
796 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
797 VK_ACCESS_TRANSFER_READ_BIT,
798 VK_PIPELINE_STAGE_TRANSFER_BIT,
799 false);
800 this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(src->colorAttachment()));
801 this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(dst));
802 this->currentCommandBuffer()->resolveImage(this, *src->colorAttachment(), *dstImage, 1,
803 &resolveInfo);
804 }
805
806 void GrVkGpu::onResolveRenderTarget(GrRenderTarget* target, const SkIRect& resolveRect) {
807 SkASSERT(target->numSamples() > 1);
808 GrVkRenderTarget* rt = static_cast<GrVkRenderTarget*>(target);
809 SkASSERT(rt->colorAttachmentView() && rt->resolveAttachmentView());
810
811 if (this->vkCaps().renderTargetSupportsDiscardableMSAA(rt)) {
812 // We would have resolved the RT during the render pass;
813 return;
814 }
815
816 this->resolveImage(target, rt, resolveRect,
817 SkIPoint::Make(resolveRect.x(), resolveRect.y()));
818 }
819
820 bool GrVkGpu::uploadTexDataLinear(GrVkImage* texImage,
821 SkIRect rect,
822 GrColorType dataColorType,
823 const void* data,
824 size_t rowBytes) {
825 SkASSERT(data);
826 SkASSERT(texImage->isLinearTiled());
827
828 SkASSERT(SkIRect::MakeSize(texImage->dimensions()).contains(rect));
829
830 size_t bpp = GrColorTypeBytesPerPixel(dataColorType);
831 size_t trimRowBytes = rect.width() * bpp;
832
833 SkASSERT(VK_IMAGE_LAYOUT_PREINITIALIZED == texImage->currentLayout() ||
834 VK_IMAGE_LAYOUT_GENERAL == texImage->currentLayout());
835 const VkImageSubresource subres = {
836 VK_IMAGE_ASPECT_COLOR_BIT,
837 0, // mipLevel
838 0, // arraySlice
839 };
840 VkSubresourceLayout layout;
841
842 const skgpu::VulkanInterface* interface = this->vkInterface();
843
844 GR_VK_CALL(interface, GetImageSubresourceLayout(fDevice,
845 texImage->image(),
846 &subres,
847 &layout));
848
849 const skgpu::VulkanAlloc& alloc = texImage->alloc();
850 if (VK_NULL_HANDLE == alloc.fMemory) {
851 return false;
852 }
853 VkDeviceSize offset = rect.top()*layout.rowPitch + rect.left()*bpp;
854 VkDeviceSize size = rect.height()*layout.rowPitch;
855 SkASSERT(size + offset <= alloc.fSize);
856 auto checkResult = [this](VkResult result) {
857 return this->checkVkResult(result);
858 };
859 auto allocator = this->memoryAllocator();
860 void* mapPtr = skgpu::VulkanMemory::MapAlloc(allocator, alloc, checkResult);
861 if (!mapPtr) {
862 return false;
863 }
864 mapPtr = reinterpret_cast<char*>(mapPtr) + offset;
865
866 SkRectMemcpy(mapPtr,
867 static_cast<size_t>(layout.rowPitch),
868 data,
869 rowBytes,
870 trimRowBytes,
871 rect.height());
872
873 skgpu::VulkanMemory::FlushMappedAlloc(allocator, alloc, offset, size, checkResult);
874 skgpu::VulkanMemory::UnmapAlloc(allocator, alloc);
875
876 return true;
877 }
878
879 // This fills in the 'regions' vector in preparation for copying a buffer to an image.
880 // 'individualMipOffsets' is filled in as a side-effect.
881 static size_t fill_in_compressed_regions(GrStagingBufferManager* stagingBufferManager,
882 TArray<VkBufferImageCopy>* regions,
883 TArray<size_t>* individualMipOffsets,
884 GrStagingBufferManager::Slice* slice,
885 SkTextureCompressionType compression,
886 VkFormat vkFormat,
887 SkISize dimensions,
888 skgpu::Mipmapped mipmapped) {
889 SkASSERT(compression != SkTextureCompressionType::kNone);
890 int numMipLevels = 1;
891 if (mipmapped == skgpu::Mipmapped::kYes) {
892 numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1;
893 }
894
895 regions->reserve_exact(regions->size() + numMipLevels);
896 individualMipOffsets->reserve_exact(individualMipOffsets->size() + numMipLevels);
897
898 size_t bytesPerBlock = skgpu::VkFormatBytesPerBlock(vkFormat);
899
900 size_t bufferSize = SkCompressedDataSize(
901 compression, dimensions, individualMipOffsets, mipmapped == skgpu::Mipmapped::kYes);
902 SkASSERT(individualMipOffsets->size() == numMipLevels);
903
904 // Get a staging buffer slice to hold our mip data.
905 // Vulkan requires offsets in the buffer to be aligned to a multiple of the texel size and 4
906 size_t alignment = bytesPerBlock;
907 switch (alignment & 0b11) {
908 case 0: break; // alignment is already a multiple of 4.
909 case 2: alignment *= 2; break; // alignment is a multiple of 2 but not 4.
910 default: alignment *= 4; break; // alignment is not a multiple of 2.
911 }
912 *slice = stagingBufferManager->allocateStagingBufferSlice(bufferSize, alignment);
913 if (!slice->fBuffer) {
914 return 0;
915 }
916
917 for (int i = 0; i < numMipLevels; ++i) {
918 VkBufferImageCopy& region = regions->push_back();
919 memset(&region, 0, sizeof(VkBufferImageCopy));
920 region.bufferOffset = slice->fOffset + (*individualMipOffsets)[i];
921 SkISize revisedDimensions = skgpu::CompressedDimensions(compression, dimensions);
922 region.bufferRowLength = revisedDimensions.width();
923 region.bufferImageHeight = revisedDimensions.height();
924 region.imageSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, SkToU32(i), 0, 1};
925 region.imageOffset = {0, 0, 0};
926 region.imageExtent = {SkToU32(dimensions.width()),
927 SkToU32(dimensions.height()), 1};
928
929 dimensions = {std::max(1, dimensions.width() /2),
930 std::max(1, dimensions.height()/2)};
931 }
932
933 return bufferSize;
934 }
935
936 bool GrVkGpu::uploadTexDataOptimal(GrVkImage* texImage,
937 SkIRect rect,
938 GrColorType dataColorType,
939 const GrMipLevel texels[],
940 int mipLevelCount) {
941 if (!this->currentCommandBuffer()) {
942 return false;
943 }
944
945 SkASSERT(!texImage->isLinearTiled());
946 // The assumption is either that we have no mipmaps, or that our rect is the entire texture
947 SkASSERT(mipLevelCount == 1 || rect == SkIRect::MakeSize(texImage->dimensions()));
948
949 // We assume that if the texture has mip levels, we either upload to all the levels or just the
950 // first.
951 SkASSERT(mipLevelCount == 1 || mipLevelCount == (int)texImage->mipLevels());
952
953 SkASSERT(!rect.isEmpty());
954
955 SkASSERT(this->vkCaps().surfaceSupportsWritePixels(texImage));
956
957 SkASSERT(this->vkCaps().isVkFormatTexturable(texImage->imageFormat()));
958 size_t bpp = GrColorTypeBytesPerPixel(dataColorType);
959
960 // texels is const.
961 // But we may need to adjust the fPixels ptr based on the copyRect, or fRowBytes.
962 // Because of this we need to make a non-const shallow copy of texels.
963 AutoTArray<GrMipLevel> texelsShallowCopy(mipLevelCount);
964 std::copy_n(texels, mipLevelCount, texelsShallowCopy.get());
965
966 TArray<size_t> individualMipOffsets;
967 size_t combinedBufferSize;
968 if (mipLevelCount > 1) {
969 combinedBufferSize = GrComputeTightCombinedBufferSize(bpp,
970 rect.size(),
971 &individualMipOffsets,
972 mipLevelCount);
973 } else {
974 SkASSERT(texelsShallowCopy[0].fPixels && texelsShallowCopy[0].fRowBytes);
975 combinedBufferSize = rect.width()*rect.height()*bpp;
976 individualMipOffsets.push_back(0);
977 }
978 SkASSERT(combinedBufferSize);
979
980 // Get a staging buffer slice to hold our mip data.
981 // Vulkan requires offsets in the buffer to be aligned to a multiple of the texel size and 4
982 size_t alignment = bpp;
983 switch (alignment & 0b11) {
984 case 0: break; // alignment is already a multiple of 4.
985 case 2: alignment *= 2; break; // alignment is a multiple of 2 but not 4.
986 default: alignment *= 4; break; // alignment is not a multiple of 2.
987 }
988 GrStagingBufferManager::Slice slice =
989 fStagingBufferManager.allocateStagingBufferSlice(combinedBufferSize, alignment);
990 if (!slice.fBuffer) {
991 return false;
992 }
993
994 int uploadLeft = rect.left();
995 int uploadTop = rect.top();
996
997 char* buffer = (char*) slice.fOffsetMapPtr;
998 TArray<VkBufferImageCopy> regions(mipLevelCount);
999
1000 int currentWidth = rect.width();
1001 int currentHeight = rect.height();
1002 for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
1003 if (texelsShallowCopy[currentMipLevel].fPixels) {
1004 const size_t trimRowBytes = currentWidth * bpp;
1005 const size_t rowBytes = texelsShallowCopy[currentMipLevel].fRowBytes;
1006
1007 // copy data into the buffer, skipping the trailing bytes
1008 char* dst = buffer + individualMipOffsets[currentMipLevel];
1009 const char* src = (const char*)texelsShallowCopy[currentMipLevel].fPixels;
1010 SkRectMemcpy(dst, trimRowBytes, src, rowBytes, trimRowBytes, currentHeight);
1011
1012 VkBufferImageCopy& region = regions.push_back();
1013 memset(&region, 0, sizeof(VkBufferImageCopy));
1014 region.bufferOffset = slice.fOffset + individualMipOffsets[currentMipLevel];
1015 region.bufferRowLength = currentWidth;
1016 region.bufferImageHeight = currentHeight;
1017 region.imageSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, SkToU32(currentMipLevel), 0, 1};
1018 region.imageOffset = {uploadLeft, uploadTop, 0};
1019 region.imageExtent = {(uint32_t)currentWidth, (uint32_t)currentHeight, 1};
1020 }
1021
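// Each successive mip level is half the previous level's dimensions, clamped to a minimum of 1.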
1022 currentWidth = std::max(1, currentWidth/2);
1023 currentHeight = std::max(1, currentHeight/2);
1024 }
1025
1026 // Change layout of our target so it can be copied to
1027 texImage->setImageLayout(this,
1028 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1029 VK_ACCESS_TRANSFER_WRITE_BIT,
1030 VK_PIPELINE_STAGE_TRANSFER_BIT,
1031 false);
1032
1033 // Copy the buffer to the image. This call takes the raw VkBuffer instead of a GrGpuBuffer
1034 // because we don't need the command buffer to ref the buffer here. The reason is that
1035 // the buffer is coming from the staging manager and the staging manager will make sure the
1036 // command buffer has a ref on the buffer. This avoids having to add and remove a ref for every
1037 // upload in the frame.
1038 GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(slice.fBuffer);
1039 this->currentCommandBuffer()->copyBufferToImage(this,
1040 vkBuffer->vkBuffer(),
1041 texImage,
1042 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1043 regions.size(),
1044 regions.begin());
1045 return true;
1046 }
1047
1048 // It's probably possible to roll this into uploadTexDataOptimal,
1049 // but for now it's easier to maintain as a separate entity.
1050 bool GrVkGpu::uploadTexDataCompressed(GrVkImage* uploadTexture,
1051 SkTextureCompressionType compression,
1052 VkFormat vkFormat,
1053 SkISize dimensions,
1054 skgpu::Mipmapped mipmapped,
1055 const void* data,
1056 size_t dataSize) {
1057 if (!this->currentCommandBuffer()) {
1058 return false;
1059 }
1060 SkASSERT(data);
1061 SkASSERT(!uploadTexture->isLinearTiled());
1062 // For now the assumption is that our rect is the entire texture.
1063 // Compressed textures are read-only so this should be a reasonable assumption.
1064 SkASSERT(dimensions.fWidth == uploadTexture->width() &&
1065 dimensions.fHeight == uploadTexture->height());
1066
1067 if (dimensions.fWidth == 0 || dimensions.fHeight == 0) {
1068 return false;
1069 }
1070
1071 SkASSERT(uploadTexture->imageFormat() == vkFormat);
1072 SkASSERT(this->vkCaps().isVkFormatTexturable(vkFormat));
1073
1074
1075 GrStagingBufferManager::Slice slice;
1076 TArray<VkBufferImageCopy> regions;
1077 TArray<size_t> individualMipOffsets;
1078 SkDEBUGCODE(size_t combinedBufferSize =) fill_in_compressed_regions(&fStagingBufferManager,
1079 ®ions,
1080 &individualMipOffsets,
1081 &slice,
1082 compression,
1083 vkFormat,
1084 dimensions,
1085 mipmapped);
1086 if (!slice.fBuffer) {
1087 return false;
1088 }
1089 SkASSERT(dataSize == combinedBufferSize);
1090
1091 {
1092 char* buffer = (char*)slice.fOffsetMapPtr;
1093 memcpy(buffer, data, dataSize);
1094 }
1095
1096 // Change layout of our target so it can be copied to
1097 uploadTexture->setImageLayout(this,
1098 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1099 VK_ACCESS_TRANSFER_WRITE_BIT,
1100 VK_PIPELINE_STAGE_TRANSFER_BIT,
1101 false);
1102
1103 // Copy the buffer to the image. This call takes the raw VkBuffer instead of a GrGpuBuffer
1104 // because we don't need the command buffer to ref the buffer here. The reason is that
1105 // the buffer is coming from the staging manager and the staging manager will make sure the
1106 // command buffer has a ref on the buffer. This avoids having to add and remove a ref for every
1107 // upload in the frame.
1108 GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(slice.fBuffer);
1109 this->currentCommandBuffer()->copyBufferToImage(this,
1110 vkBuffer->vkBuffer(),
1111 uploadTexture,
1112 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1113 regions.size(),
1114 regions.begin());
1115
1116 return true;
1117 }
1118
1119 ////////////////////////////////////////////////////////////////////////////////
1120 // TODO: make this take a skgpu::Mipmapped
1121 sk_sp<GrTexture> GrVkGpu::onCreateTexture(SkISize dimensions,
1122 const GrBackendFormat& format,
1123 GrRenderable renderable,
1124 int renderTargetSampleCnt,
1125 skgpu::Budgeted budgeted,
1126 GrProtected isProtected,
1127 int mipLevelCount,
1128 uint32_t levelClearMask,
1129 std::string_view label) {
1130 VkFormat pixelFormat;
1131 SkAssertResult(GrBackendFormats::AsVkFormat(format, &pixelFormat));
1132 SkASSERT(!skgpu::VkFormatIsCompressed(pixelFormat));
1133 SkASSERT(mipLevelCount > 0);
1134
1135 GrMipmapStatus mipmapStatus =
1136 mipLevelCount > 1 ? GrMipmapStatus::kDirty : GrMipmapStatus::kNotAllocated;
1137
1138 sk_sp<GrVkTexture> tex;
1139 if (renderable == GrRenderable::kYes) {
1140 tex = GrVkTextureRenderTarget::MakeNewTextureRenderTarget(
1141 this, budgeted, dimensions, pixelFormat, mipLevelCount, renderTargetSampleCnt,
1142 mipmapStatus, isProtected, label);
1143 } else {
1144 tex = GrVkTexture::MakeNewTexture(this, budgeted, dimensions, pixelFormat,
1145 mipLevelCount, isProtected, mipmapStatus, label);
1146 }
1147
1148 if (!tex) {
1149 return nullptr;
1150 }
1151
1152 if (levelClearMask) {
1153 if (!this->currentCommandBuffer()) {
1154 return nullptr;
1155 }
1156 STArray<1, VkImageSubresourceRange> ranges;
1157 bool inRange = false;
1158 GrVkImage* texImage = tex->textureImage();
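// Coalesce consecutive levels set in levelClearMask into single VkImageSubresourceRanges before clearing them to zero.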
1159 for (uint32_t i = 0; i < texImage->mipLevels(); ++i) {
1160 if (levelClearMask & (1U << i)) {
1161 if (inRange) {
1162 ranges.back().levelCount++;
1163 } else {
1164 auto& range = ranges.push_back();
1165 range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
1166 range.baseArrayLayer = 0;
1167 range.baseMipLevel = i;
1168 range.layerCount = 1;
1169 range.levelCount = 1;
1170 inRange = true;
1171 }
1172 } else if (inRange) {
1173 inRange = false;
1174 }
1175 }
1176 SkASSERT(!ranges.empty());
1177 static constexpr VkClearColorValue kZeroClearColor = {};
1178 texImage->setImageLayout(this, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1179 VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, false);
1180 this->currentCommandBuffer()->clearColorImage(this, texImage, &kZeroClearColor,
1181 ranges.size(), ranges.begin());
1182 }
1183 return tex;
1184 }
1185
1186 sk_sp<GrTexture> GrVkGpu::onCreateCompressedTexture(SkISize dimensions,
1187 const GrBackendFormat& format,
1188 skgpu::Budgeted budgeted,
1189 skgpu::Mipmapped mipmapped,
1190 GrProtected isProtected,
1191 const void* data,
1192 size_t dataSize) {
1193 VkFormat pixelFormat;
1194 SkAssertResult(GrBackendFormats::AsVkFormat(format, &pixelFormat));
1195 SkASSERT(skgpu::VkFormatIsCompressed(pixelFormat));
1196
1197 int numMipLevels = 1;
1198 if (mipmapped == skgpu::Mipmapped::kYes) {
1199 numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height())+1;
1200 }
1201
1202 GrMipmapStatus mipmapStatus = (mipmapped == skgpu::Mipmapped::kYes)
1203 ? GrMipmapStatus::kValid
1204 : GrMipmapStatus::kNotAllocated;
1205
1206 auto tex = GrVkTexture::MakeNewTexture(this,
1207 budgeted,
1208 dimensions,
1209 pixelFormat,
1210 numMipLevels,
1211 isProtected,
1212 mipmapStatus,
1213 /*label=*/"VkGpu_CreateCompressedTexture");
1214 if (!tex) {
1215 return nullptr;
1216 }
1217
1218 SkTextureCompressionType compression = GrBackendFormatToCompressionType(format);
1219 if (!this->uploadTexDataCompressed(tex->textureImage(), compression, pixelFormat,
1220 dimensions, mipmapped, data, dataSize)) {
1221 return nullptr;
1222 }
1223
1224 return tex;
1225 }
1226
1227 ////////////////////////////////////////////////////////////////////////////////
1228
1229 bool GrVkGpu::updateBuffer(sk_sp<GrVkBuffer> buffer, const void* src,
1230 VkDeviceSize offset, VkDeviceSize size) {
1231 if (!this->currentCommandBuffer()) {
1232 return false;
1233 }
1234 add_transfer_dst_buffer_mem_barrier(this,
1235 static_cast<GrVkBuffer*>(buffer.get()),
1236 offset,
1237 size,
1238 /*after=*/false);
1239 this->currentCommandBuffer()->updateBuffer(this, buffer, offset, size, src);
1240 add_transfer_dst_buffer_mem_barrier(this,
1241 static_cast<GrVkBuffer*>(buffer.get()),
1242 offset,
1243 size,
1244 /*after=*/true);
1245
1246 return true;
1247 }
1248
1249 bool GrVkGpu::zeroBuffer(sk_sp<GrGpuBuffer> buffer) {
1250 if (!this->currentCommandBuffer()) {
1251 return false;
1252 }
1253
1254 add_transfer_dst_buffer_mem_barrier(this,
1255 static_cast<GrVkBuffer*>(buffer.get()),
1256 /*offset=*/0,
1257 buffer->size(),
1258 /*after=*/false);
1259 this->currentCommandBuffer()->fillBuffer(this,
1260 buffer,
1261 /*offset=*/0,
1262 buffer->size(),
1263 /*data=*/0);
1264 add_transfer_dst_buffer_mem_barrier(this,
1265 static_cast<GrVkBuffer*>(buffer.get()),
1266 /*offset=*/0,
1267 buffer->size(),
1268 /*after=*/true);
1269
1270 return true;
1271 }
1272
1273 ////////////////////////////////////////////////////////////////////////////////
1274
1275 static bool check_image_info(const GrVkCaps& caps,
1276 const GrVkImageInfo& info,
1277 bool needsAllocation,
1278 uint32_t graphicsQueueIndex) {
1279 if (VK_NULL_HANDLE == info.fImage) {
1280 return false;
1281 }
1282
1283 if (VK_NULL_HANDLE == info.fAlloc.fMemory && needsAllocation) {
1284 return false;
1285 }
1286
1287 if (info.fImageLayout == VK_IMAGE_LAYOUT_PRESENT_SRC_KHR && !caps.supportsSwapchain()) {
1288 return false;
1289 }
1290
1291 if (info.fCurrentQueueFamily != VK_QUEUE_FAMILY_IGNORED &&
1292 info.fCurrentQueueFamily != VK_QUEUE_FAMILY_EXTERNAL &&
1293 info.fCurrentQueueFamily != VK_QUEUE_FAMILY_FOREIGN_EXT) {
1294 if (info.fSharingMode == VK_SHARING_MODE_EXCLUSIVE) {
1295 if (info.fCurrentQueueFamily != graphicsQueueIndex) {
1296 return false;
1297 }
1298 } else {
1299 return false;
1300 }
1301 }
1302
1303 if (info.fYcbcrConversionInfo.isValid()) {
1304 if (!caps.supportsYcbcrConversion()) {
1305 return false;
1306 }
1307 if (info.fYcbcrConversionInfo.fExternalFormat != 0) {
1308 return true;
1309 }
1310 }
1311
1312 // We currently require everything to be made with transfer bits set
1313 if (!SkToBool(info.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_SRC_BIT) ||
1314 !SkToBool(info.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_DST_BIT)) {
1315 return false;
1316 }
1317
1318 return true;
1319 }
1320
1321 static bool check_tex_image_info(const GrVkCaps& caps, const GrVkImageInfo& info) {
1322 // We don't support directly importing multisampled textures for sampling from shaders.
1323 if (info.fSampleCount != 1) {
1324 return false;
1325 }
1326
1327 if (info.fYcbcrConversionInfo.isValid() && info.fYcbcrConversionInfo.fExternalFormat != 0) {
1328 return true;
1329 }
1330 if (info.fImageTiling == VK_IMAGE_TILING_OPTIMAL) {
1331 if (!caps.isVkFormatTexturable(info.fFormat)) {
1332 return false;
1333 }
1334 } else if (info.fImageTiling == VK_IMAGE_TILING_LINEAR) {
1335 if (!caps.isVkFormatTexturableLinearly(info.fFormat)) {
1336 return false;
1337 }
1338 } else if (info.fImageTiling == VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT) {
1339 if (!caps.supportsDRMFormatModifiers()) {
1340 return false;
1341 }
1342 // To be technically correct we should query the vulkan support for VkFormat and
1343 // drmFormatModifier pairs to confirm the required feature support is there. However, we
1344 // currently don't have our caps and format tables set up to do this efficiently. So
1345 // instead we just rely on the client's passed in VkImageUsageFlags and assume they were set
1346 // up using valid features (checked below). In practice this should all be safe because
1347 // currently we are setting all drm format modifier textures to have a
1348 // GrTextureType::kExternal so we just really need to be able to read these video VkImages in
1349 // a shader. The video decoder isn't going to give us VkImages that don't support being
1350 // sampled.
1351 } else {
1352 SkUNREACHABLE;
1353 }
1354
1355 // We currently require all textures to be made with sample support
1356 if (!SkToBool(info.fImageUsageFlags & VK_IMAGE_USAGE_SAMPLED_BIT)) {
1357 return false;
1358 }
1359
1360 return true;
1361 }
1362
1363 static bool check_rt_image_info(const GrVkCaps& caps, const GrVkImageInfo& info, bool resolveOnly) {
1364 if (!caps.isFormatRenderable(info.fFormat, info.fSampleCount)) {
1365 return false;
1366 }
1367 if (!resolveOnly && !SkToBool(info.fImageUsageFlags & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT)) {
1368 return false;
1369 }
1370 return true;
1371 }
1372
1373 sk_sp<GrTexture> GrVkGpu::onWrapBackendTexture(const GrBackendTexture& backendTex,
1374 GrWrapOwnership ownership,
1375 GrWrapCacheable cacheable,
1376 GrIOType ioType) {
1377 GrVkImageInfo imageInfo;
1378 if (!GrBackendTextures::GetVkImageInfo(backendTex, &imageInfo)) {
1379 return nullptr;
1380 }
1381
1382 if (!check_image_info(this->vkCaps(), imageInfo, kAdopt_GrWrapOwnership == ownership,
1383 this->queueIndex())) {
1384 return nullptr;
1385 }
1386
1387 if (!check_tex_image_info(this->vkCaps(), imageInfo)) {
1388 return nullptr;
1389 }
1390
1391 if (backendTex.isProtected() && (fProtectedContext == GrProtected::kNo)) {
1392 return nullptr;
1393 }
1394
1395 sk_sp<skgpu::MutableTextureState> mutableState = backendTex.getMutableState();
1396 SkASSERT(mutableState);
1397 return GrVkTexture::MakeWrappedTexture(this, backendTex.dimensions(), ownership, cacheable,
1398 ioType, imageInfo, std::move(mutableState));
1399 }
1400
1401 sk_sp<GrTexture> GrVkGpu::onWrapCompressedBackendTexture(const GrBackendTexture& beTex,
1402 GrWrapOwnership ownership,
1403 GrWrapCacheable cacheable) {
1404 return this->onWrapBackendTexture(beTex, ownership, cacheable, kRead_GrIOType);
1405 }
1406
1407 sk_sp<GrTexture> GrVkGpu::onWrapRenderableBackendTexture(const GrBackendTexture& backendTex,
1408 int sampleCnt,
1409 GrWrapOwnership ownership,
1410 GrWrapCacheable cacheable) {
1411 GrVkImageInfo imageInfo;
1412 if (!GrBackendTextures::GetVkImageInfo(backendTex, &imageInfo)) {
1413 return nullptr;
1414 }
1415
1416 if (!check_image_info(this->vkCaps(), imageInfo, kAdopt_GrWrapOwnership == ownership,
1417 this->queueIndex())) {
1418 return nullptr;
1419 }
1420
1421 if (!check_tex_image_info(this->vkCaps(), imageInfo)) {
1422 return nullptr;
1423 }
1424 // If sampleCnt is > 1 we will create an intermediate MSAA VkImage and then resolve into
1425 // the wrapped VkImage.
1426 bool resolveOnly = sampleCnt > 1;
1427 if (!check_rt_image_info(this->vkCaps(), imageInfo, resolveOnly)) {
1428 return nullptr;
1429 }
1430
1431 if (backendTex.isProtected() && (fProtectedContext == GrProtected::kNo)) {
1432 return nullptr;
1433 }
1434
1435 sampleCnt = this->vkCaps().getRenderTargetSampleCount(sampleCnt, imageInfo.fFormat);
1436
1437 sk_sp<skgpu::MutableTextureState> mutableState = backendTex.getMutableState();
1438 SkASSERT(mutableState);
1439
1440 return GrVkTextureRenderTarget::MakeWrappedTextureRenderTarget(this, backendTex.dimensions(),
1441 sampleCnt, ownership, cacheable,
1442 imageInfo,
1443 std::move(mutableState));
1444 }
1445
1446 sk_sp<GrRenderTarget> GrVkGpu::onWrapBackendRenderTarget(const GrBackendRenderTarget& backendRT) {
1447 GrVkImageInfo info;
1448 if (!GrBackendRenderTargets::GetVkImageInfo(backendRT, &info)) {
1449 return nullptr;
1450 }
1451
1452 if (!check_image_info(this->vkCaps(), info, false, this->queueIndex())) {
1453 return nullptr;
1454 }
1455
1456 // We will always render directly to this VkImage.
1457 static bool kResolveOnly = false;
1458 if (!check_rt_image_info(this->vkCaps(), info, kResolveOnly)) {
1459 return nullptr;
1460 }
1461
1462 if (backendRT.isProtected() && (fProtectedContext == GrProtected::kNo)) {
1463 return nullptr;
1464 }
1465
1466 sk_sp<skgpu::MutableTextureState> mutableState = backendRT.getMutableState();
1467 SkASSERT(mutableState);
1468
1469 sk_sp<GrVkRenderTarget> tgt = GrVkRenderTarget::MakeWrappedRenderTarget(
1470 this, backendRT.dimensions(), backendRT.sampleCnt(), info, std::move(mutableState));
1471
1472 // We don't allow the client to supply a premade stencil buffer. We always create one if needed.
1473 SkASSERT(!backendRT.stencilBits());
1474 if (tgt) {
1475 SkASSERT(tgt->canAttemptStencilAttachment(tgt->numSamples() > 1));
1476 }
1477
1478 return tgt;
1479 }
1480
1481 sk_sp<GrRenderTarget> GrVkGpu::onWrapVulkanSecondaryCBAsRenderTarget(
1482 const SkImageInfo& imageInfo, const GrVkDrawableInfo& vkInfo) {
1483 int maxSize = this->caps()->maxTextureSize();
1484 if (imageInfo.width() > maxSize || imageInfo.height() > maxSize) {
1485 return nullptr;
1486 }
1487
1488 GrBackendFormat backendFormat = GrBackendFormats::MakeVk(vkInfo.fFormat);
1489 if (!backendFormat.isValid()) {
1490 return nullptr;
1491 }
1492 int sampleCnt = this->vkCaps().getRenderTargetSampleCount(1, vkInfo.fFormat);
1493 if (!sampleCnt) {
1494 return nullptr;
1495 }
1496
1497 return GrVkRenderTarget::MakeSecondaryCBRenderTarget(this, imageInfo.dimensions(), vkInfo);
1498 }
1499
1500 bool GrVkGpu::loadMSAAFromResolve(GrVkCommandBuffer* commandBuffer,
1501 const GrVkRenderPass& renderPass,
1502 GrAttachment* dst,
1503 GrVkImage* src,
1504 const SkIRect& srcRect) {
1505 return fMSAALoadManager.loadMSAAFromResolve(this, commandBuffer, renderPass, dst, src, srcRect);
1506 }
1507
1508 bool GrVkGpu::onRegenerateMipMapLevels(GrTexture* tex) {
1509 if (!this->currentCommandBuffer()) {
1510 return false;
1511 }
1512 auto* vkTex = static_cast<GrVkTexture*>(tex)->textureImage();
1513 // don't do anything for linearly tiled textures (can't have mipmaps)
1514 if (vkTex->isLinearTiled()) {
1515 SkDebugf("Trying to create mipmap for linear tiled texture");
1516 return false;
1517 }
1518 SkASSERT(tex->textureType() == GrTextureType::k2D);
1519
1520 // determine if we can blit to and from this format
1521 const GrVkCaps& caps = this->vkCaps();
1522 if (!caps.formatCanBeDstofBlit(vkTex->imageFormat(), false) ||
1523 !caps.formatCanBeSrcofBlit(vkTex->imageFormat(), false) ||
1524 !caps.mipmapSupport()) {
1525 return false;
1526 }
1527
1528 int width = tex->width();
1529 int height = tex->height();
1530 VkImageBlit blitRegion;
1531 memset(&blitRegion, 0, sizeof(VkImageBlit));
1532
1533 // SkMipmap doesn't include the base level in the level count so we have to add 1
1534 uint32_t levelCount = SkMipmap::ComputeLevelCount(tex->width(), tex->height()) + 1;
1535 SkASSERT(levelCount == vkTex->mipLevels());
1536
1537 // change layout of the layers so we can write to them.
1538 vkTex->setImageLayout(this, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_ACCESS_TRANSFER_WRITE_BIT,
1539 VK_PIPELINE_STAGE_TRANSFER_BIT, false);
1540
1541 // setup memory barrier
1542 SkASSERT(GrVkFormatIsSupported(vkTex->imageFormat()));
1543 VkImageMemoryBarrier imageMemoryBarrier = {
1544 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // sType
1545 nullptr, // pNext
1546 VK_ACCESS_TRANSFER_WRITE_BIT, // srcAccessMask
1547 VK_ACCESS_TRANSFER_READ_BIT, // dstAccessMask
1548 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, // oldLayout
1549 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, // newLayout
1550 VK_QUEUE_FAMILY_IGNORED, // srcQueueFamilyIndex
1551 VK_QUEUE_FAMILY_IGNORED, // dstQueueFamilyIndex
1552 vkTex->image(), // image
1553 {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1} // subresourceRange
1554 };
1555
1556 // Blit the miplevels
1557 uint32_t mipLevel = 1;
1558 while (mipLevel < levelCount) {
1559 int prevWidth = width;
1560 int prevHeight = height;
1561 width = std::max(1, width / 2);
1562 height = std::max(1, height / 2);
1563
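        // Transition the level we just wrote (mipLevel - 1) from TRANSFER_DST to TRANSFER_SRC so it
        // can serve as the blit source for mipLevel.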
1564 imageMemoryBarrier.subresourceRange.baseMipLevel = mipLevel - 1;
1565 this->addImageMemoryBarrier(vkTex->resource(), VK_PIPELINE_STAGE_TRANSFER_BIT,
1566 VK_PIPELINE_STAGE_TRANSFER_BIT, false, &imageMemoryBarrier);
1567
1568 blitRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, mipLevel - 1, 0, 1 };
1569 blitRegion.srcOffsets[0] = { 0, 0, 0 };
1570 blitRegion.srcOffsets[1] = { prevWidth, prevHeight, 1 };
1571 blitRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, mipLevel, 0, 1 };
1572 blitRegion.dstOffsets[0] = { 0, 0, 0 };
1573 blitRegion.dstOffsets[1] = { width, height, 1 };
1574 this->currentCommandBuffer()->blitImage(this,
1575 vkTex->resource(),
1576 vkTex->image(),
1577 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
1578 vkTex->resource(),
1579 vkTex->image(),
1580 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1581 1,
1582 &blitRegion,
1583 VK_FILTER_LINEAR);
1584 ++mipLevel;
1585 }
1586 if (levelCount > 1) {
1587 // This barrier logically is not needed, but it changes the final level to the same layout
1588 // as all the others, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL. This makes tracking of the
1589 // layouts and future layout changes easier. The alternative here would be to track layout
1590     // and memory accesses per layer which doesn't seem worth it.
1591 imageMemoryBarrier.subresourceRange.baseMipLevel = mipLevel - 1;
1592 this->addImageMemoryBarrier(vkTex->resource(), VK_PIPELINE_STAGE_TRANSFER_BIT,
1593 VK_PIPELINE_STAGE_TRANSFER_BIT, false, &imageMemoryBarrier);
1594 vkTex->updateImageLayout(VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
1595 }
1596 return true;
1597 }
1598
1599 ////////////////////////////////////////////////////////////////////////////////
1600
1601 sk_sp<GrAttachment> GrVkGpu::makeStencilAttachment(const GrBackendFormat& /*colorFormat*/,
1602 SkISize dimensions, int numStencilSamples) {
1603 VkFormat sFmt = this->vkCaps().preferredStencilFormat();
1604
1605 fStats.incStencilAttachmentCreates();
1606 return GrVkImage::MakeStencil(this, dimensions, numStencilSamples, sFmt);
1607 }
1608
1609 sk_sp<GrAttachment> GrVkGpu::makeMSAAAttachment(SkISize dimensions,
1610 const GrBackendFormat& format,
1611 int numSamples,
1612 GrProtected isProtected,
1613 GrMemoryless memoryless) {
1614 VkFormat pixelFormat;
1615 SkAssertResult(GrBackendFormats::AsVkFormat(format, &pixelFormat));
1616 SkASSERT(!skgpu::VkFormatIsCompressed(pixelFormat));
1617 SkASSERT(this->vkCaps().isFormatRenderable(pixelFormat, numSamples));
1618
1619 fStats.incMSAAAttachmentCreates();
1620 return GrVkImage::MakeMSAA(this, dimensions, numSamples, pixelFormat, isProtected, memoryless);
1621 }
1622
1623 ////////////////////////////////////////////////////////////////////////////////
1624
1625 bool copy_src_data(char* mapPtr,
1626 VkFormat vkFormat,
1627 const TArray<size_t>& individualMipOffsets,
1628 const GrPixmap srcData[],
1629 int numMipLevels) {
1630 SkASSERT(srcData && numMipLevels);
1631 SkASSERT(!skgpu::VkFormatIsCompressed(vkFormat));
1632 SkASSERT(individualMipOffsets.size() == numMipLevels);
1633 SkASSERT(mapPtr);
1634
1635 size_t bytesPerPixel = skgpu::VkFormatBytesPerBlock(vkFormat);
1636
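    // Copy each mip level into the mapped staging memory at its precomputed offset, trimming the
    // row bytes down to width * bytesPerPixel as we go.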
1637 for (int level = 0; level < numMipLevels; ++level) {
1638 const size_t trimRB = srcData[level].info().width() * bytesPerPixel;
1639
1640 SkRectMemcpy(mapPtr + individualMipOffsets[level], trimRB,
1641 srcData[level].addr(), srcData[level].rowBytes(),
1642 trimRB, srcData[level].height());
1643 }
1644 return true;
1645 }
1646
1647 bool GrVkGpu::createVkImageForBackendSurface(VkFormat vkFormat,
1648 SkISize dimensions,
1649 int sampleCnt,
1650 GrTexturable texturable,
1651 GrRenderable renderable,
1652 skgpu::Mipmapped mipmapped,
1653 GrVkImageInfo* info,
1654 GrProtected isProtected) {
1655 SkASSERT(texturable == GrTexturable::kYes || renderable == GrRenderable::kYes);
1656
1657 if (fProtectedContext != isProtected) {
1658 return false;
1659 }
1660
1661 if (texturable == GrTexturable::kYes && !fVkCaps->isVkFormatTexturable(vkFormat)) {
1662 return false;
1663 }
1664
1665 // MSAA images are only currently used by createTestingOnlyBackendRenderTarget.
1666 if (sampleCnt > 1 && (texturable == GrTexturable::kYes || renderable == GrRenderable::kNo)) {
1667 return false;
1668 }
1669
1670 if (renderable == GrRenderable::kYes) {
1671 sampleCnt = fVkCaps->getRenderTargetSampleCount(sampleCnt, vkFormat);
1672 if (!sampleCnt) {
1673 return false;
1674 }
1675 }
1676
1677
1678 int numMipLevels = 1;
1679 if (mipmapped == skgpu::Mipmapped::kYes) {
1680 numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1;
1681 }
1682
1683 VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
1684 VK_IMAGE_USAGE_TRANSFER_DST_BIT;
1685 if (texturable == GrTexturable::kYes) {
1686 usageFlags |= VK_IMAGE_USAGE_SAMPLED_BIT;
1687 }
1688 if (renderable == GrRenderable::kYes) {
1689 usageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
1690 // We always make our render targets support being used as input attachments
1691 usageFlags |= VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
1692 }
1693
1694 GrVkImage::ImageDesc imageDesc;
1695 imageDesc.fImageType = VK_IMAGE_TYPE_2D;
1696 imageDesc.fFormat = vkFormat;
1697 imageDesc.fWidth = dimensions.width();
1698 imageDesc.fHeight = dimensions.height();
1699 imageDesc.fLevels = numMipLevels;
1700 imageDesc.fSamples = sampleCnt;
1701 imageDesc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
1702 imageDesc.fUsageFlags = usageFlags;
1703 imageDesc.fMemProps = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
1704 imageDesc.fIsProtected = fProtectedContext;
1705
1706 if (!GrVkImage::InitImageInfo(this, imageDesc, info)) {
1707 SkDebugf("Failed to init image info\n");
1708 return false;
1709 }
1710
1711 return true;
1712 }
1713
1714 bool GrVkGpu::onClearBackendTexture(const GrBackendTexture& backendTexture,
1715 sk_sp<skgpu::RefCntedCallback> finishedCallback,
1716 std::array<float, 4> color) {
1717 GrVkImageInfo info;
1718 SkAssertResult(GrBackendTextures::GetVkImageInfo(backendTexture, &info));
1719
1720 sk_sp<skgpu::MutableTextureState> mutableState = backendTexture.getMutableState();
1721 SkASSERT(mutableState);
1722 sk_sp<GrVkTexture> texture =
1723 GrVkTexture::MakeWrappedTexture(this, backendTexture.dimensions(),
1724 kBorrow_GrWrapOwnership, GrWrapCacheable::kNo,
1725 kRW_GrIOType, info, std::move(mutableState));
1726 if (!texture) {
1727 return false;
1728 }
1729 GrVkImage* texImage = texture->textureImage();
1730
1731 GrVkPrimaryCommandBuffer* cmdBuffer = this->currentCommandBuffer();
1732 if (!cmdBuffer) {
1733 return false;
1734 }
1735
1736 texImage->setImageLayout(this,
1737 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1738 VK_ACCESS_TRANSFER_WRITE_BIT,
1739 VK_PIPELINE_STAGE_TRANSFER_BIT,
1740 false);
1741
1742 // CmdClearColorImage doesn't work for compressed formats
1743 SkASSERT(!skgpu::VkFormatIsCompressed(info.fFormat));
1744
1745 VkClearColorValue vkColor;
1746 // If we ever support SINT or UINT formats this needs to be updated to use the int32 and
1747 // uint32 union members in those cases.
1748 vkColor.float32[0] = color[0];
1749 vkColor.float32[1] = color[1];
1750 vkColor.float32[2] = color[2];
1751 vkColor.float32[3] = color[3];
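    // Clear every mip level of the image's single layer in one call.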
1752 VkImageSubresourceRange range;
1753 range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
1754 range.baseArrayLayer = 0;
1755 range.baseMipLevel = 0;
1756 range.layerCount = 1;
1757 range.levelCount = info.fLevelCount;
1758 cmdBuffer->clearColorImage(this, texImage, &vkColor, 1, &range);
1759
1760     // Change the image layout to shader read: if we use this texture as a borrowed texture
1761     // within Ganesh we require that its layout be VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL.
1762 texImage->setImageLayout(this, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
1763 VK_ACCESS_SHADER_READ_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
1764 false);
1765
1766 if (finishedCallback) {
1767 this->addFinishedCallback(std::move(finishedCallback));
1768 }
1769 return true;
1770 }
1771
1772 GrBackendTexture GrVkGpu::onCreateBackendTexture(SkISize dimensions,
1773 const GrBackendFormat& format,
1774 GrRenderable renderable,
1775 skgpu::Mipmapped mipmapped,
1776 GrProtected isProtected,
1777 std::string_view label) {
1778 const GrVkCaps& caps = this->vkCaps();
1779
1780 if (fProtectedContext != isProtected) {
1781 return {};
1782 }
1783
1784 VkFormat vkFormat;
1785 if (!GrBackendFormats::AsVkFormat(format, &vkFormat)) {
1786 return {};
1787 }
1788
1789 // TODO: move the texturability check up to GrGpu::createBackendTexture and just assert here
1790 if (!caps.isVkFormatTexturable(vkFormat)) {
1791 return {};
1792 }
1793
1794 if (skgpu::VkFormatNeedsYcbcrSampler(vkFormat)) {
1795 return {};
1796 }
1797
1798 GrVkImageInfo info;
1799 if (!this->createVkImageForBackendSurface(vkFormat, dimensions, 1, GrTexturable::kYes,
1800 renderable, mipmapped, &info, isProtected)) {
1801 return {};
1802 }
1803
1804 return GrBackendTextures::MakeVk(dimensions.width(), dimensions.height(), info);
1805 }
1806
1807 GrBackendTexture GrVkGpu::onCreateCompressedBackendTexture(SkISize dimensions,
1808 const GrBackendFormat& format,
1809 skgpu::Mipmapped mipmapped,
1810 GrProtected isProtected) {
1811 return this->onCreateBackendTexture(dimensions,
1812 format,
1813 GrRenderable::kNo,
1814 mipmapped,
1815 isProtected,
1816 /*label=*/"VkGpu_CreateCompressedBackendTexture");
1817 }
1818
1819 bool GrVkGpu::onUpdateCompressedBackendTexture(const GrBackendTexture& backendTexture,
1820 sk_sp<skgpu::RefCntedCallback> finishedCallback,
1821 const void* data,
1822 size_t size) {
1823 GrVkImageInfo info;
1824 SkAssertResult(GrBackendTextures::GetVkImageInfo(backendTexture, &info));
1825
1826 sk_sp<skgpu::MutableTextureState> mutableState = backendTexture.getMutableState();
1827 SkASSERT(mutableState);
1828 sk_sp<GrVkTexture> texture = GrVkTexture::MakeWrappedTexture(this,
1829 backendTexture.dimensions(),
1830 kBorrow_GrWrapOwnership,
1831 GrWrapCacheable::kNo,
1832 kRW_GrIOType,
1833 info,
1834 std::move(mutableState));
1835 if (!texture) {
1836 return false;
1837 }
1838
1839 GrVkPrimaryCommandBuffer* cmdBuffer = this->currentCommandBuffer();
1840 if (!cmdBuffer) {
1841 return false;
1842 }
1843 GrVkImage* image = texture->textureImage();
1844 image->setImageLayout(this,
1845 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1846 VK_ACCESS_TRANSFER_WRITE_BIT,
1847 VK_PIPELINE_STAGE_TRANSFER_BIT,
1848 false);
1849
1850 SkTextureCompressionType compression =
1851 GrBackendFormatToCompressionType(backendTexture.getBackendFormat());
1852
1853 TArray<VkBufferImageCopy> regions;
1854 TArray<size_t> individualMipOffsets;
1855 GrStagingBufferManager::Slice slice;
1856
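    // Build one VkBufferImageCopy per mip level, all backed by a single slice of the staging
    // buffer; the per-level offsets come back in individualMipOffsets.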
1857 fill_in_compressed_regions(&fStagingBufferManager,
1858                                &regions,
1859 &individualMipOffsets,
1860 &slice,
1861 compression,
1862 info.fFormat,
1863 backendTexture.dimensions(),
1864 backendTexture.fMipmapped);
1865
1866 if (!slice.fBuffer) {
1867 return false;
1868 }
1869
1870 memcpy(slice.fOffsetMapPtr, data, size);
1871
1872 cmdBuffer->addGrSurface(texture);
1873 // Copy the buffer to the image. This call takes the raw VkBuffer instead of a GrGpuBuffer
1874     // because we don't need the command buffer to ref the buffer here. The reason is that
1875 // the buffer is coming from the staging manager and the staging manager will make sure the
1876 // command buffer has a ref on the buffer. This avoids having to add and remove a ref for
1877 // every upload in the frame.
1878 cmdBuffer->copyBufferToImage(this,
1879 static_cast<GrVkBuffer*>(slice.fBuffer)->vkBuffer(),
1880 image,
1881 image->currentLayout(),
1882 regions.size(),
1883 regions.begin());
1884
1885     // Change the image layout to shader read: if we use this texture as a borrowed texture
1886     // within Ganesh we require that its layout be VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL.
1887 image->setImageLayout(this,
1888 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
1889 VK_ACCESS_SHADER_READ_BIT,
1890 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
1891 false);
1892
1893 if (finishedCallback) {
1894 this->addFinishedCallback(std::move(finishedCallback));
1895 }
1896 return true;
1897 }
1898
1899 void set_layout_and_queue_from_mutable_state(GrVkGpu* gpu, GrVkImage* image,
1900 VkImageLayout newLayout,
1901 uint32_t newQueueFamilyIndex) {
1902     // Even though internally we use this helper for getting src access flags and stages, they
1903 // can also be used for general dst flags since we don't know exactly what the client
1904 // plans on using the image for.
1905 if (newLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
1906 newLayout = image->currentLayout();
1907 }
1908 VkPipelineStageFlags dstStage = GrVkImage::LayoutToPipelineSrcStageFlags(newLayout);
1909 VkAccessFlags dstAccess = GrVkImage::LayoutToSrcAccessMask(newLayout);
1910
1911 uint32_t currentQueueFamilyIndex = image->currentQueueFamilyIndex();
1912 auto isSpecialQueue = [](uint32_t queueFamilyIndex) {
1913 return queueFamilyIndex == VK_QUEUE_FAMILY_EXTERNAL ||
1914 queueFamilyIndex == VK_QUEUE_FAMILY_FOREIGN_EXT;
1915 };
1916 if (isSpecialQueue(currentQueueFamilyIndex) && isSpecialQueue(newQueueFamilyIndex)) {
1917 // It is illegal to have both the new and old queue be special queue families (i.e. external
1918 // or foreign).
1919 return;
1920 }
1921
1922 image->setImageLayoutAndQueueIndex(gpu, newLayout, dstAccess, dstStage, false,
1923 newQueueFamilyIndex);
1924 }
1925
1926 bool GrVkGpu::setBackendSurfaceState(GrVkImageInfo info,
1927 sk_sp<skgpu::MutableTextureState> currentState,
1928 SkISize dimensions,
1929 VkImageLayout newLayout,
1930 uint32_t newQueueFamilyIndex,
1931 skgpu::MutableTextureState* previousState,
1932 sk_sp<skgpu::RefCntedCallback> finishedCallback) {
1933 sk_sp<GrVkImage> texture = GrVkImage::MakeWrapped(this,
1934 dimensions,
1935 info,
1936 std::move(currentState),
1937 GrVkImage::UsageFlags::kColorAttachment,
1938 kBorrow_GrWrapOwnership,
1939 GrWrapCacheable::kNo,
1940 "VkGpu_SetBackendSurfaceState",
1941 /*forSecondaryCB=*/false);
1942 SkASSERT(texture);
1943 if (!texture) {
1944 return false;
1945 }
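    // Hand the image's current state back to the caller before we mutate it below.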
1946 if (previousState) {
1947 previousState->set(*texture->getMutableState());
1948 }
1949 set_layout_and_queue_from_mutable_state(this, texture.get(), newLayout, newQueueFamilyIndex);
1950 if (finishedCallback) {
1951 this->addFinishedCallback(std::move(finishedCallback));
1952 }
1953 return true;
1954 }
1955
1956 bool GrVkGpu::setBackendTextureState(const GrBackendTexture& backendTexture,
1957 const skgpu::MutableTextureState& newState,
1958 skgpu::MutableTextureState* previousState,
1959 sk_sp<skgpu::RefCntedCallback> finishedCallback) {
1960 GrVkImageInfo info;
1961     SkAssertResult(GrBackendTextures::GetVkImageInfo(backendTexture, &info));
1962     sk_sp<skgpu::MutableTextureState> currentState = backendTexture.getMutableState();
1963 SkASSERT(currentState);
1964 SkASSERT(newState.isValid() && newState.backend() == skgpu::BackendApi::kVulkan);
1965     return this->setBackendSurfaceState(info, std::move(currentState), backendTexture.dimensions(),
1966 skgpu::MutableTextureStates::GetVkImageLayout(newState),
1967 skgpu::MutableTextureStates::GetVkQueueFamilyIndex(newState),
1968 previousState,
1969 std::move(finishedCallback));
1970 }
1971
1972 bool GrVkGpu::setBackendRenderTargetState(const GrBackendRenderTarget& backendRenderTarget,
1973 const skgpu::MutableTextureState& newState,
1974 skgpu::MutableTextureState* previousState,
1975 sk_sp<skgpu::RefCntedCallback> finishedCallback) {
1976 GrVkImageInfo info;
1977 SkAssertResult(GrBackendRenderTargets::GetVkImageInfo(backendRenderTarget, &info));
1978 sk_sp<skgpu::MutableTextureState> currentState = backendRenderTarget.getMutableState();
1979 SkASSERT(currentState);
1980 SkASSERT(newState.backend() == skgpu::BackendApi::kVulkan);
1981 return this->setBackendSurfaceState(info, std::move(currentState),
1982 backendRenderTarget.dimensions(),
1983 skgpu::MutableTextureStates::GetVkImageLayout(newState),
1984 skgpu::MutableTextureStates::GetVkQueueFamilyIndex(newState),
1985 previousState, std::move(finishedCallback));
1986 }
1987
1988 void GrVkGpu::xferBarrier(GrRenderTarget* rt, GrXferBarrierType barrierType) {
1989 GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt);
1990 VkPipelineStageFlags dstStage;
1991 VkAccessFlags dstAccess;
1992 if (barrierType == kBlend_GrXferBarrierType) {
1993 dstStage = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
1994 dstAccess = VK_ACCESS_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT;
1995 } else {
1996 SkASSERT(barrierType == kTexture_GrXferBarrierType);
1997 dstStage = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
1998 dstAccess = VK_ACCESS_INPUT_ATTACHMENT_READ_BIT;
1999 }
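    // Issue a self-dependency barrier on the color attachment: the layout is unchanged, we only
    // make prior color-attachment writes visible to the blend or input-attachment read.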
2000 GrVkImage* image = vkRT->colorAttachment();
2001 VkImageMemoryBarrier barrier;
2002 barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
2003 barrier.pNext = nullptr;
2004 barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
2005 barrier.dstAccessMask = dstAccess;
2006 barrier.oldLayout = image->currentLayout();
2007 barrier.newLayout = barrier.oldLayout;
2008 barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
2009 barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
2010 barrier.image = image->image();
2011 barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, image->mipLevels(), 0, 1};
2012 this->addImageMemoryBarrier(image->resource(),
2013 VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
2014 dstStage, true, &barrier);
2015 }
2016
2017 void GrVkGpu::deleteBackendTexture(const GrBackendTexture& tex) {
2018 SkASSERT(GrBackendApi::kVulkan == tex.fBackend);
2019
2020 GrVkImageInfo info;
2021 if (GrBackendTextures::GetVkImageInfo(tex, &info)) {
2022 GrVkImage::DestroyImageInfo(this, const_cast<GrVkImageInfo*>(&info));
2023 }
2024 }
2025
2026 bool GrVkGpu::compile(const GrProgramDesc& desc, const GrProgramInfo& programInfo) {
2027 GrVkRenderPass::AttachmentsDescriptor attachmentsDescriptor;
2028 GrVkRenderPass::AttachmentFlags attachmentFlags;
2029 GrVkRenderTarget::ReconstructAttachmentsDescriptor(this->vkCaps(), programInfo,
2030 &attachmentsDescriptor, &attachmentFlags);
2031
2032 GrVkRenderPass::SelfDependencyFlags selfDepFlags = GrVkRenderPass::SelfDependencyFlags::kNone;
2033 if (programInfo.renderPassBarriers() & GrXferBarrierFlags::kBlend) {
2034 selfDepFlags |= GrVkRenderPass::SelfDependencyFlags::kForNonCoherentAdvBlend;
2035 }
2036 if (programInfo.renderPassBarriers() & GrXferBarrierFlags::kTexture) {
2037 selfDepFlags |= GrVkRenderPass::SelfDependencyFlags::kForInputAttachment;
2038 }
2039
2040 GrVkRenderPass::LoadFromResolve loadFromResolve = GrVkRenderPass::LoadFromResolve::kNo;
2041 if (this->vkCaps().programInfoWillUseDiscardableMSAA(programInfo) &&
2042 programInfo.colorLoadOp() == GrLoadOp::kLoad) {
2043 loadFromResolve = GrVkRenderPass::LoadFromResolve::kLoad;
2044 }
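    // Pipelines only need a render pass compatible with the one used at draw time, so look one up
    // (or create it) from the reconstructed attachment description.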
2045 sk_sp<const GrVkRenderPass> renderPass(this->resourceProvider().findCompatibleRenderPass(
2046 &attachmentsDescriptor, attachmentFlags, selfDepFlags, loadFromResolve));
2047 if (!renderPass) {
2048 return false;
2049 }
2050
2051 GrThreadSafePipelineBuilder::Stats::ProgramCacheResult stat;
2052
2053 auto pipelineState = this->resourceProvider().findOrCreateCompatiblePipelineState(
2054 desc,
2055 programInfo,
2056 renderPass->vkRenderPass(),
2057 &stat);
2058 if (!pipelineState) {
2059 return false;
2060 }
2061
2062 return stat != GrThreadSafePipelineBuilder::Stats::ProgramCacheResult::kHit;
2063 }
2064
2065 #if defined(GR_TEST_UTILS)
2066 bool GrVkGpu::isTestingOnlyBackendTexture(const GrBackendTexture& tex) const {
2067 SkASSERT(GrBackendApi::kVulkan == tex.fBackend);
2068
2069 GrVkImageInfo backend;
2070 if (!GrBackendTextures::GetVkImageInfo(tex, &backend)) {
2071 return false;
2072 }
2073
2074 if (backend.fImage && backend.fAlloc.fMemory) {
2075 VkMemoryRequirements req;
2076 memset(&req, 0, sizeof(req));
2077 GR_VK_CALL(this->vkInterface(), GetImageMemoryRequirements(fDevice,
2078 backend.fImage,
2079 &req));
2080 // TODO: find a better check
2081 // This will probably fail with a different driver
2082 return (req.size > 0) && (req.size <= 8192 * 8192);
2083 }
2084
2085 return false;
2086 }
2087
2088 GrBackendRenderTarget GrVkGpu::createTestingOnlyBackendRenderTarget(SkISize dimensions,
2089 GrColorType ct,
2090 int sampleCnt,
2091 GrProtected isProtected) {
2092 if (dimensions.width() > this->caps()->maxRenderTargetSize() ||
2093 dimensions.height() > this->caps()->maxRenderTargetSize()) {
2094 return {};
2095 }
2096
2097 VkFormat vkFormat = this->vkCaps().getFormatFromColorType(ct);
2098
2099 GrVkImageInfo info;
2100 if (!this->createVkImageForBackendSurface(vkFormat,
2101 dimensions,
2102 sampleCnt,
2103 GrTexturable::kNo,
2104 GrRenderable::kYes,
2105 skgpu::Mipmapped::kNo,
2106 &info,
2107 isProtected)) {
2108 return {};
2109 }
2110 return GrBackendRenderTargets::MakeVk(dimensions.width(), dimensions.height(), info);
2111 }
2112
2113 void GrVkGpu::deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget& rt) {
2114 SkASSERT(GrBackendApi::kVulkan == rt.fBackend);
2115
2116 GrVkImageInfo info;
2117 if (GrBackendRenderTargets::GetVkImageInfo(rt, &info)) {
2118 // something in the command buffer may still be using this, so force submit
2119 SkAssertResult(this->submitCommandBuffer(kForce_SyncQueue));
2120 GrVkImage::DestroyImageInfo(this, const_cast<GrVkImageInfo*>(&info));
2121 }
2122 }
2123 #endif
2124
2125 ////////////////////////////////////////////////////////////////////////////////
2126
2127 void GrVkGpu::addBufferMemoryBarrier(const GrManagedResource* resource,
2128 VkPipelineStageFlags srcStageMask,
2129 VkPipelineStageFlags dstStageMask,
2130 bool byRegion,
2131 VkBufferMemoryBarrier* barrier) const {
2132 if (!this->currentCommandBuffer()) {
2133 return;
2134 }
2135 SkASSERT(resource);
2136 this->currentCommandBuffer()->pipelineBarrier(this,
2137 resource,
2138 srcStageMask,
2139 dstStageMask,
2140 byRegion,
2141 GrVkCommandBuffer::kBufferMemory_BarrierType,
2142 barrier);
2143 }
2144 void GrVkGpu::addBufferMemoryBarrier(VkPipelineStageFlags srcStageMask,
2145 VkPipelineStageFlags dstStageMask,
2146 bool byRegion,
2147 VkBufferMemoryBarrier* barrier) const {
2148 if (!this->currentCommandBuffer()) {
2149 return;
2150 }
2151     // We don't pass in a resource here to the command buffer. The command buffer is only using it
2152 // to hold a ref, but every place where we add a buffer memory barrier we are doing some other
2153 // command with the buffer on the command buffer. Thus those other commands will already cause
2154 // the command buffer to be holding a ref to the buffer.
2155 this->currentCommandBuffer()->pipelineBarrier(this,
2156 /*resource=*/nullptr,
2157 srcStageMask,
2158 dstStageMask,
2159 byRegion,
2160 GrVkCommandBuffer::kBufferMemory_BarrierType,
2161 barrier);
2162 }
2163
2164 void GrVkGpu::addImageMemoryBarrier(const GrManagedResource* resource,
2165 VkPipelineStageFlags srcStageMask,
2166 VkPipelineStageFlags dstStageMask,
2167 bool byRegion,
2168 VkImageMemoryBarrier* barrier) const {
2169 // If we are in the middle of destroying or abandoning the context we may hit a release proc
2170 // that triggers the destruction of a GrVkImage. This could cause us to try and transfer the
2171     // VkImage back to the original queue. In this state we don't submit any more work and we may not
2172 // have a current command buffer. Thus we won't do the queue transfer.
2173 if (!this->currentCommandBuffer()) {
2174 return;
2175 }
2176 SkASSERT(resource);
2177 this->currentCommandBuffer()->pipelineBarrier(this,
2178 resource,
2179 srcStageMask,
2180 dstStageMask,
2181 byRegion,
2182 GrVkCommandBuffer::kImageMemory_BarrierType,
2183 barrier);
2184 }
2185
2186 void GrVkGpu::prepareSurfacesForBackendAccessAndStateUpdates(
2187 SkSpan<GrSurfaceProxy*> proxies,
2188 SkSurfaces::BackendSurfaceAccess access,
2189 const skgpu::MutableTextureState* newState) {
2190 // Submit the current command buffer to the Queue. Whether we inserted semaphores or not does
2191     // not affect what we do here.
2192 if (!proxies.empty() && (access == SkSurfaces::BackendSurfaceAccess::kPresent || newState)) {
2193 // We currently don't support passing in new surface state for multiple proxies here. The
2194 // only time we have multiple proxies is if we are flushing a yuv SkImage which won't have
2195     // state updates anyway. Additionally, if we have a newState then we must not have any
2196 // BackendSurfaceAccess.
2197 SkASSERT(!newState || proxies.size() == 1);
2198 SkASSERT(!newState || access == SkSurfaces::BackendSurfaceAccess::kNoAccess);
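        // For each proxy pick the image the external client will actually see: the texture's
        // image, or for render targets the attachment handed out to the outside world.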
2199 GrVkImage* image;
2200 for (GrSurfaceProxy* proxy : proxies) {
2201 SkASSERT(proxy->isInstantiated());
2202 if (GrTexture* tex = proxy->peekTexture()) {
2203 image = static_cast<GrVkTexture*>(tex)->textureImage();
2204 } else {
2205 GrRenderTarget* rt = proxy->peekRenderTarget();
2206 SkASSERT(rt);
2207 GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt);
2208 image = vkRT->externalAttachment();
2209 }
2210 if (newState) {
2211 VkImageLayout newLayout =
2212 skgpu::MutableTextureStates::GetVkImageLayout(newState);
2213 uint32_t newIndex =
2214 skgpu::MutableTextureStates::GetVkQueueFamilyIndex(newState);
2215 set_layout_and_queue_from_mutable_state(this, image, newLayout, newIndex);
2216 } else {
2217 SkASSERT(access == SkSurfaces::BackendSurfaceAccess::kPresent);
2218 image->prepareForPresent(this);
2219 }
2220 }
2221 }
2222 }
2223
2224 void GrVkGpu::addFinishedProc(GrGpuFinishedProc finishedProc,
2225 GrGpuFinishedContext finishedContext) {
2226 SkASSERT(finishedProc);
2227 this->addFinishedCallback(skgpu::RefCntedCallback::Make(finishedProc, finishedContext));
2228 }
2229
2230 void GrVkGpu::addFinishedCallback(sk_sp<skgpu::RefCntedCallback> finishedCallback) {
2231 SkASSERT(finishedCallback);
2232 fResourceProvider.addFinishedProcToActiveCommandBuffers(std::move(finishedCallback));
2233 }
2234
2235 void GrVkGpu::takeOwnershipOfBuffer(sk_sp<GrGpuBuffer> buffer) {
2236 this->currentCommandBuffer()->addGrBuffer(std::move(buffer));
2237 }
2238
2239 bool GrVkGpu::onSubmitToGpu(GrSyncCpu sync) {
2240 if (sync == GrSyncCpu::kYes) {
2241 return this->submitCommandBuffer(kForce_SyncQueue);
2242 } else {
2243 return this->submitCommandBuffer(kSkip_SyncQueue);
2244 }
2245 }
2246
2247 void GrVkGpu::finishOutstandingGpuWork() {
2248 VK_CALL(QueueWaitIdle(fQueue));
2249
2250 if (this->vkCaps().mustSyncCommandBuffersWithQueue()) {
2251 fResourceProvider.forceSyncAllCommandBuffers();
2252 }
2253 }
2254
2255 void GrVkGpu::onReportSubmitHistograms() {
2256 #if SK_HISTOGRAMS_ENABLED
2257 uint64_t allocatedMemory = 0, usedMemory = 0;
2258 std::tie(allocatedMemory, usedMemory) = fMemoryAllocator->totalAllocatedAndUsedMemory();
2259 SkASSERT(usedMemory <= allocatedMemory);
2260 if (allocatedMemory > 0) {
2261 SK_HISTOGRAM_PERCENTAGE("VulkanMemoryAllocator.PercentUsed",
2262 (usedMemory * 100) / allocatedMemory);
2263 }
2264     // allocatedMemory is in bytes and needs to be reported in kilobytes. SK_HISTOGRAM_MEMORY_KB
2265 // supports samples up to around 500MB which should support the amounts of memory we allocate.
2266 SK_HISTOGRAM_MEMORY_KB("VulkanMemoryAllocator.AmountAllocated", allocatedMemory >> 10);
2267 #endif // SK_HISTOGRAMS_ENABLED
2268 }
2269
2270 void GrVkGpu::copySurfaceAsCopyImage(GrSurface* dst,
2271 GrSurface* src,
2272 GrVkImage* dstImage,
2273 GrVkImage* srcImage,
2274 const SkIRect& srcRect,
2275 const SkIPoint& dstPoint) {
2276 if (!this->currentCommandBuffer()) {
2277 return;
2278 }
2279
2280 #ifdef SK_DEBUG
2281 int dstSampleCnt = dstImage->numSamples();
2282 int srcSampleCnt = srcImage->numSamples();
2283 bool dstHasYcbcr = dstImage->ycbcrConversionInfo().isValid();
2284 bool srcHasYcbcr = srcImage->ycbcrConversionInfo().isValid();
2285 VkFormat dstFormat = dstImage->imageFormat();
2286 VkFormat srcFormat;
2287     SkAssertResult(GrBackendFormats::AsVkFormat(src->backendFormat(), &srcFormat));
2288 SkASSERT(this->vkCaps().canCopyImage(dstFormat, dstSampleCnt, dstHasYcbcr,
2289 srcFormat, srcSampleCnt, srcHasYcbcr));
2290 #endif
2291 if (src->isProtected() && !dst->isProtected()) {
2292 SkDebugf("Can't copy from protected memory to non-protected");
2293 return;
2294 }
2295
2296 // These flags are for flushing/invalidating caches and for the dst image it doesn't matter if
2297 // the cache is flushed since it is only being written to.
2298 dstImage->setImageLayout(this,
2299 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2300 VK_ACCESS_TRANSFER_WRITE_BIT,
2301 VK_PIPELINE_STAGE_TRANSFER_BIT,
2302 false);
2303
2304 srcImage->setImageLayout(this,
2305 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
2306 VK_ACCESS_TRANSFER_READ_BIT,
2307 VK_PIPELINE_STAGE_TRANSFER_BIT,
2308 false);
2309
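    // vkCmdCopyImage is a 1:1 texel copy, so src and dst share a single extent; only the offsets
    // differ.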
2310 VkImageCopy copyRegion;
2311     memset(&copyRegion, 0, sizeof(VkImageCopy));
2312 copyRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
2313 copyRegion.srcOffset = { srcRect.fLeft, srcRect.fTop, 0 };
2314 copyRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
2315 copyRegion.dstOffset = { dstPoint.fX, dstPoint.fY, 0 };
2316 copyRegion.extent = { (uint32_t)srcRect.width(), (uint32_t)srcRect.height(), 1 };
2317
2318 this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(src));
2319 this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(dst));
2320 this->currentCommandBuffer()->copyImage(this,
2321 srcImage,
2322 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
2323 dstImage,
2324 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2325 1,
2326                                             &copyRegion);
2327
2328 SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
2329 srcRect.width(), srcRect.height());
2330 // The rect is already in device space so we pass in kTopLeft so no flip is done.
2331 this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect);
2332 }
2333
2334 void GrVkGpu::copySurfaceAsBlit(GrSurface* dst,
2335 GrSurface* src,
2336 GrVkImage* dstImage,
2337 GrVkImage* srcImage,
2338 const SkIRect& srcRect,
2339 const SkIRect& dstRect,
2340 GrSamplerState::Filter filter) {
2341 if (!this->currentCommandBuffer()) {
2342 return;
2343 }
2344
2345 #ifdef SK_DEBUG
2346 int dstSampleCnt = dstImage->numSamples();
2347 int srcSampleCnt = srcImage->numSamples();
2348 bool dstHasYcbcr = dstImage->ycbcrConversionInfo().isValid();
2349 bool srcHasYcbcr = srcImage->ycbcrConversionInfo().isValid();
2350 VkFormat dstFormat = dstImage->imageFormat();
2351 VkFormat srcFormat;
2352     SkAssertResult(GrBackendFormats::AsVkFormat(src->backendFormat(), &srcFormat));
2353 SkASSERT(this->vkCaps().canCopyAsBlit(dstFormat,
2354 dstSampleCnt,
2355 dstImage->isLinearTiled(),
2356 dstHasYcbcr,
2357 srcFormat,
2358 srcSampleCnt,
2359 srcImage->isLinearTiled(),
2360 srcHasYcbcr));
2361
2362 #endif
2363 if (src->isProtected() && !dst->isProtected()) {
2364 SkDebugf("Can't copy from protected memory to non-protected");
2365 return;
2366 }
2367
2368 dstImage->setImageLayout(this,
2369 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2370 VK_ACCESS_TRANSFER_WRITE_BIT,
2371 VK_PIPELINE_STAGE_TRANSFER_BIT,
2372 false);
2373
2374 srcImage->setImageLayout(this,
2375 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
2376 VK_ACCESS_TRANSFER_READ_BIT,
2377 VK_PIPELINE_STAGE_TRANSFER_BIT,
2378 false);
2379
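    // Unlike copyImage, blitImage takes independent src and dst rectangles and can scale and
    // filter between them.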
2380 VkImageBlit blitRegion;
2381 memset(&blitRegion, 0, sizeof(VkImageBlit));
2382 blitRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
2383 blitRegion.srcOffsets[0] = { srcRect.fLeft, srcRect.fTop, 0 };
2384 blitRegion.srcOffsets[1] = { srcRect.fRight, srcRect.fBottom, 1 };
2385 blitRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
2386 blitRegion.dstOffsets[0] = { dstRect.fLeft, dstRect.fTop, 0 };
2387 blitRegion.dstOffsets[1] = { dstRect.fRight, dstRect.fBottom, 1 };
2388
2389 this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(src));
2390 this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(dst));
2391 this->currentCommandBuffer()->blitImage(this,
2392 *srcImage,
2393 *dstImage,
2394 1,
2395 &blitRegion,
2396 filter == GrSamplerState::Filter::kNearest ?
2397 VK_FILTER_NEAREST : VK_FILTER_LINEAR);
2398
2399 // The rect is already in device space so we pass in kTopLeft so no flip is done.
2400 this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect);
2401 }
2402
2403 void GrVkGpu::copySurfaceAsResolve(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
2404 const SkIPoint& dstPoint) {
2405 if (src->isProtected() && !dst->isProtected()) {
2406 SkDebugf("Can't copy from protected memory to non-protected");
2407 return;
2408 }
2409 GrVkRenderTarget* srcRT = static_cast<GrVkRenderTarget*>(src->asRenderTarget());
2410 this->resolveImage(dst, srcRT, srcRect, dstPoint);
2411 SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
2412 srcRect.width(), srcRect.height());
2413 // The rect is already in device space so we pass in kTopLeft so no flip is done.
2414 this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect);
2415 }
2416
2417 bool GrVkGpu::onCopySurface(GrSurface* dst, const SkIRect& dstRect,
2418 GrSurface* src, const SkIRect& srcRect,
2419 GrSamplerState::Filter filter) {
2420 #ifdef SK_DEBUG
2421 if (GrVkRenderTarget* srcRT = static_cast<GrVkRenderTarget*>(src->asRenderTarget())) {
2422 SkASSERT(!srcRT->wrapsSecondaryCommandBuffer());
2423 }
2424 if (GrVkRenderTarget* dstRT = static_cast<GrVkRenderTarget*>(dst->asRenderTarget())) {
2425 SkASSERT(!dstRT->wrapsSecondaryCommandBuffer());
2426 }
2427 #endif
2428 if (src->isProtected() && !dst->isProtected()) {
2429 SkDebugf("Can't copy from protected memory to non-protected");
2430 return false;
2431 }
2432
2433 GrVkImage* dstImage;
2434 GrVkImage* srcImage;
2435 GrRenderTarget* dstRT = dst->asRenderTarget();
2436 if (dstRT) {
2437 GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(dstRT);
2438 if (vkRT->wrapsSecondaryCommandBuffer()) {
2439 return false;
2440 }
2441 // This will technically return true for single sample rts that used DMSAA in which case we
2442 // don't have to pick the resolve attachment. But in that case the resolve and color
2443 // attachments will be the same anyways.
2444 if (this->vkCaps().renderTargetSupportsDiscardableMSAA(vkRT)) {
2445 dstImage = vkRT->resolveAttachment();
2446 } else {
2447 dstImage = vkRT->colorAttachment();
2448 }
2449 } else if (dst->asTexture()) {
2450 dstImage = static_cast<GrVkTexture*>(dst->asTexture())->textureImage();
2451 } else {
2452         // The surface is a GrAttachment already
2453 dstImage = static_cast<GrVkImage*>(dst);
2454 }
2455 GrRenderTarget* srcRT = src->asRenderTarget();
2456 if (srcRT) {
2457 GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(srcRT);
2458 // This will technically return true for single sample rts that used DMSAA in which case we
2459 // don't have to pick the resolve attachment. But in that case the resolve and color
2460 // attachments will be the same anyways.
2461 if (this->vkCaps().renderTargetSupportsDiscardableMSAA(vkRT)) {
2462 srcImage = vkRT->resolveAttachment();
2463 } else {
2464 srcImage = vkRT->colorAttachment();
2465 }
2466 } else if (src->asTexture()) {
2467 SkASSERT(src->asTexture());
2468 srcImage = static_cast<GrVkTexture*>(src->asTexture())->textureImage();
2469 } else {
2470         // The surface is a GrAttachment already
2471 srcImage = static_cast<GrVkImage*>(src);
2472 }
2473
2474 VkFormat dstFormat = dstImage->imageFormat();
2475 VkFormat srcFormat = srcImage->imageFormat();
2476
2477 int dstSampleCnt = dstImage->numSamples();
2478 int srcSampleCnt = srcImage->numSamples();
2479
2480 bool dstHasYcbcr = dstImage->ycbcrConversionInfo().isValid();
2481 bool srcHasYcbcr = srcImage->ycbcrConversionInfo().isValid();
2482
2483 if (srcRect.size() == dstRect.size()) {
2484 // Prefer resolves or copy-image commands when there is no scaling
2485 const SkIPoint dstPoint = dstRect.topLeft();
2486 if (this->vkCaps().canCopyAsResolve(dstFormat, dstSampleCnt, dstHasYcbcr,
2487 srcFormat, srcSampleCnt, srcHasYcbcr)) {
2488 this->copySurfaceAsResolve(dst, src, srcRect, dstPoint);
2489 return true;
2490 }
2491
2492 if (this->vkCaps().canCopyImage(dstFormat, dstSampleCnt, dstHasYcbcr,
2493 srcFormat, srcSampleCnt, srcHasYcbcr)) {
2494 this->copySurfaceAsCopyImage(dst, src, dstImage, srcImage, srcRect, dstPoint);
2495 return true;
2496 }
2497 }
2498
2499 if (this->vkCaps().canCopyAsBlit(dstFormat,
2500 dstSampleCnt,
2501 dstImage->isLinearTiled(),
2502 dstHasYcbcr,
2503 srcFormat,
2504 srcSampleCnt,
2505 srcImage->isLinearTiled(),
2506 srcHasYcbcr)) {
2507 this->copySurfaceAsBlit(dst, src, dstImage, srcImage, srcRect, dstRect, filter);
2508 return true;
2509 }
2510
2511 return false;
2512 }
2513
2514 bool GrVkGpu::onReadPixels(GrSurface* surface,
2515 SkIRect rect,
2516 GrColorType surfaceColorType,
2517 GrColorType dstColorType,
2518 void* buffer,
2519 size_t rowBytes) {
2520 if (surface->isProtected()) {
2521 return false;
2522 }
2523
2524 if (!this->currentCommandBuffer()) {
2525 return false;
2526 }
2527
2528 GrVkImage* image = nullptr;
2529 GrVkRenderTarget* rt = static_cast<GrVkRenderTarget*>(surface->asRenderTarget());
2530 if (rt) {
2531 // Reading from render targets that wrap a secondary command buffer is not allowed since
2532         // it would require us to know the VkImage, which we don't have, and it would require us
2533         // to stop and restart the VkRenderPass, which we don't have access to.
2534 if (rt->wrapsSecondaryCommandBuffer()) {
2535 return false;
2536 }
2537 image = rt->nonMSAAAttachment();
2538 } else {
2539 image = static_cast<GrVkTexture*>(surface->asTexture())->textureImage();
2540 }
2541
2542 if (!image) {
2543 return false;
2544 }
2545
2546 if (dstColorType == GrColorType::kUnknown ||
2547 dstColorType != this->vkCaps().transferColorType(image->imageFormat(), surfaceColorType)) {
2548 return false;
2549 }
2550
2551 // Change layout of our target so it can be used as copy
2552 image->setImageLayout(this,
2553 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
2554 VK_ACCESS_TRANSFER_READ_BIT,
2555 VK_PIPELINE_STAGE_TRANSFER_BIT,
2556 false);
2557
2558 size_t bpp = GrColorTypeBytesPerPixel(dstColorType);
2559 if (skgpu::VkFormatBytesPerBlock(image->imageFormat()) != bpp) {
2560 return false;
2561 }
2562 size_t tightRowBytes = bpp*rect.width();
2563
2564 VkBufferImageCopy region;
2565     memset(&region, 0, sizeof(VkBufferImageCopy));
2566 VkOffset3D offset = { rect.left(), rect.top(), 0 };
2567 region.imageOffset = offset;
2568 region.imageExtent = { (uint32_t)rect.width(), (uint32_t)rect.height(), 1 };
2569
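    // Size the transfer buffer for tightly packed rows; the caller's rowBytes padding is
    // reapplied by the SkRectMemcpy at the end.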
2570 size_t transBufferRowBytes = bpp * region.imageExtent.width;
2571 size_t imageRows = region.imageExtent.height;
2572 GrResourceProvider* resourceProvider = this->getContext()->priv().resourceProvider();
2573 sk_sp<GrGpuBuffer> transferBuffer = resourceProvider->createBuffer(
2574 transBufferRowBytes * imageRows,
2575 GrGpuBufferType::kXferGpuToCpu,
2576 kDynamic_GrAccessPattern,
2577 GrResourceProvider::ZeroInit::kNo);
2578
2579 if (!transferBuffer) {
2580 return false;
2581 }
2582
2583 GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(transferBuffer.get());
2584
2585 // Copy the image to a buffer so we can map it to cpu memory
2586 region.bufferOffset = 0;
2587 region.bufferRowLength = 0; // Forces RowLength to be width. We handle the rowBytes below.
2588 region.bufferImageHeight = 0; // Forces height to be tightly packed. Only useful for 3d images.
2589 region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
2590
2591 this->currentCommandBuffer()->copyImageToBuffer(this,
2592 image,
2593 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
2594 transferBuffer,
2595 1,
2596                                                      &region);
2597
2598 // make sure the copy to buffer has finished
2599 vkBuffer->addMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT,
2600 VK_ACCESS_HOST_READ_BIT,
2601 VK_PIPELINE_STAGE_TRANSFER_BIT,
2602 VK_PIPELINE_STAGE_HOST_BIT,
2603 false);
2604
2605 // We need to submit the current command buffer to the Queue and make sure it finishes before
2606 // we can copy the data out of the buffer.
2607 if (!this->submitCommandBuffer(kForce_SyncQueue)) {
2608 return false;
2609 }
2610 void* mappedMemory = transferBuffer->map();
2611 if (!mappedMemory) {
2612 return false;
2613 }
2614
2615 SkRectMemcpy(buffer, rowBytes, mappedMemory, transBufferRowBytes, tightRowBytes, rect.height());
2616
2617 transferBuffer->unmap();
2618 return true;
2619 }
2620
2621 bool GrVkGpu::beginRenderPass(const GrVkRenderPass* renderPass,
2622 sk_sp<const GrVkFramebuffer> framebuffer,
2623 const VkClearValue* colorClear,
2624 const GrSurface* target,
2625 const SkIRect& renderPassBounds,
2626 bool forSecondaryCB) {
2627 if (!this->currentCommandBuffer()) {
2628 return false;
2629 }
2630     SkASSERT(!framebuffer->isExternal());
2631
2632 #ifdef SK_DEBUG
2633 uint32_t index;
2634 bool result = renderPass->colorAttachmentIndex(&index);
2635 SkASSERT(result && 0 == index);
2636 result = renderPass->stencilAttachmentIndex(&index);
2637 if (result) {
2638 SkASSERT(1 == index);
2639 }
2640 #endif
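    // Clear values are indexed by attachment: 0 is color, and the stencil clear lands at index 2
    // when a resolve attachment sits between them, otherwise at index 1.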
2641 VkClearValue clears[3];
2642 int stencilIndex = renderPass->hasResolveAttachment() ? 2 : 1;
2643 clears[0].color = colorClear->color;
2644 clears[stencilIndex].depthStencil.depth = 0.0f;
2645 clears[stencilIndex].depthStencil.stencil = 0;
2646
2647 return this->currentCommandBuffer()->beginRenderPass(
2648 this, renderPass, std::move(framebuffer), clears, target, renderPassBounds, forSecondaryCB);
2649 }
2650
2651 void GrVkGpu::endRenderPass(GrRenderTarget* target, GrSurfaceOrigin origin,
2652 const SkIRect& bounds) {
2653 // We had a command buffer when we started the render pass, we should have one now as well.
2654 SkASSERT(this->currentCommandBuffer());
2655 this->currentCommandBuffer()->endRenderPass(this);
2656 this->didWriteToSurface(target, origin, &bounds);
2657 }
2658
2659 bool GrVkGpu::checkVkResult(VkResult result) {
2660 switch (result) {
2661 case VK_SUCCESS:
2662 return true;
2663 case VK_ERROR_DEVICE_LOST:
2664 if (!fDeviceIsLost) {
2665 // Callback should only be invoked once, and device should be marked as lost first.
2666 fDeviceIsLost = true;
2667 skgpu::InvokeDeviceLostCallback(vkInterface(),
2668 device(),
2669 fDeviceLostContext,
2670 fDeviceLostProc,
2671 vkCaps().supportsDeviceFaultInfo());
2672 }
2673 return false;
2674 case VK_ERROR_OUT_OF_DEVICE_MEMORY:
2675 case VK_ERROR_OUT_OF_HOST_MEMORY:
2676 this->setOOMed();
2677 return false;
2678 default:
2679 return false;
2680 }
2681 }
2682
2683 void GrVkGpu::submitSecondaryCommandBuffer(std::unique_ptr<GrVkSecondaryCommandBuffer> buffer) {
2684 if (!this->currentCommandBuffer()) {
2685 return;
2686 }
2687 this->currentCommandBuffer()->executeCommands(this, std::move(buffer));
2688 }
2689
2690 void GrVkGpu::submit(GrOpsRenderPass* renderPass) {
2691 SkASSERT(fCachedOpsRenderPass.get() == renderPass);
2692
2693 fCachedOpsRenderPass->submit();
2694 fCachedOpsRenderPass->reset();
2695 }
2696
2697 [[nodiscard]] std::unique_ptr<GrSemaphore> GrVkGpu::makeSemaphore(bool isOwned) {
2698 return GrVkSemaphore::Make(this, isOwned);
2699 }
2700
2701 std::unique_ptr<GrSemaphore> GrVkGpu::wrapBackendSemaphore(const GrBackendSemaphore& semaphore,
2702 GrSemaphoreWrapType wrapType,
2703 GrWrapOwnership ownership) {
2704 return GrVkSemaphore::MakeWrapped(this, GrBackendSemaphores::GetVkSemaphore(semaphore),
2705 wrapType, ownership);
2706 }
2707
2708 void GrVkGpu::insertSemaphore(GrSemaphore* semaphore) {
2709 SkASSERT(semaphore);
2710
2711 GrVkSemaphore* vkSem = static_cast<GrVkSemaphore*>(semaphore);
2712
2713 GrVkSemaphore::Resource* resource = vkSem->getResource();
2714 if (resource->shouldSignal()) {
2715 resource->ref();
2716 fSemaphoresToSignal.push_back(resource);
2717 }
2718 }
2719
2720 void GrVkGpu::waitSemaphore(GrSemaphore* semaphore) {
2721 SkASSERT(semaphore);
2722
2723 GrVkSemaphore* vkSem = static_cast<GrVkSemaphore*>(semaphore);
2724
2725 GrVkSemaphore::Resource* resource = vkSem->getResource();
2726 if (resource->shouldWait()) {
2727 resource->ref();
2728 fSemaphoresToWaitOn.push_back(resource);
2729 }
2730 }
2731
2732 std::unique_ptr<GrSemaphore> GrVkGpu::prepareTextureForCrossContextUsage(GrTexture* texture) {
2733 SkASSERT(texture);
2734 GrVkImage* vkTexture = static_cast<GrVkTexture*>(texture)->textureImage();
2735 vkTexture->setImageLayout(this,
2736 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
2737 VK_ACCESS_SHADER_READ_BIT,
2738 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
2739 false);
2740 // TODO: should we have a way to notify the caller that this has failed? Currently if the submit
2741 // fails (caused by DEVICE_LOST) this will just cause us to fail the next use of the gpu.
2742 // Eventually we will abandon the whole GPU if this fails.
2743 this->submitToGpu(GrSyncCpu::kNo);
2744
2745 // The image layout change serves as a barrier, so no semaphore is needed.
2746 // If we ever decide we need to return a semaphore here, we need to make sure GrVkSemaphore is
2747 // thread safe so that only the first thread that tries to use the semaphore actually submits
2748 // it. This additionally would also require thread safety in command buffer submissions to
2749 // queues in general.
2750 return nullptr;
2751 }
2752
2753 void GrVkGpu::addDrawable(std::unique_ptr<SkDrawable::GpuDrawHandler> drawable) {
2754 fDrawables.emplace_back(std::move(drawable));
2755 }
2756
2757 void GrVkGpu::storeVkPipelineCacheData() {
2758 if (this->getContext()->priv().getPersistentCache()) {
2759 this->resourceProvider().storePipelineCacheData();
2760 }
2761 }
2762