1 /*
2 * Copyright 2015 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "src/gpu/vk/GrVkGpu.h"
9
10 #include "include/gpu/GrBackendSemaphore.h"
11 #include "include/gpu/GrBackendSurface.h"
12 #include "include/gpu/GrContextOptions.h"
13 #include "include/gpu/GrDirectContext.h"
14 #include "include/private/SkTo.h"
15 #include "src/core/SkCompressedDataUtils.h"
16 #include "src/core/SkConvertPixels.h"
17 #include "src/core/SkMipmap.h"
18 #include "src/core/SkTraceEvent.h"
19 #include "src/gpu/GrBackendUtils.h"
20 #include "src/gpu/GrDataUtils.h"
21 #include "src/gpu/GrDirectContextPriv.h"
22 #include "src/gpu/GrGeometryProcessor.h"
23 #include "src/gpu/GrGpuResourceCacheAccess.h"
24 #include "src/gpu/GrNativeRect.h"
25 #include "src/gpu/GrPipeline.h"
26 #include "src/gpu/GrRenderTarget.h"
27 #include "src/gpu/GrResourceProvider.h"
28 #include "src/gpu/GrTexture.h"
29 #include "src/gpu/GrThreadSafePipelineBuilder.h"
30 #include "src/gpu/SkGr.h"
31 #include "src/gpu/vk/GrVkAMDMemoryAllocator.h"
32 #include "src/gpu/vk/GrVkBuffer.h"
33 #include "src/gpu/vk/GrVkCommandBuffer.h"
34 #include "src/gpu/vk/GrVkCommandPool.h"
35 #include "src/gpu/vk/GrVkFramebuffer.h"
36 #include "src/gpu/vk/GrVkImage.h"
37 #include "src/gpu/vk/GrVkInterface.h"
38 #include "src/gpu/vk/GrVkMemory.h"
39 #include "src/gpu/vk/GrVkOpsRenderPass.h"
40 #include "src/gpu/vk/GrVkPipeline.h"
41 #include "src/gpu/vk/GrVkPipelineState.h"
42 #include "src/gpu/vk/GrVkRenderPass.h"
43 #include "src/gpu/vk/GrVkResourceProvider.h"
44 #include "src/gpu/vk/GrVkSemaphore.h"
45 #include "src/gpu/vk/GrVkTexture.h"
46 #include "src/gpu/vk/GrVkTextureRenderTarget.h"
47 #include "src/image/SkImage_Gpu.h"
48 #include "src/image/SkSurface_Gpu.h"
49
50 #include "include/gpu/vk/GrVkExtensions.h"
51 #include "include/gpu/vk/GrVkTypes.h"
52
53 #include <utility>
54
55 #define VK_CALL(X) GR_VK_CALL(this->vkInterface(), X)
56 #define VK_CALL_RET(RET, X) GR_VK_CALL_RESULT(this, RET, X)
57
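// Factory for GrVkGpu. Validates the client-supplied GrVkBackendContext (instance, physical
// device, device, queue, and proc loader), builds the GrVkInterface and GrVkCaps, and picks or
// creates a memory allocator; returns nullptr if any of these steps fail.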
58 sk_sp<GrGpu> GrVkGpu::Make(const GrVkBackendContext& backendContext,
59 const GrContextOptions& options, GrDirectContext* direct) {
60 if (backendContext.fInstance == VK_NULL_HANDLE ||
61 backendContext.fPhysicalDevice == VK_NULL_HANDLE ||
62 backendContext.fDevice == VK_NULL_HANDLE ||
63 backendContext.fQueue == VK_NULL_HANDLE) {
64 return nullptr;
65 }
66 if (!backendContext.fGetProc) {
67 return nullptr;
68 }
69
70 PFN_vkEnumerateInstanceVersion localEnumerateInstanceVersion =
71 reinterpret_cast<PFN_vkEnumerateInstanceVersion>(
72 backendContext.fGetProc("vkEnumerateInstanceVersion",
73 VK_NULL_HANDLE, VK_NULL_HANDLE));
74 uint32_t instanceVersion = 0;
75 if (!localEnumerateInstanceVersion) {
76 instanceVersion = VK_MAKE_VERSION(1, 0, 0);
77 } else {
78 VkResult err = localEnumerateInstanceVersion(&instanceVersion);
79 if (err) {
80 SkDebugf("Failed to enumerate instance version. Err: %d\n", err);
81 return nullptr;
82 }
83 }
84
85 PFN_vkGetPhysicalDeviceProperties localGetPhysicalDeviceProperties =
86 reinterpret_cast<PFN_vkGetPhysicalDeviceProperties>(
87 backendContext.fGetProc("vkGetPhysicalDeviceProperties",
88 backendContext.fInstance,
89 VK_NULL_HANDLE));
90
91 if (!localGetPhysicalDeviceProperties) {
92 return nullptr;
93 }
94 VkPhysicalDeviceProperties physDeviceProperties;
95 localGetPhysicalDeviceProperties(backendContext.fPhysicalDevice, &physDeviceProperties);
96 uint32_t physDevVersion = physDeviceProperties.apiVersion;
97
98 uint32_t apiVersion = backendContext.fMaxAPIVersion ? backendContext.fMaxAPIVersion
99 : instanceVersion;
100
101 instanceVersion = std::min(instanceVersion, apiVersion);
102 physDevVersion = std::min(physDevVersion, apiVersion);
103
104 sk_sp<const GrVkInterface> interface;
105
106 if (backendContext.fVkExtensions) {
107 interface.reset(new GrVkInterface(backendContext.fGetProc,
108 backendContext.fInstance,
109 backendContext.fDevice,
110 instanceVersion,
111 physDevVersion,
112 backendContext.fVkExtensions));
113 if (!interface->validate(instanceVersion, physDevVersion, backendContext.fVkExtensions)) {
114 return nullptr;
115 }
116 } else {
117 GrVkExtensions extensions;
118 // The only extension flag that may affect the Vulkan backend is the swapchain extension. We
119 // need to know if this is enabled to know if we can transition to a present layout when
120 // flushing a surface.
121 if (backendContext.fExtensions & kKHR_swapchain_GrVkExtensionFlag) {
122 const char* swapChainExtName = VK_KHR_SWAPCHAIN_EXTENSION_NAME;
123 extensions.init(backendContext.fGetProc, backendContext.fInstance,
124 backendContext.fPhysicalDevice, 0, nullptr, 1, &swapChainExtName);
125 }
126 interface.reset(new GrVkInterface(backendContext.fGetProc,
127 backendContext.fInstance,
128 backendContext.fDevice,
129 instanceVersion,
130 physDevVersion,
131 &extensions));
132 if (!interface->validate(instanceVersion, physDevVersion, &extensions)) {
133 return nullptr;
134 }
135 }
136
137 sk_sp<GrVkCaps> caps;
138 if (backendContext.fDeviceFeatures2) {
139 caps.reset(new GrVkCaps(options, interface.get(), backendContext.fPhysicalDevice,
140 *backendContext.fDeviceFeatures2, instanceVersion, physDevVersion,
141 *backendContext.fVkExtensions, backendContext.fProtectedContext));
142 } else if (backendContext.fDeviceFeatures) {
143 VkPhysicalDeviceFeatures2 features2;
144 features2.pNext = nullptr;
145 features2.features = *backendContext.fDeviceFeatures;
146 caps.reset(new GrVkCaps(options, interface.get(), backendContext.fPhysicalDevice,
147 features2, instanceVersion, physDevVersion,
148 *backendContext.fVkExtensions, backendContext.fProtectedContext));
149 } else {
150 VkPhysicalDeviceFeatures2 features;
151 memset(&features, 0, sizeof(VkPhysicalDeviceFeatures2));
152 features.pNext = nullptr;
153 if (backendContext.fFeatures & kGeometryShader_GrVkFeatureFlag) {
154 features.features.geometryShader = true;
155 }
156 if (backendContext.fFeatures & kDualSrcBlend_GrVkFeatureFlag) {
157 features.features.dualSrcBlend = true;
158 }
159 if (backendContext.fFeatures & kSampleRateShading_GrVkFeatureFlag) {
160 features.features.sampleRateShading = true;
161 }
162 GrVkExtensions extensions;
163 // The only extension flag that may affect the Vulkan backend is the swapchain extension. We
164 // need to know if this is enabled to know if we can transition to a present layout when
165 // flushing a surface.
166 if (backendContext.fExtensions & kKHR_swapchain_GrVkExtensionFlag) {
167 const char* swapChainExtName = VK_KHR_SWAPCHAIN_EXTENSION_NAME;
168 extensions.init(backendContext.fGetProc, backendContext.fInstance,
169 backendContext.fPhysicalDevice, 0, nullptr, 1, &swapChainExtName);
170 }
171 caps.reset(new GrVkCaps(options, interface.get(), backendContext.fPhysicalDevice,
172 features, instanceVersion, physDevVersion, extensions,
173 backendContext.fProtectedContext));
174 }
175
176 if (!caps) {
177 return nullptr;
178 }
179
180 sk_sp<GrVkMemoryAllocator> memoryAllocator = backendContext.fMemoryAllocator;
181 if (!memoryAllocator) {
182 // We were not given a memory allocator at creation
183 memoryAllocator = GrVkAMDMemoryAllocator::Make(backendContext.fInstance,
184 backendContext.fPhysicalDevice,
185 backendContext.fDevice, physDevVersion,
186 backendContext.fVkExtensions, interface,
187 caps.get());
188 }
189 if (!memoryAllocator) {
190 SkDEBUGFAIL("No supplied vulkan memory allocator and unable to create one internally.");
191 return nullptr;
192 }
193
194 sk_sp<GrVkGpu> vkGpu(new GrVkGpu(direct, backendContext, std::move(caps), interface,
195 instanceVersion, physDevVersion,
196 std::move(memoryAllocator)));
197 if (backendContext.fProtectedContext == GrProtected::kYes &&
198 !vkGpu->vkCaps().supportsProtectedMemory()) {
199 return nullptr;
200 }
201 return std::move(vkGpu);
202 }
203
204 ////////////////////////////////////////////////////////////////////////////////
205
206 GrVkGpu::GrVkGpu(GrDirectContext* direct, const GrVkBackendContext& backendContext,
207 sk_sp<GrVkCaps> caps, sk_sp<const GrVkInterface> interface,
208 uint32_t instanceVersion, uint32_t physicalDeviceVersion,
209 sk_sp<GrVkMemoryAllocator> memoryAllocator)
210 : INHERITED(direct)
211 , fInterface(std::move(interface))
212 , fMemoryAllocator(std::move(memoryAllocator))
213 , fVkCaps(std::move(caps))
214 , fPhysicalDevice(backendContext.fPhysicalDevice)
215 , fDevice(backendContext.fDevice)
216 , fQueue(backendContext.fQueue)
217 , fQueueIndex(backendContext.fGraphicsQueueIndex)
218 , fResourceProvider(this)
219 , fStagingBufferManager(this)
220 , fDisconnected(false)
221 , fProtectedContext(backendContext.fProtectedContext) {
222 SkASSERT(!backendContext.fOwnsInstanceAndDevice);
223 SkASSERT(fMemoryAllocator);
224
225 this->initCapsAndCompiler(fVkCaps);
226
227 VK_CALL(GetPhysicalDeviceProperties(backendContext.fPhysicalDevice, &fPhysDevProps));
228 VK_CALL(GetPhysicalDeviceMemoryProperties(backendContext.fPhysicalDevice, &fPhysDevMemProps));
229
230 fResourceProvider.init();
231
232 fMainCmdPool = fResourceProvider.findOrCreateCommandPool();
233 if (fMainCmdPool) {
234 fMainCmdBuffer = fMainCmdPool->getPrimaryCommandBuffer();
235 SkASSERT(this->currentCommandBuffer());
236 this->currentCommandBuffer()->begin(this);
237 }
238 }
239
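// Tears down GPU-side state: ends and closes the main command pool, waits for outstanding GPU
// work to finish, unrefs any pending wait/signal semaphores, and finally destroys the resource
// provider's objects. Must happen before the VkDevice is destroyed.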
240 void GrVkGpu::destroyResources() {
241 if (fMainCmdPool) {
242 fMainCmdPool->getPrimaryCommandBuffer()->end(this, /*abandoningBuffer=*/true);
243 fMainCmdPool->close();
244 }
245
246 // wait for all commands to finish
247 this->finishOutstandingGpuWork();
248
249 if (fMainCmdPool) {
250 fMainCmdPool->unref();
251 fMainCmdPool = nullptr;
252 }
253
254 for (int i = 0; i < fSemaphoresToWaitOn.count(); ++i) {
255 fSemaphoresToWaitOn[i]->unref();
256 }
257 fSemaphoresToWaitOn.reset();
258
259 for (int i = 0; i < fSemaphoresToSignal.count(); ++i) {
260 fSemaphoresToSignal[i]->unref();
261 }
262 fSemaphoresToSignal.reset();
263
264 fStagingBufferManager.reset();
265
266 fMSAALoadManager.destroyResources(this);
267
268 // must call this just before we destroy the command pool and VkDevice
269 fResourceProvider.destroyResources();
270 }
271
272 GrVkGpu::~GrVkGpu() {
273 if (!fDisconnected) {
274 this->destroyResources();
275 }
276 // We don't delete the memory allocator until the very end of the GrVkGpu lifetime so that
277 // clients can continue to delete backend textures even after a context has been abandoned.
278 fMemoryAllocator.reset();
279 }
280
281
282 void GrVkGpu::disconnect(DisconnectType type) {
283 INHERITED::disconnect(type);
284 if (!fDisconnected) {
285 this->destroyResources();
286
287 fSemaphoresToWaitOn.reset();
288 fSemaphoresToSignal.reset();
289 fMainCmdBuffer = nullptr;
290 fDisconnected = true;
291 }
292 }
293
294 GrThreadSafePipelineBuilder* GrVkGpu::pipelineBuilder() {
295 return fResourceProvider.pipelineStateCache();
296 }
297
298 sk_sp<GrThreadSafePipelineBuilder> GrVkGpu::refPipelineBuilder() {
299 return fResourceProvider.refPipelineStateCache();
300 }
301
302 ///////////////////////////////////////////////////////////////////////////////
303
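// Returns (lazily creating) the cached GrVkOpsRenderPass, configured with a compatible
// framebuffer, load/store ops, and self-dependency flags for the requested render pass features.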
304 GrOpsRenderPass* GrVkGpu::onGetOpsRenderPass(
305 GrRenderTarget* rt,
306 bool useMSAASurface,
307 GrAttachment* stencil,
308 GrSurfaceOrigin origin,
309 const SkIRect& bounds,
310 const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
311 const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo,
312 const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
313 GrXferBarrierFlags renderPassXferBarriers) {
314 if (!fCachedOpsRenderPass) {
315 fCachedOpsRenderPass = std::make_unique<GrVkOpsRenderPass>(this);
316 }
317
318 // For the given render target and requested render pass features we need to find a compatible
319 // framebuffer to use for the render pass. Technically it is the underlying VkRenderPass that
320 // is compatible, but that is part of the framebuffer that we get here.
321 GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt);
322
323 SkASSERT(!useMSAASurface ||
324 rt->numSamples() > 1 ||
325 (this->vkCaps().supportsDiscardableMSAAForDMSAA() &&
326 vkRT->resolveAttachment() &&
327 vkRT->resolveAttachment()->supportsInputAttachmentUsage()));
328
329 // Convert the GrXferBarrierFlags into render pass self-dependency flags
330 GrVkRenderPass::SelfDependencyFlags selfDepFlags = GrVkRenderPass::SelfDependencyFlags::kNone;
331 if (renderPassXferBarriers & GrXferBarrierFlags::kBlend) {
332 selfDepFlags |= GrVkRenderPass::SelfDependencyFlags::kForNonCoherentAdvBlend;
333 }
334 if (renderPassXferBarriers & GrXferBarrierFlags::kTexture) {
335 selfDepFlags |= GrVkRenderPass::SelfDependencyFlags::kForInputAttachment;
336 }
337
338 // Figure out if we need a resolve attachment for this render pass. A resolve attachment is
339 // needed if we are using msaa to draw with a discardable msaa attachment. If we are in this
340 // case we also need to update the color load/store ops since we don't want to ever load or
341 // store the msaa color attachment, but may need to for the resolve attachment.
342 GrOpsRenderPass::LoadAndStoreInfo localColorInfo = colorInfo;
343 bool withResolve = false;
344 GrVkRenderPass::LoadFromResolve loadFromResolve = GrVkRenderPass::LoadFromResolve::kNo;
345 GrOpsRenderPass::LoadAndStoreInfo resolveInfo{GrLoadOp::kLoad, GrStoreOp::kStore, {}};
346 if (useMSAASurface && this->vkCaps().renderTargetSupportsDiscardableMSAA(vkRT)) {
347 withResolve = true;
348 localColorInfo.fStoreOp = GrStoreOp::kDiscard;
349 if (colorInfo.fLoadOp == GrLoadOp::kLoad) {
350 loadFromResolve = GrVkRenderPass::LoadFromResolve::kLoad;
351 localColorInfo.fLoadOp = GrLoadOp::kDiscard;
352 } else {
353 resolveInfo.fLoadOp = GrLoadOp::kDiscard;
354 }
355 }
356
357 // Get the framebuffer to use for the render pass
358 sk_sp<GrVkFramebuffer> framebuffer;
359 if (vkRT->wrapsSecondaryCommandBuffer()) {
360 framebuffer = vkRT->externalFramebuffer();
361 } else {
362 auto fb = vkRT->getFramebuffer(withResolve, SkToBool(stencil), selfDepFlags,
363 loadFromResolve);
364 framebuffer = sk_ref_sp(fb);
365 }
366 if (!framebuffer) {
367 return nullptr;
368 }
369
370 if (!fCachedOpsRenderPass->set(rt, std::move(framebuffer), origin, bounds, localColorInfo,
371 stencilInfo, resolveInfo, selfDepFlags, loadFromResolve,
372 sampledProxies)) {
373 return nullptr;
374 }
375 return fCachedOpsRenderPass.get();
376 }
377
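// Ends the current primary command buffer and submits it to the queue along with any pending
// wait/signal semaphores, optionally blocking until the GPU finishes (kForce_SyncQueue), and
// then acquires a fresh command pool and buffer for subsequent work.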
378 bool GrVkGpu::submitCommandBuffer(SyncQueue sync) {
379 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
380 if (!this->currentCommandBuffer()) {
381 return false;
382 }
383 SkASSERT(!fCachedOpsRenderPass || !fCachedOpsRenderPass->isActive());
384
385 if (!this->currentCommandBuffer()->hasWork() && kForce_SyncQueue != sync &&
386 !fSemaphoresToSignal.count() && !fSemaphoresToWaitOn.count()) {
387 // We may have added finished procs during the flush call. Since there is no actual work
388 // we are not submitting the command buffer and may never come back around to submit it.
389 // Thus we call all current finished procs manually, since the work has technically
390 // finished.
391 this->currentCommandBuffer()->callFinishedProcs();
392 SkASSERT(fDrawables.empty());
393 fResourceProvider.checkCommandBuffers();
394 return true;
395 }
396
397 fMainCmdBuffer->end(this);
398 SkASSERT(fMainCmdPool);
399 fMainCmdPool->close();
400 bool didSubmit = fMainCmdBuffer->submitToQueue(this, fQueue, fSemaphoresToSignal,
401 fSemaphoresToWaitOn);
402
403 if (didSubmit && sync == kForce_SyncQueue) {
404 fMainCmdBuffer->forceSync(this);
405 }
406
407 // We must delete any drawables that had to wait until submit to destroy.
408 fDrawables.reset();
409
410 // If we didn't submit the command buffer then we did not wait on any semaphores. We will
411 // continue to hold onto these semaphores and wait on them during the next command buffer
412 // submission.
413 if (didSubmit) {
414 for (int i = 0; i < fSemaphoresToWaitOn.count(); ++i) {
415 fSemaphoresToWaitOn[i]->unref();
416 }
417 fSemaphoresToWaitOn.reset();
418 }
419
420 // Even if we did not submit the command buffer, we drop all the signal semaphores since we will
421 // not try to recover the work that wasn't submitted and instead just drop it all. The client
422 // will be notified that the semaphores were not submitted so that they will not try to wait on
423 // them.
424 for (int i = 0; i < fSemaphoresToSignal.count(); ++i) {
425 fSemaphoresToSignal[i]->unref();
426 }
427 fSemaphoresToSignal.reset();
428
429 // Release old command pool and create a new one
430 fMainCmdPool->unref();
431 fMainCmdPool = fResourceProvider.findOrCreateCommandPool();
432 if (fMainCmdPool) {
433 fMainCmdBuffer = fMainCmdPool->getPrimaryCommandBuffer();
434 SkASSERT(fMainCmdBuffer);
435 fMainCmdBuffer->begin(this);
436 } else {
437 fMainCmdBuffer = nullptr;
438 }
439 // We must wait to call checkCommandBuffers until after we get a new command buffer. The
440 // checkCommandBuffers may trigger a releaseProc which may cause us to insert a barrier for a
441 // released GrVkImage. That barrier needs to be put into a new command buffer and not the old
442 // one that was just submitted.
443 fResourceProvider.checkCommandBuffers();
444 return didSubmit;
445 }
446
447 ///////////////////////////////////////////////////////////////////////////////
448 sk_sp<GrGpuBuffer> GrVkGpu::onCreateBuffer(size_t size, GrGpuBufferType type,
449 GrAccessPattern accessPattern, const void* data) {
450 #ifdef SK_DEBUG
451 switch (type) {
452 case GrGpuBufferType::kVertex:
453 case GrGpuBufferType::kIndex:
454 case GrGpuBufferType::kDrawIndirect:
455 SkASSERT(accessPattern == kDynamic_GrAccessPattern ||
456 accessPattern == kStatic_GrAccessPattern);
457 break;
458 case GrGpuBufferType::kXferCpuToGpu:
459 SkASSERT(accessPattern == kDynamic_GrAccessPattern);
460 break;
461 case GrGpuBufferType::kXferGpuToCpu:
462 SkASSERT(accessPattern == kDynamic_GrAccessPattern ||
463 accessPattern == kStream_GrAccessPattern);
464 break;
465 case GrGpuBufferType::kUniform:
466 SkASSERT(accessPattern == kDynamic_GrAccessPattern);
467 break;
468 }
469 #endif
470 sk_sp<GrGpuBuffer> buff = GrVkBuffer::Make(this, size, type, accessPattern);
471
472 if (data && buff) {
473 buff->updateData(data, size);
474 }
475 return buff;
476 }
477
478 bool GrVkGpu::onWritePixels(GrSurface* surface,
479 SkIRect rect,
480 GrColorType surfaceColorType,
481 GrColorType srcColorType,
482 const GrMipLevel texels[],
483 int mipLevelCount,
484 bool prepForTexSampling) {
485 GrVkTexture* texture = static_cast<GrVkTexture*>(surface->asTexture());
486 if (!texture) {
487 return false;
488 }
489 GrVkImage* texImage = texture->textureImage();
490
491 // Make sure we have at least the base level
492 if (!mipLevelCount || !texels[0].fPixels) {
493 return false;
494 }
495
496 SkASSERT(!GrVkFormatIsCompressed(texImage->imageFormat()));
497 bool success = false;
498 bool linearTiling = texImage->isLinearTiled();
499 if (linearTiling) {
500 if (mipLevelCount > 1) {
501 SkDebugf("Can't upload mipmap data to linear tiled texture");
502 return false;
503 }
504 if (VK_IMAGE_LAYOUT_PREINITIALIZED != texImage->currentLayout()) {
505 // Need to change the layout to general in order to perform a host write
506 texImage->setImageLayout(this,
507 VK_IMAGE_LAYOUT_GENERAL,
508 VK_ACCESS_HOST_WRITE_BIT,
509 VK_PIPELINE_STAGE_HOST_BIT,
510 false);
511 if (!this->submitCommandBuffer(kForce_SyncQueue)) {
512 return false;
513 }
514 }
515 success = this->uploadTexDataLinear(texImage,
516 rect,
517 srcColorType,
518 texels[0].fPixels,
519 texels[0].fRowBytes);
520 } else {
521 SkASSERT(mipLevelCount <= (int)texImage->mipLevels());
522 success = this->uploadTexDataOptimal(texImage,
523 rect,
524 srcColorType,
525 texels,
526 mipLevelCount);
527 if (1 == mipLevelCount) {
528 texture->markMipmapsDirty();
529 }
530 }
531
532 if (prepForTexSampling) {
533 texImage->setImageLayout(this,
534 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
535 VK_ACCESS_SHADER_READ_BIT,
536 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
537 false);
538 }
539
540 return success;
541 }
542
543 bool GrVkGpu::onTransferPixelsTo(GrTexture* texture,
544 SkIRect rect,
545 GrColorType surfaceColorType,
546 GrColorType bufferColorType,
547 sk_sp<GrGpuBuffer> transferBuffer,
548 size_t bufferOffset,
549 size_t rowBytes) {
550 if (!this->currentCommandBuffer()) {
551 return false;
552 }
553
554 size_t bpp = GrColorTypeBytesPerPixel(bufferColorType);
555 if (GrBackendFormatBytesPerPixel(texture->backendFormat()) != bpp) {
556 return false;
557 }
558
559 // Vulkan only supports offsets that are both 4-byte aligned and aligned to a pixel.
560 if ((bufferOffset & 0x3) || (bufferOffset % bpp)) {
561 return false;
562 }
563 GrVkTexture* tex = static_cast<GrVkTexture*>(texture);
564 if (!tex) {
565 return false;
566 }
567 GrVkImage* vkImage = tex->textureImage();
568 VkFormat format = vkImage->imageFormat();
569
570 // Can't transfer compressed data
571 SkASSERT(!GrVkFormatIsCompressed(format));
572
573 if (!transferBuffer) {
574 return false;
575 }
576
577 if (bufferColorType != this->vkCaps().transferColorType(format, surfaceColorType)) {
578 return false;
579 }
580 SkASSERT(GrVkFormatBytesPerBlock(format) == GrColorTypeBytesPerPixel(bufferColorType));
581
582 SkASSERT(SkIRect::MakeSize(texture->dimensions()).contains(rect));
583
584 // Set up copy region
585 VkBufferImageCopy region;
586 memset(&region, 0, sizeof(VkBufferImageCopy));
587 region.bufferOffset = bufferOffset;
588 region.bufferRowLength = (uint32_t)(rowBytes/bpp);
589 region.bufferImageHeight = 0;
590 region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
591 region.imageOffset = { rect.left(), rect.top(), 0 };
592 region.imageExtent = { (uint32_t)rect.width(), (uint32_t)rect.height(), 1 };
593
594 // Change layout of our target so it can be copied to
595 vkImage->setImageLayout(this,
596 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
597 VK_ACCESS_TRANSFER_WRITE_BIT,
598 VK_PIPELINE_STAGE_TRANSFER_BIT,
599 false);
600
601 const GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(transferBuffer.get());
602
603 // Copy the buffer to the image.
604 this->currentCommandBuffer()->copyBufferToImage(this,
605 vkBuffer->vkBuffer(),
606 vkImage,
607 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
608 1,
609 &region);
610 this->currentCommandBuffer()->addGrBuffer(std::move(transferBuffer));
611
612 tex->markMipmapsDirty();
613 return true;
614 }
615
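// Records a copy of 'rect' from the surface's image into 'transferBuffer' and adds a
// transfer-to-host memory barrier so the CPU can safely read the data once the work is submitted.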
616 bool GrVkGpu::onTransferPixelsFrom(GrSurface* surface,
617 SkIRect rect,
618 GrColorType surfaceColorType,
619 GrColorType bufferColorType,
620 sk_sp<GrGpuBuffer> transferBuffer,
621 size_t offset) {
622 if (!this->currentCommandBuffer()) {
623 return false;
624 }
625 SkASSERT(surface);
626 SkASSERT(transferBuffer);
627 if (fProtectedContext == GrProtected::kYes) {
628 return false;
629 }
630
631 GrVkImage* srcImage;
632 if (GrVkRenderTarget* rt = static_cast<GrVkRenderTarget*>(surface->asRenderTarget())) {
633 // Reading from render targets that wrap a secondary command buffer is not allowed since
634 // it would require us to know the VkImage, which we don't have, and would require us to
635 // stop and restart the VkRenderPass, which we don't have access to.
636 if (rt->wrapsSecondaryCommandBuffer()) {
637 return false;
638 }
639 if (!rt->nonMSAAAttachment()) {
640 return false;
641 }
642 srcImage = rt->nonMSAAAttachment();
643 } else {
644 SkASSERT(surface->asTexture());
645 srcImage = static_cast<GrVkTexture*>(surface->asTexture())->textureImage();
646 }
647
648 VkFormat format = srcImage->imageFormat();
649 if (bufferColorType != this->vkCaps().transferColorType(format, surfaceColorType)) {
650 return false;
651 }
652 SkASSERT(GrVkFormatBytesPerBlock(format) == GrColorTypeBytesPerPixel(bufferColorType));
653
654 // Set up copy region
655 VkBufferImageCopy region;
656 memset(&region, 0, sizeof(VkBufferImageCopy));
657 region.bufferOffset = offset;
658 region.bufferRowLength = rect.width();
659 region.bufferImageHeight = 0;
660 region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
661 region.imageOffset = {rect.left(), rect.top(), 0};
662 region.imageExtent = {(uint32_t)rect.width(), (uint32_t)rect.height(), 1};
663
664 srcImage->setImageLayout(this,
665 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
666 VK_ACCESS_TRANSFER_READ_BIT,
667 VK_PIPELINE_STAGE_TRANSFER_BIT,
668 false);
669
670 this->currentCommandBuffer()->copyImageToBuffer(this, srcImage,
671 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
672 transferBuffer, 1, &region);
673
674 GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(transferBuffer.get());
675 // Make sure the copy to buffer has finished.
676 vkBuffer->addMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT,
677 VK_ACCESS_HOST_READ_BIT,
678 VK_PIPELINE_STAGE_TRANSFER_BIT,
679 VK_PIPELINE_STAGE_HOST_BIT,
680 false);
681 return true;
682 }
683
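// Records a vkCmdResolveImage that resolves the MSAA color attachment of 'src' into the
// non-MSAA image backing 'dst', transitioning both images to the required transfer layouts.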
684 void GrVkGpu::resolveImage(GrSurface* dst, GrVkRenderTarget* src, const SkIRect& srcRect,
685 const SkIPoint& dstPoint) {
686 if (!this->currentCommandBuffer()) {
687 return;
688 }
689
690 SkASSERT(dst);
691 SkASSERT(src && src->colorAttachment() && src->colorAttachment()->numSamples() > 1);
692
693 VkImageResolve resolveInfo;
694 resolveInfo.srcSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
695 resolveInfo.srcOffset = {srcRect.fLeft, srcRect.fTop, 0};
696 resolveInfo.dstSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
697 resolveInfo.dstOffset = {dstPoint.fX, dstPoint.fY, 0};
698 resolveInfo.extent = {(uint32_t)srcRect.width(), (uint32_t)srcRect.height(), 1};
699
700 GrVkImage* dstImage;
701 GrRenderTarget* dstRT = dst->asRenderTarget();
702 GrTexture* dstTex = dst->asTexture();
703 if (dstTex) {
704 dstImage = static_cast<GrVkTexture*>(dstTex)->textureImage();
705 } else {
706 SkASSERT(dst->asRenderTarget());
707 dstImage = static_cast<GrVkRenderTarget*>(dstRT)->nonMSAAAttachment();
708 }
709 SkASSERT(dstImage);
710
711 dstImage->setImageLayout(this,
712 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
713 VK_ACCESS_TRANSFER_WRITE_BIT,
714 VK_PIPELINE_STAGE_TRANSFER_BIT,
715 false);
716
717 src->colorAttachment()->setImageLayout(this,
718 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
719 VK_ACCESS_TRANSFER_READ_BIT,
720 VK_PIPELINE_STAGE_TRANSFER_BIT,
721 false);
722 this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(src->colorAttachment()));
723 this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(dst));
724 this->currentCommandBuffer()->resolveImage(this, *src->colorAttachment(), *dstImage, 1,
725 &resolveInfo);
726 }
727
728 void GrVkGpu::onResolveRenderTarget(GrRenderTarget* target, const SkIRect& resolveRect) {
729 SkASSERT(target->numSamples() > 1);
730 GrVkRenderTarget* rt = static_cast<GrVkRenderTarget*>(target);
731 SkASSERT(rt->colorAttachmentView() && rt->resolveAttachmentView());
732
733 if (this->vkCaps().renderTargetSupportsDiscardableMSAA(rt)) {
734 // We would have resolved the RT during the render pass;
735 return;
736 }
737
738 this->resolveImage(target, rt, resolveRect,
739 SkIPoint::Make(resolveRect.x(), resolveRect.y()));
740 }
741
742 bool GrVkGpu::uploadTexDataLinear(GrVkImage* texImage,
743 SkIRect rect,
744 GrColorType dataColorType,
745 const void* data,
746 size_t rowBytes) {
747 SkASSERT(data);
748 SkASSERT(texImage->isLinearTiled());
749
750 SkASSERT(SkIRect::MakeSize(texImage->dimensions()).contains(rect));
751
752 size_t bpp = GrColorTypeBytesPerPixel(dataColorType);
753 size_t trimRowBytes = rect.width() * bpp;
754
755 SkASSERT(VK_IMAGE_LAYOUT_PREINITIALIZED == texImage->currentLayout() ||
756 VK_IMAGE_LAYOUT_GENERAL == texImage->currentLayout());
757 const VkImageSubresource subres = {
758 VK_IMAGE_ASPECT_COLOR_BIT,
759 0, // mipLevel
760 0, // arraySlice
761 };
762 VkSubresourceLayout layout;
763
764 const GrVkInterface* interface = this->vkInterface();
765
766 GR_VK_CALL(interface, GetImageSubresourceLayout(fDevice,
767 texImage->image(),
768 &subres,
769 &layout));
770
771 const GrVkAlloc& alloc = texImage->alloc();
772 if (VK_NULL_HANDLE == alloc.fMemory) {
773 return false;
774 }
775 VkDeviceSize offset = rect.top()*layout.rowPitch + rect.left()*bpp;
776 VkDeviceSize size = rect.height()*layout.rowPitch;
777 SkASSERT(size + offset <= alloc.fSize);
778 void* mapPtr = GrVkMemory::MapAlloc(this, alloc);
779 if (!mapPtr) {
780 return false;
781 }
782 mapPtr = reinterpret_cast<char*>(mapPtr) + offset;
783
784 SkRectMemcpy(mapPtr,
785 static_cast<size_t>(layout.rowPitch),
786 data,
787 rowBytes,
788 trimRowBytes,
789 rect.height());
790
791 GrVkMemory::FlushMappedAlloc(this, alloc, offset, size);
792 GrVkMemory::UnmapAlloc(this, alloc);
793
794 return true;
795 }
796
797 // This fills in the 'regions' vector in preparation for copying a buffer to an image.
798 // 'individualMipOffsets' is filled in as a side-effect.
799 static size_t fill_in_compressed_regions(GrStagingBufferManager* stagingBufferManager,
800 SkTArray<VkBufferImageCopy>* regions,
801 SkTArray<size_t>* individualMipOffsets,
802 GrStagingBufferManager::Slice* slice,
803 SkImage::CompressionType compression,
804 VkFormat vkFormat,
805 SkISize dimensions,
806 GrMipmapped mipmapped) {
807 SkASSERT(compression != SkImage::CompressionType::kNone);
808 int numMipLevels = 1;
809 if (mipmapped == GrMipmapped::kYes) {
810 numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1;
811 }
812
813 regions->reserve_back(numMipLevels);
814 individualMipOffsets->reserve_back(numMipLevels);
815
816 size_t bytesPerBlock = GrVkFormatBytesPerBlock(vkFormat);
817
818 size_t bufferSize = SkCompressedDataSize(compression,
819 dimensions,
820 individualMipOffsets,
821 mipmapped == GrMipmapped::kYes);
822 SkASSERT(individualMipOffsets->count() == numMipLevels);
823
824 // Get a staging buffer slice to hold our mip data.
825 // Vulkan requires offsets in the buffer to be aligned to a multiple of both the texel size and 4.
826 size_t alignment = bytesPerBlock;
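// Round the alignment up to the least common multiple of bytesPerBlock and 4
// (e.g. 2 -> 4, 6 -> 12, 8 -> 8).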
827 switch (alignment & 0b11) {
828 case 0: break; // alignment is already a multiple of 4.
829 case 2: alignment *= 2; break; // alignment is a multiple of 2 but not 4.
830 default: alignment *= 4; break; // alignment is not a multiple of 2.
831 }
832 *slice = stagingBufferManager->allocateStagingBufferSlice(bufferSize, alignment);
833 if (!slice->fBuffer) {
834 return 0;
835 }
836
837 for (int i = 0; i < numMipLevels; ++i) {
838 VkBufferImageCopy& region = regions->push_back();
839 memset(&region, 0, sizeof(VkBufferImageCopy));
840 region.bufferOffset = slice->fOffset + (*individualMipOffsets)[i];
841 SkISize revisedDimensions = GrCompressedDimensions(compression, dimensions);
842 region.bufferRowLength = revisedDimensions.width();
843 region.bufferImageHeight = revisedDimensions.height();
844 region.imageSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, SkToU32(i), 0, 1};
845 region.imageOffset = {0, 0, 0};
846 region.imageExtent = {SkToU32(dimensions.width()),
847 SkToU32(dimensions.height()), 1};
848
849 dimensions = {std::max(1, dimensions.width() /2),
850 std::max(1, dimensions.height()/2)};
851 }
852
853 return bufferSize;
854 }
855
856 bool GrVkGpu::uploadTexDataOptimal(GrVkImage* texImage,
857 SkIRect rect,
858 GrColorType dataColorType,
859 const GrMipLevel texels[],
860 int mipLevelCount) {
861 if (!this->currentCommandBuffer()) {
862 return false;
863 }
864
865 SkASSERT(!texImage->isLinearTiled());
866 // The assumption is either that we have no mipmaps, or that our rect is the entire texture
867 SkASSERT(mipLevelCount == 1 || rect == SkIRect::MakeSize(texImage->dimensions()));
868
869 // We assume that if the texture has mip levels, we either upload to all the levels or just the
870 // first.
871 SkASSERT(mipLevelCount == 1 || mipLevelCount == (int)texImage->mipLevels());
872
873 SkASSERT(!rect.isEmpty());
874
875 SkASSERT(this->vkCaps().surfaceSupportsWritePixels(texImage));
876
877 SkASSERT(this->vkCaps().isVkFormatTexturable(texImage->imageFormat()));
878 size_t bpp = GrColorTypeBytesPerPixel(dataColorType);
879
880 // texels is const.
881 // But we may need to adjust the fPixels ptr based on the upload rect, or fRowBytes.
882 // Because of this we need to make a non-const shallow copy of texels.
883 SkAutoTArray<GrMipLevel> texelsShallowCopy(mipLevelCount);
884 std::copy_n(texels, mipLevelCount, texelsShallowCopy.get());
885
886 SkTArray<size_t> individualMipOffsets;
887 size_t combinedBufferSize;
888 if (mipLevelCount > 1) {
889 combinedBufferSize = GrComputeTightCombinedBufferSize(bpp,
890 rect.size(),
891 &individualMipOffsets,
892 mipLevelCount);
893 } else {
894 SkASSERT(texelsShallowCopy[0].fPixels && texelsShallowCopy[0].fRowBytes);
895 combinedBufferSize = rect.width()*rect.height()*bpp;
896 individualMipOffsets.push_back(0);
897 }
898 SkASSERT(combinedBufferSize);
899
900 // Get a staging buffer slice to hold our mip data.
901 // Vulkan requires offsets in the buffer to be aligned to a multiple of both the texel size and 4.
902 size_t alignment = bpp;
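// Round the alignment up to the least common multiple of bpp and 4 (e.g. 2 -> 4, 6 -> 12, 8 -> 8).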
903 switch (alignment & 0b11) {
904 case 0: break; // alignment is already a multiple of 4.
905 case 2: alignment *= 2; break; // alignment is a multiple of 2 but not 4.
906 default: alignment *= 4; break; // alignment is not a multiple of 2.
907 }
908 GrStagingBufferManager::Slice slice =
909 fStagingBufferManager.allocateStagingBufferSlice(combinedBufferSize, alignment);
910 if (!slice.fBuffer) {
911 return false;
912 }
913
914 int uploadLeft = rect.left();
915 int uploadTop = rect.top();
916
917 char* buffer = (char*) slice.fOffsetMapPtr;
918 SkTArray<VkBufferImageCopy> regions(mipLevelCount);
919
920 int currentWidth = rect.width();
921 int currentHeight = rect.height();
922 for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
923 if (texelsShallowCopy[currentMipLevel].fPixels) {
924 const size_t trimRowBytes = currentWidth * bpp;
925 const size_t rowBytes = texelsShallowCopy[currentMipLevel].fRowBytes;
926
927 // copy data into the buffer, skipping the trailing bytes
928 char* dst = buffer + individualMipOffsets[currentMipLevel];
929 const char* src = (const char*)texelsShallowCopy[currentMipLevel].fPixels;
930 SkRectMemcpy(dst, trimRowBytes, src, rowBytes, trimRowBytes, currentHeight);
931
932 VkBufferImageCopy& region = regions.push_back();
933 memset(&region, 0, sizeof(VkBufferImageCopy));
934 region.bufferOffset = slice.fOffset + individualMipOffsets[currentMipLevel];
935 region.bufferRowLength = currentWidth;
936 region.bufferImageHeight = currentHeight;
937 region.imageSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, SkToU32(currentMipLevel), 0, 1};
938 region.imageOffset = {uploadLeft, uploadTop, 0};
939 region.imageExtent = {(uint32_t)currentWidth, (uint32_t)currentHeight, 1};
940 }
941
942 currentWidth = std::max(1, currentWidth/2);
943 currentHeight = std::max(1, currentHeight/2);
944 }
945
946 // Change layout of our target so it can be copied to
947 texImage->setImageLayout(this,
948 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
949 VK_ACCESS_TRANSFER_WRITE_BIT,
950 VK_PIPELINE_STAGE_TRANSFER_BIT,
951 false);
952
953 // Copy the buffer to the image. This call takes the raw VkBuffer instead of a GrGpuBuffer
954 // because we don't need the command buffer to ref the buffer here. The reason is that the
955 // buffer is coming from the staging manager, and the staging manager will make sure the
956 // command buffer has a ref on the buffer. This avoids having to add and remove a ref for
957 // every upload in the frame.
958 GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(slice.fBuffer);
959 this->currentCommandBuffer()->copyBufferToImage(this,
960 vkBuffer->vkBuffer(),
961 texImage,
962 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
963 regions.count(),
964 regions.begin());
965 return true;
966 }
967
968 // It's probably possible to roll this into uploadTexDataOptimal,
969 // but for now it's easier to maintain as a separate entity.
970 bool GrVkGpu::uploadTexDataCompressed(GrVkImage* uploadTexture,
971 SkImage::CompressionType compression, VkFormat vkFormat,
972 SkISize dimensions, GrMipmapped mipMapped,
973 const void* data, size_t dataSize) {
974 if (!this->currentCommandBuffer()) {
975 return false;
976 }
977 SkASSERT(data);
978 SkASSERT(!uploadTexture->isLinearTiled());
979 // For now the assumption is that our rect is the entire texture.
980 // Compressed textures are read-only so this should be a reasonable assumption.
981 SkASSERT(dimensions.fWidth == uploadTexture->width() &&
982 dimensions.fHeight == uploadTexture->height());
983
984 if (dimensions.fWidth == 0 || dimensions.fHeight == 0) {
985 return false;
986 }
987
988 SkASSERT(uploadTexture->imageFormat() == vkFormat);
989 SkASSERT(this->vkCaps().isVkFormatTexturable(vkFormat));
990
991
992 GrStagingBufferManager::Slice slice;
993 SkTArray<VkBufferImageCopy> regions;
994 SkTArray<size_t> individualMipOffsets;
995 SkDEBUGCODE(size_t combinedBufferSize =) fill_in_compressed_regions(&fStagingBufferManager,
996 &regions,
997 &individualMipOffsets,
998 &slice,
999 compression,
1000 vkFormat,
1001 dimensions,
1002 mipMapped);
1003 if (!slice.fBuffer) {
1004 return false;
1005 }
1006 SkASSERT(dataSize == combinedBufferSize);
1007
1008 {
1009 char* buffer = (char*)slice.fOffsetMapPtr;
1010 memcpy(buffer, data, dataSize);
1011 }
1012
1013 // Change layout of our target so it can be copied to
1014 uploadTexture->setImageLayout(this,
1015 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1016 VK_ACCESS_TRANSFER_WRITE_BIT,
1017 VK_PIPELINE_STAGE_TRANSFER_BIT,
1018 false);
1019
1020 // Copy the buffer to the image. This call takes the raw VkBuffer instead of a GrGpuBuffer
1021 // because we don't need the command buffer to ref the buffer here. The reason is that the
1022 // buffer is coming from the staging manager, and the staging manager will make sure the
1023 // command buffer has a ref on the buffer. This avoids having to add and remove a ref for
1024 // every upload in the frame.
1025 GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(slice.fBuffer);
1026 this->currentCommandBuffer()->copyBufferToImage(this,
1027 vkBuffer->vkBuffer(),
1028 uploadTexture,
1029 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1030 regions.count(),
1031 regions.begin());
1032
1033 return true;
1034 }
1035
1036 ////////////////////////////////////////////////////////////////////////////////
1037 // TODO: make this take a GrMipmapped
1038 sk_sp<GrTexture> GrVkGpu::onCreateTexture(SkISize dimensions,
1039 const GrBackendFormat& format,
1040 GrRenderable renderable,
1041 int renderTargetSampleCnt,
1042 SkBudgeted budgeted,
1043 GrProtected isProtected,
1044 int mipLevelCount,
1045 uint32_t levelClearMask) {
1046 VkFormat pixelFormat;
1047 SkAssertResult(format.asVkFormat(&pixelFormat));
1048 SkASSERT(!GrVkFormatIsCompressed(pixelFormat));
1049 SkASSERT(mipLevelCount > 0);
1050
1051 GrMipmapStatus mipmapStatus =
1052 mipLevelCount > 1 ? GrMipmapStatus::kDirty : GrMipmapStatus::kNotAllocated;
1053
1054 sk_sp<GrVkTexture> tex;
1055 if (renderable == GrRenderable::kYes) {
1056 tex = GrVkTextureRenderTarget::MakeNewTextureRenderTarget(
1057 this, budgeted, dimensions, pixelFormat, mipLevelCount, renderTargetSampleCnt,
1058 mipmapStatus, isProtected);
1059 } else {
1060 tex = GrVkTexture::MakeNewTexture(this, budgeted, dimensions, pixelFormat,
1061 mipLevelCount, isProtected, mipmapStatus);
1062 }
1063
1064 if (!tex) {
1065 return nullptr;
1066 }
1067
1068 if (levelClearMask) {
1069 if (!this->currentCommandBuffer()) {
1070 return nullptr;
1071 }
1072 SkSTArray<1, VkImageSubresourceRange> ranges;
1073 bool inRange = false;
1074 GrVkImage* texImage = tex->textureImage();
1075 for (uint32_t i = 0; i < texImage->mipLevels(); ++i) {
1076 if (levelClearMask & (1U << i)) {
1077 if (inRange) {
1078 ranges.back().levelCount++;
1079 } else {
1080 auto& range = ranges.push_back();
1081 range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
1082 range.baseArrayLayer = 0;
1083 range.baseMipLevel = i;
1084 range.layerCount = 1;
1085 range.levelCount = 1;
1086 inRange = true;
1087 }
1088 } else if (inRange) {
1089 inRange = false;
1090 }
1091 }
1092 SkASSERT(!ranges.empty());
1093 static constexpr VkClearColorValue kZeroClearColor = {};
1094 texImage->setImageLayout(this, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1095 VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, false);
1096 this->currentCommandBuffer()->clearColorImage(this, texImage, &kZeroClearColor,
1097 ranges.count(), ranges.begin());
1098 }
1099 return std::move(tex);
1100 }
1101
1102 sk_sp<GrTexture> GrVkGpu::onCreateCompressedTexture(SkISize dimensions,
1103 const GrBackendFormat& format,
1104 SkBudgeted budgeted,
1105 GrMipmapped mipMapped,
1106 GrProtected isProtected,
1107 const void* data, size_t dataSize) {
1108 VkFormat pixelFormat;
1109 SkAssertResult(format.asVkFormat(&pixelFormat));
1110 SkASSERT(GrVkFormatIsCompressed(pixelFormat));
1111
1112 int numMipLevels = 1;
1113 if (mipMapped == GrMipmapped::kYes) {
1114 numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height())+1;
1115 }
1116
1117 GrMipmapStatus mipmapStatus = (mipMapped == GrMipmapped::kYes) ? GrMipmapStatus::kValid
1118 : GrMipmapStatus::kNotAllocated;
1119
1120 auto tex = GrVkTexture::MakeNewTexture(this, budgeted, dimensions, pixelFormat,
1121 numMipLevels, isProtected, mipmapStatus);
1122 if (!tex) {
1123 return nullptr;
1124 }
1125
1126 SkImage::CompressionType compression = GrBackendFormatToCompressionType(format);
1127 if (!this->uploadTexDataCompressed(tex->textureImage(), compression, pixelFormat,
1128 dimensions, mipMapped, data, dataSize)) {
1129 return nullptr;
1130 }
1131
1132 return std::move(tex);
1133 }
1134
1135 ////////////////////////////////////////////////////////////////////////////////
1136
1137 void GrVkGpu::copyBuffer(sk_sp<GrGpuBuffer> srcBuffer,
1138 sk_sp<GrGpuBuffer> dstBuffer,
1139 VkDeviceSize srcOffset,
1140 VkDeviceSize dstOffset,
1141 VkDeviceSize size) {
1142 if (!this->currentCommandBuffer()) {
1143 return;
1144 }
1145 VkBufferCopy copyRegion;
1146 copyRegion.srcOffset = srcOffset;
1147 copyRegion.dstOffset = dstOffset;
1148 copyRegion.size = size;
1149 this->currentCommandBuffer()->copyBuffer(this, std::move(srcBuffer), std::move(dstBuffer), 1,
1150 &copyRegion);
1151 }
1152
1153 bool GrVkGpu::updateBuffer(sk_sp<GrVkBuffer> buffer, const void* src,
1154 VkDeviceSize offset, VkDeviceSize size) {
1155 if (!this->currentCommandBuffer()) {
1156 return false;
1157 }
1158 // Update the buffer
1159 this->currentCommandBuffer()->updateBuffer(this, std::move(buffer), offset, size, src);
1160
1161 return true;
1162 }
1163
1164 ////////////////////////////////////////////////////////////////////////////////
1165
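// Validates a client-supplied GrVkImageInfo before wrapping: the image (and, when adopting, its
// memory) must exist, the queue family and sharing mode must be usable with our graphics queue,
// and the usage flags must include both transfer src and dst.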
1166 static bool check_image_info(const GrVkCaps& caps,
1167 const GrVkImageInfo& info,
1168 bool needsAllocation,
1169 uint32_t graphicsQueueIndex) {
1170 if (VK_NULL_HANDLE == info.fImage) {
1171 return false;
1172 }
1173
1174 if (VK_NULL_HANDLE == info.fAlloc.fMemory && needsAllocation) {
1175 return false;
1176 }
1177
1178 if (info.fImageLayout == VK_IMAGE_LAYOUT_PRESENT_SRC_KHR && !caps.supportsSwapchain()) {
1179 return false;
1180 }
1181
1182 if (info.fCurrentQueueFamily != VK_QUEUE_FAMILY_IGNORED &&
1183 info.fCurrentQueueFamily != VK_QUEUE_FAMILY_EXTERNAL &&
1184 info.fCurrentQueueFamily != VK_QUEUE_FAMILY_FOREIGN_EXT) {
1185 if (info.fSharingMode == VK_SHARING_MODE_EXCLUSIVE) {
1186 if (info.fCurrentQueueFamily != graphicsQueueIndex) {
1187 return false;
1188 }
1189 } else {
1190 return false;
1191 }
1192 }
1193
1194 if (info.fYcbcrConversionInfo.isValid()) {
1195 if (!caps.supportsYcbcrConversion()) {
1196 return false;
1197 }
1198 if (info.fYcbcrConversionInfo.fExternalFormat != 0) {
1199 return true;
1200 }
1201 }
1202
1203 // We currently require everything to be made with transfer bits set
1204 if (!SkToBool(info.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_SRC_BIT) ||
1205 !SkToBool(info.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_DST_BIT)) {
1206 return false;
1207 }
1208
1209 return true;
1210 }
1211
1212 static bool check_tex_image_info(const GrVkCaps& caps, const GrVkImageInfo& info) {
1213 // We don't support directly importing multisampled textures for sampling from shaders.
1214 if (info.fSampleCount != 1) {
1215 return false;
1216 }
1217
1218 if (info.fYcbcrConversionInfo.isValid() && info.fYcbcrConversionInfo.fExternalFormat != 0) {
1219 return true;
1220 }
1221 if (info.fImageTiling == VK_IMAGE_TILING_OPTIMAL) {
1222 if (!caps.isVkFormatTexturable(info.fFormat)) {
1223 return false;
1224 }
1225 } else if (info.fImageTiling == VK_IMAGE_TILING_LINEAR) {
1226 if (!caps.isVkFormatTexturableLinearly(info.fFormat)) {
1227 return false;
1228 }
1229 } else if (info.fImageTiling == VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT) {
1230 if (!caps.supportsDRMFormatModifiers()) {
1231 return false;
1232 }
1233 // To be technically correct we should query the Vulkan support for VkFormat and
1234 // drmFormatModifier pairs to confirm the required feature support is there. However, we
1235 // currently don't have our caps and format tables set up to do this efficiently. So
1236 // instead we just rely on the client's passed-in VkImageUsageFlags and assume they were
1237 // set up using valid features (checked below). In practice this should all be safe because
1238 // currently we are setting all drm format modifier textures to have a
1239 // GrTextureType::kExternal, so we really just need to be able to read these video VkImages
1240 // in a shader. The video decoder isn't going to give us VkImages that don't support being
1241 // sampled.
1242 } else {
1243 SkUNREACHABLE;
1244 }
1245
1246 // We currently require all textures to be made with sample support
1247 if (!SkToBool(info.fImageUsageFlags & VK_IMAGE_USAGE_SAMPLED_BIT)) {
1248 return false;
1249 }
1250
1251 return true;
1252 }
1253
1254 static bool check_rt_image_info(const GrVkCaps& caps, const GrVkImageInfo& info, bool resolveOnly) {
1255 if (!caps.isFormatRenderable(info.fFormat, info.fSampleCount)) {
1256 return false;
1257 }
1258 if (!resolveOnly && !SkToBool(info.fImageUsageFlags & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT)) {
1259 return false;
1260 }
1261 return true;
1262 }
1263
1264 sk_sp<GrTexture> GrVkGpu::onWrapBackendTexture(const GrBackendTexture& backendTex,
1265 GrWrapOwnership ownership,
1266 GrWrapCacheable cacheable,
1267 GrIOType ioType) {
1268 GrVkImageInfo imageInfo;
1269 if (!backendTex.getVkImageInfo(&imageInfo)) {
1270 return nullptr;
1271 }
1272
1273 if (!check_image_info(this->vkCaps(), imageInfo, kAdopt_GrWrapOwnership == ownership,
1274 this->queueIndex())) {
1275 return nullptr;
1276 }
1277
1278 if (!check_tex_image_info(this->vkCaps(), imageInfo)) {
1279 return nullptr;
1280 }
1281
1282 if (backendTex.isProtected() && (fProtectedContext == GrProtected::kNo)) {
1283 return nullptr;
1284 }
1285
1286 sk_sp<GrBackendSurfaceMutableStateImpl> mutableState = backendTex.getMutableState();
1287 SkASSERT(mutableState);
1288 return GrVkTexture::MakeWrappedTexture(this, backendTex.dimensions(), ownership, cacheable,
1289 ioType, imageInfo, std::move(mutableState));
1290 }
1291
1292 sk_sp<GrTexture> GrVkGpu::onWrapCompressedBackendTexture(const GrBackendTexture& beTex,
1293 GrWrapOwnership ownership,
1294 GrWrapCacheable cacheable) {
1295 return this->onWrapBackendTexture(beTex, ownership, cacheable, kRead_GrIOType);
1296 }
1297
1298 sk_sp<GrTexture> GrVkGpu::onWrapRenderableBackendTexture(const GrBackendTexture& backendTex,
1299 int sampleCnt,
1300 GrWrapOwnership ownership,
1301 GrWrapCacheable cacheable) {
1302 GrVkImageInfo imageInfo;
1303 if (!backendTex.getVkImageInfo(&imageInfo)) {
1304 return nullptr;
1305 }
1306
1307 if (!check_image_info(this->vkCaps(), imageInfo, kAdopt_GrWrapOwnership == ownership,
1308 this->queueIndex())) {
1309 return nullptr;
1310 }
1311
1312 if (!check_tex_image_info(this->vkCaps(), imageInfo)) {
1313 return nullptr;
1314 }
1315 // If sampleCnt is > 1 we will create an intermediate MSAA VkImage and then resolve into
1316 // the wrapped VkImage.
1317 bool resolveOnly = sampleCnt > 1;
1318 if (!check_rt_image_info(this->vkCaps(), imageInfo, resolveOnly)) {
1319 return nullptr;
1320 }
1321
1322 if (backendTex.isProtected() && (fProtectedContext == GrProtected::kNo)) {
1323 return nullptr;
1324 }
1325
1326 sampleCnt = this->vkCaps().getRenderTargetSampleCount(sampleCnt, imageInfo.fFormat);
1327
1328 sk_sp<GrBackendSurfaceMutableStateImpl> mutableState = backendTex.getMutableState();
1329 SkASSERT(mutableState);
1330
1331 return GrVkTextureRenderTarget::MakeWrappedTextureRenderTarget(this, backendTex.dimensions(),
1332 sampleCnt, ownership, cacheable,
1333 imageInfo,
1334 std::move(mutableState));
1335 }
1336
1337 sk_sp<GrRenderTarget> GrVkGpu::onWrapBackendRenderTarget(const GrBackendRenderTarget& backendRT) {
1338 GrVkImageInfo info;
1339 if (!backendRT.getVkImageInfo(&info)) {
1340 return nullptr;
1341 }
1342
1343 if (!check_image_info(this->vkCaps(), info, false, this->queueIndex())) {
1344 return nullptr;
1345 }
1346
1347 // We will always render directly to this VkImage.
1348 static bool kResolveOnly = false;
1349 if (!check_rt_image_info(this->vkCaps(), info, kResolveOnly)) {
1350 return nullptr;
1351 }
1352
1353 if (backendRT.isProtected() && (fProtectedContext == GrProtected::kNo)) {
1354 return nullptr;
1355 }
1356
1357 sk_sp<GrBackendSurfaceMutableStateImpl> mutableState = backendRT.getMutableState();
1358 SkASSERT(mutableState);
1359
1360 sk_sp<GrVkRenderTarget> tgt = GrVkRenderTarget::MakeWrappedRenderTarget(
1361 this, backendRT.dimensions(), backendRT.sampleCnt(), info, std::move(mutableState));
1362
1363 // We don't allow the client to supply a premade stencil buffer. We always create one if needed.
1364 SkASSERT(!backendRT.stencilBits());
1365 if (tgt) {
1366 SkASSERT(tgt->canAttemptStencilAttachment(tgt->numSamples() > 1));
1367 }
1368
1369 return std::move(tgt);
1370 }
1371
1372 sk_sp<GrRenderTarget> GrVkGpu::onWrapVulkanSecondaryCBAsRenderTarget(
1373 const SkImageInfo& imageInfo, const GrVkDrawableInfo& vkInfo) {
1374 int maxSize = this->caps()->maxTextureSize();
1375 if (imageInfo.width() > maxSize || imageInfo.height() > maxSize) {
1376 return nullptr;
1377 }
1378
1379 GrBackendFormat backendFormat = GrBackendFormat::MakeVk(vkInfo.fFormat);
1380 if (!backendFormat.isValid()) {
1381 return nullptr;
1382 }
1383 int sampleCnt = this->vkCaps().getRenderTargetSampleCount(1, vkInfo.fFormat);
1384 if (!sampleCnt) {
1385 return nullptr;
1386 }
1387
1388 return GrVkRenderTarget::MakeSecondaryCBRenderTarget(this, imageInfo.dimensions(), vkInfo);
1389 }
1390
1391 bool GrVkGpu::loadMSAAFromResolve(GrVkCommandBuffer* commandBuffer,
1392 const GrVkRenderPass& renderPass,
1393 GrAttachment* dst,
1394 GrVkImage* src,
1395 const SkIRect& srcRect) {
1396 return fMSAALoadManager.loadMSAAFromResolve(this, commandBuffer, renderPass, dst, src, srcRect);
1397 }
1398
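// Regenerates the mip chain on the GPU by blitting each level from the previous one, inserting
// a transfer barrier between blits so level N-1 is readable before level N is written.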
1399 bool GrVkGpu::onRegenerateMipMapLevels(GrTexture* tex) {
1400 if (!this->currentCommandBuffer()) {
1401 return false;
1402 }
1403 auto* vkTex = static_cast<GrVkTexture*>(tex)->textureImage();
1404 // don't do anything for linearly tiled textures (can't have mipmaps)
1405 if (vkTex->isLinearTiled()) {
1406 SkDebugf("Trying to create mipmap for linear tiled texture");
1407 return false;
1408 }
1409 SkASSERT(tex->textureType() == GrTextureType::k2D);
1410
1411 // determine if we can blit to and from this format
1412 const GrVkCaps& caps = this->vkCaps();
1413 if (!caps.formatCanBeDstofBlit(vkTex->imageFormat(), false) ||
1414 !caps.formatCanBeSrcofBlit(vkTex->imageFormat(), false) ||
1415 !caps.mipmapSupport()) {
1416 return false;
1417 }
1418
1419 int width = tex->width();
1420 int height = tex->height();
1421 VkImageBlit blitRegion;
1422 memset(&blitRegion, 0, sizeof(VkImageBlit));
1423
1424 // SkMipmap doesn't include the base level in the level count so we have to add 1
1425 uint32_t levelCount = SkMipmap::ComputeLevelCount(tex->width(), tex->height()) + 1;
1426 SkASSERT(levelCount == vkTex->mipLevels());
1427
1428 // change layout of the layers so we can write to them.
1429 vkTex->setImageLayout(this, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_ACCESS_TRANSFER_WRITE_BIT,
1430 VK_PIPELINE_STAGE_TRANSFER_BIT, false);
1431
1432 // setup memory barrier
1433 SkASSERT(GrVkFormatIsSupported(vkTex->imageFormat()));
1434 VkImageMemoryBarrier imageMemoryBarrier = {
1435 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // sType
1436 nullptr, // pNext
1437 VK_ACCESS_TRANSFER_WRITE_BIT, // srcAccessMask
1438 VK_ACCESS_TRANSFER_READ_BIT, // dstAccessMask
1439 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, // oldLayout
1440 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, // newLayout
1441 VK_QUEUE_FAMILY_IGNORED, // srcQueueFamilyIndex
1442 VK_QUEUE_FAMILY_IGNORED, // dstQueueFamilyIndex
1443 vkTex->image(), // image
1444 {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1} // subresourceRange
1445 };
1446
1447 // Blit the miplevels
1448 uint32_t mipLevel = 1;
1449 while (mipLevel < levelCount) {
1450 int prevWidth = width;
1451 int prevHeight = height;
1452 width = std::max(1, width / 2);
1453 height = std::max(1, height / 2);
1454
1455 imageMemoryBarrier.subresourceRange.baseMipLevel = mipLevel - 1;
1456 this->addImageMemoryBarrier(vkTex->resource(), VK_PIPELINE_STAGE_TRANSFER_BIT,
1457 VK_PIPELINE_STAGE_TRANSFER_BIT, false, &imageMemoryBarrier);
1458
1459 blitRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, mipLevel - 1, 0, 1 };
1460 blitRegion.srcOffsets[0] = { 0, 0, 0 };
1461 blitRegion.srcOffsets[1] = { prevWidth, prevHeight, 1 };
1462 blitRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, mipLevel, 0, 1 };
1463 blitRegion.dstOffsets[0] = { 0, 0, 0 };
1464 blitRegion.dstOffsets[1] = { width, height, 1 };
1465 this->currentCommandBuffer()->blitImage(this,
1466 vkTex->resource(),
1467 vkTex->image(),
1468 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
1469 vkTex->resource(),
1470 vkTex->image(),
1471 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1472 1,
1473 &blitRegion,
1474 VK_FILTER_LINEAR);
1475 ++mipLevel;
1476 }
1477 if (levelCount > 1) {
1478 // This barrier logically is not needed, but it changes the final level to the same layout
1479 // as all the others, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL. This makes tracking of the
1480 // layouts and future layout changes easier. The alternative here would be to track layout
1481     // and memory accesses per layer, which doesn't seem worth it.
1482 imageMemoryBarrier.subresourceRange.baseMipLevel = mipLevel - 1;
1483 this->addImageMemoryBarrier(vkTex->resource(), VK_PIPELINE_STAGE_TRANSFER_BIT,
1484 VK_PIPELINE_STAGE_TRANSFER_BIT, false, &imageMemoryBarrier);
1485 vkTex->updateImageLayout(VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
1486 }
1487 return true;
1488 }
1489
1490 ////////////////////////////////////////////////////////////////////////////////
1491
1492 sk_sp<GrAttachment> GrVkGpu::makeStencilAttachment(const GrBackendFormat& /*colorFormat*/,
1493 SkISize dimensions, int numStencilSamples) {
1494 VkFormat sFmt = this->vkCaps().preferredStencilFormat();
1495
1496 fStats.incStencilAttachmentCreates();
1497 return GrVkImage::MakeStencil(this, dimensions, numStencilSamples, sFmt);
1498 }
1499
1500 sk_sp<GrAttachment> GrVkGpu::makeMSAAAttachment(SkISize dimensions,
1501 const GrBackendFormat& format,
1502 int numSamples,
1503 GrProtected isProtected,
1504 GrMemoryless memoryless) {
1505 VkFormat pixelFormat;
1506 SkAssertResult(format.asVkFormat(&pixelFormat));
1507 SkASSERT(!GrVkFormatIsCompressed(pixelFormat));
1508 SkASSERT(this->vkCaps().isFormatRenderable(pixelFormat, numSamples));
1509
1510 fStats.incMSAAAttachmentCreates();
1511 return GrVkImage::MakeMSAA(this, dimensions, numSamples, pixelFormat, isProtected, memoryless);
1512 }
1513
1514 ////////////////////////////////////////////////////////////////////////////////
1515
1516 bool copy_src_data(char* mapPtr,
1517 VkFormat vkFormat,
1518 const SkTArray<size_t>& individualMipOffsets,
1519 const GrPixmap srcData[],
1520 int numMipLevels) {
1521 SkASSERT(srcData && numMipLevels);
1522 SkASSERT(!GrVkFormatIsCompressed(vkFormat));
1523 SkASSERT(individualMipOffsets.count() == numMipLevels);
1524 SkASSERT(mapPtr);
1525
1526 size_t bytesPerPixel = GrVkFormatBytesPerBlock(vkFormat);
1527
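    // Copy each level into the staging memory with tightly packed rows (trimRB), dropping any
    // row padding present in the source pixmaps.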
1528 for (int level = 0; level < numMipLevels; ++level) {
1529 const size_t trimRB = srcData[level].info().width() * bytesPerPixel;
1530
1531 SkRectMemcpy(mapPtr + individualMipOffsets[level], trimRB,
1532 srcData[level].addr(), srcData[level].rowBytes(),
1533 trimRB, srcData[level].height());
1534 }
1535 return true;
1536 }
1537
1538 bool GrVkGpu::createVkImageForBackendSurface(VkFormat vkFormat,
1539 SkISize dimensions,
1540 int sampleCnt,
1541 GrTexturable texturable,
1542 GrRenderable renderable,
1543 GrMipmapped mipMapped,
1544 GrVkImageInfo* info,
1545 GrProtected isProtected) {
1546 SkASSERT(texturable == GrTexturable::kYes || renderable == GrRenderable::kYes);
1547
1548 if (fProtectedContext != isProtected) {
1549 return false;
1550 }
1551
1552 if (texturable == GrTexturable::kYes && !fVkCaps->isVkFormatTexturable(vkFormat)) {
1553 return false;
1554 }
1555
1556 // MSAA images are only currently used by createTestingOnlyBackendRenderTarget.
1557 if (sampleCnt > 1 && (texturable == GrTexturable::kYes || renderable == GrRenderable::kNo)) {
1558 return false;
1559 }
1560
1561 if (renderable == GrRenderable::kYes) {
1562 sampleCnt = fVkCaps->getRenderTargetSampleCount(sampleCnt, vkFormat);
1563 if (!sampleCnt) {
1564 return false;
1565 }
1566 }
1567
1568
1569 int numMipLevels = 1;
1570 if (mipMapped == GrMipmapped::kYes) {
1571 numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1;
1572 }
1573
1574 VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
1575 VK_IMAGE_USAGE_TRANSFER_DST_BIT;
1576 if (texturable == GrTexturable::kYes) {
1577 usageFlags |= VK_IMAGE_USAGE_SAMPLED_BIT;
1578 }
1579 if (renderable == GrRenderable::kYes) {
1580 usageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
1581 // We always make our render targets support being used as input attachments
1582 usageFlags |= VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
1583 }
1584
1585 GrVkImage::ImageDesc imageDesc;
1586 imageDesc.fImageType = VK_IMAGE_TYPE_2D;
1587 imageDesc.fFormat = vkFormat;
1588 imageDesc.fWidth = dimensions.width();
1589 imageDesc.fHeight = dimensions.height();
1590 imageDesc.fLevels = numMipLevels;
1591 imageDesc.fSamples = sampleCnt;
1592 imageDesc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
1593 imageDesc.fUsageFlags = usageFlags;
1594 imageDesc.fMemProps = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
1595 imageDesc.fIsProtected = fProtectedContext;
1596
1597 if (!GrVkImage::InitImageInfo(this, imageDesc, info)) {
1598 SkDebugf("Failed to init image info\n");
1599 return false;
1600 }
1601
1602 return true;
1603 }
1604
1605 bool GrVkGpu::onClearBackendTexture(const GrBackendTexture& backendTexture,
1606 sk_sp<GrRefCntedCallback> finishedCallback,
1607 std::array<float, 4> color) {
1608 GrVkImageInfo info;
1609 SkAssertResult(backendTexture.getVkImageInfo(&info));
1610
1611 sk_sp<GrBackendSurfaceMutableStateImpl> mutableState = backendTexture.getMutableState();
1612 SkASSERT(mutableState);
1613 sk_sp<GrVkTexture> texture =
1614 GrVkTexture::MakeWrappedTexture(this, backendTexture.dimensions(),
1615 kBorrow_GrWrapOwnership, GrWrapCacheable::kNo,
1616 kRW_GrIOType, info, std::move(mutableState));
1617 if (!texture) {
1618 return false;
1619 }
1620 GrVkImage* texImage = texture->textureImage();
1621
1622 GrVkPrimaryCommandBuffer* cmdBuffer = this->currentCommandBuffer();
1623 if (!cmdBuffer) {
1624 return false;
1625 }
1626
1627 texImage->setImageLayout(this,
1628 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1629 VK_ACCESS_TRANSFER_WRITE_BIT,
1630 VK_PIPELINE_STAGE_TRANSFER_BIT,
1631 false);
1632
1633 // CmdClearColorImage doesn't work for compressed formats
1634 SkASSERT(!GrVkFormatIsCompressed(info.fFormat));
1635
1636 VkClearColorValue vkColor;
1637 // If we ever support SINT or UINT formats this needs to be updated to use the int32 and
1638 // uint32 union members in those cases.
1639 vkColor.float32[0] = color[0];
1640 vkColor.float32[1] = color[1];
1641 vkColor.float32[2] = color[2];
1642 vkColor.float32[3] = color[3];
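    // Clear every mip level of the image's single layer with the requested color.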
1643 VkImageSubresourceRange range;
1644 range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
1645 range.baseArrayLayer = 0;
1646 range.baseMipLevel = 0;
1647 range.layerCount = 1;
1648 range.levelCount = info.fLevelCount;
1649 cmdBuffer->clearColorImage(this, texImage, &vkColor, 1, &range);
1650
1651     // Change the image layout to shader read since, if this texture is used as a borrowed
1652     // texture within Ganesh, we require its layout to be set to that.
1653 texImage->setImageLayout(this, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
1654 VK_ACCESS_SHADER_READ_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
1655 false);
1656
1657 if (finishedCallback) {
1658 this->addFinishedCallback(std::move(finishedCallback));
1659 }
1660 return true;
1661 }
1662
1663 GrBackendTexture GrVkGpu::onCreateBackendTexture(SkISize dimensions,
1664 const GrBackendFormat& format,
1665 GrRenderable renderable,
1666 GrMipmapped mipMapped,
1667 GrProtected isProtected) {
1668 const GrVkCaps& caps = this->vkCaps();
1669
1670 if (fProtectedContext != isProtected) {
1671 return {};
1672 }
1673
1674 VkFormat vkFormat;
1675 if (!format.asVkFormat(&vkFormat)) {
1676 return {};
1677 }
1678
1679 // TODO: move the texturability check up to GrGpu::createBackendTexture and just assert here
1680 if (!caps.isVkFormatTexturable(vkFormat)) {
1681 return {};
1682 }
1683
1684 if (GrVkFormatNeedsYcbcrSampler(vkFormat)) {
1685 return {};
1686 }
1687
1688 GrVkImageInfo info;
1689 if (!this->createVkImageForBackendSurface(vkFormat, dimensions, 1, GrTexturable::kYes,
1690 renderable, mipMapped, &info, isProtected)) {
1691 return {};
1692 }
1693
1694 return GrBackendTexture(dimensions.width(), dimensions.height(), info);
1695 }
1696
1697 GrBackendTexture GrVkGpu::onCreateCompressedBackendTexture(
1698 SkISize dimensions, const GrBackendFormat& format, GrMipmapped mipMapped,
1699 GrProtected isProtected) {
1700 return this->onCreateBackendTexture(dimensions, format, GrRenderable::kNo, mipMapped,
1701 isProtected);
1702 }
1703
1704 bool GrVkGpu::onUpdateCompressedBackendTexture(const GrBackendTexture& backendTexture,
1705 sk_sp<GrRefCntedCallback> finishedCallback,
1706 const void* data,
1707 size_t size) {
1708 GrVkImageInfo info;
1709 SkAssertResult(backendTexture.getVkImageInfo(&info));
1710
1711 sk_sp<GrBackendSurfaceMutableStateImpl> mutableState = backendTexture.getMutableState();
1712 SkASSERT(mutableState);
1713 sk_sp<GrVkTexture> texture = GrVkTexture::MakeWrappedTexture(this,
1714 backendTexture.dimensions(),
1715 kBorrow_GrWrapOwnership,
1716 GrWrapCacheable::kNo,
1717 kRW_GrIOType,
1718 info,
1719 std::move(mutableState));
1720 if (!texture) {
1721 return false;
1722 }
1723
1724 GrVkPrimaryCommandBuffer* cmdBuffer = this->currentCommandBuffer();
1725 if (!cmdBuffer) {
1726 return false;
1727 }
1728 GrVkImage* image = texture->textureImage();
1729 image->setImageLayout(this,
1730 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1731 VK_ACCESS_TRANSFER_WRITE_BIT,
1732 VK_PIPELINE_STAGE_TRANSFER_BIT,
1733 false);
1734
1735 SkImage::CompressionType compression =
1736 GrBackendFormatToCompressionType(backendTexture.getBackendFormat());
1737
1738 SkTArray<VkBufferImageCopy> regions;
1739 SkTArray<size_t> individualMipOffsets;
1740 GrStagingBufferManager::Slice slice;
1741
1742 fill_in_compressed_regions(&fStagingBufferManager,
1743     &regions,
1744 &individualMipOffsets,
1745 &slice,
1746 compression,
1747 info.fFormat,
1748 backendTexture.dimensions(),
1749 backendTexture.fMipmapped);
1750
1751 if (!slice.fBuffer) {
1752 return false;
1753 }
1754
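    // Copy the caller's compressed data (all mip levels) into the mapped staging slice in one go.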
1755 memcpy(slice.fOffsetMapPtr, data, size);
1756
1757 cmdBuffer->addGrSurface(texture);
1758 // Copy the buffer to the image. This call takes the raw VkBuffer instead of a GrGpuBuffer
1759     // because we don't need the command buffer to ref the buffer here. The reason is that
1760 // the buffer is coming from the staging manager and the staging manager will make sure the
1761 // command buffer has a ref on the buffer. This avoids having to add and remove a ref for
1762 // every upload in the frame.
1763 cmdBuffer->copyBufferToImage(this,
1764 static_cast<GrVkBuffer*>(slice.fBuffer)->vkBuffer(),
1765 image,
1766 image->currentLayout(),
1767 regions.count(),
1768 regions.begin());
1769
1770     // Change the image layout to shader read since, if this texture is used as a borrowed
1771     // texture within Ganesh, we require its layout to be set to that.
1772 image->setImageLayout(this,
1773 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
1774 VK_ACCESS_SHADER_READ_BIT,
1775 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
1776 false);
1777
1778 if (finishedCallback) {
1779 this->addFinishedCallback(std::move(finishedCallback));
1780 }
1781 return true;
1782 }
1783
1784 void set_layout_and_queue_from_mutable_state(GrVkGpu* gpu, GrVkImage* image,
1785 const GrVkSharedImageInfo& newInfo) {
1786 // Even though internally we use this helper for getting src access flags and stages they
1787 // can also be used for general dst flags since we don't know exactly what the client
1788 // plans on using the image for.
1789 VkImageLayout newLayout = newInfo.getImageLayout();
1790 if (newLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
1791 newLayout = image->currentLayout();
1792 }
1793 VkPipelineStageFlags dstStage = GrVkImage::LayoutToPipelineSrcStageFlags(newLayout);
1794 VkAccessFlags dstAccess = GrVkImage::LayoutToSrcAccessMask(newLayout);
1795
1796 uint32_t currentQueueFamilyIndex = image->currentQueueFamilyIndex();
1797 uint32_t newQueueFamilyIndex = newInfo.getQueueFamilyIndex();
1798 auto isSpecialQueue = [](uint32_t queueFamilyIndex) {
1799 return queueFamilyIndex == VK_QUEUE_FAMILY_EXTERNAL ||
1800 queueFamilyIndex == VK_QUEUE_FAMILY_FOREIGN_EXT;
1801 };
1802 if (isSpecialQueue(currentQueueFamilyIndex) && isSpecialQueue(newQueueFamilyIndex)) {
1803 // It is illegal to have both the new and old queue be special queue families (i.e. external
1804 // or foreign).
1805 return;
1806 }
1807
1808 image->setImageLayoutAndQueueIndex(gpu, newLayout, dstAccess, dstStage, false,
1809 newQueueFamilyIndex);
1810 }
1811
1812 bool GrVkGpu::setBackendSurfaceState(GrVkImageInfo info,
1813 sk_sp<GrBackendSurfaceMutableStateImpl> currentState,
1814 SkISize dimensions,
1815 const GrVkSharedImageInfo& newInfo,
1816 GrBackendSurfaceMutableState* previousState,
1817 sk_sp<GrRefCntedCallback> finishedCallback) {
1818 sk_sp<GrVkImage> texture = GrVkImage::MakeWrapped(this,
1819 dimensions,
1820 info,
1821 std::move(currentState),
1822 GrVkImage::UsageFlags::kColorAttachment,
1823 kBorrow_GrWrapOwnership,
1824 GrWrapCacheable::kNo,
1825 /*forSecondaryCB=*/false);
1826 SkASSERT(texture);
1827 if (!texture) {
1828 return false;
1829 }
1830 if (previousState) {
1831 previousState->setVulkanState(texture->currentLayout(),
1832 texture->currentQueueFamilyIndex());
1833 }
1834 set_layout_and_queue_from_mutable_state(this, texture.get(), newInfo);
1835 if (finishedCallback) {
1836 this->addFinishedCallback(std::move(finishedCallback));
1837 }
1838 return true;
1839 }
1840
1841 bool GrVkGpu::setBackendTextureState(const GrBackendTexture& backendTexture,
1842 const GrBackendSurfaceMutableState& newState,
1843 GrBackendSurfaceMutableState* previousState,
1844 sk_sp<GrRefCntedCallback> finishedCallback) {
1845 GrVkImageInfo info;
1846     SkAssertResult(backendTexture.getVkImageInfo(&info));
1847     sk_sp<GrBackendSurfaceMutableStateImpl> currentState = backendTexture.getMutableState();
1848 SkASSERT(currentState);
1849 SkASSERT(newState.isValid() && newState.fBackend == GrBackend::kVulkan);
1850     return this->setBackendSurfaceState(info, std::move(currentState), backendTexture.dimensions(),
1851 newState.fVkState, previousState,
1852 std::move(finishedCallback));
1853 }
1854
1855 bool GrVkGpu::setBackendRenderTargetState(const GrBackendRenderTarget& backendRenderTarget,
1856 const GrBackendSurfaceMutableState& newState,
1857 GrBackendSurfaceMutableState* previousState,
1858 sk_sp<GrRefCntedCallback> finishedCallback) {
1859 GrVkImageInfo info;
1860 SkAssertResult(backendRenderTarget.getVkImageInfo(&info));
1861 sk_sp<GrBackendSurfaceMutableStateImpl> currentState = backendRenderTarget.getMutableState();
1862 SkASSERT(currentState);
1863 SkASSERT(newState.fBackend == GrBackend::kVulkan);
1864 return this->setBackendSurfaceState(info, std::move(currentState),
1865 backendRenderTarget.dimensions(), newState.fVkState,
1866 previousState, std::move(finishedCallback));
1867 }
1868
1869 void GrVkGpu::xferBarrier(GrRenderTarget* rt, GrXferBarrierType barrierType) {
1870 GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt);
1871 VkPipelineStageFlags dstStage;
1872 VkAccessFlags dstAccess;
1873 if (barrierType == kBlend_GrXferBarrierType) {
1874 dstStage = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
1875 dstAccess = VK_ACCESS_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT;
1876 } else {
1877 SkASSERT(barrierType == kTexture_GrXferBarrierType);
1878 dstStage = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
1879 dstAccess = VK_ACCESS_INPUT_ATTACHMENT_READ_BIT;
1880 }
1881 GrVkImage* image = vkRT->colorAttachment();
1882 VkImageMemoryBarrier barrier;
1883 barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
1884 barrier.pNext = nullptr;
1885 barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
1886 barrier.dstAccessMask = dstAccess;
1887 barrier.oldLayout = image->currentLayout();
1888 barrier.newLayout = barrier.oldLayout;
1889 barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
1890 barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
1891 barrier.image = image->image();
1892 barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, image->mipLevels(), 0, 1};
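    // The layout stays the same; this barrier only makes prior color attachment writes visible
    // to the non-coherent blend read or input attachment read that follows.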
1893 this->addImageMemoryBarrier(image->resource(),
1894 VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
1895 dstStage, true, &barrier);
1896 }
1897
1898 void GrVkGpu::deleteBackendTexture(const GrBackendTexture& tex) {
1899 SkASSERT(GrBackendApi::kVulkan == tex.fBackend);
1900
1901 GrVkImageInfo info;
1902 if (tex.getVkImageInfo(&info)) {
1903 GrVkImage::DestroyImageInfo(this, const_cast<GrVkImageInfo*>(&info));
1904 }
1905 }
1906
1907 bool GrVkGpu::compile(const GrProgramDesc& desc, const GrProgramInfo& programInfo) {
1908 GrVkRenderPass::AttachmentsDescriptor attachmentsDescriptor;
1909 GrVkRenderPass::AttachmentFlags attachmentFlags;
1910 GrVkRenderTarget::ReconstructAttachmentsDescriptor(this->vkCaps(), programInfo,
1911 &attachmentsDescriptor, &attachmentFlags);
1912
1913 GrVkRenderPass::SelfDependencyFlags selfDepFlags = GrVkRenderPass::SelfDependencyFlags::kNone;
1914 if (programInfo.renderPassBarriers() & GrXferBarrierFlags::kBlend) {
1915 selfDepFlags |= GrVkRenderPass::SelfDependencyFlags::kForNonCoherentAdvBlend;
1916 }
1917 if (programInfo.renderPassBarriers() & GrXferBarrierFlags::kTexture) {
1918 selfDepFlags |= GrVkRenderPass::SelfDependencyFlags::kForInputAttachment;
1919 }
1920
1921 GrVkRenderPass::LoadFromResolve loadFromResolve = GrVkRenderPass::LoadFromResolve::kNo;
1922 if (this->vkCaps().programInfoWillUseDiscardableMSAA(programInfo) &&
1923 programInfo.colorLoadOp() == GrLoadOp::kLoad) {
1924 loadFromResolve = GrVkRenderPass::LoadFromResolve::kLoad;
1925 }
1926 sk_sp<const GrVkRenderPass> renderPass(this->resourceProvider().findCompatibleRenderPass(
1927 &attachmentsDescriptor, attachmentFlags, selfDepFlags, loadFromResolve));
1928 if (!renderPass) {
1929 return false;
1930 }
1931
1932 GrThreadSafePipelineBuilder::Stats::ProgramCacheResult stat;
1933
1934 auto pipelineState = this->resourceProvider().findOrCreateCompatiblePipelineState(
1935 desc,
1936 programInfo,
1937 renderPass->vkRenderPass(),
1938 &stat);
1939 if (!pipelineState) {
1940 return false;
1941 }
1942
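    // Return true if this call did real compilation work (the program was not already cached).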
1943 return stat != GrThreadSafePipelineBuilder::Stats::ProgramCacheResult::kHit;
1944 }
1945
1946 #if GR_TEST_UTILS
1947 bool GrVkGpu::isTestingOnlyBackendTexture(const GrBackendTexture& tex) const {
1948 SkASSERT(GrBackendApi::kVulkan == tex.fBackend);
1949
1950 GrVkImageInfo backend;
1951 if (!tex.getVkImageInfo(&backend)) {
1952 return false;
1953 }
1954
1955 if (backend.fImage && backend.fAlloc.fMemory) {
1956 VkMemoryRequirements req;
1957 memset(&req, 0, sizeof(req));
1958 GR_VK_CALL(this->vkInterface(), GetImageMemoryRequirements(fDevice,
1959 backend.fImage,
1960 &req));
1961 // TODO: find a better check
1962 // This will probably fail with a different driver
1963 return (req.size > 0) && (req.size <= 8192 * 8192);
1964 }
1965
1966 return false;
1967 }
1968
1969 GrBackendRenderTarget GrVkGpu::createTestingOnlyBackendRenderTarget(SkISize dimensions,
1970 GrColorType ct,
1971 int sampleCnt,
1972 GrProtected isProtected) {
1973 if (dimensions.width() > this->caps()->maxRenderTargetSize() ||
1974 dimensions.height() > this->caps()->maxRenderTargetSize()) {
1975 return {};
1976 }
1977
1978 VkFormat vkFormat = this->vkCaps().getFormatFromColorType(ct);
1979
1980 GrVkImageInfo info;
1981 if (!this->createVkImageForBackendSurface(vkFormat, dimensions, sampleCnt, GrTexturable::kNo,
1982 GrRenderable::kYes, GrMipmapped::kNo, &info,
1983 isProtected)) {
1984 return {};
1985 }
1986 return GrBackendRenderTarget(dimensions.width(), dimensions.height(), 0, info);
1987 }
1988
1989 void GrVkGpu::deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget& rt) {
1990 SkASSERT(GrBackendApi::kVulkan == rt.fBackend);
1991
1992 GrVkImageInfo info;
1993 if (rt.getVkImageInfo(&info)) {
1994 // something in the command buffer may still be using this, so force submit
1995 SkAssertResult(this->submitCommandBuffer(kForce_SyncQueue));
1996 GrVkImage::DestroyImageInfo(this, const_cast<GrVkImageInfo*>(&info));
1997 }
1998 }
1999 #endif
2000
2001 ////////////////////////////////////////////////////////////////////////////////
2002
2003 void GrVkGpu::addBufferMemoryBarrier(const GrManagedResource* resource,
2004 VkPipelineStageFlags srcStageMask,
2005 VkPipelineStageFlags dstStageMask,
2006 bool byRegion,
2007 VkBufferMemoryBarrier* barrier) const {
2008 if (!this->currentCommandBuffer()) {
2009 return;
2010 }
2011 SkASSERT(resource);
2012 this->currentCommandBuffer()->pipelineBarrier(this,
2013 resource,
2014 srcStageMask,
2015 dstStageMask,
2016 byRegion,
2017 GrVkCommandBuffer::kBufferMemory_BarrierType,
2018 barrier);
2019 }
2020 void GrVkGpu::addBufferMemoryBarrier(VkPipelineStageFlags srcStageMask,
2021 VkPipelineStageFlags dstStageMask,
2022 bool byRegion,
2023 VkBufferMemoryBarrier* barrier) const {
2024 if (!this->currentCommandBuffer()) {
2025 return;
2026 }
2027     // We don't pass a resource to the command buffer here. The command buffer would only use it
2028     // to hold a ref, and every place where we add a buffer memory barrier we are also issuing
2029     // some other command with the buffer on the command buffer. Those other commands already
2030     // cause the command buffer to hold a ref to the buffer.
2031 this->currentCommandBuffer()->pipelineBarrier(this,
2032 /*resource=*/nullptr,
2033 srcStageMask,
2034 dstStageMask,
2035 byRegion,
2036 GrVkCommandBuffer::kBufferMemory_BarrierType,
2037 barrier);
2038 }
2039
2040 void GrVkGpu::addImageMemoryBarrier(const GrManagedResource* resource,
2041 VkPipelineStageFlags srcStageMask,
2042 VkPipelineStageFlags dstStageMask,
2043 bool byRegion,
2044 VkImageMemoryBarrier* barrier) const {
2045 // If we are in the middle of destroying or abandoning the context we may hit a release proc
2046     // that triggers the destruction of a GrVkImage. This could cause us to try to transfer the
2047     // VkImage back to the original queue. In this state we don't submit any more work and we may not
2048 // have a current command buffer. Thus we won't do the queue transfer.
2049 if (!this->currentCommandBuffer()) {
2050 return;
2051 }
2052 SkASSERT(resource);
2053 this->currentCommandBuffer()->pipelineBarrier(this,
2054 resource,
2055 srcStageMask,
2056 dstStageMask,
2057 byRegion,
2058 GrVkCommandBuffer::kImageMemory_BarrierType,
2059 barrier);
2060 }
2061
2062 void GrVkGpu::prepareSurfacesForBackendAccessAndStateUpdates(
2063 SkSpan<GrSurfaceProxy*> proxies,
2064 SkSurface::BackendSurfaceAccess access,
2065 const GrBackendSurfaceMutableState* newState) {
2066 // Submit the current command buffer to the Queue. Whether we inserted semaphores or not does
2067     // not affect what we do here.
2068 if (!proxies.empty() && (access == SkSurface::BackendSurfaceAccess::kPresent || newState)) {
2069 // We currently don't support passing in new surface state for multiple proxies here. The
2070 // only time we have multiple proxies is if we are flushing a yuv SkImage which won't have
2071         // state updates anyway. Additionally, if we have a newState then we must not have any
2072 // BackendSurfaceAccess.
2073 SkASSERT(!newState || proxies.size() == 1);
2074 SkASSERT(!newState || access == SkSurface::BackendSurfaceAccess::kNoAccess);
2075 GrVkImage* image;
2076 for (GrSurfaceProxy* proxy : proxies) {
2077 SkASSERT(proxy->isInstantiated());
2078 if (GrTexture* tex = proxy->peekTexture()) {
2079 image = static_cast<GrVkTexture*>(tex)->textureImage();
2080 } else {
2081 GrRenderTarget* rt = proxy->peekRenderTarget();
2082 SkASSERT(rt);
2083 GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt);
2084 image = vkRT->externalAttachment();
2085 }
2086 if (newState) {
2087 const GrVkSharedImageInfo& newInfo = newState->fVkState;
2088 set_layout_and_queue_from_mutable_state(this, image, newInfo);
2089 } else {
2090 SkASSERT(access == SkSurface::BackendSurfaceAccess::kPresent);
2091 image->prepareForPresent(this);
2092 }
2093 }
2094 }
2095 }
2096
2097 void GrVkGpu::addFinishedProc(GrGpuFinishedProc finishedProc,
2098 GrGpuFinishedContext finishedContext) {
2099 SkASSERT(finishedProc);
2100 this->addFinishedCallback(GrRefCntedCallback::Make(finishedProc, finishedContext));
2101 }
2102
2103 void GrVkGpu::addFinishedCallback(sk_sp<GrRefCntedCallback> finishedCallback) {
2104 SkASSERT(finishedCallback);
2105 fResourceProvider.addFinishedProcToActiveCommandBuffers(std::move(finishedCallback));
2106 }
2107
2108 void GrVkGpu::takeOwnershipOfBuffer(sk_sp<GrGpuBuffer> buffer) {
2109 this->currentCommandBuffer()->addGrBuffer(std::move(buffer));
2110 }
2111
2112 bool GrVkGpu::onSubmitToGpu(bool syncCpu) {
2113 if (syncCpu) {
2114 return this->submitCommandBuffer(kForce_SyncQueue);
2115 } else {
2116 return this->submitCommandBuffer(kSkip_SyncQueue);
2117 }
2118 }
2119
2120 void GrVkGpu::finishOutstandingGpuWork() {
2121 VK_CALL(QueueWaitIdle(fQueue));
2122
2123 if (this->vkCaps().mustSyncCommandBuffersWithQueue()) {
2124 fResourceProvider.forceSyncAllCommandBuffers();
2125 }
2126 }
2127
2128 void GrVkGpu::onReportSubmitHistograms() {
2129 #if SK_HISTOGRAMS_ENABLED
2130 uint64_t allocatedMemory = fMemoryAllocator->totalAllocatedMemory();
2131 uint64_t usedMemory = fMemoryAllocator->totalUsedMemory();
2132 SkASSERT(usedMemory <= allocatedMemory);
2133 if (allocatedMemory > 0) {
2134 SK_HISTOGRAM_PERCENTAGE("VulkanMemoryAllocator.PercentUsed",
2135 (usedMemory * 100) / allocatedMemory);
2136 }
2137     // allocatedMemory is in bytes and needs to be reported in kilobytes. SK_HISTOGRAM_MEMORY_KB
2138 // supports samples up to around 500MB which should support the amounts of memory we allocate.
2139 SK_HISTOGRAM_MEMORY_KB("VulkanMemoryAllocator.AmountAllocated", allocatedMemory >> 10);
2140 #endif
2141 }
2142
2143 void GrVkGpu::copySurfaceAsCopyImage(GrSurface* dst,
2144 GrSurface* src,
2145 GrVkImage* dstImage,
2146 GrVkImage* srcImage,
2147 const SkIRect& srcRect,
2148 const SkIPoint& dstPoint) {
2149 if (!this->currentCommandBuffer()) {
2150 return;
2151 }
2152
2153 #ifdef SK_DEBUG
2154 int dstSampleCnt = dstImage->numSamples();
2155 int srcSampleCnt = srcImage->numSamples();
2156 bool dstHasYcbcr = dstImage->ycbcrConversionInfo().isValid();
2157 bool srcHasYcbcr = srcImage->ycbcrConversionInfo().isValid();
2158 VkFormat dstFormat = dstImage->imageFormat();
2159 VkFormat srcFormat;
2160     SkAssertResult(src->backendFormat().asVkFormat(&srcFormat));
2161 SkASSERT(this->vkCaps().canCopyImage(dstFormat, dstSampleCnt, dstHasYcbcr,
2162 srcFormat, srcSampleCnt, srcHasYcbcr));
2163 #endif
2164 if (src->isProtected() && !dst->isProtected()) {
2165 SkDebugf("Can't copy from protected memory to non-protected");
2166 return;
2167 }
2168
2169     // These flags are for flushing/invalidating caches; for the dst image it doesn't matter if
2170     // the cache is flushed since it is only being written to.
2171 dstImage->setImageLayout(this,
2172 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2173 VK_ACCESS_TRANSFER_WRITE_BIT,
2174 VK_PIPELINE_STAGE_TRANSFER_BIT,
2175 false);
2176
2177 srcImage->setImageLayout(this,
2178 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
2179 VK_ACCESS_TRANSFER_READ_BIT,
2180 VK_PIPELINE_STAGE_TRANSFER_BIT,
2181 false);
2182
2183 VkImageCopy copyRegion;
2184     memset(&copyRegion, 0, sizeof(VkImageCopy));
2185 copyRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
2186 copyRegion.srcOffset = { srcRect.fLeft, srcRect.fTop, 0 };
2187 copyRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
2188 copyRegion.dstOffset = { dstPoint.fX, dstPoint.fY, 0 };
2189 copyRegion.extent = { (uint32_t)srcRect.width(), (uint32_t)srcRect.height(), 1 };
2190
2191 this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(src));
2192 this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(dst));
2193 this->currentCommandBuffer()->copyImage(this,
2194 srcImage,
2195 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
2196 dstImage,
2197 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2198 1,
2199     &copyRegion);
2200
2201 SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
2202 srcRect.width(), srcRect.height());
2203 // The rect is already in device space so we pass in kTopLeft so no flip is done.
2204 this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect);
2205 }
2206
2207 void GrVkGpu::copySurfaceAsBlit(GrSurface* dst,
2208 GrSurface* src,
2209 GrVkImage* dstImage,
2210 GrVkImage* srcImage,
2211 const SkIRect& srcRect,
2212 const SkIPoint& dstPoint) {
2213 if (!this->currentCommandBuffer()) {
2214 return;
2215 }
2216
2217 #ifdef SK_DEBUG
2218 int dstSampleCnt = dstImage->numSamples();
2219 int srcSampleCnt = srcImage->numSamples();
2220 bool dstHasYcbcr = dstImage->ycbcrConversionInfo().isValid();
2221 bool srcHasYcbcr = srcImage->ycbcrConversionInfo().isValid();
2222 VkFormat dstFormat = dstImage->imageFormat();
2223 VkFormat srcFormat;
2224     SkAssertResult(src->backendFormat().asVkFormat(&srcFormat));
2225 SkASSERT(this->vkCaps().canCopyAsBlit(dstFormat,
2226 dstSampleCnt,
2227 dstImage->isLinearTiled(),
2228 dstHasYcbcr,
2229 srcFormat,
2230 srcSampleCnt,
2231 srcImage->isLinearTiled(),
2232 srcHasYcbcr));
2233
2234 #endif
2235 if (src->isProtected() && !dst->isProtected()) {
2236 SkDebugf("Can't copy from protected memory to non-protected");
2237 return;
2238 }
2239
2240 dstImage->setImageLayout(this,
2241 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2242 VK_ACCESS_TRANSFER_WRITE_BIT,
2243 VK_PIPELINE_STAGE_TRANSFER_BIT,
2244 false);
2245
2246 srcImage->setImageLayout(this,
2247 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
2248 VK_ACCESS_TRANSFER_READ_BIT,
2249 VK_PIPELINE_STAGE_TRANSFER_BIT,
2250 false);
2251
2252 // Flip rect if necessary
2253 SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY, srcRect.width(),
2254 srcRect.height());
2255
2256 VkImageBlit blitRegion;
2257 memset(&blitRegion, 0, sizeof(VkImageBlit));
2258 blitRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
2259 blitRegion.srcOffsets[0] = { srcRect.fLeft, srcRect.fTop, 0 };
2260 blitRegion.srcOffsets[1] = { srcRect.fRight, srcRect.fBottom, 1 };
2261 blitRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
2262 blitRegion.dstOffsets[0] = { dstRect.fLeft, dstRect.fTop, 0 };
2263 blitRegion.dstOffsets[1] = { dstRect.fRight, dstRect.fBottom, 1 };
2264
2265 this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(src));
2266 this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(dst));
2267 this->currentCommandBuffer()->blitImage(this,
2268 *srcImage,
2269 *dstImage,
2270 1,
2271 &blitRegion,
2272 VK_FILTER_NEAREST); // We never scale so any filter works here
2273
2274 // The rect is already in device space so we pass in kTopLeft so no flip is done.
2275 this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect);
2276 }
2277
2278 void GrVkGpu::copySurfaceAsResolve(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
2279 const SkIPoint& dstPoint) {
2280 if (src->isProtected() && !dst->isProtected()) {
2281 SkDebugf("Can't copy from protected memory to non-protected");
2282 return;
2283 }
2284 GrVkRenderTarget* srcRT = static_cast<GrVkRenderTarget*>(src->asRenderTarget());
2285 this->resolveImage(dst, srcRT, srcRect, dstPoint);
2286 SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
2287 srcRect.width(), srcRect.height());
2288 // The rect is already in device space so we pass in kTopLeft so no flip is done.
2289 this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect);
2290 }
2291
2292 bool GrVkGpu::onCopySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
2293 const SkIPoint& dstPoint) {
2294 #ifdef SK_DEBUG
2295 if (GrVkRenderTarget* srcRT = static_cast<GrVkRenderTarget*>(src->asRenderTarget())) {
2296 SkASSERT(!srcRT->wrapsSecondaryCommandBuffer());
2297 }
2298 if (GrVkRenderTarget* dstRT = static_cast<GrVkRenderTarget*>(dst->asRenderTarget())) {
2299 SkASSERT(!dstRT->wrapsSecondaryCommandBuffer());
2300 }
2301 #endif
2302 if (src->isProtected() && !dst->isProtected()) {
2303 SkDebugf("Can't copy from protected memory to non-protected");
2304 return false;
2305 }
2306
2307 GrVkImage* dstImage;
2308 GrVkImage* srcImage;
2309 GrRenderTarget* dstRT = dst->asRenderTarget();
2310 if (dstRT) {
2311 GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(dstRT);
2312 if (vkRT->wrapsSecondaryCommandBuffer()) {
2313 return false;
2314 }
2315 // This will technically return true for single sample rts that used DMSAA in which case we
2316 // don't have to pick the resolve attachment. But in that case the resolve and color
2317 // attachments will be the same anyways.
2318 if (this->vkCaps().renderTargetSupportsDiscardableMSAA(vkRT)) {
2319 dstImage = vkRT->resolveAttachment();
2320 } else {
2321 dstImage = vkRT->colorAttachment();
2322 }
2323 } else if (dst->asTexture()) {
2324 dstImage = static_cast<GrVkTexture*>(dst->asTexture())->textureImage();
2325 } else {
2326 // The surface in a GrAttachment already
2327 dstImage = static_cast<GrVkImage*>(dst);
2328 }
2329 GrRenderTarget* srcRT = src->asRenderTarget();
2330 if (srcRT) {
2331 GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(srcRT);
2332 // This will technically return true for single sample rts that used DMSAA in which case we
2333 // don't have to pick the resolve attachment. But in that case the resolve and color
2334 // attachments will be the same anyways.
2335 if (this->vkCaps().renderTargetSupportsDiscardableMSAA(vkRT)) {
2336 srcImage = vkRT->resolveAttachment();
2337 } else {
2338 srcImage = vkRT->colorAttachment();
2339 }
2340 } else if (src->asTexture()) {
2341 SkASSERT(src->asTexture());
2342 srcImage = static_cast<GrVkTexture*>(src->asTexture())->textureImage();
2343 } else {
2344 // The surface in a GrAttachment already
2345 srcImage = static_cast<GrVkImage*>(src);
2346 }
2347
2348 VkFormat dstFormat = dstImage->imageFormat();
2349 VkFormat srcFormat = srcImage->imageFormat();
2350
2351 int dstSampleCnt = dstImage->numSamples();
2352 int srcSampleCnt = srcImage->numSamples();
2353
2354 bool dstHasYcbcr = dstImage->ycbcrConversionInfo().isValid();
2355 bool srcHasYcbcr = srcImage->ycbcrConversionInfo().isValid();
2356
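    // Prefer a resolve when possible, then vkCmdCopyImage, and fall back to a blit.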
2357 if (this->vkCaps().canCopyAsResolve(dstFormat, dstSampleCnt, dstHasYcbcr,
2358 srcFormat, srcSampleCnt, srcHasYcbcr)) {
2359 this->copySurfaceAsResolve(dst, src, srcRect, dstPoint);
2360 return true;
2361 }
2362
2363 if (this->vkCaps().canCopyImage(dstFormat, dstSampleCnt, dstHasYcbcr,
2364 srcFormat, srcSampleCnt, srcHasYcbcr)) {
2365 this->copySurfaceAsCopyImage(dst, src, dstImage, srcImage, srcRect, dstPoint);
2366 return true;
2367 }
2368
2369 if (this->vkCaps().canCopyAsBlit(dstFormat,
2370 dstSampleCnt,
2371 dstImage->isLinearTiled(),
2372 dstHasYcbcr,
2373 srcFormat,
2374 srcSampleCnt,
2375 srcImage->isLinearTiled(),
2376 srcHasYcbcr)) {
2377 this->copySurfaceAsBlit(dst, src, dstImage, srcImage, srcRect, dstPoint);
2378 return true;
2379 }
2380
2381 return false;
2382 }
2383
2384 bool GrVkGpu::onReadPixels(GrSurface* surface,
2385 SkIRect rect,
2386 GrColorType surfaceColorType,
2387 GrColorType dstColorType,
2388 void* buffer,
2389 size_t rowBytes) {
2390 if (surface->isProtected()) {
2391 return false;
2392 }
2393
2394 if (!this->currentCommandBuffer()) {
2395 return false;
2396 }
2397
2398 GrVkImage* image = nullptr;
2399 GrVkRenderTarget* rt = static_cast<GrVkRenderTarget*>(surface->asRenderTarget());
2400 if (rt) {
2401 // Reading from render targets that wrap a secondary command buffer is not allowed since
2402         // it would require us to know the VkImage, which we don't have, and would require stopping
2403         // and restarting the VkRenderPass, which we don't have access to.
2404 if (rt->wrapsSecondaryCommandBuffer()) {
2405 return false;
2406 }
2407 image = rt->nonMSAAAttachment();
2408 } else {
2409 image = static_cast<GrVkTexture*>(surface->asTexture())->textureImage();
2410 }
2411
2412 if (!image) {
2413 return false;
2414 }
2415
2416 if (dstColorType == GrColorType::kUnknown ||
2417 dstColorType != this->vkCaps().transferColorType(image->imageFormat(), surfaceColorType)) {
2418 return false;
2419 }
2420
2421 // Change layout of our target so it can be used as copy
2422 image->setImageLayout(this,
2423 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
2424 VK_ACCESS_TRANSFER_READ_BIT,
2425 VK_PIPELINE_STAGE_TRANSFER_BIT,
2426 false);
2427
2428 size_t bpp = GrColorTypeBytesPerPixel(dstColorType);
2429 if (GrVkFormatBytesPerBlock(image->imageFormat()) != bpp) {
2430 return false;
2431 }
2432 size_t tightRowBytes = bpp*rect.width();
2433
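    // Read back through a transfer buffer: copy image -> buffer on the GPU, force a submit, then
    // map the buffer and copy the rows out on the CPU.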
2434 VkBufferImageCopy region;
2435     memset(&region, 0, sizeof(VkBufferImageCopy));
2436 VkOffset3D offset = { rect.left(), rect.top(), 0 };
2437 region.imageOffset = offset;
2438 region.imageExtent = { (uint32_t)rect.width(), (uint32_t)rect.height(), 1 };
2439
2440 size_t transBufferRowBytes = bpp * region.imageExtent.width;
2441 size_t imageRows = region.imageExtent.height;
2442 GrResourceProvider* resourceProvider = this->getContext()->priv().resourceProvider();
2443 sk_sp<GrGpuBuffer> transferBuffer = resourceProvider->createBuffer(
2444 transBufferRowBytes * imageRows, GrGpuBufferType::kXferGpuToCpu,
2445 kDynamic_GrAccessPattern);
2446
2447 if (!transferBuffer) {
2448 return false;
2449 }
2450
2451 GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(transferBuffer.get());
2452
2453 // Copy the image to a buffer so we can map it to cpu memory
2454 region.bufferOffset = 0;
2455 region.bufferRowLength = 0; // Forces RowLength to be width. We handle the rowBytes below.
2456 region.bufferImageHeight = 0; // Forces height to be tightly packed. Only useful for 3d images.
2457 region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
2458
2459 this->currentCommandBuffer()->copyImageToBuffer(this,
2460 image,
2461 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
2462 transferBuffer,
2463 1,
2464     &region);
2465
2466 // make sure the copy to buffer has finished
2467 vkBuffer->addMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT,
2468 VK_ACCESS_HOST_READ_BIT,
2469 VK_PIPELINE_STAGE_TRANSFER_BIT,
2470 VK_PIPELINE_STAGE_HOST_BIT,
2471 false);
2472
2473 // We need to submit the current command buffer to the Queue and make sure it finishes before
2474 // we can copy the data out of the buffer.
2475 if (!this->submitCommandBuffer(kForce_SyncQueue)) {
2476 return false;
2477 }
2478 void* mappedMemory = transferBuffer->map();
2479
2480 SkRectMemcpy(buffer, rowBytes, mappedMemory, transBufferRowBytes, tightRowBytes, rect.height());
2481
2482 transferBuffer->unmap();
2483 return true;
2484 }
2485
2486 bool GrVkGpu::beginRenderPass(const GrVkRenderPass* renderPass,
2487 sk_sp<const GrVkFramebuffer> framebuffer,
2488 const VkClearValue* colorClear,
2489 const GrSurface* target,
2490 const SkIRect& renderPassBounds,
2491 bool forSecondaryCB) {
2492 if (!this->currentCommandBuffer()) {
2493 return false;
2494 }
2495 SkASSERT (!framebuffer->isExternal());
2496
2497 #ifdef SK_DEBUG
2498 uint32_t index;
2499 bool result = renderPass->colorAttachmentIndex(&index);
2500 SkASSERT(result && 0 == index);
2501 result = renderPass->stencilAttachmentIndex(&index);
2502 if (result) {
2503 SkASSERT(1 == index);
2504 }
2505 #endif
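    // Attachments are ordered color (0), optional resolve (1), then stencil, so the stencil
    // clear index depends on whether this render pass has a resolve attachment.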
2506 VkClearValue clears[3];
2507 int stencilIndex = renderPass->hasResolveAttachment() ? 2 : 1;
2508 clears[0].color = colorClear->color;
2509 clears[stencilIndex].depthStencil.depth = 0.0f;
2510 clears[stencilIndex].depthStencil.stencil = 0;
2511
2512 return this->currentCommandBuffer()->beginRenderPass(
2513 this, renderPass, std::move(framebuffer), clears, target, renderPassBounds, forSecondaryCB);
2514 }
2515
2516 void GrVkGpu::endRenderPass(GrRenderTarget* target, GrSurfaceOrigin origin,
2517 const SkIRect& bounds) {
2518 // We had a command buffer when we started the render pass, we should have one now as well.
2519 SkASSERT(this->currentCommandBuffer());
2520 this->currentCommandBuffer()->endRenderPass(this);
2521 this->didWriteToSurface(target, origin, &bounds);
2522 }
2523
2524 bool GrVkGpu::checkVkResult(VkResult result) {
2525 switch (result) {
2526 case VK_SUCCESS:
2527 return true;
2528 case VK_ERROR_DEVICE_LOST:
2529 fDeviceIsLost = true;
2530 return false;
2531 case VK_ERROR_OUT_OF_DEVICE_MEMORY:
2532 case VK_ERROR_OUT_OF_HOST_MEMORY:
2533 this->setOOMed();
2534 return false;
2535 default:
2536 return false;
2537 }
2538 }
2539
2540 void GrVkGpu::submitSecondaryCommandBuffer(std::unique_ptr<GrVkSecondaryCommandBuffer> buffer) {
2541 if (!this->currentCommandBuffer()) {
2542 return;
2543 }
2544 this->currentCommandBuffer()->executeCommands(this, std::move(buffer));
2545 }
2546
2547 void GrVkGpu::submit(GrOpsRenderPass* renderPass) {
2548 SkASSERT(fCachedOpsRenderPass.get() == renderPass);
2549
2550 fCachedOpsRenderPass->submit();
2551 fCachedOpsRenderPass->reset();
2552 }
2553
2554 GrFence SK_WARN_UNUSED_RESULT GrVkGpu::insertFence() {
2555 VkFenceCreateInfo createInfo;
2556 memset(&createInfo, 0, sizeof(VkFenceCreateInfo));
2557 createInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
2558 createInfo.pNext = nullptr;
2559 createInfo.flags = 0;
2560 VkFence fence = VK_NULL_HANDLE;
2561 VkResult result;
2562
2563 VK_CALL_RET(result, CreateFence(this->device(), &createInfo, nullptr, &fence));
2564 if (result != VK_SUCCESS) {
2565 return 0;
2566 }
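    // Submit no work with the fence; it signals once all previously submitted work on this
    // queue has completed.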
2567 VK_CALL_RET(result, QueueSubmit(this->queue(), 0, nullptr, fence));
2568 if (result != VK_SUCCESS) {
2569 VK_CALL(DestroyFence(this->device(), fence, nullptr));
2570 return 0;
2571 }
2572
2573 static_assert(sizeof(GrFence) >= sizeof(VkFence));
2574 return (GrFence)fence;
2575 }
2576
2577 bool GrVkGpu::waitFence(GrFence fence) {
2578 SkASSERT(VK_NULL_HANDLE != (VkFence)fence);
2579
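    // A zero timeout makes this a non-blocking check of whether the fence has already signaled.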
2580 VkResult result;
2581 VK_CALL_RET(result, WaitForFences(this->device(), 1, (VkFence*)&fence, VK_TRUE, 0));
2582 return (VK_SUCCESS == result);
2583 }
2584
2585 void GrVkGpu::deleteFence(GrFence fence) const {
2586 VK_CALL(DestroyFence(this->device(), (VkFence)fence, nullptr));
2587 }
2588
2589 std::unique_ptr<GrSemaphore> SK_WARN_UNUSED_RESULT GrVkGpu::makeSemaphore(bool isOwned) {
2590 return GrVkSemaphore::Make(this, isOwned);
2591 }
2592
2593 std::unique_ptr<GrSemaphore> GrVkGpu::wrapBackendSemaphore(const GrBackendSemaphore& semaphore,
2594 GrSemaphoreWrapType wrapType,
2595 GrWrapOwnership ownership) {
2596 return GrVkSemaphore::MakeWrapped(this, semaphore.vkSemaphore(), wrapType, ownership);
2597 }
2598
2599 void GrVkGpu::insertSemaphore(GrSemaphore* semaphore) {
2600 SkASSERT(semaphore);
2601
2602 GrVkSemaphore* vkSem = static_cast<GrVkSemaphore*>(semaphore);
2603
2604 GrVkSemaphore::Resource* resource = vkSem->getResource();
2605 if (resource->shouldSignal()) {
2606 resource->ref();
2607 fSemaphoresToSignal.push_back(resource);
2608 }
2609 }
2610
2611 void GrVkGpu::waitSemaphore(GrSemaphore* semaphore) {
2612 SkASSERT(semaphore);
2613
2614 GrVkSemaphore* vkSem = static_cast<GrVkSemaphore*>(semaphore);
2615
2616 GrVkSemaphore::Resource* resource = vkSem->getResource();
2617 if (resource->shouldWait()) {
2618 resource->ref();
2619 fSemaphoresToWaitOn.push_back(resource);
2620 }
2621 }
2622
2623 std::unique_ptr<GrSemaphore> GrVkGpu::prepareTextureForCrossContextUsage(GrTexture* texture) {
2624 SkASSERT(texture);
2625 GrVkImage* vkTexture = static_cast<GrVkTexture*>(texture)->textureImage();
2626 vkTexture->setImageLayout(this,
2627 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
2628 VK_ACCESS_SHADER_READ_BIT,
2629 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
2630 false);
2631 // TODO: should we have a way to notify the caller that this has failed? Currently if the submit
2632 // fails (caused by DEVICE_LOST) this will just cause us to fail the next use of the gpu.
2633 // Eventually we will abandon the whole GPU if this fails.
2634 this->submitToGpu(false);
2635
2636 // The image layout change serves as a barrier, so no semaphore is needed.
2637 // If we ever decide we need to return a semaphore here, we need to make sure GrVkSemaphore is
2638 // thread safe so that only the first thread that tries to use the semaphore actually submits
2639 // it. This additionally would also require thread safety in command buffer submissions to
2640 // queues in general.
2641 return nullptr;
2642 }
2643
2644 void GrVkGpu::addDrawable(std::unique_ptr<SkDrawable::GpuDrawHandler> drawable) {
2645 fDrawables.emplace_back(std::move(drawable));
2646 }
2647
2648 void GrVkGpu::storeVkPipelineCacheData() {
2649 if (this->getContext()->priv().getPersistentCache()) {
2650 this->resourceProvider().storePipelineCacheData();
2651 }
2652 }
2653